Linux-2.6.12-rc2

Initial git repository build. I'm not bothering with the full history,
even though we have it. We can create a separate "historical" git
archive of that later if we want to, and in the meantime it's about
3.2GB when imported into git - space that would just make the early
git days unnecessarily complicated, when we don't have a lot of good
infrastructure for it.

Let it rip!
diff --git a/drivers/message/Makefile b/drivers/message/Makefile
new file mode 100644
index 0000000..97ef5a0
--- /dev/null
+++ b/drivers/message/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for MPT based block devices
+#
+
+obj-$(CONFIG_I2O)	+= i2o/
+obj-$(CONFIG_FUSION)	+= fusion/
diff --git a/drivers/message/fusion/Kconfig b/drivers/message/fusion/Kconfig
new file mode 100644
index 0000000..452418b
--- /dev/null
+++ b/drivers/message/fusion/Kconfig
@@ -0,0 +1,66 @@
+
+menu "Fusion MPT device support"
+
+config FUSION
+	tristate "Fusion MPT (base + ScsiHost) drivers"
+	depends on PCI && SCSI
+	---help---
+	  LSI Logic Fusion(TM) Message Passing Technology (MPT) device support
+	  provides high performance SCSI host initiator, and LAN [1] interface
+	  services to a host system.  The Fusion architecture is capable of
+	  duplexing these protocols on high-speed Fibre Channel
+	  (up to 2 Gb/s x 2 ports = 4 Gb/s) and parallel SCSI (up to Ultra-320)
+	  physical medium.
+
+	  [1] LAN is not supported on parallel SCSI medium.
+
+config FUSION_MAX_SGE
+	int "Maximum number of scatter gather entries"
+	depends on FUSION
+	default "40"
+	help
+	  This option allows you to specify the maximum number of
+	  scatter-gather entries per I/O. The driver defaults to 40, a
+	  reasonable number for most systems. However, the user may
+	  increase this up to 128. Increasing this parameter requires
+	  significantly more memory per controller instance. Increasing
+	  the parameter is not necessary (or recommended) unless the user
+	  will be running large I/Os via the raw interface.
+
+config FUSION_CTL
+	tristate "Fusion MPT misc device (ioctl) driver"
+	depends on FUSION
+	---help---
+	  The Fusion MPT misc device driver provides specialized control
+	  of MPT adapters via system ioctl calls.  Use of ioctl calls to
+	  the MPT driver requires that you create and use a misc device
+	  node, for example:
+	  mknod /dev/mptctl c 10 240
+
+	  One use of this ioctl interface is to perform an upgrade (reflash)
+	  of the MPT adapter firmware.  Refer to the readme file(s)
+	  distributed with the Fusion MPT Linux driver for additional details.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mptctl.
+
+	  If unsure whether you really want or need this, say N.
+
+config FUSION_LAN
+	tristate "Fusion MPT LAN driver"
+	depends on FUSION && NET_FC
+	---help---
+	  This module supports LAN IP traffic over Fibre Channel port(s)
+	  on Fusion MPT compatible hardware (LSIFC9xx chips).
+	  The physical interface used is defined in RFC 2625.
+	  Please refer to that document for details.
+
+	  Installing this driver requires that you know how to configure and
+	  activate a new network interface, "fc0", using standard Linux tools.
+
+	  To compile this driver as a module, choose M here: the
+	  module will be called mptlan.
+
+	  If unsure whether you really want or need this, say N.
+
+endmenu
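The ioctl interface mentioned in the FUSION_CTL help text above is reached through the misc device node created with the mknod command shown. Below is a minimal user-space sketch that only opens that node; the actual MPTCTL ioctl command codes and argument structures belong to the mptctl driver's own header and are assumed, not shown, here.

/* Minimal sketch: open the mptctl misc device created via
 * "mknod /dev/mptctl c 10 240".  Real ioctl command codes and argument
 * structures come from the mptctl driver and are not reproduced here. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/mptctl", O_RDWR);

	if (fd < 0) {
		perror("open /dev/mptctl");
		return 1;
	}
	/* ioctl(fd, <MPTCTL command>, &args) would go here. */
	close(fd);
	return 0;
}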
diff --git a/drivers/message/fusion/Makefile b/drivers/message/fusion/Makefile
new file mode 100644
index 0000000..f6fdcaa
--- /dev/null
+++ b/drivers/message/fusion/Makefile
@@ -0,0 +1,52 @@
+#
+# Makefile for the LSI Logic Fusion MPT (Message Passing Technology) drivers.
+#
+# Note! If you want to turn on various debug defines for an extended period of
+# time but don't want them lingering around in the Makefile when you pass it on
+# to someone else, use the MPT_CFLAGS env variable (thanks Steve). -nromer
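+#   (for example:  MPT_CFLAGS="-DMPT_DEBUG" make modules)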
+
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-{ LSI_LOGIC
+
+#  Architecture-specific...
+#			# intel
+#EXTRA_CFLAGS += -g
+#			# sparc64
+#EXTRA_CFLAGS += -gstabs+
+
+EXTRA_CFLAGS += ${MPT_CFLAGS}
+
+# Fusion MPT drivers; recognized debug defines...
+#  MPT general:
+#EXTRA_CFLAGS += -DMPT_DEBUG_SCSI
+#EXTRA_CFLAGS += -DMPT_DEBUG
+#EXTRA_CFLAGS += -DMPT_DEBUG_MSG_FRAME
+#EXTRA_CFLAGS += -DMPT_DEBUG_SG
+
+#
+# driver/module specifics...
+#
+#  For mptbase:
+#CFLAGS_mptbase.o += -DMPT_DEBUG_HANDSHAKE
+#CFLAGS_mptbase.o += -DMPT_DEBUG_IRQ
+#
+#  For mptscsih:
+#CFLAGS_mptscsih.o += -DMPT_DEBUG_SCANDV
+#CFLAGS_mptscsih.o += -DMPT_DEBUG_RESET
+#CFLAGS_mptscsih.o += -DMPT_DEBUG_NEH
+#
+#  For mptctl:
+#CFLAGS_mptctl.o += -DMPT_DEBUG_IOCTL
+#
+#  For mptlan:
+#CFLAGS_mptlan.o += -DMPT_LAN_IO_DEBUG
+#
+#  For isense:
+
+#  EXP...
+##mptscsih-objs	:= scsihost.o scsiherr.o
+
+#=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-} LSI_LOGIC
+
+obj-$(CONFIG_FUSION)		+= mptbase.o mptscsih.o
+obj-$(CONFIG_FUSION_CTL)	+= mptctl.o
+obj-$(CONFIG_FUSION_LAN)	+= mptlan.o
diff --git a/drivers/message/fusion/linux_compat.h b/drivers/message/fusion/linux_compat.h
new file mode 100644
index 0000000..048b5b8
--- /dev/null
+++ b/drivers/message/fusion/linux_compat.h
@@ -0,0 +1,18 @@
+/* drivers/message/fusion/linux_compat.h */
+
+#ifndef FUSION_LINUX_COMPAT_H
+#define FUSION_LINUX_COMPAT_H
+
+#include <linux/version.h>
+#include <scsi/scsi_device.h>
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6))
+static inline int scsi_device_online(struct scsi_device *sdev)
+{
+	return sdev->online;
+}
+#endif
+
+
+/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif /* FUSION_LINUX_COMPAT_H */
diff --git a/drivers/message/fusion/lsi/fc_log.h b/drivers/message/fusion/lsi/fc_log.h
new file mode 100644
index 0000000..dc98d46
--- /dev/null
+++ b/drivers/message/fusion/lsi/fc_log.h
@@ -0,0 +1,89 @@
+/*
+ *  Copyright (c) 2000-2001 LSI Logic Corporation. All rights reserved.
+ *
+ *  NAME:           fc_log.h
+ *  SUMMARY:        MPI IocLogInfo definitions for the SYMFC9xx chips
+ *  DESCRIPTION:    Contains the enumerated list of values that may be returned
+ *                  in the IOCLogInfo field of a MPI Default Reply Message.
+ *
+ *  CREATION DATE:  6/02/2000
+ *  ID:             $Id: fc_log.h,v 4.6 2001/07/26 14:41:33 sschremm Exp $
+ */
+
+
+/*
+ * MpiIocLogInfo_t enum
+ *
+ * These 32 bit values are used in the IOCLogInfo field of the MPI reply
+ * messages.
+ * The value is 0xabcccccc where
+ *          a = The type of log info as per the MPI spec. Since these codes are
+ *              all for Fibre Channel this value will always be 2.
+ *          b = Specifies a subclass of the firmware where
+ *                  0 = FCP Initiator
+ *                  1 = FCP Target
+ *                  2 = LAN
+ *                  3 = MPI Message Layer
+ *                  4 = FC Link
+ *                  5 = Context Manager
+ *                  6 = Invalid Field Offset
+ *                  7 = State Change Info
+ *                  all others are reserved for future use
+ *          c = A specific value within the subclass.
+ *
+ * NOTE: Any new values should be added to the end of each subclass so that the
+ *       codes remain consistent across firmware releases.
+ */
+typedef enum _MpiIocLogInfoFc
+{
+    MPI_IOCLOGINFO_FC_INIT_BASE                     = 0x20000000,
+    MPI_IOCLOGINFO_FC_INIT_ERROR_OUT_OF_ORDER_FRAME = 0x20000001, /* received an out of order frame - unsupported */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_START_OF_FRAME = 0x20000002, /* Bad Rx Frame, bad start of frame primitive */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_BAD_END_OF_FRAME   = 0x20000003, /* Bad Rx Frame, bad end of frame primitive */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_OVER_RUN           = 0x20000004, /* Bad Rx Frame, overrun */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OTHER           = 0x20000005, /* Other errors caught by IOC which require retries */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_SUBPROC_DEAD       = 0x20000006, /* Main processor could not initialize sub-processor */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_RX_OVERRUN         = 0x20000007, /* Scatter Gather overrun  */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_RX_BAD_STATUS      = 0x20000008, /* Receiver detected context mismatch via invalid header */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_RX_UNEXPECTED_FRAME= 0x20000009, /* CtxMgr detected unsupported frame type  */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_LINK_FAILURE       = 0x2000000A, /* Link failure occurred  */
+    MPI_IOCLOGINFO_FC_INIT_ERROR_TX_TIMEOUT         = 0x2000000B, /* Transmitter timeout error */
+
+    MPI_IOCLOGINFO_FC_TARGET_BASE                   = 0x21000000,
+    MPI_IOCLOGINFO_FC_TARGET_NO_PDISC               = 0x21000001, /* not sent because we are waiting for a PDISC from the initiator */
+    MPI_IOCLOGINFO_FC_TARGET_NO_LOGIN               = 0x21000002, /* not sent because we are not logged in to the remote node */
+    MPI_IOCLOGINFO_FC_TARGET_DOAR_KILLED_BY_LIP     = 0x21000003, /* Data Out, Auto Response, not sent due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_DIAR_KILLED_BY_LIP     = 0x21000004, /* Data In, Auto Response, not sent due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_DIAR_MISSING_DATA      = 0x21000005, /* Data In, Auto Response, missing data frames */
+    MPI_IOCLOGINFO_FC_TARGET_DONR_KILLED_BY_LIP     = 0x21000006, /* Data Out, No Response, not sent due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_WRSP_KILLED_BY_LIP     = 0x21000007, /* Auto-response after a write not sent due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_DINR_KILLED_BY_LIP     = 0x21000008, /* Data In, No Response, not completed due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_DINR_MISSING_DATA      = 0x21000009, /* Data In, No Response, missing data frames */
+    MPI_IOCLOGINFO_FC_TARGET_MRSP_KILLED_BY_LIP     = 0x2100000a, /* Manual Response not sent due to a LIP */
+    MPI_IOCLOGINFO_FC_TARGET_NO_CLASS_3             = 0x2100000b, /* not sent because remote node does not support Class 3 */
+    MPI_IOCLOGINFO_FC_TARGET_LOGIN_NOT_VALID        = 0x2100000c, /* not sent because login to remote node not validated */
+    MPI_IOCLOGINFO_FC_TARGET_FROM_OUTBOUND          = 0x2100000e, /* cleared from the outbound queue after a logout */
+    MPI_IOCLOGINFO_FC_TARGET_WAITING_FOR_DATA_IN    = 0x2100000f, /* cleared waiting for data after a logout */
+
+    MPI_IOCLOGINFO_FC_LAN_BASE                      = 0x22000000,
+    MPI_IOCLOGINFO_FC_LAN_TRANS_SGL_MISSING         = 0x22000001, /* Transaction Context Sgl Missing */
+    MPI_IOCLOGINFO_FC_LAN_TRANS_WRONG_PLACE         = 0x22000002, /* Transaction Context found before an EOB */
+    MPI_IOCLOGINFO_FC_LAN_TRANS_RES_BITS_SET        = 0x22000003, /* Transaction Context value has reserved bits set */
+    MPI_IOCLOGINFO_FC_LAN_WRONG_SGL_FLAG            = 0x22000004, /* Invalid SGL Flags */
+
+    MPI_IOCLOGINFO_FC_MSG_BASE                      = 0x23000000,
+
+    MPI_IOCLOGINFO_FC_LINK_BASE                     = 0x24000000,
+    MPI_IOCLOGINFO_FC_LINK_LOOP_INIT_TIMEOUT        = 0x24000001, /* Loop initialization timed out */
+    MPI_IOCLOGINFO_FC_LINK_ALREADY_INITIALIZED      = 0x24000002, /* Another system controller already initialized the loop */
+    MPI_IOCLOGINFO_FC_LINK_LINK_NOT_ESTABLISHED     = 0x24000003, /* Not synchronized to signal or still negotiating (possible cable problem) */
+    MPI_IOCLOGINFO_FC_LINK_CRC_ERROR                = 0x24000004, /* CRC check detected error on received frame */
+
+    MPI_IOCLOGINFO_FC_CTX_BASE                      = 0x25000000,
+
+    MPI_IOCLOGINFO_FC_INVALID_FIELD_BYTE_OFFSET     = 0x26000000, /* The lower 24 bits give the byte offset of the field in the request message that is invalid */
+    MPI_IOCLOGINFO_FC_INVALID_FIELD_MAX_OFFSET      = 0x26ffffff,
+
+    MPI_IOCLOGINFO_FC_STATE_CHANGE                  = 0x27000000  /* The lower 24 bits give additional information concerning state change */
+
+} MpiIocLogInfoFc_t;
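The 0xabcccccc layout described in the header comment above can be unpacked with a few shifts and masks. A standalone sketch follows, using plain stdint types in place of the MPI typedefs; the mask values mirror the comment.

/* Decode an IOCLogInfo word per the 0xabcccccc layout described above:
 * a = log info type (2 = Fibre Channel), b = firmware subclass,
 * cccccc = subclass-specific code. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t log_info = 0x24000003;	/* MPI_IOCLOGINFO_FC_LINK_LINK_NOT_ESTABLISHED */
	uint32_t type     = (log_info & 0xF0000000) >> 28;	/* 0x2 = Fibre Channel */
	uint32_t subclass = (log_info & 0x0F000000) >> 24;	/* 0x4 = FC Link */
	uint32_t code     =  log_info & 0x00FFFFFF;		/* 0x000003 */

	printf("type=%x subclass=%x code=%06x\n",
	       (unsigned)type, (unsigned)subclass, (unsigned)code);
	return 0;
}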
diff --git a/drivers/message/fusion/lsi/mpi.h b/drivers/message/fusion/lsi/mpi.h
new file mode 100644
index 0000000..9dbb061
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi.h
@@ -0,0 +1,746 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi.h
+ *          Title:  MPI Message independent structures and definitions
+ *  Creation Date:  July 27, 2000
+ *
+ *    mpi.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
+ *  06-06-00  01.00.01  Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
+ *  06-22-00  01.00.02  Added MPI_IOCSTATUS_LAN_ definitions.
+ *                      Removed LAN_SUSPEND function definition.
+ *                      Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
+ *  06-30-00  01.00.03  Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
+ *                      Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
+ *  07-27-00  01.00.04  Added MPI_FAULT_ definitions.
+ *                      Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
+ *                      Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
+ *                      Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
+ *  11-02-00  01.01.01  Original release for post 1.0 work.
+ *  12-04-00  01.01.02  Added new function codes.
+ *  01-09-01  01.01.03  Added more definitions to the system interface section
+ *                      Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
+ *  01-25-01  01.01.04  Changed MPI_VERSION_MINOR from 0x00 to 0x01.
+ *  02-20-01  01.01.05  Started using MPI_POINTER.
+ *                      Fixed value for MPI_DIAG_RW_ENABLE.
+ *                      Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
+ *                      MPI_DIAG_CLEAR_FLASH_BAD_SIG.
+ *                      Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
+ *  02-27-01  01.01.06  Removed MPI_HOST_INDEX_REGISTER define.
+ *                      Added function codes for RAID.
+ *  04-09-01  01.01.07  Added alternate define for MPI_DOORBELL_ACTIVE,
+ *                      MPI_DOORBELL_USED, to better match the spec.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *                      Changed MPI_VERSION_MINOR from 0x01 to 0x02.
+ *                      Added define MPI_FUNCTION_TOOLBOX.
+ *  09-28-01  01.02.02  New function code MPI_SCSI_ENCLOSURE_PROCESSOR.
+ *  11-01-01  01.02.03  Changed name to MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR.
+ *  03-14-02  01.02.04  Added MPI_HEADER_VERSION_ defines.
+ *  05-31-02  01.02.05  Bumped MPI_HEADER_VERSION_UNIT.
+ *  07-12-02  01.02.06  Added define for MPI_FUNCTION_MAILBOX.
+ *  09-16-02  01.02.07  Bumped value for MPI_HEADER_VERSION_UNIT.
+ *  11-15-02  01.02.08  Added define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX and
+ *                      obsoleted define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX.
+ *  04-01-03  01.02.09  New IOCStatus code: MPI_IOCSTATUS_FC_EXCHANGE_CANCELED
+ *  06-26-03  01.02.10  Bumped MPI_HEADER_VERSION_UNIT value.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_H
+#define MPI_H
+
+
+/*****************************************************************************
+*
+*        M P I    V e r s i o n    D e f i n i t i o n s
+*
+*****************************************************************************/
+
+#define MPI_VERSION_MAJOR                   (0x01)
+#define MPI_VERSION_MINOR                   (0x05)
+#define MPI_VERSION_MAJOR_MASK              (0xFF00)
+#define MPI_VERSION_MAJOR_SHIFT             (8)
+#define MPI_VERSION_MINOR_MASK              (0x00FF)
+#define MPI_VERSION_MINOR_SHIFT             (0)
+#define MPI_VERSION ((MPI_VERSION_MAJOR << MPI_VERSION_MAJOR_SHIFT) |   \
+                                      MPI_VERSION_MINOR)
+
+#define MPI_VERSION_01_00                   (0x0100)
+#define MPI_VERSION_01_01                   (0x0101)
+#define MPI_VERSION_01_02                   (0x0102)
+#define MPI_VERSION_01_03                   (0x0103)
+#define MPI_VERSION_01_05                   (0x0105)
+/* Note: The major versions of 0xe0 through 0xff are reserved */
+
+/* versioning for this MPI header set */
+#define MPI_HEADER_VERSION_UNIT             (0x00)
+#define MPI_HEADER_VERSION_DEV              (0x00)
+#define MPI_HEADER_VERSION_UNIT_MASK        (0xFF00)
+#define MPI_HEADER_VERSION_UNIT_SHIFT       (8)
+#define MPI_HEADER_VERSION_DEV_MASK         (0x00FF)
+#define MPI_HEADER_VERSION_DEV_SHIFT        (0)
+#define MPI_HEADER_VERSION ((MPI_HEADER_VERSION_UNIT << 8) | MPI_HEADER_VERSION_DEV)
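Taken together, the defines above pack the major and minor version into one 16-bit word, so MPI_VERSION for this header set works out to 0x0105. A quick standalone check using the same shift and mask values:

/* Mirror of the version packing above: major in bits 15:8, minor in bits 7:0. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint16_t version = (0x01 << 8) | 0x05;		/* MPI_VERSION for 1.05 */
	uint16_t major   = (version & 0xFF00) >> 8;	/* MPI_VERSION_MAJOR_MASK/SHIFT */
	uint16_t minor   =  version & 0x00FF;		/* MPI_VERSION_MINOR_MASK */

	assert(version == 0x0105 && major == 1 && minor == 5);
	return 0;
}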
+
+/*****************************************************************************
+*
+*        I O C    S t a t e    D e f i n i t i o n s
+*
+*****************************************************************************/
+
+#define MPI_IOC_STATE_RESET                 (0x00000000)
+#define MPI_IOC_STATE_READY                 (0x10000000)
+#define MPI_IOC_STATE_OPERATIONAL           (0x20000000)
+#define MPI_IOC_STATE_FAULT                 (0x40000000)
+
+#define MPI_IOC_STATE_MASK                  (0xF0000000)
+#define MPI_IOC_STATE_SHIFT                 (28)
+
+/* Fault state codes (product independent range 0x8000-0xFFFF) */
+
+#define MPI_FAULT_REQUEST_MESSAGE_PCI_PARITY_ERROR  (0x8111)
+#define MPI_FAULT_REQUEST_MESSAGE_PCI_BUS_FAULT     (0x8112)
+#define MPI_FAULT_REPLY_MESSAGE_PCI_PARITY_ERROR    (0x8113)
+#define MPI_FAULT_REPLY_MESSAGE_PCI_BUS_FAULT       (0x8114)
+#define MPI_FAULT_DATA_SEND_PCI_PARITY_ERROR        (0x8115)
+#define MPI_FAULT_DATA_SEND_PCI_BUS_FAULT           (0x8116)
+#define MPI_FAULT_DATA_RECEIVE_PCI_PARITY_ERROR     (0x8117)
+#define MPI_FAULT_DATA_RECEIVE_PCI_BUS_FAULT        (0x8118)
+
+
+/*****************************************************************************
+*
+*        P C I    S y s t e m    I n t e r f a c e    R e g i s t e r s
+*
+*****************************************************************************/
+
+/* S y s t e m    D o o r b e l l */
+#define MPI_DOORBELL_OFFSET                 (0x00000000)
+#define MPI_DOORBELL_ACTIVE                 (0x08000000) /* DoorbellUsed */
+#define MPI_DOORBELL_USED                   (MPI_DOORBELL_ACTIVE)
+#define MPI_DOORBELL_ACTIVE_SHIFT           (27)
+#define MPI_DOORBELL_WHO_INIT_MASK          (0x07000000)
+#define MPI_DOORBELL_WHO_INIT_SHIFT         (24)
+#define MPI_DOORBELL_FUNCTION_MASK          (0xFF000000)
+#define MPI_DOORBELL_FUNCTION_SHIFT         (24)
+#define MPI_DOORBELL_ADD_DWORDS_MASK        (0x00FF0000)
+#define MPI_DOORBELL_ADD_DWORDS_SHIFT       (16)
+#define MPI_DOORBELL_DATA_MASK              (0x0000FFFF)
+
+
+#define MPI_WRITE_SEQUENCE_OFFSET           (0x00000004)
+#define MPI_WRSEQ_KEY_VALUE_MASK            (0x0000000F)
+#define MPI_WRSEQ_1ST_KEY_VALUE             (0x04)
+#define MPI_WRSEQ_2ND_KEY_VALUE             (0x0B)
+#define MPI_WRSEQ_3RD_KEY_VALUE             (0x02)
+#define MPI_WRSEQ_4TH_KEY_VALUE             (0x07)
+#define MPI_WRSEQ_5TH_KEY_VALUE             (0x0D)
+
+#define MPI_DIAGNOSTIC_OFFSET               (0x00000008)
+#define MPI_DIAG_CLEAR_FLASH_BAD_SIG        (0x00000400)
+#define MPI_DIAG_PREVENT_IOC_BOOT           (0x00000200)
+#define MPI_DIAG_DRWE                       (0x00000080)
+#define MPI_DIAG_FLASH_BAD_SIG              (0x00000040)
+#define MPI_DIAG_RESET_HISTORY              (0x00000020)
+#define MPI_DIAG_RW_ENABLE                  (0x00000010)
+#define MPI_DIAG_RESET_ADAPTER              (0x00000004)
+#define MPI_DIAG_DISABLE_ARM                (0x00000002)
+#define MPI_DIAG_MEM_ENABLE                 (0x00000001)
+
+#define MPI_TEST_BASE_ADDRESS_OFFSET        (0x0000000C)
+
+#define MPI_DIAG_RW_DATA_OFFSET             (0x00000010)
+
+#define MPI_DIAG_RW_ADDRESS_OFFSET          (0x00000014)
+
+#define MPI_HOST_INTERRUPT_STATUS_OFFSET    (0x00000030)
+#define MPI_HIS_IOP_DOORBELL_STATUS         (0x80000000)
+#define MPI_HIS_REPLY_MESSAGE_INTERRUPT     (0x00000008)
+#define MPI_HIS_DOORBELL_INTERRUPT          (0x00000001)
+
+#define MPI_HOST_INTERRUPT_MASK_OFFSET      (0x00000034)
+#define MPI_HIM_RIM                         (0x00000008)
+#define MPI_HIM_DIM                         (0x00000001)
+
+#define MPI_REQUEST_QUEUE_OFFSET            (0x00000040)
+#define MPI_REQUEST_POST_FIFO_OFFSET        (0x00000040)
+
+#define MPI_REPLY_QUEUE_OFFSET              (0x00000044)
+#define MPI_REPLY_POST_FIFO_OFFSET          (0x00000044)
+#define MPI_REPLY_FREE_FIFO_OFFSET          (0x00000044)
+
+#define MPI_HI_PRI_REQUEST_QUEUE_OFFSET     (0x00000048)
+
+
+
+/*****************************************************************************
+*
+*        M e s s a g e    F r a m e    D e s c r i p t o r s
+*
+*****************************************************************************/
+
+#define MPI_REQ_MF_DESCRIPTOR_NB_MASK       (0x00000003)
+#define MPI_REQ_MF_DESCRIPTOR_F_BIT         (0x00000004)
+#define MPI_REQ_MF_DESCRIPTOR_ADDRESS_MASK  (0xFFFFFFF8)
+
+#define MPI_ADDRESS_REPLY_A_BIT             (0x80000000)
+#define MPI_ADDRESS_REPLY_ADDRESS_MASK      (0x7FFFFFFF)
+
+#define MPI_CONTEXT_REPLY_A_BIT             (0x80000000)
+#define MPI_CONTEXT_REPLY_TYPE_MASK         (0x60000000)
+#define MPI_CONTEXT_REPLY_TYPE_SCSI_INIT    (0x00)
+#define MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET  (0x01)
+#define MPI_CONTEXT_REPLY_TYPE_LAN          (0x02)
+#define MPI_CONTEXT_REPLY_TYPE_SHIFT        (29)
+#define MPI_CONTEXT_REPLY_CONTEXT_MASK      (0x1FFFFFFF)
+
+
+/****************************************************************************/
+/* Context Reply macros                                                     */
+/****************************************************************************/
+
+#define MPI_GET_CONTEXT_REPLY_TYPE(x)  (((x) & MPI_CONTEXT_REPLY_TYPE_MASK) \
+                                          >> MPI_CONTEXT_REPLY_TYPE_SHIFT)
+
+#define MPI_SET_CONTEXT_REPLY_TYPE(x, typ)                                  \
+            ((x) = ((x) & ~MPI_CONTEXT_REPLY_TYPE_MASK) |                   \
+                            (((typ) << MPI_CONTEXT_REPLY_TYPE_SHIFT) &      \
+                                        MPI_CONTEXT_REPLY_TYPE_MASK))
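A short sketch of how the two macros above cooperate, with the masks and shift written out numerically so it compiles on its own; the type value 0x02 corresponds to MPI_CONTEXT_REPLY_TYPE_LAN.

/* Set and read back the 2-bit reply type field (bits 30:29) of a context
 * reply word, as the MPI_SET/GET_CONTEXT_REPLY_TYPE macros above do. */
#include <assert.h>
#include <stdint.h>

#define REPLY_TYPE_MASK   0x60000000u	/* MPI_CONTEXT_REPLY_TYPE_MASK */
#define REPLY_TYPE_SHIFT  29		/* MPI_CONTEXT_REPLY_TYPE_SHIFT */

int main(void)
{
	uint32_t reply = 0x00001234;	/* 29-bit context portion */

	reply = (reply & ~REPLY_TYPE_MASK) |
		((0x02u << REPLY_TYPE_SHIFT) & REPLY_TYPE_MASK);	/* LAN */

	assert(((reply & REPLY_TYPE_MASK) >> REPLY_TYPE_SHIFT) == 0x02);
	assert((reply & 0x1FFFFFFF) == 0x00001234);	/* context preserved */
	return 0;
}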
+
+
+/*****************************************************************************
+*
+*        M e s s a g e    F u n c t i o n s
+*              0x80 -> 0x8F reserved for private message use per product
+*
+*
+*****************************************************************************/
+
+#define MPI_FUNCTION_SCSI_IO_REQUEST                (0x00)
+#define MPI_FUNCTION_SCSI_TASK_MGMT                 (0x01)
+#define MPI_FUNCTION_IOC_INIT                       (0x02)
+#define MPI_FUNCTION_IOC_FACTS                      (0x03)
+#define MPI_FUNCTION_CONFIG                         (0x04)
+#define MPI_FUNCTION_PORT_FACTS                     (0x05)
+#define MPI_FUNCTION_PORT_ENABLE                    (0x06)
+#define MPI_FUNCTION_EVENT_NOTIFICATION             (0x07)
+#define MPI_FUNCTION_EVENT_ACK                      (0x08)
+#define MPI_FUNCTION_FW_DOWNLOAD                    (0x09)
+#define MPI_FUNCTION_TARGET_CMD_BUFFER_POST         (0x0A)
+#define MPI_FUNCTION_TARGET_ASSIST                  (0x0B)
+#define MPI_FUNCTION_TARGET_STATUS_SEND             (0x0C)
+#define MPI_FUNCTION_TARGET_MODE_ABORT              (0x0D)
+#define MPI_FUNCTION_FC_LINK_SRVC_BUF_POST          (0x0E)
+#define MPI_FUNCTION_FC_LINK_SRVC_RSP               (0x0F)
+#define MPI_FUNCTION_FC_EX_LINK_SRVC_SEND           (0x10)
+#define MPI_FUNCTION_FC_ABORT                       (0x11)
+#define MPI_FUNCTION_FW_UPLOAD                      (0x12)
+#define MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND       (0x13)
+#define MPI_FUNCTION_FC_PRIMITIVE_SEND              (0x14)
+
+#define MPI_FUNCTION_RAID_ACTION                    (0x15)
+#define MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH       (0x16)
+
+#define MPI_FUNCTION_TOOLBOX                        (0x17)
+
+#define MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR       (0x18)
+
+#define MPI_FUNCTION_MAILBOX                        (0x19)
+
+#define MPI_FUNCTION_SMP_PASSTHROUGH                (0x1A)
+#define MPI_FUNCTION_SAS_IO_UNIT_CONTROL            (0x1B)
+
+#define MPI_DIAG_BUFFER_POST                        (0x1D)
+#define MPI_DIAG_RELEASE                            (0x1E)
+
+#define MPI_FUNCTION_SCSI_IO_32                     (0x1F)
+
+#define MPI_FUNCTION_LAN_SEND                       (0x20)
+#define MPI_FUNCTION_LAN_RECEIVE                    (0x21)
+#define MPI_FUNCTION_LAN_RESET                      (0x22)
+
+#define MPI_FUNCTION_INBAND_BUFFER_POST             (0x28)
+#define MPI_FUNCTION_INBAND_SEND                    (0x29)
+#define MPI_FUNCTION_INBAND_RSP                     (0x2A)
+#define MPI_FUNCTION_INBAND_ABORT                   (0x2B)
+
+#define MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET         (0x40)
+#define MPI_FUNCTION_IO_UNIT_RESET                  (0x41)
+#define MPI_FUNCTION_HANDSHAKE                      (0x42)
+#define MPI_FUNCTION_REPLY_FRAME_REMOVAL            (0x43)
+
+
+/* standard version format */
+typedef struct _MPI_VERSION_STRUCT
+{
+    U8                      Dev;                        /* 00h */
+    U8                      Unit;                       /* 01h */
+    U8                      Minor;                      /* 02h */
+    U8                      Major;                      /* 03h */
+} MPI_VERSION_STRUCT, MPI_POINTER PTR_MPI_VERSION_STRUCT,
+  MpiVersionStruct_t, MPI_POINTER pMpiVersionStruct;
+
+typedef union _MPI_VERSION_FORMAT
+{
+    MPI_VERSION_STRUCT      Struct;
+    U32                     Word;
+} MPI_VERSION_FORMAT, MPI_POINTER PTR_MPI_VERSION_FORMAT,
+  MpiVersionFormat_t, MPI_POINTER pMpiVersionFormat_t;
+
+
+/*****************************************************************************
+*
+*        S c a t t e r    G a t h e r    E l e m e n t s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  Simple element structures                                               */
+/****************************************************************************/
+
+typedef struct _SGE_SIMPLE32
+{
+    U32                     FlagsLength;
+    U32                     Address;
+} SGE_SIMPLE32, MPI_POINTER PTR_SGE_SIMPLE32,
+  SGESimple32_t, MPI_POINTER pSGESimple32_t;
+
+typedef struct _SGE_SIMPLE64
+{
+    U32                     FlagsLength;
+    U64                     Address;
+} SGE_SIMPLE64, MPI_POINTER PTR_SGE_SIMPLE64,
+  SGESimple64_t, MPI_POINTER pSGESimple64_t;
+
+typedef struct _SGE_SIMPLE_UNION
+{
+    U32                     FlagsLength;
+    union
+    {
+        U32                 Address32;
+        U64                 Address64;
+    }u;
+} SGESimpleUnion_t, MPI_POINTER pSGESimpleUnion_t,
+  SGE_SIMPLE_UNION, MPI_POINTER PTR_SGE_SIMPLE_UNION;
+
+/****************************************************************************/
+/*  Chain element structures                                                */
+/****************************************************************************/
+
+typedef struct _SGE_CHAIN32
+{
+    U16                     Length;
+    U8                      NextChainOffset;
+    U8                      Flags;
+    U32                     Address;
+} SGE_CHAIN32, MPI_POINTER PTR_SGE_CHAIN32,
+  SGEChain32_t, MPI_POINTER pSGEChain32_t;
+
+typedef struct _SGE_CHAIN64
+{
+    U16                     Length;
+    U8                      NextChainOffset;
+    U8                      Flags;
+    U64                     Address;
+} SGE_CHAIN64, MPI_POINTER PTR_SGE_CHAIN64,
+  SGEChain64_t, MPI_POINTER pSGEChain64_t;
+
+typedef struct _SGE_CHAIN_UNION
+{
+    U16                     Length;
+    U8                      NextChainOffset;
+    U8                      Flags;
+    union
+    {
+        U32                 Address32;
+        U64                 Address64;
+    }u;
+} SGE_CHAIN_UNION, MPI_POINTER PTR_SGE_CHAIN_UNION,
+  SGEChainUnion_t, MPI_POINTER pSGEChainUnion_t;
+
+/****************************************************************************/
+/*  Transaction Context element                                             */
+/****************************************************************************/
+
+typedef struct _SGE_TRANSACTION32
+{
+    U8                      Reserved;
+    U8                      ContextSize;
+    U8                      DetailsLength;
+    U8                      Flags;
+    U32                     TransactionContext[1];
+    U32                     TransactionDetails[1];
+} SGE_TRANSACTION32, MPI_POINTER PTR_SGE_TRANSACTION32,
+  SGETransaction32_t, MPI_POINTER pSGETransaction32_t;
+
+typedef struct _SGE_TRANSACTION64
+{
+    U8                      Reserved;
+    U8                      ContextSize;
+    U8                      DetailsLength;
+    U8                      Flags;
+    U32                     TransactionContext[2];
+    U32                     TransactionDetails[1];
+} SGE_TRANSACTION64, MPI_POINTER PTR_SGE_TRANSACTION64,
+  SGETransaction64_t, MPI_POINTER pSGETransaction64_t;
+
+typedef struct _SGE_TRANSACTION96
+{
+    U8                      Reserved;
+    U8                      ContextSize;
+    U8                      DetailsLength;
+    U8                      Flags;
+    U32                     TransactionContext[3];
+    U32                     TransactionDetails[1];
+} SGE_TRANSACTION96, MPI_POINTER PTR_SGE_TRANSACTION96,
+  SGETransaction96_t, MPI_POINTER pSGETransaction96_t;
+
+typedef struct _SGE_TRANSACTION128
+{
+    U8                      Reserved;
+    U8                      ContextSize;
+    U8                      DetailsLength;
+    U8                      Flags;
+    U32                     TransactionContext[4];
+    U32                     TransactionDetails[1];
+} SGE_TRANSACTION128, MPI_POINTER PTR_SGE_TRANSACTION128,
+  SGETransaction_t128, MPI_POINTER pSGETransaction_t128;
+
+typedef struct _SGE_TRANSACTION_UNION
+{
+    U8                      Reserved;
+    U8                      ContextSize;
+    U8                      DetailsLength;
+    U8                      Flags;
+    union
+    {
+        U32                 TransactionContext32[1];
+        U32                 TransactionContext64[2];
+        U32                 TransactionContext96[3];
+        U32                 TransactionContext128[4];
+    }u;
+    U32                     TransactionDetails[1];
+} SGE_TRANSACTION_UNION, MPI_POINTER PTR_SGE_TRANSACTION_UNION,
+  SGETransactionUnion_t, MPI_POINTER pSGETransactionUnion_t;
+
+
+/****************************************************************************/
+/*  SGE IO types union  for IO SGL's                                        */
+/****************************************************************************/
+
+typedef struct _SGE_IO_UNION
+{
+    union
+    {
+        SGE_SIMPLE_UNION    Simple;
+        SGE_CHAIN_UNION     Chain;
+    } u;
+} SGE_IO_UNION, MPI_POINTER PTR_SGE_IO_UNION,
+  SGEIOUnion_t, MPI_POINTER pSGEIOUnion_t;
+
+/****************************************************************************/
+/*  SGE union for SGL's with Simple and Transaction elements                */
+/****************************************************************************/
+
+typedef struct _SGE_TRANS_SIMPLE_UNION
+{
+    union
+    {
+        SGE_SIMPLE_UNION        Simple;
+        SGE_TRANSACTION_UNION   Transaction;
+    } u;
+} SGE_TRANS_SIMPLE_UNION, MPI_POINTER PTR_SGE_TRANS_SIMPLE_UNION,
+  SGETransSimpleUnion_t, MPI_POINTER pSGETransSimpleUnion_t;
+
+/****************************************************************************/
+/*  All SGE types union                                                     */
+/****************************************************************************/
+
+typedef struct _SGE_MPI_UNION
+{
+    union
+    {
+        SGE_SIMPLE_UNION        Simple;
+        SGE_CHAIN_UNION         Chain;
+        SGE_TRANSACTION_UNION   Transaction;
+    } u;
+} SGE_MPI_UNION, MPI_POINTER PTR_SGE_MPI_UNION,
+  MPI_SGE_UNION_t, MPI_POINTER pMPI_SGE_UNION_t,
+  SGEAllUnion_t, MPI_POINTER pSGEAllUnion_t;
+
+
+/****************************************************************************/
+/*  SGE field definition and masks                                          */
+/****************************************************************************/
+
+/* Flags field bit definitions */
+
+#define MPI_SGE_FLAGS_LAST_ELEMENT              (0x80)
+#define MPI_SGE_FLAGS_END_OF_BUFFER             (0x40)
+#define MPI_SGE_FLAGS_ELEMENT_TYPE_MASK         (0x30)
+#define MPI_SGE_FLAGS_LOCAL_ADDRESS             (0x08)
+#define MPI_SGE_FLAGS_DIRECTION                 (0x04)
+#define MPI_SGE_FLAGS_ADDRESS_SIZE              (0x02)
+#define MPI_SGE_FLAGS_END_OF_LIST               (0x01)
+
+#define MPI_SGE_FLAGS_SHIFT                     (24)
+
+#define MPI_SGE_LENGTH_MASK                     (0x00FFFFFF)
+#define MPI_SGE_CHAIN_LENGTH_MASK               (0x0000FFFF)
+
+/* Element Type */
+
+#define MPI_SGE_FLAGS_TRANSACTION_ELEMENT       (0x00)
+#define MPI_SGE_FLAGS_SIMPLE_ELEMENT            (0x10)
+#define MPI_SGE_FLAGS_CHAIN_ELEMENT             (0x30)
+#define MPI_SGE_FLAGS_ELEMENT_MASK              (0x30)
+
+/* Address location */
+
+#define MPI_SGE_FLAGS_SYSTEM_ADDRESS            (0x00)
+
+/* Direction */
+
+#define MPI_SGE_FLAGS_IOC_TO_HOST               (0x00)
+#define MPI_SGE_FLAGS_HOST_TO_IOC               (0x04)
+
+/* Address Size */
+
+#define MPI_SGE_FLAGS_32_BIT_ADDRESSING         (0x00)
+#define MPI_SGE_FLAGS_64_BIT_ADDRESSING         (0x02)
+
+/* Context Size */
+
+#define MPI_SGE_FLAGS_32_BIT_CONTEXT            (0x00)
+#define MPI_SGE_FLAGS_64_BIT_CONTEXT            (0x02)
+#define MPI_SGE_FLAGS_96_BIT_CONTEXT            (0x04)
+#define MPI_SGE_FLAGS_128_BIT_CONTEXT           (0x06)
+
+#define MPI_SGE_CHAIN_OFFSET_MASK               (0x00FF0000)
+#define MPI_SGE_CHAIN_OFFSET_SHIFT              (16)
+
+
+/****************************************************************************/
+/*  SGE operation Macros                                                    */
+/****************************************************************************/
+
+         /* SIMPLE FlagsLength manipulations... */
+#define  MPI_SGE_SET_FLAGS(f)           ((U32)(f) << MPI_SGE_FLAGS_SHIFT)
+#define  MPI_SGE_GET_FLAGS(fl)          (((fl) & ~MPI_SGE_LENGTH_MASK) >> MPI_SGE_FLAGS_SHIFT)
+#define  MPI_SGE_LENGTH(fl)             ((fl) & MPI_SGE_LENGTH_MASK)
+#define  MPI_SGE_CHAIN_LENGTH(fl)       ((fl) & MPI_SGE_CHAIN_LENGTH_MASK)
+
+#define  MPI_SGE_SET_FLAGS_LENGTH(f,l)  (MPI_SGE_SET_FLAGS(f) | MPI_SGE_LENGTH(l))
+
+#define  MPI_pSGE_GET_FLAGS(psg)        MPI_SGE_GET_FLAGS((psg)->FlagsLength)
+#define  MPI_pSGE_GET_LENGTH(psg)       MPI_SGE_LENGTH((psg)->FlagsLength)
+#define  MPI_pSGE_SET_FLAGS_LENGTH(psg,f,l)  (psg)->FlagsLength = MPI_SGE_SET_FLAGS_LENGTH(f,l)
+         /* CAUTION - The following are READ-MODIFY-WRITE! */
+#define  MPI_pSGE_SET_FLAGS(psg,f)      (psg)->FlagsLength |= MPI_SGE_SET_FLAGS(f)
+#define  MPI_pSGE_SET_LENGTH(psg,l)     (psg)->FlagsLength |= MPI_SGE_LENGTH(l)
+
+#define  MPI_GET_CHAIN_OFFSET(x) ((x&MPI_SGE_CHAIN_OFFSET_MASK)>>MPI_SGE_CHAIN_OFFSET_SHIFT)
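As an illustration of what MPI_SGE_SET_FLAGS_LENGTH produces, a final 512-byte host-to-IOC simple element with 64-bit addressing combines the flag bits above into the top byte and the byte count into the low 24 bits. A standalone sketch with the flag values written out:

/* Build a FlagsLength word the way MPI_SGE_SET_FLAGS_LENGTH() does:
 * flags byte in bits 31:24, byte count in bits 23:0. */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t flags = 0x80 |	/* MPI_SGE_FLAGS_LAST_ELEMENT      */
			 0x40 |	/* MPI_SGE_FLAGS_END_OF_BUFFER     */
			 0x10 |	/* MPI_SGE_FLAGS_SIMPLE_ELEMENT    */
			 0x04 |	/* MPI_SGE_FLAGS_HOST_TO_IOC       */
			 0x02 |	/* MPI_SGE_FLAGS_64_BIT_ADDRESSING */
			 0x01;	/* MPI_SGE_FLAGS_END_OF_LIST       */
	uint32_t flags_length = (flags << 24) | (512 & 0x00FFFFFF);

	assert(flags_length == 0xD7000200);
	return 0;
}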
+
+
+
+/*****************************************************************************
+*
+*        S t a n d a r d    M e s s a g e    S t r u c t u r e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Standard message request header for all request messages                 */
+/****************************************************************************/
+
+typedef struct _MSG_REQUEST_HEADER
+{
+    U8                      Reserved[2];      /* function specific */
+    U8                      ChainOffset;
+    U8                      Function;
+    U8                      Reserved1[3];     /* function specific */
+    U8                      MsgFlags;
+    U32                     MsgContext;
+} MSG_REQUEST_HEADER, MPI_POINTER PTR_MSG_REQUEST_HEADER,
+  MPIHeader_t, MPI_POINTER pMPIHeader_t;
+
+
+/****************************************************************************/
+/*  Default Reply                                                           */
+/****************************************************************************/
+
+typedef struct _MSG_DEFAULT_REPLY
+{
+    U8                      Reserved[2];      /* function specific */
+    U8                      MsgLength;
+    U8                      Function;
+    U8                      Reserved1[3];     /* function specific */
+    U8                      MsgFlags;
+    U32                     MsgContext;
+    U8                      Reserved2[2];     /* function specific */
+    U16                     IOCStatus;
+    U32                     IOCLogInfo;
+} MSG_DEFAULT_REPLY, MPI_POINTER PTR_MSG_DEFAULT_REPLY,
+  MPIDefaultReply_t, MPI_POINTER pMPIDefaultReply_t;
+
+
+/* MsgFlags definition for all replies */
+
+#define MPI_MSGFLAGS_CONTINUATION_REPLY         (0x80)
+
+
+/*****************************************************************************
+*
+*               I O C    S t a t u s   V a l u e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  Common IOCStatus values for all replies                                 */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SUCCESS                   (0x0000)
+#define MPI_IOCSTATUS_INVALID_FUNCTION          (0x0001)
+#define MPI_IOCSTATUS_BUSY                      (0x0002)
+#define MPI_IOCSTATUS_INVALID_SGL               (0x0003)
+#define MPI_IOCSTATUS_INTERNAL_ERROR            (0x0004)
+#define MPI_IOCSTATUS_RESERVED                  (0x0005)
+#define MPI_IOCSTATUS_INSUFFICIENT_RESOURCES    (0x0006)
+#define MPI_IOCSTATUS_INVALID_FIELD             (0x0007)
+#define MPI_IOCSTATUS_INVALID_STATE             (0x0008)
+#define MPI_IOCSTATUS_OP_STATE_NOT_SUPPORTED    (0x0009)
+
+/****************************************************************************/
+/*  Config IOCStatus values                                                 */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_CONFIG_INVALID_ACTION     (0x0020)
+#define MPI_IOCSTATUS_CONFIG_INVALID_TYPE       (0x0021)
+#define MPI_IOCSTATUS_CONFIG_INVALID_PAGE       (0x0022)
+#define MPI_IOCSTATUS_CONFIG_INVALID_DATA       (0x0023)
+#define MPI_IOCSTATUS_CONFIG_NO_DEFAULTS        (0x0024)
+#define MPI_IOCSTATUS_CONFIG_CANT_COMMIT        (0x0025)
+
+/****************************************************************************/
+/*  SCSIIO Reply (SPI & FCP) initiator values                               */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SCSI_RECOVERED_ERROR      (0x0040)
+#define MPI_IOCSTATUS_SCSI_INVALID_BUS          (0x0041)
+#define MPI_IOCSTATUS_SCSI_INVALID_TARGETID     (0x0042)
+#define MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE     (0x0043)
+#define MPI_IOCSTATUS_SCSI_DATA_OVERRUN         (0x0044)
+#define MPI_IOCSTATUS_SCSI_DATA_UNDERRUN        (0x0045)
+#define MPI_IOCSTATUS_SCSI_IO_DATA_ERROR        (0x0046)
+#define MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR       (0x0047)
+#define MPI_IOCSTATUS_SCSI_TASK_TERMINATED      (0x0048)
+#define MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH    (0x0049)
+#define MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED     (0x004A)
+#define MPI_IOCSTATUS_SCSI_IOC_TERMINATED       (0x004B)
+#define MPI_IOCSTATUS_SCSI_EXT_TERMINATED       (0x004C)
+
+/****************************************************************************/
+/*  For use by SCSI Initiator and SCSI Target end-to-end data protection    */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_EEDP_CRC_ERROR            (0x004D)
+#define MPI_IOCSTATUS_EEDP_LBA_TAG_ERROR        (0x004E)
+#define MPI_IOCSTATUS_EEDP_APP_TAG_ERROR        (0x004F)
+
+
+/****************************************************************************/
+/*  SCSI (SPI & FCP) target values                                          */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_TARGET_PRIORITY_IO         (0x0060)
+#define MPI_IOCSTATUS_TARGET_INVALID_PORT        (0x0061)
+#define MPI_IOCSTATUS_TARGET_INVALID_IOCINDEX    (0x0062)   /* obsolete */
+#define MPI_IOCSTATUS_TARGET_INVALID_IO_INDEX    (0x0062)
+#define MPI_IOCSTATUS_TARGET_ABORTED             (0x0063)
+#define MPI_IOCSTATUS_TARGET_NO_CONN_RETRYABLE   (0x0064)
+#define MPI_IOCSTATUS_TARGET_NO_CONNECTION       (0x0065)
+#define MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH (0x006A)
+#define MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT   (0x006B)
+
+/****************************************************************************/
+/*  Additional FCP target values (obsolete)                                 */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_TARGET_FC_ABORTED         (0x0066)    /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_RX_ID_INVALID   (0x0067)    /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_DID_INVALID     (0x0068)    /* obsolete */
+#define MPI_IOCSTATUS_TARGET_FC_NODE_LOGGED_OUT (0x0069)    /* obsolete */
+
+/****************************************************************************/
+/*  Fibre Channel Direct Access values                                      */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_FC_ABORTED                (0x0066)
+#define MPI_IOCSTATUS_FC_RX_ID_INVALID          (0x0067)
+#define MPI_IOCSTATUS_FC_DID_INVALID            (0x0068)
+#define MPI_IOCSTATUS_FC_NODE_LOGGED_OUT        (0x0069)
+#define MPI_IOCSTATUS_FC_EXCHANGE_CANCELED      (0x006C)
+
+/****************************************************************************/
+/*  LAN values                                                              */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_LAN_DEVICE_NOT_FOUND      (0x0080)
+#define MPI_IOCSTATUS_LAN_DEVICE_FAILURE        (0x0081)
+#define MPI_IOCSTATUS_LAN_TRANSMIT_ERROR        (0x0082)
+#define MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED      (0x0083)
+#define MPI_IOCSTATUS_LAN_RECEIVE_ERROR         (0x0084)
+#define MPI_IOCSTATUS_LAN_RECEIVE_ABORTED       (0x0085)
+#define MPI_IOCSTATUS_LAN_PARTIAL_PACKET        (0x0086)
+#define MPI_IOCSTATUS_LAN_CANCELED              (0x0087)
+
+/****************************************************************************/
+/*  Serial Attached SCSI values                                             */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_SAS_SMP_REQUEST_FAILED    (0x0090)
+
+/****************************************************************************/
+/*  Inband values                                                           */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_INBAND_ABORTED            (0x0098)
+#define MPI_IOCSTATUS_INBAND_NO_CONNECTION      (0x0099)
+
+/****************************************************************************/
+/*  Diagnostic Tools values                                                 */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_DIAGNOSTIC_RELEASED       (0x00A0)
+
+
+/****************************************************************************/
+/*  IOCStatus flag to indicate that log info is available                   */
+/****************************************************************************/
+
+#define MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE   (0x8000)
+#define MPI_IOCSTATUS_MASK                      (0x7FFF)
+
+/****************************************************************************/
+/*  LogInfo Types                                                           */
+/****************************************************************************/
+
+#define MPI_IOCLOGINFO_TYPE_MASK                (0xF0000000)
+#define MPI_IOCLOGINFO_TYPE_SHIFT               (28)
+#define MPI_IOCLOGINFO_TYPE_NONE                (0x0)
+#define MPI_IOCLOGINFO_TYPE_SCSI                (0x1)
+#define MPI_IOCLOGINFO_TYPE_FC                  (0x2)
+#define MPI_IOCLOGINFO_TYPE_SAS                 (0x3)
+#define MPI_IOCLOGINFO_TYPE_ISCSI               (0x4)
+#define MPI_IOCLOGINFO_LOG_DATA_MASK            (0x0FFFFFFF)
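The two groups of defines above work together when inspecting a reply: the high bit of IOCStatus indicates whether IOCLogInfo carries anything, and the top nibble of IOCLogInfo names the family the log data belongs to. A small standalone sketch with the mask values spelled out:

/* Split IOCStatus into its status code and log-info-available flag,
 * then classify IOCLogInfo by its type nibble. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t ioc_status   = 0x8047;		/* flag + SCSI_PROTOCOL_ERROR */
	uint32_t ioc_log_info = 0x24000004;	/* FC link CRC error */

	uint16_t status    = ioc_status & 0x7FFF;	/* MPI_IOCSTATUS_MASK */
	int      have_info = (ioc_status & 0x8000) != 0;
	uint32_t info_type = (ioc_log_info & 0xF0000000) >> 28;	/* 2 = FC */

	printf("status=%04x log_info_valid=%d type=%x data=%07x\n",
	       (unsigned)status, have_info, (unsigned)info_type,
	       (unsigned)(ioc_log_info & 0x0FFFFFFF));
	return 0;
}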
+
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_cnfg.h b/drivers/message/fusion/lsi/mpi_cnfg.h
new file mode 100644
index 0000000..a5680d8
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_cnfg.h
@@ -0,0 +1,2105 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_cnfg.h
+ *          Title:  MPI Config message, structures, and Pages
+ *  Creation Date:  July 27, 2000
+ *
+ *    mpi_cnfg.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-08-00  01.00.02  Added _PAGEVERSION definitions for all pages.
+ *                      Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
+ *                      fields to FC_DEVICE_0 page, updated the page version.
+ *                      Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
+ *                      SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
+ *                      and updated the page versions.
+ *                      Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
+ *                      page and updated the page version.
+ *                      Added Information field and _INFO_PARAMS_NEGOTIATED
+ *                      definition to SCSI_DEVICE_0 page.
+ *  06-22-00  01.00.03  Removed batch controls from LAN_0 page and updated the
+ *                      page version.
+ *                      Added BucketsRemaining to LAN_1 page, redefined the
+ *                      state values, and updated the page version.
+ *                      Revised bus width definitions in SCSI_PORT_0,
+ *                      SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
+ *  06-30-00  01.00.04  Added MaxReplySize to LAN_1 page and updated the page
+ *                      version.
+ *                      Moved FC_DEVICE_0 PageAddress description to spec.
+ *  07-27-00  01.00.05  Corrected the SubsystemVendorID and SubsystemID field
+ *                      widths in IOC_0 page and updated the page version.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *                      Added Manufacturing pages, IO Unit Page 2, SCSI SPI
+ *                      Port Page 2, FC Port Page 4, FC Port Page 5
+ *  11-15-00  01.01.02  Interim changes to match proposals
+ *  12-04-00  01.01.03  Config page changes to match MPI rev 1.00.01.
+ *  12-05-00  01.01.04  Modified config page actions.
+ *  01-09-01  01.01.05  Added defines for page address formats.
+ *                      Data size for Manufacturing pages 2 and 3 no longer
+ *                      defined here.
+ *                      Io Unit Page 2 size is fixed at 4 adapters and some
+ *                      flags were changed.
+ *                      SCSI Port Page 2 Device Settings modified.
+ *                      New fields added to FC Port Page 0 and some flags
+ *                      cleaned up.
+ *                      Removed impedance flash from FC Port Page 1.
+ *                      Added FC Port pages 6 and 7.
+ *  01-25-01  01.01.06  Added MaxInitiators field to FcPortPage0.
+ *  01-29-01  01.01.07  Changed some defines to make them 32 character unique.
+ *                      Added some LinkType defines for FcPortPage0.
+ *  02-20-01  01.01.08  Started using MPI_POINTER.
+ *  02-27-01  01.01.09  Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
+ *                      MPI_CONFIG_PAGETYPE_RAID_VOLUME.
+ *                      Added definitions and structures for IOC Page 2 and
+ *                      RAID Volume Page 2.
+ *  03-27-01  01.01.10  Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
+ *                      CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
+ *                      Added VendorId and ProductRevLevel fields to
+ *                      RAIDVOL2_IM_PHYS_ID struct.
+ *                      Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
+ *                      defines to make them compatible to MPI version 1.0.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.11  Added some new defines for the PageAddress field and
+ *                      removed some obsolete ones.
+ *                      Added IO Unit Page 3.
+ *                      Modified defines for Scsi Port Page 2.
+ *                      Modified RAID Volume Pages.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *                      Added SepID and SepBus to RVP2 IMPhysicalDisk struct.
+ *                      Added defines for the SEP bits in RVP2 VolumeSettings.
+ *                      Modified the DeviceSettings field in RVP2 to use the
+ *                      proper structure.
+ *                      Added defines for SES, SAF-TE, and cross channel for
+ *                      IOCPage2 CapabilitiesFlags.
+ *                      Removed define for MPI_IOUNITPAGE2_FLAGS_RAID_DISABLE.
+ *                      Removed define for
+ *                      MPI_SCSIPORTPAGE2_PORT_FLAGS_PARITY_ENABLE.
+ *                      Added define for MPI_CONFIG_PAGEATTR_RO_PERSISTENT.
+ *  08-29-01 01.02.02   Fixed value for MPI_MANUFACTPAGE_DEVID_53C1035.
+ *                      Added defines for MPI_FCPORTPAGE1_FLAGS_HARD_ALPA_ONLY
+ *                      and MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY.
+ *                      Removed MPI_SCSIPORTPAGE0_CAP_PACING_TRANSFERS,
+ *                      MPI_SCSIDEVPAGE0_NP_PACING_TRANSFERS, and
+ *                      MPI_SCSIDEVPAGE1_RP_PACING_TRANSFERS, and
+ *                      MPI_SCSIDEVPAGE1_CONF_PPR_ALLOWED.
+ *                      Added defines for MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED
+ *                      and MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED.
+ *                      Added OnBusTimerValue to CONFIG_PAGE_SCSI_PORT_1.
+ *                      Added rejected bits to SCSI Device Page 0 Information.
+ *                      Increased size of ALPA array in FC Port Page 2 by one
+ *                      and removed a one byte reserved field.
+ *  09-28-01 01.02.03   Swapped NegWireSpeedLow and NegWireSpeedHigh in
+ *                      CONFIG_PAGE_LAN_1 to match preferred 64-bit ordering.
+ *                      Added structures for Manufacturing Page 4, IO Unit
+ *                      Page 3, IOC Page 3, IOC Page 4, RAID Volume Page 0, and
+ *                      RAID PhysDisk Page 0.
+ *  10-04-01 01.02.04   Added define for MPI_CONFIG_PAGETYPE_RAID_PHYSDISK.
+ *                      Modified some of the new defines to make them 32
+ *                      character unique.
+ *                      Modified how variable length pages (arrays) are defined.
+ *                      Added generic defines for hot spare pools and RAID
+ *                      volume types.
+ *  11-01-01 01.02.05   Added define for MPI_IOUNITPAGE1_DISABLE_IR.
+ *  03-14-02 01.02.06   Added PCISlotNum field to CONFIG_PAGE_IOC_1 along with
+ *                      related define, and bumped the page version define.
+ *  05-31-02 01.02.07   Added a Flags field to CONFIG_PAGE_IOC_2_RAID_VOL in a
+ *                      reserved byte and added a define.
+ *                      Added define for
+ *                      MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE.
+ *                      Added new config page: CONFIG_PAGE_IOC_5.
+ *                      Added MaxAliases, MaxHardAliases, and NumCurrentAliases
+ *                      fields to CONFIG_PAGE_FC_PORT_0.
+ *                      Added AltConnector and NumRequestedAliases fields to
+ *                      CONFIG_PAGE_FC_PORT_1.
+ *                      Added new config page: CONFIG_PAGE_FC_PORT_10.
+ *  07-12-02 01.02.08   Added more MPI_MANUFACTPAGE_DEVID_ defines.
+ *                      Added additional MPI_SCSIDEVPAGE0_NP_ defines.
+ *                      Added more MPI_SCSIDEVPAGE1_RP_ defines.
+ *                      Added define for
+ *                      MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE.
+ *                      Added new config page: CONFIG_PAGE_SCSI_DEVICE_3.
+ *                      Modified MPI_FCPORTPAGE5_FLAGS_ defines.
+ *  09-16-02 01.02.09   Added MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG define.
+ *  11-15-02 01.02.10   Added ConnectedID defines for CONFIG_PAGE_SCSI_PORT_0.
+ *                      Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ *                      Added more Flags defines for CONFIG_PAGE_FC_DEVICE_0.
+ *  04-01-03 01.02.11   Added RR_TOV field and additional Flags defines for
+ *                      CONFIG_PAGE_FC_PORT_1.
+ *                      Added define MPI_FCPORTPAGE5_FLAGS_DISABLE to disable
+ *                      an alias.
+ *                      Added more device id defines.
+ *  06-26-03 01.02.12   Added MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID define.
+ *                      Added TargetConfig and IDConfig fields to
+ *                      CONFIG_PAGE_SCSI_PORT_1.
+ *                      Added more PortFlags defines for CONFIG_PAGE_SCSI_PORT_2
+ *                      to control DV.
+ *                      Added more Flags defines for CONFIG_PAGE_FC_PORT_1.
+ *                      In CONFIG_PAGE_FC_DEVICE_0, replaced Reserved1 field
+ *                      with ADISCHardALPA.
+ *                      Added MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY define.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_CNFG_H
+#define MPI_CNFG_H
+
+
+/*****************************************************************************
+*
+*       C o n f i g    M e s s a g e    a n d    S t r u c t u r e s
+*
+*****************************************************************************/
+
+typedef struct _CONFIG_PAGE_HEADER
+{
+    U8                      PageVersion;                /* 00h */
+    U8                      PageLength;                 /* 01h */
+    U8                      PageNumber;                 /* 02h */
+    U8                      PageType;                   /* 03h */
+} fCONFIG_PAGE_HEADER, MPI_POINTER PTR_CONFIG_PAGE_HEADER,
+  ConfigPageHeader_t, MPI_POINTER pConfigPageHeader_t;
+
+typedef union _CONFIG_PAGE_HEADER_UNION
+{
+   ConfigPageHeader_t  Struct;
+   U8                  Bytes[4];
+   U16                 Word16[2];
+   U32                 Word32;
+} ConfigPageHeaderUnion, MPI_POINTER pConfigPageHeaderUnion,
+  fCONFIG_PAGE_HEADER_UNION, MPI_POINTER PTR_CONFIG_PAGE_HEADER_UNION;
+
+typedef struct _CONFIG_EXTENDED_PAGE_HEADER
+{
+    U8                  PageVersion;                /* 00h */
+    U8                  Reserved1;                  /* 01h */
+    U8                  PageNumber;                 /* 02h */
+    U8                  PageType;                   /* 03h */
+    U16                 ExtPageLength;              /* 04h */
+    U8                  ExtPageType;                /* 06h */
+    U8                  Reserved2;                  /* 07h */
+} fCONFIG_EXTENDED_PAGE_HEADER, MPI_POINTER PTR_CONFIG_EXTENDED_PAGE_HEADER,
+  ConfigExtendedPageHeader_t, MPI_POINTER pConfigExtendedPageHeader_t;
+
+
+
+/****************************************************************************
+*   PageType field values
+****************************************************************************/
+#define MPI_CONFIG_PAGEATTR_READ_ONLY               (0x00)
+#define MPI_CONFIG_PAGEATTR_CHANGEABLE              (0x10)
+#define MPI_CONFIG_PAGEATTR_PERSISTENT              (0x20)
+#define MPI_CONFIG_PAGEATTR_RO_PERSISTENT           (0x30)
+#define MPI_CONFIG_PAGEATTR_MASK                    (0xF0)
+
+#define MPI_CONFIG_PAGETYPE_IO_UNIT                 (0x00)
+#define MPI_CONFIG_PAGETYPE_IOC                     (0x01)
+#define MPI_CONFIG_PAGETYPE_BIOS                    (0x02)
+#define MPI_CONFIG_PAGETYPE_SCSI_PORT               (0x03)
+#define MPI_CONFIG_PAGETYPE_SCSI_DEVICE             (0x04)
+#define MPI_CONFIG_PAGETYPE_FC_PORT                 (0x05)
+#define MPI_CONFIG_PAGETYPE_FC_DEVICE               (0x06)
+#define MPI_CONFIG_PAGETYPE_LAN                     (0x07)
+#define MPI_CONFIG_PAGETYPE_RAID_VOLUME             (0x08)
+#define MPI_CONFIG_PAGETYPE_MANUFACTURING           (0x09)
+#define MPI_CONFIG_PAGETYPE_RAID_PHYSDISK           (0x0A)
+#define MPI_CONFIG_PAGETYPE_INBAND                  (0x0B)
+#define MPI_CONFIG_PAGETYPE_EXTENDED                (0x0F)
+#define MPI_CONFIG_PAGETYPE_MASK                    (0x0F)
+
+#define MPI_CONFIG_TYPENUM_MASK                     (0x0FFF)
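+
+/*
+ * Illustrative note (not part of the MPI specification): the PageType byte
+ * in a page header carries the page attribute in its upper nibble and the
+ * page type in its lower nibble, so host code can split it with the masks
+ * above.  Assuming hdr is a ConfigPageHeader_t returned by the IOC:
+ *
+ *    U8  attr      = hdr.PageType & MPI_CONFIG_PAGEATTR_MASK;
+ *    U8  type      = hdr.PageType & MPI_CONFIG_PAGETYPE_MASK;
+ *    int writeable = (attr == MPI_CONFIG_PAGEATTR_CHANGEABLE) ||
+ *                    (attr == MPI_CONFIG_PAGEATTR_PERSISTENT);
+ */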
+
+
+/****************************************************************************
+*   ExtPageType field values
+****************************************************************************/
+#define MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT          (0x10)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_EXPANDER         (0x11)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_DEVICE           (0x12)
+#define MPI_CONFIG_EXTPAGETYPE_SAS_PHY              (0x13)
+
+
+/****************************************************************************
+*   PageAddress field values
+****************************************************************************/
+#define MPI_SCSI_PORT_PGAD_PORT_MASK                (0x000000FF)
+
+#define MPI_SCSI_DEVICE_TARGET_ID_MASK              (0x000000FF)
+#define MPI_SCSI_DEVICE_TARGET_ID_SHIFT             (0)
+#define MPI_SCSI_DEVICE_BUS_MASK                    (0x0000FF00)
+#define MPI_SCSI_DEVICE_BUS_SHIFT                   (8)
+
+#define MPI_FC_PORT_PGAD_PORT_MASK                  (0xF0000000)
+#define MPI_FC_PORT_PGAD_PORT_SHIFT                 (28)
+#define MPI_FC_PORT_PGAD_FORM_MASK                  (0x0F000000)
+#define MPI_FC_PORT_PGAD_FORM_INDEX                 (0x01000000)
+#define MPI_FC_PORT_PGAD_INDEX_MASK                 (0x0000FFFF)
+#define MPI_FC_PORT_PGAD_INDEX_SHIFT                (0)
+
+#define MPI_FC_DEVICE_PGAD_PORT_MASK                (0xF0000000)
+#define MPI_FC_DEVICE_PGAD_PORT_SHIFT               (28)
+#define MPI_FC_DEVICE_PGAD_FORM_MASK                (0x0F000000)
+#define MPI_FC_DEVICE_PGAD_FORM_NEXT_DID            (0x00000000)
+#define MPI_FC_DEVICE_PGAD_ND_PORT_MASK             (0xF0000000)
+#define MPI_FC_DEVICE_PGAD_ND_PORT_SHIFT            (28)
+#define MPI_FC_DEVICE_PGAD_ND_DID_MASK              (0x00FFFFFF)
+#define MPI_FC_DEVICE_PGAD_ND_DID_SHIFT             (0)
+#define MPI_FC_DEVICE_PGAD_FORM_BUS_TID             (0x01000000)
+#define MPI_FC_DEVICE_PGAD_BT_BUS_MASK              (0x0000FF00)
+#define MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT             (8)
+#define MPI_FC_DEVICE_PGAD_BT_TID_MASK              (0x000000FF)
+#define MPI_FC_DEVICE_PGAD_BT_TID_SHIFT             (0)
+
+#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_MASK          (0x000000FF)
+#define MPI_PHYSDISK_PGAD_PHYSDISKNUM_SHIFT         (0)
+
+#define MPI_SAS_DEVICE_PGAD_FORM_MASK               (0xF0000000)
+#define MPI_SAS_DEVICE_PGAD_FORM_SHIFT              (28)
+#define MPI_SAS_DEVICE_PGAD_FORM_GET_NEXT_HANDLE    (0x00000000)
+#define MPI_SAS_DEVICE_PGAD_FORM_BUS_TARGET_ID      (0x00000001)
+#define MPI_SAS_DEVICE_PGAD_FORM_HANDLE             (0x00000002)
+#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_MASK         (0x0000FFFF)
+#define MPI_SAS_DEVICE_PGAD_GNH_HANDLE_SHIFT        (0)
+#define MPI_SAS_DEVICE_PGAD_BT_BUS_MASK             (0x0000FF00)
+#define MPI_SAS_DEVICE_PGAD_BT_BUS_SHIFT            (8)
+#define MPI_SAS_DEVICE_PGAD_BT_TID_MASK             (0x000000FF)
+#define MPI_SAS_DEVICE_PGAD_BT_TID_SHIFT            (0)
+#define MPI_SAS_DEVICE_PGAD_H_HANDLE_MASK           (0x0000FFFF)
+#define MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT          (0)
+
+#define MPI_SAS_PHY_PGAD_PHY_NUMBER_MASK            (0x00FF0000)
+#define MPI_SAS_PHY_PGAD_PHY_NUMBER_SHIFT           (16)
+#define MPI_SAS_PHY_PGAD_DEVHANDLE_MASK             (0x0000FFFF)
+#define MPI_SAS_PHY_PGAD_DEVHANDLE_SHIFT            (0)
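+
+/*
+ * Illustrative note (not part of the MPI specification): the PageAddress
+ * word of a Config request is composed from the form/field defines above.
+ * Assuming bus, target and handle are host-supplied values, a SCSI Device
+ * page and a SAS Device page (addressed by handle) would use:
+ *
+ *    U32 spi_page_address = ((U32)bus << MPI_SCSI_DEVICE_BUS_SHIFT) |
+ *                           ((U32)target << MPI_SCSI_DEVICE_TARGET_ID_SHIFT);
+ *
+ *    U32 sas_page_address = (MPI_SAS_DEVICE_PGAD_FORM_HANDLE <<
+ *                            MPI_SAS_DEVICE_PGAD_FORM_SHIFT) |
+ *                           ((U32)handle << MPI_SAS_DEVICE_PGAD_H_HANDLE_SHIFT);
+ */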
+
+
+/****************************************************************************
+*   Config Request Message
+****************************************************************************/
+typedef struct _MSG_CONFIG
+{
+    U8                      Action;                     /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     ExtPageLength;              /* 04h */
+    U8                      ExtPageType;                /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      Reserved2[8];               /* 0Ch */
+    fCONFIG_PAGE_HEADER      Header;                     /* 14h */
+    U32                     PageAddress;                /* 18h */
+    SGE_IO_UNION            PageBufferSGE;              /* 1Ch */
+} MSG_CONFIG, MPI_POINTER PTR_MSG_CONFIG,
+  Config_t, MPI_POINTER pConfig_t;
+
+
+/****************************************************************************
+*   Action field values
+****************************************************************************/
+#define MPI_CONFIG_ACTION_PAGE_HEADER               (0x00)
+#define MPI_CONFIG_ACTION_PAGE_READ_CURRENT         (0x01)
+#define MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT        (0x02)
+#define MPI_CONFIG_ACTION_PAGE_DEFAULT              (0x03)
+#define MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM          (0x04)
+#define MPI_CONFIG_ACTION_PAGE_READ_DEFAULT         (0x05)
+#define MPI_CONFIG_ACTION_PAGE_READ_NVRAM           (0x06)
+
+
+/* Config Reply Message */
+typedef struct _MSG_CONFIG_REPLY
+{
+    U8                      Action;                     /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     ExtPageLength;              /* 04h */
+    U8                      ExtPageType;                /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      Reserved2[2];               /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    fCONFIG_PAGE_HEADER      Header;                     /* 14h */
+} MSG_CONFIG_REPLY, MPI_POINTER PTR_MSG_CONFIG_REPLY,
+  ConfigReply_t, MPI_POINTER pConfigReply_t;
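+
+/*
+ * Illustrative note (not part of the MPI specification): configuration pages
+ * are normally fetched in two steps.  MPI_FUNCTION_CONFIG and the
+ * MPI_IOCSTATUS_* defines come from mpi.h; req is a host-built request frame
+ * and reply a pConfigReply_t, with the posting/reply mechanics being driver
+ * specific and only sketched here.
+ *
+ *    Config_t req = { 0 };
+ *
+ *    req.Function          = MPI_FUNCTION_CONFIG;
+ *    req.Action            = MPI_CONFIG_ACTION_PAGE_HEADER;
+ *    req.Header.PageType   = MPI_CONFIG_PAGETYPE_IOC;
+ *    req.Header.PageNumber = 2;
+ *
+ * The reply's Header returns PageVersion and PageLength (in 32-bit words).
+ * A second request with Action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT, the
+ * returned Header copied back into req.Header, PageAddress filled in, and
+ * PageBufferSGE describing a host buffer of at least PageLength * 4 bytes
+ * then transfers the page; success is indicated by
+ * (reply->IOCStatus & MPI_IOCSTATUS_MASK) == MPI_IOCSTATUS_SUCCESS.
+ */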
+
+
+
+/*****************************************************************************
+*
+*               C o n f i g u r a t i o n    P a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************
+*   Manufacturing Config pages
+****************************************************************************/
+#define MPI_MANUFACTPAGE_VENDORID_LSILOGIC          (0x1000)
+/* Fibre Channel */
+#define MPI_MANUFACTPAGE_DEVICEID_FC909             (0x0621)
+#define MPI_MANUFACTPAGE_DEVICEID_FC919             (0x0624)
+#define MPI_MANUFACTPAGE_DEVICEID_FC929             (0x0622)
+#define MPI_MANUFACTPAGE_DEVICEID_FC919X            (0x0628)
+#define MPI_MANUFACTPAGE_DEVICEID_FC929X            (0x0626)
+/* SCSI */
+#define MPI_MANUFACTPAGE_DEVID_53C1030              (0x0030)
+#define MPI_MANUFACTPAGE_DEVID_53C1030ZC            (0x0031)
+#define MPI_MANUFACTPAGE_DEVID_1030_53C1035         (0x0032)
+#define MPI_MANUFACTPAGE_DEVID_1030ZC_53C1035       (0x0033)
+#define MPI_MANUFACTPAGE_DEVID_53C1035              (0x0040)
+#define MPI_MANUFACTPAGE_DEVID_53C1035ZC            (0x0041)
+/* SAS */
+#define MPI_MANUFACTPAGE_DEVID_SAS1064              (0x0050)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U8                      ChipName[16];               /* 04h */
+    U8                      ChipRevision[8];            /* 14h */
+    U8                      BoardName[16];              /* 1Ch */
+    U8                      BoardAssembly[16];          /* 2Ch */
+    U8                      BoardTracerNumber[16];      /* 3Ch */
+
+} fCONFIG_PAGE_MANUFACTURING_0, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_0,
+  ManufacturingPage0_t, MPI_POINTER pManufacturingPage0_t;
+
+#define MPI_MANUFACTURING0_PAGEVERSION                 (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U8                      VPD[256];                   /* 04h */
+} fCONFIG_PAGE_MANUFACTURING_1, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_1,
+  ManufacturingPage1_t, MPI_POINTER pManufacturingPage1_t;
+
+#define MPI_MANUFACTURING1_PAGEVERSION                 (0x00)
+
+
+typedef struct _MPI_CHIP_REVISION_ID
+{
+    U16 DeviceID;                                       /* 00h */
+    U8  PCIRevisionID;                                  /* 02h */
+    U8  Reserved;                                       /* 03h */
+} MPI_CHIP_REVISION_ID, MPI_POINTER PTR_MPI_CHIP_REVISION_ID,
+  MpiChipRevisionId_t, MPI_POINTER pMpiChipRevisionId_t;
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_MAN_PAGE_2_HW_SETTINGS_WORDS
+#define MPI_MAN_PAGE_2_HW_SETTINGS_WORDS    (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_2
+{
+    fCONFIG_PAGE_HEADER      Header;                                 /* 00h */
+    MPI_CHIP_REVISION_ID    ChipId;                                 /* 04h */
+    U32                     HwSettings[MPI_MAN_PAGE_2_HW_SETTINGS_WORDS];/* 08h */
+} fCONFIG_PAGE_MANUFACTURING_2, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_2,
+  ManufacturingPage2_t, MPI_POINTER pManufacturingPage2_t;
+
+#define MPI_MANUFACTURING2_PAGEVERSION                  (0x00)
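+
+/*
+ * Illustrative note: because HwSettings is declared with a one-element
+ * placeholder, host code sizes its buffer from the PageLength reported by
+ * the IOC (in 32-bit words) rather than from sizeof.  Assuming hdr is the
+ * header returned by an earlier MPI_CONFIG_ACTION_PAGE_HEADER request:
+ *
+ *    U32 fixed_words = (sizeof(fCONFIG_PAGE_HEADER) +
+ *                       sizeof(MPI_CHIP_REVISION_ID)) / 4;
+ *    U32 hw_words    = hdr.PageLength - fixed_words;
+ *    U32 page_bytes  = (U32)hdr.PageLength * 4;
+ *
+ * The same pattern applies to every page in this file that ends in a
+ * one-element placeholder array.
+ */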
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_MAN_PAGE_3_INFO_WORDS
+#define MPI_MAN_PAGE_3_INFO_WORDS           (1)
+#endif
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_3
+{
+    fCONFIG_PAGE_HEADER                  Header;                     /* 00h */
+    MPI_CHIP_REVISION_ID                ChipId;                     /* 04h */
+    U32                                 Info[MPI_MAN_PAGE_3_INFO_WORDS];/* 08h */
+} fCONFIG_PAGE_MANUFACTURING_3, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_3,
+  ManufacturingPage3_t, MPI_POINTER pManufacturingPage3_t;
+
+#define MPI_MANUFACTURING3_PAGEVERSION                  (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_4
+{
+    fCONFIG_PAGE_HEADER              Header;             /* 00h */
+    U32                             Reserved1;          /* 04h */
+    U8                              InfoOffset0;        /* 08h */
+    U8                              InfoSize0;          /* 09h */
+    U8                              InfoOffset1;        /* 0Ah */
+    U8                              InfoSize1;          /* 0Bh */
+    U8                              InquirySize;        /* 0Ch */
+    U8                              Flags;              /* 0Dh */
+    U16                             Reserved2;          /* 0Eh */
+    U8                              InquiryData[56];    /* 10h */
+    U32                             ISVolumeSettings;   /* 48h */
+    U32                             IMEVolumeSettings;  /* 4Ch */
+    U32                             IMVolumeSettings;   /* 50h */
+} fCONFIG_PAGE_MANUFACTURING_4, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_4,
+  ManufacturingPage4_t, MPI_POINTER pManufacturingPage4_t;
+
+#define MPI_MANUFACTURING4_PAGEVERSION                  (0x01)
+
+/* defines for the Flags field */
+#define MPI_MANPAGE4_IR_NO_MIX_SAS_SATA                 (0x01)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_5
+{
+    fCONFIG_PAGE_HEADER              Header;             /* 00h */
+    U64                             BaseWWID;           /* 04h */
+} fCONFIG_PAGE_MANUFACTURING_5, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_5,
+  ManufacturingPage5_t, MPI_POINTER pManufacturingPage5_t;
+
+#define MPI_MANUFACTURING5_PAGEVERSION                  (0x00)
+
+
+typedef struct _CONFIG_PAGE_MANUFACTURING_6
+{
+    fCONFIG_PAGE_HEADER              Header;             /* 00h */
+    U32                             ProductSpecificInfo;/* 04h */
+} fCONFIG_PAGE_MANUFACTURING_6, MPI_POINTER PTR_CONFIG_PAGE_MANUFACTURING_6,
+  ManufacturingPage6_t, MPI_POINTER pManufacturingPage6_t;
+
+#define MPI_MANUFACTURING6_PAGEVERSION                  (0x00)
+
+
+/****************************************************************************
+*   IO Unit Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_IO_UNIT_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U64                     UniqueValue;                /* 04h */
+} fCONFIG_PAGE_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_0,
+  IOUnitPage0_t, MPI_POINTER pIOUnitPage0_t;
+
+#define MPI_IOUNITPAGE0_PAGEVERSION                     (0x00)
+
+
+typedef struct _CONFIG_PAGE_IO_UNIT_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Flags;                      /* 04h */
+} fCONFIG_PAGE_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_1,
+  IOUnitPage1_t, MPI_POINTER pIOUnitPage1_t;
+
+#define MPI_IOUNITPAGE1_PAGEVERSION                     (0x01)
+
+/* IO Unit Page 1 Flags defines */
+#define MPI_IOUNITPAGE1_MULTI_FUNCTION                  (0x00000000)
+#define MPI_IOUNITPAGE1_SINGLE_FUNCTION                 (0x00000001)
+#define MPI_IOUNITPAGE1_MULTI_PATHING                   (0x00000002)
+#define MPI_IOUNITPAGE1_SINGLE_PATHING                  (0x00000000)
+#define MPI_IOUNITPAGE1_IR_USE_STATIC_VOLUME_ID         (0x00000004)
+#define MPI_IOUNITPAGE1_DISABLE_QUEUE_FULL_HANDLING     (0x00000020)
+#define MPI_IOUNITPAGE1_DISABLE_IR                      (0x00000040)
+#define MPI_IOUNITPAGE1_FORCE_32                        (0x00000080)
+#define MPI_IOUNITPAGE1_NATIVE_COMMAND_Q_DISABLE        (0x00000100)
+
+
+typedef struct _MPI_ADAPTER_INFO
+{
+    U8      PciBusNumber;                               /* 00h */
+    U8      PciDeviceAndFunctionNumber;                 /* 01h */
+    U16     AdapterFlags;                               /* 02h */
+} MPI_ADAPTER_INFO, MPI_POINTER PTR_MPI_ADAPTER_INFO,
+  MpiAdapterInfo_t, MPI_POINTER pMpiAdapterInfo_t;
+
+#define MPI_ADAPTER_INFO_FLAGS_EMBEDDED                 (0x0001)
+#define MPI_ADAPTER_INFO_FLAGS_INIT_STATUS              (0x0002)
+
+typedef struct _CONFIG_PAGE_IO_UNIT_2
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Flags;                      /* 04h */
+    U32                     BiosVersion;                /* 08h */
+    MPI_ADAPTER_INFO        AdapterOrder[4];            /* 0Ch */
+} fCONFIG_PAGE_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_2,
+  IOUnitPage2_t, MPI_POINTER pIOUnitPage2_t;
+
+#define MPI_IOUNITPAGE2_PAGEVERSION                     (0x00)
+
+#define MPI_IOUNITPAGE2_FLAGS_PAUSE_ON_ERROR            (0x00000002)
+#define MPI_IOUNITPAGE2_FLAGS_VERBOSE_ENABLE            (0x00000004)
+#define MPI_IOUNITPAGE2_FLAGS_COLOR_VIDEO_DISABLE       (0x00000008)
+#define MPI_IOUNITPAGE2_FLAGS_DONT_HOOK_INT_40          (0x00000010)
+
+#define MPI_IOUNITPAGE2_FLAGS_DEV_LIST_DISPLAY_MASK     (0x000000E0)
+#define MPI_IOUNITPAGE2_FLAGS_INSTALLED_DEV_DISPLAY     (0x00000000)
+#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DISPLAY           (0x00000020)
+#define MPI_IOUNITPAGE2_FLAGS_ADAPTER_DEV_DISPLAY       (0x00000040)
+
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX
+#define MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX     (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IO_UNIT_3
+{
+    fCONFIG_PAGE_HEADER      Header;                                   /* 00h */
+    U8                      GPIOCount;                                /* 04h */
+    U8                      Reserved1;                                /* 05h */
+    U16                     Reserved2;                                /* 06h */
+    U16                     GPIOVal[MPI_IO_UNIT_PAGE_3_GPIO_VAL_MAX]; /* 08h */
+} fCONFIG_PAGE_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_IO_UNIT_3,
+  IOUnitPage3_t, MPI_POINTER pIOUnitPage3_t;
+
+#define MPI_IOUNITPAGE3_PAGEVERSION                     (0x01)
+
+#define MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK              (0xFC)
+#define MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT             (2)
+#define MPI_IOUNITPAGE3_GPIO_SETTING_OFF                (0x00)
+#define MPI_IOUNITPAGE3_GPIO_SETTING_ON                 (0x01)
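+
+/*
+ * Illustrative note: the defines above suggest each GPIOVal entry carries a
+ * function code in bits 7:2 and an on/off setting in bit 0, with GPIOCount
+ * giving the number of valid entries.  Assuming function is a host-supplied
+ * function code, an entry could be composed as:
+ *
+ *    U16 val = (U16)(((function << MPI_IOUNITPAGE3_GPIO_FUNCTION_SHIFT) &
+ *                     MPI_IOUNITPAGE3_GPIO_FUNCTION_MASK) |
+ *                    MPI_IOUNITPAGE3_GPIO_SETTING_ON);
+ */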
+
+
+/****************************************************************************
+*   IOC Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_IOC_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     TotalNVStore;               /* 04h */
+    U32                     FreeNVStore;                /* 08h */
+    U16                     VendorID;                   /* 0Ch */
+    U16                     DeviceID;                   /* 0Eh */
+    U8                      RevisionID;                 /* 10h */
+    U8                      Reserved[3];                /* 11h */
+    U32                     ClassCode;                  /* 14h */
+    U16                     SubsystemVendorID;          /* 18h */
+    U16                     SubsystemID;                /* 1Ah */
+} fCONFIG_PAGE_IOC_0, MPI_POINTER PTR_CONFIG_PAGE_IOC_0,
+  IOCPage0_t, MPI_POINTER pIOCPage0_t;
+
+#define MPI_IOCPAGE0_PAGEVERSION                        (0x01)
+
+
+typedef struct _CONFIG_PAGE_IOC_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Flags;                      /* 04h */
+    U32                     CoalescingTimeout;          /* 08h */
+    U8                      CoalescingDepth;            /* 0Ch */
+    U8                      PCISlotNum;                 /* 0Dh */
+    U8                      Reserved[2];                /* 0Eh */
+} fCONFIG_PAGE_IOC_1, MPI_POINTER PTR_CONFIG_PAGE_IOC_1,
+  IOCPage1_t, MPI_POINTER pIOCPage1_t;
+
+#define MPI_IOCPAGE1_PAGEVERSION                        (0x01)
+
+/* defines for the Flags field */
+#define MPI_IOCPAGE1_EEDP_HOST_SUPPORTS_DIF             (0x08000000)
+#define MPI_IOCPAGE1_EEDP_MODE_MASK                     (0x07000000)
+#define MPI_IOCPAGE1_EEDP_MODE_OFF                      (0x00000000)
+#define MPI_IOCPAGE1_EEDP_MODE_T10                      (0x01000000)
+#define MPI_IOCPAGE1_EEDP_MODE_LSI_1                    (0x02000000)
+#define MPI_IOCPAGE1_REPLY_COALESCING                   (0x00000001)
+
+#define MPI_IOCPAGE1_PCISLOTNUM_UNKNOWN                 (0xFF)
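+
+/*
+ * Illustrative note: reply coalescing is controlled through this page.  A
+ * host would typically read IOC Page 1 with
+ * MPI_CONFIG_ACTION_PAGE_READ_CURRENT, update it, and write it back with
+ * MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT (or ..._WRITE_NVRAM to persist it).
+ * Assuming p points at the page just read and the timeout/depth values are
+ * host policy:
+ *
+ *    IOCPage1_t *p = page_buffer;
+ *    p->Flags            |= MPI_IOCPAGE1_REPLY_COALESCING;
+ *    p->CoalescingTimeout = coalescing_timeout;
+ *    p->CoalescingDepth   = coalescing_depth;
+ */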
+
+
+typedef struct _CONFIG_PAGE_IOC_2_RAID_VOL
+{
+    U8                          VolumeID;               /* 00h */
+    U8                          VolumeBus;              /* 01h */
+    U8                          VolumeIOC;              /* 02h */
+    U8                          VolumePageNumber;       /* 03h */
+    U8                          VolumeType;             /* 04h */
+    U8                          Flags;                  /* 05h */
+    U16                         Reserved3;              /* 06h */
+} fCONFIG_PAGE_IOC_2_RAID_VOL, MPI_POINTER PTR_CONFIG_PAGE_IOC_2_RAID_VOL,
+  ConfigPageIoc2RaidVol_t, MPI_POINTER pConfigPageIoc2RaidVol_t;
+
+/* IOC Page 2 Volume RAID Type values, also used in RAID Volume pages */
+
+#define MPI_RAID_VOL_TYPE_IS                        (0x00)
+#define MPI_RAID_VOL_TYPE_IME                       (0x01)
+#define MPI_RAID_VOL_TYPE_IM                        (0x02)
+
+/* IOC Page 2 Volume Flags values */
+
+#define MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE           (0x08)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_2_RAID_VOLUME_MAX
+#define MPI_IOC_PAGE_2_RAID_VOLUME_MAX      (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_2
+{
+    fCONFIG_PAGE_HEADER          Header;                              /* 00h */
+    U32                         CapabilitiesFlags;                   /* 04h */
+    U8                          NumActiveVolumes;                    /* 08h */
+    U8                          MaxVolumes;                          /* 09h */
+    U8                          NumActivePhysDisks;                  /* 0Ah */
+    U8                          MaxPhysDisks;                        /* 0Bh */
+    fCONFIG_PAGE_IOC_2_RAID_VOL  RaidVolume[MPI_IOC_PAGE_2_RAID_VOLUME_MAX];/* 0Ch */
+} fCONFIG_PAGE_IOC_2, MPI_POINTER PTR_CONFIG_PAGE_IOC_2,
+  IOCPage2_t, MPI_POINTER pIOCPage2_t;
+
+#define MPI_IOCPAGE2_PAGEVERSION                        (0x02)
+
+/* IOC Page 2 Capabilities flags */
+
+#define MPI_IOCPAGE2_CAP_FLAGS_IS_SUPPORT               (0x00000001)
+#define MPI_IOCPAGE2_CAP_FLAGS_IME_SUPPORT              (0x00000002)
+#define MPI_IOCPAGE2_CAP_FLAGS_IM_SUPPORT               (0x00000004)
+#define MPI_IOCPAGE2_CAP_FLAGS_SES_SUPPORT              (0x20000000)
+#define MPI_IOCPAGE2_CAP_FLAGS_SAFTE_SUPPORT            (0x40000000)
+#define MPI_IOCPAGE2_CAP_FLAGS_CROSS_CHANNEL_SUPPORT    (0x80000000)
+
+
+typedef struct _IOC_3_PHYS_DISK
+{
+    U8                          PhysDiskID;             /* 00h */
+    U8                          PhysDiskBus;            /* 01h */
+    U8                          PhysDiskIOC;            /* 02h */
+    U8                          PhysDiskNum;            /* 03h */
+} IOC_3_PHYS_DISK, MPI_POINTER PTR_IOC_3_PHYS_DISK,
+  Ioc3PhysDisk_t, MPI_POINTER pIoc3PhysDisk_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_3_PHYSDISK_MAX
+#define MPI_IOC_PAGE_3_PHYSDISK_MAX         (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_3
+{
+    fCONFIG_PAGE_HEADER          Header;                                /* 00h */
+    U8                          NumPhysDisks;                          /* 04h */
+    U8                          Reserved1;                             /* 05h */
+    U16                         Reserved2;                             /* 06h */
+    IOC_3_PHYS_DISK             PhysDisk[MPI_IOC_PAGE_3_PHYSDISK_MAX]; /* 08h */
+} fCONFIG_PAGE_IOC_3, MPI_POINTER PTR_CONFIG_PAGE_IOC_3,
+  IOCPage3_t, MPI_POINTER pIOCPage3_t;
+
+#define MPI_IOCPAGE3_PAGEVERSION                        (0x00)
+
+
+typedef struct _IOC_4_SEP
+{
+    U8                          SEPTargetID;            /* 00h */
+    U8                          SEPBus;                 /* 01h */
+    U16                         Reserved;               /* 02h */
+} IOC_4_SEP, MPI_POINTER PTR_IOC_4_SEP,
+  Ioc4Sep_t, MPI_POINTER pIoc4Sep_t;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_4_SEP_MAX
+#define MPI_IOC_PAGE_4_SEP_MAX              (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_4
+{
+    fCONFIG_PAGE_HEADER          Header;                         /* 00h */
+    U8                          ActiveSEP;                      /* 04h */
+    U8                          MaxSEP;                         /* 05h */
+    U16                         Reserved1;                      /* 06h */
+    IOC_4_SEP                   SEP[MPI_IOC_PAGE_4_SEP_MAX];    /* 08h */
+} fCONFIG_PAGE_IOC_4, MPI_POINTER PTR_CONFIG_PAGE_IOC_4,
+  IOCPage4_t, MPI_POINTER pIOCPage4_t;
+
+#define MPI_IOCPAGE4_PAGEVERSION                        (0x00)
+
+
+typedef struct _IOC_5_HOT_SPARE
+{
+    U8                          PhysDiskNum;            /* 00h */
+    U8                          Reserved;               /* 01h */
+    U8                          HotSparePool;           /* 02h */
+    U8                          Flags;                  /* 03h */
+} IOC_5_HOT_SPARE, MPI_POINTER PTR_IOC_5_HOT_SPARE,
+  Ioc5HotSpare_t, MPI_POINTER pIoc5HotSpare_t;
+
+/* IOC Page 5 HotSpare Flags */
+#define MPI_IOC_PAGE_5_HOT_SPARE_ACTIVE                 (0x01)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_IOC_PAGE_5_HOT_SPARE_MAX
+#define MPI_IOC_PAGE_5_HOT_SPARE_MAX        (1)
+#endif
+
+typedef struct _CONFIG_PAGE_IOC_5
+{
+    fCONFIG_PAGE_HEADER          Header;                         /* 00h */
+    U32                         Reserved1;                      /* 04h */
+    U8                          NumHotSpares;                   /* 08h */
+    U8                          Reserved2;                      /* 09h */
+    U16                         Reserved3;                      /* 0Ah */
+    IOC_5_HOT_SPARE             HotSpare[MPI_IOC_PAGE_5_HOT_SPARE_MAX]; /* 0Ch */
+} fCONFIG_PAGE_IOC_5, MPI_POINTER PTR_CONFIG_PAGE_IOC_5,
+  IOCPage5_t, MPI_POINTER pIOCPage5_t;
+
+#define MPI_IOCPAGE5_PAGEVERSION                        (0x00)
+
+
+/****************************************************************************
+*   BIOS Port Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_BIOS_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     BiosOptions;                /* 04h */
+    U32                     IOCSettings;                /* 08h */
+    U32                     Reserved1;                  /* 0Ch */
+    U32                     DeviceSettings;             /* 10h */
+    U16                     NumberOfDevices;            /* 14h */
+    U16                     Reserved2;                  /* 16h */
+    U16                     IOTimeoutBlockDevicesNonRM; /* 18h */
+    U16                     IOTimeoutSequential;        /* 1Ah */
+    U16                     IOTimeoutOther;             /* 1Ch */
+    U16                     IOTimeoutBlockDevicesRM;    /* 1Eh */
+} fCONFIG_PAGE_BIOS_1, MPI_POINTER PTR_CONFIG_PAGE_BIOS_1,
+  BIOSPage1_t, MPI_POINTER pBIOSPage1_t;
+
+#define MPI_BIOSPAGE1_PAGEVERSION                       (0x00)
+
+/* values for the BiosOptions field */
+#define MPI_BIOSPAGE1_OPTIONS_SPI_ENABLE                (0x00000400)
+#define MPI_BIOSPAGE1_OPTIONS_FC_ENABLE                 (0x00000200)
+#define MPI_BIOSPAGE1_OPTIONS_SAS_ENABLE                (0x00000100)
+#define MPI_BIOSPAGE1_OPTIONS_DISABLE_BIOS              (0x00000001)
+
+/* values for the IOCSettings field */
+#define MPI_BIOSPAGE1_IOCSET_MASK_SPINUP_DELAY          (0x00000F00)
+#define MPI_BIOSPAGE1_IOCSET_SHIFT_SPINUP_DELAY         (8)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_RM_SETTING            (0x000000C0)
+#define MPI_BIOSPAGE1_IOCSET_NONE_RM_SETTING            (0x00000000)
+#define MPI_BIOSPAGE1_IOCSET_BOOT_RM_SETTING            (0x00000040)
+#define MPI_BIOSPAGE1_IOCSET_MEDIA_RM_SETTING           (0x00000080)
+
+#define MPI_BIOSPAGE1_IOCSET_MASK_ADAPTER_SUPPORT       (0x00000030)
+#define MPI_BIOSPAGE1_IOCSET_NO_SUPPORT                 (0x00000000)
+#define MPI_BIOSPAGE1_IOCSET_BIOS_SUPPORT               (0x00000010)
+#define MPI_BIOSPAGE1_IOCSET_OS_SUPPORT                 (0x00000020)
+#define MPI_BIOSPAGE1_IOCSET_ALL_SUPPORT                (0x00000030)
+
+#define MPI_BIOSPAGE1_IOCSET_ALTERNATE_CHS              (0x00000008)
+
+/* values for the DeviceSettings field */
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_SEQ_LUN            (0x00000008)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_RM_LUN             (0x00000004)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_NON_RM_LUN         (0x00000002)
+#define MPI_BIOSPAGE1_DEVSET_DISABLE_OTHER_LUN          (0x00000001)
+
+
+/****************************************************************************
+*   SCSI Port Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Capabilities;               /* 04h */
+    U32                     PhysicalInterface;          /* 08h */
+} fCONFIG_PAGE_SCSI_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_0,
+  SCSIPortPage0_t, MPI_POINTER pSCSIPortPage0_t;
+
+#define MPI_SCSIPORTPAGE0_PAGEVERSION                   (0x01)
+
+#define MPI_SCSIPORTPAGE0_CAP_IU                        (0x00000001)
+#define MPI_SCSIPORTPAGE0_CAP_DT                        (0x00000002)
+#define MPI_SCSIPORTPAGE0_CAP_QAS                       (0x00000004)
+#define MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK      (0x0000FF00)
+#define MPI_SCSIPORTPAGE0_SYNC_ASYNC                    (0x00)
+#define MPI_SCSIPORTPAGE0_SYNC_5                        (0x32)
+#define MPI_SCSIPORTPAGE0_SYNC_10                       (0x19)
+#define MPI_SCSIPORTPAGE0_SYNC_20                       (0x0C)
+#define MPI_SCSIPORTPAGE0_SYNC_33_33                    (0x0B)
+#define MPI_SCSIPORTPAGE0_SYNC_40                       (0x0A)
+#define MPI_SCSIPORTPAGE0_SYNC_80                       (0x09)
+#define MPI_SCSIPORTPAGE0_SYNC_160                      (0x08)
+#define MPI_SCSIPORTPAGE0_SYNC_UNKNOWN                  (0xFF)
+
+#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD     (8)
+#define MPI_SCSIPORTPAGE0_CAP_GET_MIN_SYNC_PERIOD(Cap)      \
+    (  ((Cap) & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK) \
+    >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MIN_SYNC_PERIOD          \
+    )
+#define MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK      (0x00FF0000)
+#define MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET     (16)
+#define MPI_SCSIPORTPAGE0_CAP_GET_MAX_SYNC_OFFSET(Cap)      \
+    (  ((Cap) & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK) \
+    >> MPI_SCSIPORTPAGE0_CAP_SHIFT_MAX_SYNC_OFFSET          \
+    )
+#define MPI_SCSIPORTPAGE0_CAP_WIDE                      (0x20000000)
+#define MPI_SCSIPORTPAGE0_CAP_AIP                       (0x80000000)
+
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK          (0x00000003)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD                (0x01)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE                 (0x02)
+#define MPI_SCSIPORTPAGE0_PHY_SIGNAL_LVD                (0x03)
+#define MPI_SCSIPORTPAGE0_PHY_MASK_CONNECTED_ID         (0xFF000000)
+#define MPI_SCSIPORTPAGE0_PHY_SHIFT_CONNECTED_ID        (24)
+#define MPI_SCSIPORTPAGE0_PHY_BUS_FREE_CONNECTED_ID     (0xFE)
+#define MPI_SCSIPORTPAGE0_PHY_UNKNOWN_CONNECTED_ID      (0xFF)
+
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Configuration;              /* 04h */
+    U32                     OnBusTimerValue;            /* 08h */
+    U8                      TargetConfig;               /* 0Ch */
+    U8                      Reserved1;                  /* 0Dh */
+    U16                     IDConfig;                   /* 0Eh */
+} fCONFIG_PAGE_SCSI_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_1,
+  SCSIPortPage1_t, MPI_POINTER pSCSIPortPage1_t;
+
+#define MPI_SCSIPORTPAGE1_PAGEVERSION                   (0x03)
+
+/* Configuration values */
+#define MPI_SCSIPORTPAGE1_CFG_PORT_SCSI_ID_MASK         (0x000000FF)
+#define MPI_SCSIPORTPAGE1_CFG_PORT_RESPONSE_ID_MASK     (0xFFFF0000)
+#define MPI_SCSIPORTPAGE1_CFG_SHIFT_PORT_RESPONSE_ID    (16)
+
+/* TargetConfig values */
+#define MPI_SCSIPORTPAGE1_TARGCONFIG_TARG_ONLY        (0x01)
+#define MPI_SCSIPORTPAGE1_TARGCONFIG_INIT_TARG        (0x02)
+
+
+typedef struct _MPI_DEVICE_INFO
+{
+    U8      Timeout;                                    /* 00h */
+    U8      SyncFactor;                                 /* 01h */
+    U16     DeviceFlags;                                /* 02h */
+} MPI_DEVICE_INFO, MPI_POINTER PTR_MPI_DEVICE_INFO,
+  MpiDeviceInfo_t, MPI_POINTER pMpiDeviceInfo_t;
+
+typedef struct _CONFIG_PAGE_SCSI_PORT_2
+{
+    fCONFIG_PAGE_HEADER  Header;                         /* 00h */
+    U32                 PortFlags;                      /* 04h */
+    U32                 PortSettings;                   /* 08h */
+    MPI_DEVICE_INFO     DeviceSettings[16];             /* 0Ch */
+} fCONFIG_PAGE_SCSI_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_PORT_2,
+  SCSIPortPage2_t, MPI_POINTER pSCSIPortPage2_t;
+
+#define MPI_SCSIPORTPAGE2_PAGEVERSION                       (0x02)
+
+/* PortFlags values */
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_SCAN_HIGH_TO_LOW       (0x00000001)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_AVOID_SCSI_RESET       (0x00000004)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_ALTERNATE_CHS          (0x00000008)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_TERMINATION_DISABLE    (0x00000010)
+
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK                (0x00000060)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_FULL_DV                (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY          (0x00000020)
+#define MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV                 (0x00000060)
+
+
+/* PortSettings values */
+#define MPI_SCSIPORTPAGE2_PORT_HOST_ID_MASK                 (0x0000000F)
+#define MPI_SCSIPORTPAGE2_PORT_MASK_INIT_HBA                (0x00000030)
+#define MPI_SCSIPORTPAGE2_PORT_DISABLE_INIT_HBA             (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_BIOS_INIT_HBA                (0x00000010)
+#define MPI_SCSIPORTPAGE2_PORT_OS_INIT_HBA                  (0x00000020)
+#define MPI_SCSIPORTPAGE2_PORT_BIOS_OS_INIT_HBA             (0x00000030)
+#define MPI_SCSIPORTPAGE2_PORT_REMOVABLE_MEDIA              (0x000000C0)
+#define MPI_SCSIPORTPAGE2_PORT_RM_NONE                      (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_RM_BOOT_ONLY                 (0x00000040)
+#define MPI_SCSIPORTPAGE2_PORT_RM_WITH_MEDIA                (0x00000080)
+#define MPI_SCSIPORTPAGE2_PORT_SPINUP_DELAY_MASK            (0x00000F00)
+#define MPI_SCSIPORTPAGE2_PORT_SHIFT_SPINUP_DELAY           (8)
+#define MPI_SCSIPORTPAGE2_PORT_MASK_NEGO_MASTER_SETTINGS    (0x00003000)
+#define MPI_SCSIPORTPAGE2_PORT_NEGO_MASTER_SETTINGS         (0x00000000)
+#define MPI_SCSIPORTPAGE2_PORT_NONE_MASTER_SETTINGS         (0x00001000)
+#define MPI_SCSIPORTPAGE2_PORT_ALL_MASTER_SETTINGS          (0x00003000)
+
+#define MPI_SCSIPORTPAGE2_DEVICE_DISCONNECT_ENABLE          (0x0001)
+#define MPI_SCSIPORTPAGE2_DEVICE_ID_SCAN_ENABLE             (0x0002)
+#define MPI_SCSIPORTPAGE2_DEVICE_LUN_SCAN_ENABLE            (0x0004)
+#define MPI_SCSIPORTPAGE2_DEVICE_TAG_QUEUE_ENABLE           (0x0008)
+#define MPI_SCSIPORTPAGE2_DEVICE_WIDE_DISABLE               (0x0010)
+#define MPI_SCSIPORTPAGE2_DEVICE_BOOT_CHOICE                (0x0020)
+
+
+/****************************************************************************
+*   SCSI Target Device Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     NegotiatedParameters;       /* 04h */
+    U32                     Information;                /* 08h */
+} fCONFIG_PAGE_SCSI_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_0,
+  SCSIDevicePage0_t, MPI_POINTER pSCSIDevicePage0_t;
+
+#define MPI_SCSIDEVPAGE0_PAGEVERSION                    (0x03)
+
+#define MPI_SCSIDEVPAGE0_NP_IU                          (0x00000001)
+#define MPI_SCSIDEVPAGE0_NP_DT                          (0x00000002)
+#define MPI_SCSIDEVPAGE0_NP_QAS                         (0x00000004)
+#define MPI_SCSIDEVPAGE0_NP_HOLD_MCS                    (0x00000008)
+#define MPI_SCSIDEVPAGE0_NP_WR_FLOW                     (0x00000010)
+#define MPI_SCSIDEVPAGE0_NP_RD_STRM                     (0x00000020)
+#define MPI_SCSIDEVPAGE0_NP_RTI                         (0x00000040)
+#define MPI_SCSIDEVPAGE0_NP_PCOMP_EN                    (0x00000080)
+#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK        (0x0000FF00)
+#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_PERIOD           (8)
+#define MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK        (0x00FF0000)
+#define MPI_SCSIDEVPAGE0_NP_SHIFT_SYNC_OFFSET           (16)
+#define MPI_SCSIDEVPAGE0_NP_WIDE                        (0x20000000)
+#define MPI_SCSIDEVPAGE0_NP_AIP                         (0x80000000)
+
+#define MPI_SCSIDEVPAGE0_INFO_PARAMS_NEGOTIATED         (0x00000001)
+#define MPI_SCSIDEVPAGE0_INFO_SDTR_REJECTED             (0x00000002)
+#define MPI_SCSIDEVPAGE0_INFO_WDTR_REJECTED             (0x00000004)
+#define MPI_SCSIDEVPAGE0_INFO_PPR_REJECTED              (0x00000008)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     RequestedParameters;        /* 04h */
+    U32                     Reserved;                   /* 08h */
+    U32                     Configuration;              /* 0Ch */
+} fCONFIG_PAGE_SCSI_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_1,
+  SCSIDevicePage1_t, MPI_POINTER pSCSIDevicePage1_t;
+
+#define MPI_SCSIDEVPAGE1_PAGEVERSION                    (0x04)
+
+#define MPI_SCSIDEVPAGE1_RP_IU                          (0x00000001)
+#define MPI_SCSIDEVPAGE1_RP_DT                          (0x00000002)
+#define MPI_SCSIDEVPAGE1_RP_QAS                         (0x00000004)
+#define MPI_SCSIDEVPAGE1_RP_HOLD_MCS                    (0x00000008)
+#define MPI_SCSIDEVPAGE1_RP_WR_FLOW                     (0x00000010)
+#define MPI_SCSIDEVPAGE1_RP_RD_STRM                     (0x00000020)
+#define MPI_SCSIDEVPAGE1_RP_RTI                         (0x00000040)
+#define MPI_SCSIDEVPAGE1_RP_PCOMP_EN                    (0x00000080)
+#define MPI_SCSIDEVPAGE1_RP_MIN_SYNC_PERIOD_MASK        (0x0000FF00)
+#define MPI_SCSIDEVPAGE1_RP_SHIFT_MIN_SYNC_PERIOD       (8)
+#define MPI_SCSIDEVPAGE1_RP_MAX_SYNC_OFFSET_MASK        (0x00FF0000)
+#define MPI_SCSIDEVPAGE1_RP_SHIFT_MAX_SYNC_OFFSET       (16)
+#define MPI_SCSIDEVPAGE1_RP_WIDE                        (0x20000000)
+#define MPI_SCSIDEVPAGE1_RP_AIP                         (0x80000000)
+
+#define MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED           (0x00000002)
+#define MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED           (0x00000004)
+#define MPI_SCSIDEVPAGE1_CONF_EXTENDED_PARAMS_ENABLE    (0x00000008)
+#define MPI_SCSIDEVPAGE1_CONF_FORCE_PPR_MSG             (0x00000010)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_2
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     DomainValidation;           /* 04h */
+    U32                     ParityPipeSelect;           /* 08h */
+    U32                     DataPipeSelect;             /* 0Ch */
+} fCONFIG_PAGE_SCSI_DEVICE_2, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_2,
+  SCSIDevicePage2_t, MPI_POINTER pSCSIDevicePage2_t;
+
+#define MPI_SCSIDEVPAGE2_PAGEVERSION                    (0x01)
+
+#define MPI_SCSIDEVPAGE2_DV_ISI_ENABLE                  (0x00000010)
+#define MPI_SCSIDEVPAGE2_DV_SECONDARY_DRIVER_ENABLE     (0x00000020)
+#define MPI_SCSIDEVPAGE2_DV_SLEW_RATE_CTRL              (0x00000380)
+#define MPI_SCSIDEVPAGE2_DV_PRIM_DRIVE_STR_CTRL         (0x00001C00)
+#define MPI_SCSIDEVPAGE2_DV_SECOND_DRIVE_STR_CTRL       (0x0000E000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKH_ST                    (0x10000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKS_ST                    (0x20000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKH_DT                    (0x40000000)
+#define MPI_SCSIDEVPAGE2_DV_XCLKS_DT                    (0x80000000)
+
+#define MPI_SCSIDEVPAGE2_PPS_PPS_MASK                   (0x00000003)
+
+#define MPI_SCSIDEVPAGE2_DPS_BIT_0_PL_SELECT_MASK       (0x00000003)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_1_PL_SELECT_MASK       (0x0000000C)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_2_PL_SELECT_MASK       (0x00000030)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_3_PL_SELECT_MASK       (0x000000C0)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_4_PL_SELECT_MASK       (0x00000300)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_5_PL_SELECT_MASK       (0x00000C00)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_6_PL_SELECT_MASK       (0x00003000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_7_PL_SELECT_MASK       (0x0000C000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_8_PL_SELECT_MASK       (0x00030000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_9_PL_SELECT_MASK       (0x000C0000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_10_PL_SELECT_MASK      (0x00300000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_11_PL_SELECT_MASK      (0x00C00000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_12_PL_SELECT_MASK      (0x03000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_13_PL_SELECT_MASK      (0x0C000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_14_PL_SELECT_MASK      (0x30000000)
+#define MPI_SCSIDEVPAGE2_DPS_BIT_15_PL_SELECT_MASK      (0xC0000000)
+
+
+typedef struct _CONFIG_PAGE_SCSI_DEVICE_3
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U16                     MsgRejectCount;             /* 04h */
+    U16                     PhaseErrorCount;            /* 06h */
+    U16                     ParityErrorCount;           /* 08h */
+    U16                     Reserved;                   /* 0Ah */
+} fCONFIG_PAGE_SCSI_DEVICE_3, MPI_POINTER PTR_CONFIG_PAGE_SCSI_DEVICE_3,
+  SCSIDevicePage3_t, MPI_POINTER pSCSIDevicePage3_t;
+
+#define MPI_SCSIDEVPAGE3_PAGEVERSION                    (0x00)
+
+#define MPI_SCSIDEVPAGE3_MAX_COUNTER                    (0xFFFE)
+#define MPI_SCSIDEVPAGE3_UNSUPPORTED_COUNTER            (0xFFFF)
+
+
+/****************************************************************************
+*   FC Port Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_FC_PORT_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Flags;                      /* 04h */
+    U8                      MPIPortNumber;              /* 08h */
+    U8                      LinkType;                   /* 09h */
+    U8                      PortState;                  /* 0Ah */
+    U8                      Reserved;                   /* 0Bh */
+    U32                     PortIdentifier;             /* 0Ch */
+    U64                     WWNN;                       /* 10h */
+    U64                     WWPN;                       /* 18h */
+    U32                     SupportedServiceClass;      /* 20h */
+    U32                     SupportedSpeeds;            /* 24h */
+    U32                     CurrentSpeed;               /* 28h */
+    U32                     MaxFrameSize;               /* 2Ch */
+    U64                     FabricWWNN;                 /* 30h */
+    U64                     FabricWWPN;                 /* 38h */
+    U32                     DiscoveredPortsCount;       /* 40h */
+    U32                     MaxInitiators;              /* 44h */
+    U8                      MaxAliasesSupported;        /* 48h */
+    U8                      MaxHardAliasesSupported;    /* 49h */
+    U8                      NumCurrentAliases;          /* 4Ah */
+    U8                      Reserved1;                  /* 4Bh */
+} fCONFIG_PAGE_FC_PORT_0, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_0,
+  FCPortPage0_t, MPI_POINTER pFCPortPage0_t;
+
+#define MPI_FCPORTPAGE0_PAGEVERSION                     (0x02)
+
+#define MPI_FCPORTPAGE0_FLAGS_PROT_MASK                 (0x0000000F)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_INIT             (MPI_PORTFACTS_PROTOCOL_INITIATOR)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_FCP_TARG             (MPI_PORTFACTS_PROTOCOL_TARGET)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_LAN                  (MPI_PORTFACTS_PROTOCOL_LAN)
+#define MPI_FCPORTPAGE0_FLAGS_PROT_LOGBUSADDR           (MPI_PORTFACTS_PROTOCOL_LOGBUSADDR)
+
+#define MPI_FCPORTPAGE0_FLAGS_ALIAS_ALPA_SUPPORTED      (0x00000010)
+#define MPI_FCPORTPAGE0_FLAGS_ALIAS_WWN_SUPPORTED       (0x00000020)
+#define MPI_FCPORTPAGE0_FLAGS_FABRIC_WWN_VALID          (0x00000040)
+
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_TYPE_MASK          (0x00000F00)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_NO_INIT            (0x00000000)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_POINT_TO_POINT     (0x00000100)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PRIVATE_LOOP       (0x00000200)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_FABRIC_DIRECT      (0x00000400)
+#define MPI_FCPORTPAGE0_FLAGS_ATTACH_PUBLIC_LOOP        (0x00000800)
+
+#define MPI_FCPORTPAGE0_LTYPE_RESERVED                  (0x00)
+#define MPI_FCPORTPAGE0_LTYPE_OTHER                     (0x01)
+#define MPI_FCPORTPAGE0_LTYPE_UNKNOWN                   (0x02)
+#define MPI_FCPORTPAGE0_LTYPE_COPPER                    (0x03)
+#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1300               (0x04)
+#define MPI_FCPORTPAGE0_LTYPE_SINGLE_1500               (0x05)
+#define MPI_FCPORTPAGE0_LTYPE_50_LASER_MULTI            (0x06)
+#define MPI_FCPORTPAGE0_LTYPE_50_LED_MULTI              (0x07)
+#define MPI_FCPORTPAGE0_LTYPE_62_LASER_MULTI            (0x08)
+#define MPI_FCPORTPAGE0_LTYPE_62_LED_MULTI              (0x09)
+#define MPI_FCPORTPAGE0_LTYPE_MULTI_LONG_WAVE           (0x0A)
+#define MPI_FCPORTPAGE0_LTYPE_MULTI_SHORT_WAVE          (0x0B)
+#define MPI_FCPORTPAGE0_LTYPE_LASER_SHORT_WAVE          (0x0C)
+#define MPI_FCPORTPAGE0_LTYPE_LED_SHORT_WAVE            (0x0D)
+#define MPI_FCPORTPAGE0_LTYPE_1300_LONG_WAVE            (0x0E)
+#define MPI_FCPORTPAGE0_LTYPE_1500_LONG_WAVE            (0x0F)
+
+#define MPI_FCPORTPAGE0_PORTSTATE_UNKNOWN               (0x01)      /*(SNIA)HBA_PORTSTATE_UNKNOWN       1 Unknown */
+#define MPI_FCPORTPAGE0_PORTSTATE_ONLINE                (0x02)      /*(SNIA)HBA_PORTSTATE_ONLINE        2 Operational */
+#define MPI_FCPORTPAGE0_PORTSTATE_OFFLINE               (0x03)      /*(SNIA)HBA_PORTSTATE_OFFLINE       3 User Offline */
+#define MPI_FCPORTPAGE0_PORTSTATE_BYPASSED              (0x04)      /*(SNIA)HBA_PORTSTATE_BYPASSED      4 Bypassed */
+#define MPI_FCPORTPAGE0_PORTSTATE_DIAGNOST              (0x05)      /*(SNIA)HBA_PORTSTATE_DIAGNOSTICS   5 In diagnostics mode */
+#define MPI_FCPORTPAGE0_PORTSTATE_LINKDOWN              (0x06)      /*(SNIA)HBA_PORTSTATE_LINKDOWN      6 Link Down */
+#define MPI_FCPORTPAGE0_PORTSTATE_ERROR                 (0x07)      /*(SNIA)HBA_PORTSTATE_ERROR         7 Port Error */
+#define MPI_FCPORTPAGE0_PORTSTATE_LOOPBACK              (0x08)      /*(SNIA)HBA_PORTSTATE_LOOPBACK      8 Loopback */
+
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_1                 (0x00000001)
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_2                 (0x00000002)
+#define MPI_FCPORTPAGE0_SUPPORT_CLASS_3                 (0x00000004)
+
+#define MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN            (0x00000000) /* (SNIA)HBA_PORTSPEED_UNKNOWN 0   Unknown - transceiver incapable of reporting */
+#define MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED             (0x00000001) /* (SNIA)HBA_PORTSPEED_1GBIT   1   1 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED             (0x00000002) /* (SNIA)HBA_PORTSPEED_2GBIT   2   2 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED            (0x00000004) /* (SNIA)HBA_PORTSPEED_10GBIT  4  10 GBit/sec */
+#define MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED             (0x00000008) /* (SNIA)HBA_PORTSPEED_4GBIT   8   4 GBit/sec */
+
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_UKNOWN            MPI_FCPORTPAGE0_SUPPORT_SPEED_UKNOWN
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_1GBIT             MPI_FCPORTPAGE0_SUPPORT_1GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_2GBIT             MPI_FCPORTPAGE0_SUPPORT_2GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_10GBIT            MPI_FCPORTPAGE0_SUPPORT_10GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_4GBIT             MPI_FCPORTPAGE0_SUPPORT_4GBIT_SPEED
+#define MPI_FCPORTPAGE0_CURRENT_SPEED_NOT_NEGOTIATED    (0x00008000)        /* (SNIA)HBA_PORTSPEED_NOT_NEGOTIATED (1<<15) Speed not established */
+
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_1
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Flags;                      /* 04h */
+    U64                     NoSEEPROMWWNN;              /* 08h */
+    U64                     NoSEEPROMWWPN;              /* 10h */
+    U8                      HardALPA;                   /* 18h */
+    U8                      LinkConfig;                 /* 19h */
+    U8                      TopologyConfig;             /* 1Ah */
+    U8                      AltConnector;               /* 1Bh */
+    U8                      NumRequestedAliases;        /* 1Ch */
+    U8                      RR_TOV;                     /* 1Dh */
+    U8                      InitiatorDeviceTimeout;     /* 1Eh */
+    U8                      InitiatorIoPendTimeout;     /* 1Fh */
+} fCONFIG_PAGE_FC_PORT_1, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_1,
+  FCPortPage1_t, MPI_POINTER pFCPortPage1_t;
+
+#define MPI_FCPORTPAGE1_PAGEVERSION                     (0x06)
+
+#define MPI_FCPORTPAGE1_FLAGS_EXT_FCP_STATUS_EN         (0x08000000)
+#define MPI_FCPORTPAGE1_FLAGS_IMMEDIATE_ERROR_REPLY     (0x04000000)
+#define MPI_FCPORTPAGE1_FLAGS_FORCE_USE_NOSEEPROM_WWNS  (0x02000000)
+#define MPI_FCPORTPAGE1_FLAGS_VERBOSE_RESCAN_EVENTS     (0x01000000)
+#define MPI_FCPORTPAGE1_FLAGS_TARGET_MODE_OXID          (0x00800000)
+#define MPI_FCPORTPAGE1_FLAGS_PORT_OFFLINE              (0x00400000)
+#define MPI_FCPORTPAGE1_FLAGS_SOFT_ALPA_FALLBACK        (0x00200000)
+#define MPI_FCPORTPAGE1_FLAGS_MASK_RR_TOV_UNITS         (0x00000070)
+#define MPI_FCPORTPAGE1_FLAGS_SUPPRESS_PROT_REG         (0x00000008)
+#define MPI_FCPORTPAGE1_FLAGS_PLOGI_ON_LOGO             (0x00000004)
+#define MPI_FCPORTPAGE1_FLAGS_MAINTAIN_LOGINS           (0x00000002)
+#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_DID               (0x00000001)
+#define MPI_FCPORTPAGE1_FLAGS_SORT_BY_WWN               (0x00000000)
+
+#define MPI_FCPORTPAGE1_FLAGS_PROT_MASK                 (0xF0000000)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT                (28)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_INIT             ((U32)MPI_PORTFACTS_PROTOCOL_INITIATOR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_FCP_TARG             ((U32)MPI_PORTFACTS_PROTOCOL_TARGET << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_LAN                  ((U32)MPI_PORTFACTS_PROTOCOL_LAN << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+#define MPI_FCPORTPAGE1_FLAGS_PROT_LOGBUSADDR           ((U32)MPI_PORTFACTS_PROTOCOL_LOGBUSADDR << MPI_FCPORTPAGE1_FLAGS_PROT_SHIFT)
+
+#define MPI_FCPORTPAGE1_FLAGS_NONE_RR_TOV_UNITS         (0x00000000)
+#define MPI_FCPORTPAGE1_FLAGS_THOUSANDTH_RR_TOV_UNITS   (0x00000010)
+#define MPI_FCPORTPAGE1_FLAGS_TENTH_RR_TOV_UNITS        (0x00000030)
+#define MPI_FCPORTPAGE1_FLAGS_TEN_RR_TOV_UNITS          (0x00000050)
+
+#define MPI_FCPORTPAGE1_HARD_ALPA_NOT_USED              (0xFF)
+
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_MASK              (0x0F)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_1GIG              (0x00)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_2GIG              (0x01)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_4GIG              (0x02)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_10GIG             (0x03)
+#define MPI_FCPORTPAGE1_LCONFIG_SPEED_AUTO              (0x0F)
+
+#define MPI_FCPORTPAGE1_TOPOLOGY_MASK                   (0x0F)
+#define MPI_FCPORTPAGE1_TOPOLOGY_NLPORT                 (0x01)
+#define MPI_FCPORTPAGE1_TOPOLOGY_NPORT                  (0x02)
+#define MPI_FCPORTPAGE1_TOPOLOGY_AUTO                   (0x0F)
+
+#define MPI_FCPORTPAGE1_ALT_CONN_UNKNOWN                (0x00)
+
+#define MPI_FCPORTPAGE1_INITIATOR_DEV_TIMEOUT_MASK      (0x7F)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_2
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U8                      NumberActive;               /* 04h */
+    U8                      ALPA[127];                  /* 05h */
+} fCONFIG_PAGE_FC_PORT_2, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_2,
+  FCPortPage2_t, MPI_POINTER pFCPortPage2_t;
+
+#define MPI_FCPORTPAGE2_PAGEVERSION                     (0x01)
+
+
+typedef struct _WWN_FORMAT
+{
+    U64                     WWNN;                       /* 00h */
+    U64                     WWPN;                       /* 08h */
+} WWN_FORMAT, MPI_POINTER PTR_WWN_FORMAT,
+  WWNFormat, MPI_POINTER pWWNFormat;
+
+typedef union _FC_PORT_PERSISTENT_PHYSICAL_ID
+{
+    WWN_FORMAT              WWN;
+    U32                     Did;
+} FC_PORT_PERSISTENT_PHYSICAL_ID, MPI_POINTER PTR_FC_PORT_PERSISTENT_PHYSICAL_ID,
+  PersistentPhysicalId_t, MPI_POINTER pPersistentPhysicalId_t;
+
+typedef struct _FC_PORT_PERSISTENT
+{
+    FC_PORT_PERSISTENT_PHYSICAL_ID  PhysicalIdentifier; /* 00h */
+    U8                              TargetID;           /* 10h */
+    U8                              Bus;                /* 11h */
+    U16                             Flags;              /* 12h */
+} FC_PORT_PERSISTENT, MPI_POINTER PTR_FC_PORT_PERSISTENT,
+  PersistentData_t, MPI_POINTER pPersistentData_t;
+
+#define MPI_PERSISTENT_FLAGS_SHIFT                      (16)
+#define MPI_PERSISTENT_FLAGS_ENTRY_VALID                (0x0001)
+#define MPI_PERSISTENT_FLAGS_SCAN_ID                    (0x0002)
+#define MPI_PERSISTENT_FLAGS_SCAN_LUNS                  (0x0004)
+#define MPI_PERSISTENT_FLAGS_BOOT_DEVICE                (0x0008)
+#define MPI_PERSISTENT_FLAGS_BY_DID                     (0x0080)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_FC_PORT_PAGE_3_ENTRY_MAX
+#define MPI_FC_PORT_PAGE_3_ENTRY_MAX        (1)
+#endif
+
+typedef struct _CONFIG_PAGE_FC_PORT_3
+{
+    fCONFIG_PAGE_HEADER      Header;                                 /* 00h */
+    FC_PORT_PERSISTENT      Entry[MPI_FC_PORT_PAGE_3_ENTRY_MAX];    /* 04h */
+} fCONFIG_PAGE_FC_PORT_3, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_3,
+  FCPortPage3_t, MPI_POINTER pFCPortPage3_t;
+
+#define MPI_FCPORTPAGE3_PAGEVERSION                     (0x01)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_4
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     PortFlags;                  /* 04h */
+    U32                     PortSettings;               /* 08h */
+} fCONFIG_PAGE_FC_PORT_4, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_4,
+  FCPortPage4_t, MPI_POINTER pFCPortPage4_t;
+
+#define MPI_FCPORTPAGE4_PAGEVERSION                     (0x00)
+
+#define MPI_FCPORTPAGE4_PORT_FLAGS_ALTERNATE_CHS        (0x00000008)
+
+#define MPI_FCPORTPAGE4_PORT_MASK_INIT_HBA              (0x00000030)
+#define MPI_FCPORTPAGE4_PORT_DISABLE_INIT_HBA           (0x00000000)
+#define MPI_FCPORTPAGE4_PORT_BIOS_INIT_HBA              (0x00000010)
+#define MPI_FCPORTPAGE4_PORT_OS_INIT_HBA                (0x00000020)
+#define MPI_FCPORTPAGE4_PORT_BIOS_OS_INIT_HBA           (0x00000030)
+#define MPI_FCPORTPAGE4_PORT_REMOVABLE_MEDIA            (0x000000C0)
+#define MPI_FCPORTPAGE4_PORT_SPINUP_DELAY_MASK          (0x00000F00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_5_ALIAS_INFO
+{
+    U8      Flags;                                      /* 00h */
+    U8      AliasAlpa;                                  /* 01h */
+    U16     Reserved;                                   /* 02h */
+    U64     AliasWWNN;                                  /* 04h */
+    U64     AliasWWPN;                                  /* 0Ch */
+} fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
+  MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5_ALIAS_INFO,
+  FcPortPage5AliasInfo_t, MPI_POINTER pFcPortPage5AliasInfo_t;
+
+typedef struct _CONFIG_PAGE_FC_PORT_5
+{
+    fCONFIG_PAGE_HEADER                  Header;         /* 00h */
+    fCONFIG_PAGE_FC_PORT_5_ALIAS_INFO    AliasInfo;      /* 04h */
+} fCONFIG_PAGE_FC_PORT_5, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_5,
+  FCPortPage5_t, MPI_POINTER pFCPortPage5_t;
+
+#define MPI_FCPORTPAGE5_PAGEVERSION                     (0x02)
+
+#define MPI_FCPORTPAGE5_FLAGS_ALPA_ACQUIRED             (0x01)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_ALPA                 (0x02)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_WWNN                 (0x04)
+#define MPI_FCPORTPAGE5_FLAGS_HARD_WWPN                 (0x08)
+#define MPI_FCPORTPAGE5_FLAGS_DISABLE                   (0x10)
+
+typedef struct _CONFIG_PAGE_FC_PORT_6
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Reserved;                   /* 04h */
+    U64                     TimeSinceReset;             /* 08h */
+    U64                     TxFrames;                   /* 10h */
+    U64                     RxFrames;                   /* 18h */
+    U64                     TxWords;                    /* 20h */
+    U64                     RxWords;                    /* 28h */
+    U64                     LipCount;                   /* 30h */
+    U64                     NosCount;                   /* 38h */
+    U64                     ErrorFrames;                /* 40h */
+    U64                     DumpedFrames;               /* 48h */
+    U64                     LinkFailureCount;           /* 50h */
+    U64                     LossOfSyncCount;            /* 58h */
+    U64                     LossOfSignalCount;          /* 60h */
+    U64                     PrimativeSeqErrCount;       /* 68h */
+    U64                     InvalidTxWordCount;         /* 70h */
+    U64                     InvalidCrcCount;            /* 78h */
+    U64                     FcpInitiatorIoCount;        /* 80h */
+} fCONFIG_PAGE_FC_PORT_6, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_6,
+  FCPortPage6_t, MPI_POINTER pFCPortPage6_t;
+
+#define MPI_FCPORTPAGE6_PAGEVERSION                     (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_7
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Reserved;                   /* 04h */
+    U8                      PortSymbolicName[256];      /* 08h */
+} fCONFIG_PAGE_FC_PORT_7, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_7,
+  FCPortPage7_t, MPI_POINTER pFCPortPage7_t;
+
+#define MPI_FCPORTPAGE7_PAGEVERSION                     (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_8
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     BitVector[8];               /* 04h */
+} fCONFIG_PAGE_FC_PORT_8, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_8,
+  FCPortPage8_t, MPI_POINTER pFCPortPage8_t;
+
+#define MPI_FCPORTPAGE8_PAGEVERSION                     (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_9
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U32                     Reserved;                   /* 04h */
+    U64                     GlobalWWPN;                 /* 08h */
+    U64                     GlobalWWNN;                 /* 10h */
+    U32                     UnitType;                   /* 18h */
+    U32                     PhysicalPortNumber;         /* 1Ch */
+    U32                     NumAttachedNodes;           /* 20h */
+    U16                     IPVersion;                  /* 24h */
+    U16                     UDPPortNumber;              /* 26h */
+    U8                      IPAddress[16];              /* 28h */
+    U16                     Reserved1;                  /* 38h */
+    U16                     TopologyDiscoveryFlags;     /* 3Ah */
+} fCONFIG_PAGE_FC_PORT_9, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_9,
+  FCPortPage9_t, MPI_POINTER pFCPortPage9_t;
+
+#define MPI_FCPORTPAGE9_PAGEVERSION                     (0x00)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA
+{
+    U8                      Id;                         /* 10h */
+    U8                      ExtId;                      /* 11h */
+    U8                      Connector;                  /* 12h */
+    U8                      Transceiver[8];             /* 13h */
+    U8                      Encoding;                   /* 1Bh */
+    U8                      BitRate_100mbs;             /* 1Ch */
+    U8                      Reserved1;                  /* 1Dh */
+    U8                      Length9u_km;                /* 1Eh */
+    U8                      Length9u_100m;              /* 1Fh */
+    U8                      Length50u_10m;              /* 20h */
+    U8                      Length62p5u_10m;            /* 21h */
+    U8                      LengthCopper_m;             /* 22h */
+    U8                      Reserved2;                  /* 23h */
+    U8                      VendorName[16];             /* 24h */
+    U8                      Reserved3;                  /* 34h */
+    U8                      VendorOUI[3];               /* 35h */
+    U8                      VendorPN[16];               /* 38h */
+    U8                      VendorRev[4];               /* 48h */
+    U16                     Reserved4;                  /* 4Ch */
+    U8                      Reserved5;                  /* 4Eh */
+    U8                      CC_BASE;                    /* 4Fh */
+} fCONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
+  MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA,
+  FCPortPage10BaseSfpData_t, MPI_POINTER pFCPortPage10BaseSfpData_t;
+
+#define MPI_FCPORT10_BASE_ID_UNKNOWN        (0x00)
+#define MPI_FCPORT10_BASE_ID_GBIC           (0x01)
+#define MPI_FCPORT10_BASE_ID_FIXED          (0x02)
+#define MPI_FCPORT10_BASE_ID_SFP            (0x03)
+#define MPI_FCPORT10_BASE_ID_SFP_MIN        (0x04)
+#define MPI_FCPORT10_BASE_ID_SFP_MAX        (0x7F)
+#define MPI_FCPORT10_BASE_ID_VEND_SPEC_MASK (0x80)
+
+#define MPI_FCPORT10_BASE_EXTID_UNKNOWN     (0x00)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF1     (0x01)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF2     (0x02)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF3     (0x03)
+#define MPI_FCPORT10_BASE_EXTID_SEEPROM     (0x04)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF5     (0x05)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF6     (0x06)
+#define MPI_FCPORT10_BASE_EXTID_MODDEF7     (0x07)
+#define MPI_FCPORT10_BASE_EXTID_VNDSPC_MASK (0x80)
+
+#define MPI_FCPORT10_BASE_CONN_UNKNOWN      (0x00)
+#define MPI_FCPORT10_BASE_CONN_SC           (0x01)
+#define MPI_FCPORT10_BASE_CONN_COPPER1      (0x02)
+#define MPI_FCPORT10_BASE_CONN_COPPER2      (0x03)
+#define MPI_FCPORT10_BASE_CONN_BNC_TNC      (0x04)
+#define MPI_FCPORT10_BASE_CONN_COAXIAL      (0x05)
+#define MPI_FCPORT10_BASE_CONN_FIBERJACK    (0x06)
+#define MPI_FCPORT10_BASE_CONN_LC           (0x07)
+#define MPI_FCPORT10_BASE_CONN_MT_RJ        (0x08)
+#define MPI_FCPORT10_BASE_CONN_MU           (0x09)
+#define MPI_FCPORT10_BASE_CONN_SG           (0x0A)
+#define MPI_FCPORT10_BASE_CONN_OPT_PIGT     (0x0B)
+#define MPI_FCPORT10_BASE_CONN_RSV1_MIN     (0x0C)
+#define MPI_FCPORT10_BASE_CONN_RSV1_MAX     (0x1F)
+#define MPI_FCPORT10_BASE_CONN_HSSDC_II     (0x20)
+#define MPI_FCPORT10_BASE_CONN_CPR_PIGT     (0x21)
+#define MPI_FCPORT10_BASE_CONN_RSV2_MIN     (0x22)
+#define MPI_FCPORT10_BASE_CONN_RSV2_MAX     (0x7F)
+#define MPI_FCPORT10_BASE_CONN_VNDSPC_MASK  (0x80)
+
+#define MPI_FCPORT10_BASE_ENCODE_UNSPEC     (0x00)
+#define MPI_FCPORT10_BASE_ENCODE_8B10B      (0x01)
+#define MPI_FCPORT10_BASE_ENCODE_4B5B       (0x02)
+#define MPI_FCPORT10_BASE_ENCODE_NRZ        (0x03)
+#define MPI_FCPORT10_BASE_ENCODE_MANCHESTER (0x04)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA
+{
+    U8                      Options[2];                 /* 50h */
+    U8                      BitRateMax;                 /* 52h */
+    U8                      BitRateMin;                 /* 53h */
+    U8                      VendorSN[16];               /* 54h */
+    U8                      DateCode[8];                /* 64h */
+    U8                      Reserved5[3];               /* 6Ch */
+    U8                      CC_EXT;                     /* 6Fh */
+} fCONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
+  MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA,
+  FCPortPage10ExtendedSfpData_t, MPI_POINTER pFCPortPage10ExtendedSfpData_t;
+
+#define MPI_FCPORT10_EXT_OPTION1_RATESEL    (0x20)
+#define MPI_FCPORT10_EXT_OPTION1_TX_DISABLE (0x10)
+#define MPI_FCPORT10_EXT_OPTION1_TX_FAULT   (0x08)
+#define MPI_FCPORT10_EXT_OPTION1_LOS_INVERT (0x04)
+#define MPI_FCPORT10_EXT_OPTION1_LOS        (0x02)
+
+
+typedef struct _CONFIG_PAGE_FC_PORT_10
+{
+    fCONFIG_PAGE_HEADER                          Header;             /* 00h */
+    U8                                          Flags;              /* 04h */
+    U8                                          Reserved1;          /* 05h */
+    U16                                         Reserved2;          /* 06h */
+    U32                                         HwConfig1;          /* 08h */
+    U32                                         HwConfig2;          /* 0Ch */
+    fCONFIG_PAGE_FC_PORT_10_BASE_SFP_DATA        Base;               /* 10h */
+    fCONFIG_PAGE_FC_PORT_10_EXTENDED_SFP_DATA    Extended;           /* 50h */
+    U8                                          VendorSpecific[32]; /* 70h */
+} fCONFIG_PAGE_FC_PORT_10, MPI_POINTER PTR_CONFIG_PAGE_FC_PORT_10,
+  FCPortPage10_t, MPI_POINTER pFCPortPage10_t;
+
+#define MPI_FCPORTPAGE10_PAGEVERSION                    (0x00)
+
+/* standard MODDEF pin definitions (from GBIC spec.) */
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_MASK              (0x00000007)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF2                  (0x00000001)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF1                  (0x00000002)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF0                  (0x00000004)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_NOGBIC            (0x00000007)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_CPR_IEEE_CX       (0x00000006)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_COPPER            (0x00000005)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_OPTICAL_LW        (0x00000004)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SEEPROM           (0x00000003)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SW_OPTICAL        (0x00000002)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_LX_IEEE_OPT_LW    (0x00000001)
+#define MPI_FCPORTPAGE10_FLAGS_MODDEF_SX_IEEE_OPT_SW    (0x00000000)
+
+#define MPI_FCPORTPAGE10_FLAGS_CC_BASE_OK               (0x00000010)
+#define MPI_FCPORTPAGE10_FLAGS_CC_EXT_OK                (0x00000020)
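+
+/*
+ * Illustrative sketch (not part of the original header): host code decodes
+ * the module-definition pins by masking Flags before comparing against the
+ * MODDEF values above, e.g.
+ *
+ *     U8 moddef = page10->Flags & MPI_FCPORTPAGE10_FLAGS_MODDEF_MASK;
+ *     if (moddef == MPI_FCPORTPAGE10_FLAGS_MODDEF_NOGBIC)
+ *         (handle the no-module case)
+ *
+ * where "page10" is a hypothetical pointer to a fetched fCONFIG_PAGE_FC_PORT_10.
+ */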
+
+
+/****************************************************************************
+*   FC Device Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_FC_DEVICE_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    U64                     WWNN;                       /* 04h */
+    U64                     WWPN;                       /* 0Ch */
+    U32                     PortIdentifier;             /* 14h */
+    U8                      Protocol;                   /* 18h */
+    U8                      Flags;                      /* 19h */
+    U16                     BBCredit;                   /* 1Ah */
+    U16                     MaxRxFrameSize;             /* 1Ch */
+    U8                      ADISCHardALPA;              /* 1Eh */
+    U8                      PortNumber;                 /* 1Fh */
+    U8                      FcPhLowestVersion;          /* 20h */
+    U8                      FcPhHighestVersion;         /* 21h */
+    U8                      CurrentTargetID;            /* 22h */
+    U8                      CurrentBus;                 /* 23h */
+} fCONFIG_PAGE_FC_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_FC_DEVICE_0,
+  FCDevicePage0_t, MPI_POINTER pFCDevicePage0_t;
+
+#define MPI_FC_DEVICE_PAGE0_PAGEVERSION                 (0x03)
+
+#define MPI_FC_DEVICE_PAGE0_FLAGS_TARGETID_BUS_VALID    (0x01)
+#define MPI_FC_DEVICE_PAGE0_FLAGS_PLOGI_INVALID         (0x02)
+#define MPI_FC_DEVICE_PAGE0_FLAGS_PRLI_INVALID          (0x04)
+
+#define MPI_FC_DEVICE_PAGE0_PROT_IP                     (0x01)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_TARGET             (0x02)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_INITIATOR          (0x04)
+#define MPI_FC_DEVICE_PAGE0_PROT_FCP_RETRY              (0x08)
+
+#define MPI_FC_DEVICE_PAGE0_PGAD_PORT_MASK      (MPI_FC_DEVICE_PGAD_PORT_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_MASK      (MPI_FC_DEVICE_PGAD_FORM_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_NEXT_DID  (MPI_FC_DEVICE_PGAD_FORM_NEXT_DID)
+#define MPI_FC_DEVICE_PAGE0_PGAD_FORM_BUS_TID   (MPI_FC_DEVICE_PGAD_FORM_BUS_TID)
+#define MPI_FC_DEVICE_PAGE0_PGAD_DID_MASK       (MPI_FC_DEVICE_PGAD_ND_DID_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_MASK       (MPI_FC_DEVICE_PGAD_BT_BUS_MASK)
+#define MPI_FC_DEVICE_PAGE0_PGAD_BUS_SHIFT      (MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT)
+#define MPI_FC_DEVICE_PAGE0_PGAD_TID_MASK       (MPI_FC_DEVICE_PGAD_BT_TID_MASK)
+
+#define MPI_FC_DEVICE_PAGE0_HARD_ALPA_UNKNOWN   (0xFF)
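+
+/*
+ * Illustrative sketch (not part of the original header): when requesting this
+ * page by bus/target, the config request PageAddress is built with the PGAD
+ * defines above, e.g.
+ *
+ *     page_address = MPI_FC_DEVICE_PGAD_FORM_BUS_TID |
+ *                    (bus << MPI_FC_DEVICE_PGAD_BT_BUS_SHIFT) |
+ *                    (target_id & MPI_FC_DEVICE_PGAD_BT_TID_MASK);
+ *
+ * "bus", "target_id" and "page_address" are hypothetical; the PGAD defines
+ * themselves come from earlier in this header.
+ */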
+
+/****************************************************************************
+*   RAID Volume Config Pages
+****************************************************************************/
+
+typedef struct _RAID_VOL0_PHYS_DISK
+{
+    U16                         Reserved;               /* 00h */
+    U8                          PhysDiskMap;            /* 02h */
+    U8                          PhysDiskNum;            /* 03h */
+} RAID_VOL0_PHYS_DISK, MPI_POINTER PTR_RAID_VOL0_PHYS_DISK,
+  RaidVol0PhysDisk_t, MPI_POINTER pRaidVol0PhysDisk_t;
+
+#define MPI_RAIDVOL0_PHYSDISK_PRIMARY                   (0x01)
+#define MPI_RAIDVOL0_PHYSDISK_SECONDARY                 (0x02)
+
+typedef struct _RAID_VOL0_STATUS
+{
+    U8                          Flags;                  /* 00h */
+    U8                          State;                  /* 01h */
+    U16                         Reserved;               /* 02h */
+} RAID_VOL0_STATUS, MPI_POINTER PTR_RAID_VOL0_STATUS,
+  RaidVol0Status_t, MPI_POINTER pRaidVol0Status_t;
+
+/* RAID Volume Page 0 VolumeStatus defines */
+
+#define MPI_RAIDVOL0_STATUS_FLAG_ENABLED                (0x01)
+#define MPI_RAIDVOL0_STATUS_FLAG_QUIESCED               (0x02)
+#define MPI_RAIDVOL0_STATUS_FLAG_RESYNC_IN_PROGRESS     (0x04)
+#define MPI_RAIDVOL0_STATUS_FLAG_VOLUME_INACTIVE        (0x08)
+
+#define MPI_RAIDVOL0_STATUS_STATE_OPTIMAL               (0x00)
+#define MPI_RAIDVOL0_STATUS_STATE_DEGRADED              (0x01)
+#define MPI_RAIDVOL0_STATUS_STATE_FAILED                (0x02)
+
+typedef struct _RAID_VOL0_SETTINGS
+{
+    U16                         Settings;       /* 00h */
+    U8                          HotSparePool;   /* 01h */ /* MPI_RAID_HOT_SPARE_POOL_ */
+    U8                          Reserved;       /* 02h */
+} RAID_VOL0_SETTINGS, MPI_POINTER PTR_RAID_VOL0_SETTINGS,
+  RaidVol0Settings, MPI_POINTER pRaidVol0Settings;
+
+/* RAID Volume Page 0 VolumeSettings defines */
+
+#define MPI_RAIDVOL0_SETTING_WRITE_CACHING_ENABLE       (0x0001)
+#define MPI_RAIDVOL0_SETTING_OFFLINE_ON_SMART           (0x0002)
+#define MPI_RAIDVOL0_SETTING_AUTO_CONFIGURE             (0x0004)
+#define MPI_RAIDVOL0_SETTING_PRIORITY_RESYNC            (0x0008)
+#define MPI_RAIDVOL0_SETTING_USE_PRODUCT_ID_SUFFIX      (0x0010)
+#define MPI_RAIDVOL0_SETTING_USE_DEFAULTS               (0x8000)
+
+/* RAID Volume Page 0 HotSparePool defines, also used in RAID Physical Disk */
+#define MPI_RAID_HOT_SPARE_POOL_0                       (0x01)
+#define MPI_RAID_HOT_SPARE_POOL_1                       (0x02)
+#define MPI_RAID_HOT_SPARE_POOL_2                       (0x04)
+#define MPI_RAID_HOT_SPARE_POOL_3                       (0x08)
+#define MPI_RAID_HOT_SPARE_POOL_4                       (0x10)
+#define MPI_RAID_HOT_SPARE_POOL_5                       (0x20)
+#define MPI_RAID_HOT_SPARE_POOL_6                       (0x40)
+#define MPI_RAID_HOT_SPARE_POOL_7                       (0x80)
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX
+#define MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX        (1)
+#endif
+
+typedef struct _CONFIG_PAGE_RAID_VOL_0
+{
+    fCONFIG_PAGE_HEADER      Header;         /* 00h */
+    U8                      VolumeID;       /* 04h */
+    U8                      VolumeBus;      /* 05h */
+    U8                      VolumeIOC;      /* 06h */
+    U8                      VolumeType;     /* 07h */ /* MPI_RAID_VOL_TYPE_ */
+    RAID_VOL0_STATUS        VolumeStatus;   /* 08h */
+    RAID_VOL0_SETTINGS      VolumeSettings; /* 0Ch */
+    U32                     MaxLBA;         /* 10h */
+    U32                     Reserved1;      /* 14h */
+    U32                     StripeSize;     /* 18h */
+    U32                     Reserved2;      /* 1Ch */
+    U32                     Reserved3;      /* 20h */
+    U8                      NumPhysDisks;   /* 24h */
+    U8                      Reserved4;      /* 25h */
+    U16                     Reserved5;      /* 26h */
+    RAID_VOL0_PHYS_DISK     PhysDisk[MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX];/* 28h */
+} fCONFIG_PAGE_RAID_VOL_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_VOL_0,
+  RaidVolumePage0_t, MPI_POINTER pRaidVolumePage0_t;
+
+#define MPI_RAIDVOLPAGE0_PAGEVERSION                    (0x01)
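+
+/*
+ * Illustrative sketch (not part of the original header): RAID Volume Page 0
+ * reports its own element count, so host code iterates the trailing array as
+ *
+ *     for (i = 0; i < vol0->NumPhysDisks; i++)
+ *         handle_disk(vol0->PhysDisk[i].PhysDiskNum);
+ *
+ * "vol0" and handle_disk() are hypothetical; the buffer holding the page must
+ * be large enough for NumPhysDisks entries, since the header is compiled with
+ * MPI_RAID_VOL_PAGE_0_PHYSDISK_MAX set to one.
+ */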
+
+
+/****************************************************************************
+*   RAID Physical Disk Config Pages
+****************************************************************************/
+
+typedef struct _RAID_PHYS_DISK0_ERROR_DATA
+{
+    U8                      ErrorCdbByte;               /* 00h */
+    U8                      ErrorSenseKey;              /* 01h */
+    U16                     Reserved;                   /* 02h */
+    U16                     ErrorCount;                 /* 04h */
+    U8                      ErrorASC;                   /* 06h */
+    U8                      ErrorASCQ;                  /* 07h */
+    U16                     SmartCount;                 /* 08h */
+    U8                      SmartASC;                   /* 0Ah */
+    U8                      SmartASCQ;                  /* 0Bh */
+} RAID_PHYS_DISK0_ERROR_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_ERROR_DATA,
+  RaidPhysDisk0ErrorData_t, MPI_POINTER pRaidPhysDisk0ErrorData_t;
+
+typedef struct _RAID_PHYS_DISK0_INQUIRY_DATA
+{
+    U8                          VendorID[8];            /* 00h */
+    U8                          ProductID[16];          /* 08h */
+    U8                          ProductRevLevel[4];     /* 18h */
+    U8                          Info[32];               /* 1Ch */
+} RAID_PHYS_DISK0_INQUIRY_DATA, MPI_POINTER PTR_RAID_PHYS_DISK0_INQUIRY_DATA,
+  RaidPhysDisk0InquiryData, MPI_POINTER pRaidPhysDisk0InquiryData;
+
+typedef struct _RAID_PHYS_DISK0_SETTINGS
+{
+    U8              SepID;              /* 00h */
+    U8              SepBus;             /* 01h */
+    U8              HotSparePool;       /* 02h */ /* MPI_RAID_HOT_SPARE_POOL_ */
+    U8              PhysDiskSettings;   /* 03h */
+} RAID_PHYS_DISK0_SETTINGS, MPI_POINTER PTR_RAID_PHYS_DISK0_SETTINGS,
+  RaidPhysDiskSettings_t, MPI_POINTER pRaidPhysDiskSettings_t;
+
+typedef struct _RAID_PHYS_DISK0_STATUS
+{
+    U8                              Flags;              /* 00h */
+    U8                              State;              /* 01h */
+    U16                             Reserved;           /* 02h */
+} RAID_PHYS_DISK0_STATUS, MPI_POINTER PTR_RAID_PHYS_DISK0_STATUS,
+  RaidPhysDiskStatus_t, MPI_POINTER pRaidPhysDiskStatus_t;
+
+/* RAID Volume 2 IM Physical Disk DiskStatus flags */
+
+#define MPI_PHYSDISK0_STATUS_FLAG_OUT_OF_SYNC           (0x01)
+#define MPI_PHYSDISK0_STATUS_FLAG_QUIESCED              (0x02)
+
+#define MPI_PHYSDISK0_STATUS_ONLINE                     (0x00)
+#define MPI_PHYSDISK0_STATUS_MISSING                    (0x01)
+#define MPI_PHYSDISK0_STATUS_NOT_COMPATIBLE             (0x02)
+#define MPI_PHYSDISK0_STATUS_FAILED                     (0x03)
+#define MPI_PHYSDISK0_STATUS_INITIALIZING               (0x04)
+#define MPI_PHYSDISK0_STATUS_OFFLINE_REQUESTED          (0x05)
+#define MPI_PHYSDISK0_STATUS_FAILED_REQUESTED           (0x06)
+#define MPI_PHYSDISK0_STATUS_OTHER_OFFLINE              (0xFF)
+
+typedef struct _CONFIG_PAGE_RAID_PHYS_DISK_0
+{
+    fCONFIG_PAGE_HEADER              Header;             /* 00h */
+    U8                              PhysDiskID;         /* 04h */
+    U8                              PhysDiskBus;        /* 05h */
+    U8                              PhysDiskIOC;        /* 06h */
+    U8                              PhysDiskNum;        /* 07h */
+    RAID_PHYS_DISK0_SETTINGS        PhysDiskSettings;   /* 08h */
+    U32                             Reserved1;          /* 0Ch */
+    U32                             Reserved2;          /* 10h */
+    U32                             Reserved3;          /* 14h */
+    U8                              DiskIdentifier[16]; /* 18h */
+    RAID_PHYS_DISK0_INQUIRY_DATA    InquiryData;        /* 28h */
+    RAID_PHYS_DISK0_STATUS          PhysDiskStatus;     /* 64h */
+    U32                             MaxLBA;             /* 68h */
+    RAID_PHYS_DISK0_ERROR_DATA      ErrorData;          /* 6Ch */
+} fCONFIG_PAGE_RAID_PHYS_DISK_0, MPI_POINTER PTR_CONFIG_PAGE_RAID_PHYS_DISK_0,
+  RaidPhysDiskPage0_t, MPI_POINTER pRaidPhysDiskPage0_t;
+
+#define MPI_RAIDPHYSDISKPAGE0_PAGEVERSION           (0x00)
+
+
+/****************************************************************************
+*   LAN Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_LAN_0
+{
+    ConfigPageHeader_t      Header;                     /* 00h */
+    U16                     TxRxModes;                  /* 04h */
+    U16                     Reserved;                   /* 06h */
+    U32                     PacketPrePad;               /* 08h */
+} fCONFIG_PAGE_LAN_0, MPI_POINTER PTR_CONFIG_PAGE_LAN_0,
+  LANPage0_t, MPI_POINTER pLANPage0_t;
+
+#define MPI_LAN_PAGE0_PAGEVERSION                       (0x01)
+
+#define MPI_LAN_PAGE0_RETURN_LOOPBACK                   (0x0000)
+#define MPI_LAN_PAGE0_SUPPRESS_LOOPBACK                 (0x0001)
+#define MPI_LAN_PAGE0_LOOPBACK_MASK                     (0x0001)
+
+typedef struct _CONFIG_PAGE_LAN_1
+{
+    ConfigPageHeader_t      Header;                     /* 00h */
+    U16                     Reserved;                   /* 04h */
+    U8                      CurrentDeviceState;         /* 06h */
+    U8                      Reserved1;                  /* 07h */
+    U32                     MinPacketSize;              /* 08h */
+    U32                     MaxPacketSize;              /* 0Ch */
+    U32                     HardwareAddressLow;         /* 10h */
+    U32                     HardwareAddressHigh;        /* 14h */
+    U32                     MaxWireSpeedLow;            /* 18h */
+    U32                     MaxWireSpeedHigh;           /* 1Ch */
+    U32                     BucketsRemaining;           /* 20h */
+    U32                     MaxReplySize;               /* 24h */
+    U32                     NegWireSpeedLow;            /* 28h */
+    U32                     NegWireSpeedHigh;           /* 2Ch */
+} fCONFIG_PAGE_LAN_1, MPI_POINTER PTR_CONFIG_PAGE_LAN_1,
+  LANPage1_t, MPI_POINTER pLANPage1_t;
+
+#define MPI_LAN_PAGE1_PAGEVERSION                       (0x03)
+
+#define MPI_LAN_PAGE1_DEV_STATE_RESET                   (0x00)
+#define MPI_LAN_PAGE1_DEV_STATE_OPERATIONAL             (0x01)
+
+
+/****************************************************************************
+*   Inband Config Pages
+****************************************************************************/
+
+typedef struct _CONFIG_PAGE_INBAND_0
+{
+    fCONFIG_PAGE_HEADER      Header;                     /* 00h */
+    MPI_VERSION_FORMAT      InbandVersion;              /* 04h */
+    U16                     MaximumBuffers;             /* 08h */
+    U16                     Reserved1;                  /* 0Ah */
+} fCONFIG_PAGE_INBAND_0, MPI_POINTER PTR_CONFIG_PAGE_INBAND_0,
+  InbandPage0_t, MPI_POINTER pInbandPage0_t;
+
+#define MPI_INBAND_PAGEVERSION          (0x00)
+
+
+
+/****************************************************************************
+*   SAS IO Unit Config Pages
+****************************************************************************/
+
+typedef struct _MPI_SAS_IO_UNIT0_PHY_DATA
+{
+    U8          Port;                   /* 00h */
+    U8          PortFlags;              /* 01h */
+    U8          PhyFlags;               /* 02h */
+    U8          NegotiatedLinkRate;     /* 03h */
+    U32         ControllerPhyDeviceInfo;/* 04h */
+    U16         AttachedDeviceHandle;   /* 08h */
+    U16         ControllerDevHandle;    /* 0Ah */
+    U32         Reserved2;              /* 0Ch */
+} MPI_SAS_IO_UNIT0_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT0_PHY_DATA,
+  SasIOUnit0PhyData, MPI_POINTER pSasIOUnit0PhyData;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_SAS_IOUNIT0_PHY_MAX
+#define MPI_SAS_IOUNIT0_PHY_MAX         (1)
+#endif
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_0
+{
+    fCONFIG_EXTENDED_PAGE_HEADER     Header;                             /* 00h */
+    U32                             Reserved1;                          /* 08h */
+    U8                              NumPhys;                            /* 0Ch */
+    U8                              Reserved2;                          /* 0Dh */
+    U16                             Reserved3;                          /* 0Eh */
+    MPI_SAS_IO_UNIT0_PHY_DATA       PhyData[MPI_SAS_IOUNIT0_PHY_MAX];   /* 10h */
+} fCONFIG_PAGE_SAS_IO_UNIT_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_0,
+  SasIOUnitPage0_t, MPI_POINTER pSasIOUnitPage0_t;
+
+#define MPI_SASIOUNITPAGE0_PAGEVERSION      (0x00)
+
+/* values for SAS IO Unit Page 0 PortFlags */
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_DISCOVERY_IN_PROGRESS    (0x08)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_0_TARGET_IOC_NUM         (0x00)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_1_TARGET_IOC_NUM         (0x04)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_WAIT_FOR_PORTENABLE      (0x02)
+#define MPI_SAS_IOUNIT0_PORT_FLAGS_AUTO_PORT_CONFIG         (0x01)
+
+/* values for SAS IO Unit Page 0 PhyFlags */
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_PHY_DISABLED              (0x04)
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_TX_INVERT                 (0x02)
+#define MPI_SAS_IOUNIT0_PHY_FLAGS_RX_INVERT                 (0x01)
+
+/* values for SAS IO Unit Page 0 NegotiatedLinkRate */
+#define MPI_SAS_IOUNIT0_RATE_UNKNOWN                        (0x00)
+#define MPI_SAS_IOUNIT0_RATE_PHY_DISABLED                   (0x01)
+#define MPI_SAS_IOUNIT0_RATE_FAILED_SPEED_NEGOTIATION       (0x02)
+#define MPI_SAS_IOUNIT0_RATE_SATA_OOB_COMPLETE              (0x03)
+#define MPI_SAS_IOUNIT0_RATE_1_5                            (0x08)
+#define MPI_SAS_IOUNIT0_RATE_3_0                            (0x09)
+
+/* see mpi_sas.h for SAS IO Unit Page 0 ControllerPhyDeviceInfo values */
+
+
+typedef struct _MPI_SAS_IO_UNIT1_PHY_DATA
+{
+    U8          Port;                   /* 00h */
+    U8          PortFlags;              /* 01h */
+    U8          PhyFlags;               /* 02h */
+    U8          MaxMinLinkRate;         /* 03h */
+    U32         ControllerPhyDeviceInfo;/* 04h */
+    U32         Reserved1;              /* 08h */
+} MPI_SAS_IO_UNIT1_PHY_DATA, MPI_POINTER PTR_MPI_SAS_IO_UNIT1_PHY_DATA,
+  SasIOUnit1PhyData, MPI_POINTER pSasIOUnit1PhyData;
+
+/*
+ * Host code (drivers, BIOS, utilities, etc.) should leave this define set to
+ * one and check Header.PageLength at runtime.
+ */
+#ifndef MPI_SAS_IOUNIT1_PHY_MAX
+#define MPI_SAS_IOUNIT1_PHY_MAX         (1)
+#endif
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_1
+{
+    fCONFIG_EXTENDED_PAGE_HEADER Header;                             /* 00h */
+    U32                         Reserved1;                          /* 08h */
+    U8                          NumPhys;                            /* 0Ch */
+    U8                          Reserved2;                          /* 0Dh */
+    U16                         Reserved3;                          /* 0Eh */
+    MPI_SAS_IO_UNIT1_PHY_DATA   PhyData[MPI_SAS_IOUNIT1_PHY_MAX];   /* 10h */
+} fCONFIG_PAGE_SAS_IO_UNIT_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_1,
+  SasIOUnitPage1_t, MPI_POINTER pSasIOUnitPage1_t;
+
+#define MPI_SASIOUNITPAGE1_PAGEVERSION      (0x00)
+
+/* values for SAS IO Unit Page 1 PortFlags */
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_0_TARGET_IOC_NUM         (0x00)
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_1_TARGET_IOC_NUM         (0x04)
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_WAIT_FOR_PORTENABLE      (0x02)
+#define MPI_SAS_IOUNIT1_PORT_FLAGS_AUTO_PORT_CONFIG         (0x01)
+
+/* values for SAS IO Unit Page 1 PhyFlags */
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_PHY_DISABLE               (0x04)
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_TX_INVERT                 (0x02)
+#define MPI_SAS_IOUNIT1_PHY_FLAGS_RX_INVERT                 (0x01)
+
+/* values for SAS IO Unit Page 1 MaxMinLinkRate */
+#define MPI_SAS_IOUNIT1_MAX_RATE_MASK                       (0xF0)
+#define MPI_SAS_IOUNIT1_MAX_RATE_1_5                        (0x80)
+#define MPI_SAS_IOUNIT1_MAX_RATE_3_0                        (0x90)
+#define MPI_SAS_IOUNIT1_MIN_RATE_MASK                       (0x0F)
+#define MPI_SAS_IOUNIT1_MIN_RATE_1_5                        (0x08)
+#define MPI_SAS_IOUNIT1_MIN_RATE_3_0                        (0x09)
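+
+/*
+ * Illustrative sketch (not part of the original header): MaxMinLinkRate packs
+ * the maximum rate in the high nibble and the minimum rate in the low nibble,
+ * so programming a phy for 1.5 - 3.0 Gbps looks like
+ *
+ *     phy_data->MaxMinLinkRate = MPI_SAS_IOUNIT1_MAX_RATE_3_0 |
+ *                                MPI_SAS_IOUNIT1_MIN_RATE_1_5;
+ *
+ * where "phy_data" is a hypothetical pointer to an MPI_SAS_IO_UNIT1_PHY_DATA
+ * element of SAS IO Unit Page 1.
+ */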
+
+/* see mpi_sas.h for SAS IO Unit Page 1 ControllerPhyDeviceInfo values */
+
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_2
+{
+    fCONFIG_EXTENDED_PAGE_HEADER         Header;                 /* 00h */
+    U32                                 Reserved1;              /* 08h */
+    U16                                 MaxPersistentIDs;       /* 0Ch */
+    U16                                 NumPersistentIDsUsed;   /* 0Eh */
+    U8                                  Status;                 /* 10h */
+    U8                                  Flags;                  /* 11h */
+    U16                                 Reserved2;              /* 12h */
+} fCONFIG_PAGE_SAS_IO_UNIT_2, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_2,
+  SasIOUnitPage2_t, MPI_POINTER pSasIOUnitPage2_t;
+
+#define MPI_SASIOUNITPAGE2_PAGEVERSION      (0x00)
+
+/* values for SAS IO Unit Page 2 Status field */
+#define MPI_SAS_IOUNIT2_STATUS_DISABLED_PERSISTENT_MAPPINGS (0x02)
+#define MPI_SAS_IOUNIT2_STATUS_FULL_PERSISTENT_MAPPINGS     (0x01)
+
+/* values for SAS IO Unit Page 2 Flags field */
+#define MPI_SAS_IOUNIT2_FLAGS_DISABLE_PERSISTENT_MAPPINGS   (0x01)
+
+
+typedef struct _CONFIG_PAGE_SAS_IO_UNIT_3
+{
+    fCONFIG_EXTENDED_PAGE_HEADER Header;                         /* 00h */
+    U32                         Reserved1;                      /* 08h */
+    U32                         MaxInvalidDwordCount;           /* 0Ch */
+    U32                         InvalidDwordCountTime;          /* 10h */
+    U32                         MaxRunningDisparityErrorCount;  /* 14h */
+    U32                         RunningDisparityErrorTime;      /* 18h */
+    U32                         MaxLossDwordSynchCount;         /* 1Ch */
+    U32                         LossDwordSynchCountTime;        /* 20h */
+    U32                         MaxPhyResetProblemCount;        /* 24h */
+    U32                         PhyResetProblemTime;            /* 28h */
+} fCONFIG_PAGE_SAS_IO_UNIT_3, MPI_POINTER PTR_CONFIG_PAGE_SAS_IO_UNIT_3,
+  SasIOUnitPage3_t, MPI_POINTER pSasIOUnitPage3_t;
+
+#define MPI_SASIOUNITPAGE3_PAGEVERSION      (0x00)
+
+
+typedef struct _CONFIG_PAGE_SAS_EXPANDER_0
+{
+    fCONFIG_EXTENDED_PAGE_HEADER         Header;                 /* 00h */
+    U32                                 Reserved1;              /* 08h */
+    U64                                 SASAddress;             /* 0Ch */
+    U32                                 Reserved2;              /* 14h */
+    U16                                 DevHandle;              /* 18h */
+    U16                                 ParentDevHandle;        /* 1Ah */
+    U16                                 ExpanderChangeCount;    /* 1Ch */
+    U16                                 ExpanderRouteIndexes;   /* 1Eh */
+    U8                                  NumPhys;                /* 20h */
+    U8                                  SASLevel;               /* 21h */
+    U8                                  Flags;                  /* 22h */
+    U8                                  Reserved3;              /* 23h */
+} fCONFIG_PAGE_SAS_EXPANDER_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_EXPANDER_0,
+  SasExpanderPage0_t, MPI_POINTER pSasExpanderPage0_t;
+
+#define MPI_SASEXPANDER0_PAGEVERSION        (0x00)
+
+/* values for SAS Expander Page 0 Flags field */
+#define MPI_SAS_EXPANDER0_FLAGS_ROUTE_TABLE_CONFIG      (0x02)
+#define MPI_SAS_EXPANDER0_FLAGS_CONFIG_IN_PROGRESS      (0x01)
+
+
+typedef struct _CONFIG_PAGE_SAS_DEVICE_0
+{
+    fCONFIG_EXTENDED_PAGE_HEADER         Header;                 /* 00h */
+    U32                                 Reserved1;              /* 08h */
+    U64                                 SASAddress;             /* 0Ch */
+    U32                                 Reserved2;              /* 14h */
+    U16                                 DevHandle;              /* 18h */
+    U8                                  TargetID;               /* 1Ah */
+    U8                                  Bus;                    /* 1Bh */
+    U32                                 DeviceInfo;             /* 1Ch */
+    U16                                 Flags;                  /* 20h */
+    U8                                  PhysicalPort;           /* 22h */
+    U8                                  Reserved3;              /* 23h */
+} fCONFIG_PAGE_SAS_DEVICE_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_0,
+  SasDevicePage0_t, MPI_POINTER pSasDevicePage0_t;
+
+#define MPI_SASDEVICE0_PAGEVERSION          (0x00)
+
+/* values for SAS Device Page 0 Flags field */
+#define MPI_SAS_DEVICE0_FLAGS_MAPPING_PERSISTENT    (0x04)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_MAPPED         (0x02)
+#define MPI_SAS_DEVICE0_FLAGS_DEVICE_PRESENT        (0x01)
+
+/* see mpi_sas.h for SAS Device Page 0 DeviceInfo values */
+
+
+typedef struct _CONFIG_PAGE_SAS_DEVICE_1
+{
+    fCONFIG_EXTENDED_PAGE_HEADER         Header;                 /* 00h */
+    U32                                 Reserved1;              /* 08h */
+    U64                                 SASAddress;             /* 0Ch */
+    U32                                 Reserved2;              /* 14h */
+    U16                                 DevHandle;              /* 18h */
+    U8                                  TargetID;               /* 1Ah */
+    U8                                  Bus;                    /* 1Bh */
+    U8                                  InitialRegDeviceFIS[20];/* 1Ch */
+} fCONFIG_PAGE_SAS_DEVICE_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_DEVICE_1,
+  SasDevicePage1_t, MPI_POINTER pSasDevicePage1_t;
+
+#define MPI_SASDEVICE1_PAGEVERSION          (0x00)
+
+
+typedef struct _CONFIG_PAGE_SAS_PHY_0
+{
+    fCONFIG_EXTENDED_PAGE_HEADER         Header;                 /* 00h */
+    U32                                 Reserved1;              /* 08h */
+    U64                                 SASAddress;             /* 0Ch */
+    U16                                 AttachedDevHandle;      /* 14h */
+    U8                                  AttachedPhyIdentifier;  /* 16h */
+    U8                                  Reserved2;              /* 17h */
+    U32                                 AttachedDeviceInfo;     /* 18h */
+    U8                                  ProgrammedLinkRate;     /* 1Ch */
+    U8                                  HwLinkRate;             /* 1Dh */
+    U8                                  ChangeCount;            /* 1Eh */
+    U8                                  Reserved3;              /* 1Fh */
+    U32                                 PhyInfo;                /* 20h */
+} fCONFIG_PAGE_SAS_PHY_0, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_0,
+  SasPhyPage0_t, MPI_POINTER pSasPhyPage0_t;
+
+#define MPI_SASPHY0_PAGEVERSION             (0x00)
+
+/* values for SAS PHY Page 0 ProgrammedLinkRate field */
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_MASK                        (0xF0)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_NOT_PROGRAMMABLE            (0x00)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_1_5                         (0x80)
+#define MPI_SAS_PHY0_PRATE_MAX_RATE_3_0                         (0x90)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_MASK                        (0x0F)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_NOT_PROGRAMMABLE            (0x00)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_1_5                         (0x08)
+#define MPI_SAS_PHY0_PRATE_MIN_RATE_3_0                         (0x09)
+
+/* values for SAS PHY Page 0 HwLinkRate field */
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_MASK                       (0xF0)
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_1_5                        (0x80)
+#define MPI_SAS_PHY0_HWRATE_MAX_RATE_3_0                        (0x90)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_MASK                       (0x0F)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_1_5                        (0x08)
+#define MPI_SAS_PHY0_HWRATE_MIN_RATE_3_0                        (0x09)
+
+/* values for SAS PHY Page 0 PhyInfo field */
+#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_ACTIVE                   (0x00004000)
+#define MPI_SAS_PHY0_PHYINFO_SATA_PORT_SELECTOR                 (0x00002000)
+#define MPI_SAS_PHY0_PHYINFO_VIRTUAL_PHY                        (0x00001000)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_PARTIAL_PATHWAY_TIME          (0x00000F00)
+#define MPI_SAS_PHY0_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME         (8)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_ROUTING_ATTRIBUTE             (0x000000F0)
+#define MPI_SAS_PHY0_PHYINFO_DIRECT_ROUTING                     (0x00000000)
+#define MPI_SAS_PHY0_PHYINFO_SUBTRACTIVE_ROUTING                (0x00000010)
+#define MPI_SAS_PHY0_PHYINFO_TABLE_ROUTING                      (0x00000020)
+
+#define MPI_SAS_PHY0_PHYINFO_MASK_LINK_RATE                     (0x0000000F)
+#define MPI_SAS_PHY0_PHYINFO_UNKNOWN_LINK_RATE                  (0x00000000)
+#define MPI_SAS_PHY0_PHYINFO_PHY_DISABLED                       (0x00000001)
+#define MPI_SAS_PHY0_PHYINFO_NEGOTIATION_FAILED                 (0x00000002)
+#define MPI_SAS_PHY0_PHYINFO_SATA_OOB_COMPLETE                  (0x00000003)
+#define MPI_SAS_PHY0_PHYINFO_RATE_1_5                           (0x00000008)
+#define MPI_SAS_PHY0_PHYINFO_RATE_3_0                           (0x00000009)
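+
+/*
+ * Illustrative sketch (not part of the original header): multi-bit PhyInfo
+ * fields are recovered with the mask/shift pairs above, e.g. the partial
+ * pathway timeout value:
+ *
+ *     U32 ppto = (phy0->PhyInfo & MPI_SAS_PHY0_PHYINFO_MASK_PARTIAL_PATHWAY_TIME)
+ *                    >> MPI_SAS_PHY0_PHYINFO_SHIFT_PARTIAL_PATHWAY_TIME;
+ *
+ * where "phy0" is a hypothetical pointer to a fetched fCONFIG_PAGE_SAS_PHY_0.
+ */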
+
+
+typedef struct _CONFIG_PAGE_SAS_PHY_1
+{
+    fCONFIG_EXTENDED_PAGE_HEADER Header;                     /* 00h */
+    U32                         Reserved1;                  /* 08h */
+    U32                         InvalidDwordCount;          /* 0Ch */
+    U32                         RunningDisparityErrorCount; /* 10h */
+    U32                         LossDwordSynchCount;        /* 14h */
+    U32                         PhyResetProblemCount;       /* 18h */
+} fCONFIG_PAGE_SAS_PHY_1, MPI_POINTER PTR_CONFIG_PAGE_SAS_PHY_1,
+  SasPhyPage1_t, MPI_POINTER pSasPhyPage1_t;
+
+#define MPI_SASPHY1_PAGEVERSION             (0x00)
+
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_fc.h b/drivers/message/fusion/lsi/mpi_fc.h
new file mode 100644
index 0000000..ea266b2
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_fc.h
@@ -0,0 +1,363 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_fc.h
+ *          Title:  MPI Fibre Channel messages and structures
+ *  Creation Date:  June 12, 2000
+ *
+ *    mpi_fc.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-12-00  01.00.02  Added _MSG_FC_ABORT_REPLY structure.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  12-04-00  01.01.02  Added messages for Common Transport Send and
+ *                      Primitive Send.
+ *  01-09-01  01.01.03  Modified some of the new flags to have an MPI prefix
+ *                      and modified the FcPrimitiveSend flags.
+ *  01-25-01  01.01.04  Move InitiatorIndex in LinkServiceRsp reply to a larger
+ *                      field.
+ *                      Added FC_ABORT_TYPE_CT_SEND_REQUEST and
+ *                      FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
+ *                      Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
+ *  02-20-01  01.01.05  Started using MPI_POINTER.
+ *  03-27-01  01.01.06  Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
+ *                      and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
+ *                      Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.07  Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  09-28-01  01.02.02  Change name of reserved field in
+ *                      MSG_LINK_SERVICE_RSP_REPLY.
+ *  05-31-02  01.02.03  Adding AliasIndex to FC Direct Access requests.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_FC_H
+#define MPI_FC_H
+
+
+/*****************************************************************************
+*
+*        F C    D i r e c t    A c c e s s     M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Link Service Buffer Post messages                                        */
+/****************************************************************************/
+
+typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REQUEST
+{
+    U8                      BufferPostFlags;    /* 00h */
+    U8                      BufferCount;        /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved;           /* 04h */
+    U8                      Reserved1;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    SGE_TRANS_SIMPLE_UNION  SGL;
+} MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
+ MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REQUEST,
+  LinkServiceBufferPostRequest_t, MPI_POINTER pLinkServiceBufferPostRequest_t;
+
+#define LINK_SERVICE_BUFFER_POST_FLAGS_PORT_MASK (0x01)
+
+typedef struct _WWNFORMAT
+{
+    U32                     PortNameHigh;       /* 00h */
+    U32                     PortNameLow;        /* 04h */
+    U32                     NodeNameHigh;       /* 08h */
+    U32                     NodeNameLow;        /* 0Ch */
+} WWNFORMAT,
+  WwnFormat_t;
+
+/* Link Service Buffer Post Reply */
+typedef struct _MSG_LINK_SERVICE_BUFFER_POST_REPLY
+{
+    U8                      Flags;              /* 00h */
+    U8                      Reserved;           /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved2;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     TransferLength;     /* 14h */
+    U32                     TransactionContext; /* 18h */
+    U32                     Rctl_Did;           /* 1Ch */
+    U32                     Csctl_Sid;          /* 20h */
+    U32                     Type_Fctl;          /* 24h */
+    U16                     SeqCnt;             /* 28h */
+    U8                      Dfctl;              /* 2Ah */
+    U8                      SeqId;              /* 2Bh */
+    U16                     Rxid;               /* 2Ch */
+    U16                     Oxid;               /* 2Eh */
+    U32                     Parameter;          /* 30h */
+    WWNFORMAT               Wwn;                /* 34h */
+} MSG_LINK_SERVICE_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_BUFFER_POST_REPLY,
+  LinkServiceBufferPostReply_t, MPI_POINTER pLinkServiceBufferPostReply_t;
+
+#define MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED    (0x80)
+
+#define MPI_FC_DID_MASK                             (0x00FFFFFF)
+#define MPI_FC_DID_SHIFT                            (0)
+#define MPI_FC_RCTL_MASK                            (0xFF000000)
+#define MPI_FC_RCTL_SHIFT                           (24)
+#define MPI_FC_SID_MASK                             (0x00FFFFFF)
+#define MPI_FC_SID_SHIFT                            (0)
+#define MPI_FC_CSCTL_MASK                           (0xFF000000)
+#define MPI_FC_CSCTL_SHIFT                          (24)
+#define MPI_FC_FCTL_MASK                            (0x00FFFFFF)
+#define MPI_FC_FCTL_SHIFT                           (0)
+#define MPI_FC_TYPE_MASK                            (0xFF000000)
+#define MPI_FC_TYPE_SHIFT                           (24)
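+
+/*
+ * Illustrative sketch (not part of the original header): each of the combined
+ * fields above holds two FC frame-header fields in one dword and is decoded
+ * with the matching mask/shift pair, e.g.
+ *
+ *     U32 did  = (reply->Rctl_Did & MPI_FC_DID_MASK)  >> MPI_FC_DID_SHIFT;
+ *     U32 rctl = (reply->Rctl_Did & MPI_FC_RCTL_MASK) >> MPI_FC_RCTL_SHIFT;
+ *
+ * where "reply" is a hypothetical pointer to a received
+ * MSG_LINK_SERVICE_BUFFER_POST_REPLY.
+ */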
+
+/* obsolete names for the defines above */
+#define FCP_TARGET_DID_MASK                         (0x00FFFFFF)
+#define FCP_TARGET_DID_SHIFT                        (0)
+#define FCP_TARGET_RCTL_MASK                        (0xFF000000)
+#define FCP_TARGET_RCTL_SHIFT                       (24)
+#define FCP_TARGET_SID_MASK                         (0x00FFFFFF)
+#define FCP_TARGET_SID_SHIFT                        (0)
+#define FCP_TARGET_CSCTL_MASK                       (0xFF000000)
+#define FCP_TARGET_CSCTL_SHIFT                      (24)
+#define FCP_TARGET_FCTL_MASK                        (0x00FFFFFF)
+#define FCP_TARGET_FCTL_SHIFT                       (0)
+#define FCP_TARGET_TYPE_MASK                        (0xFF000000)
+#define FCP_TARGET_TYPE_SHIFT                       (24)
+
+
+/****************************************************************************/
+/* Link Service Response messages                                           */
+/****************************************************************************/
+
+typedef struct _MSG_LINK_SERVICE_RSP_REQUEST
+{
+    U8                      RspFlags;           /* 00h */
+    U8                      RspLength;          /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Rctl_Did;           /* 0Ch */
+    U32                     Csctl_Sid;          /* 10h */
+    U32                     Type_Fctl;          /* 14h */
+    U16                     SeqCnt;             /* 18h */
+    U8                      Dfctl;              /* 1Ah */
+    U8                      SeqId;              /* 1Bh */
+    U16                     Rxid;               /* 1Ch */
+    U16                     Oxid;               /* 1Eh */
+    U32                     Parameter;          /* 20h */
+    SGE_SIMPLE_UNION        SGL;                /* 24h */
+} MSG_LINK_SERVICE_RSP_REQUEST, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REQUEST,
+  LinkServiceRspRequest_t, MPI_POINTER pLinkServiceRspRequest_t;
+
+#define LINK_SERVICE_RSP_FLAGS_IMMEDIATE        (0x80)
+#define LINK_SERVICE_RSP_FLAGS_PORT_MASK        (0x01)
+
+
+/* Link Service Response Reply  */
+typedef struct _MSG_LINK_SERVICE_RSP_REPLY
+{
+    U16                     Reserved;           /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved_0100_InitiatorIndex; /* 06h */ /* obsolete InitiatorIndex */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     InitiatorIndex;     /* 14h */
+} MSG_LINK_SERVICE_RSP_REPLY, MPI_POINTER PTR_MSG_LINK_SERVICE_RSP_REPLY,
+  LinkServiceRspReply_t, MPI_POINTER pLinkServiceRspReply_t;
+
+
+/****************************************************************************/
+/* Extended Link Service Send messages                                      */
+/****************************************************************************/
+
+typedef struct _MSG_EXLINK_SERVICE_SEND_REQUEST
+{
+    U8                      SendFlags;          /* 00h */
+    U8                      AliasIndex;         /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U32                     MsgFlags_Did;       /* 04h */
+    U32                     MsgContext;         /* 08h */
+    U32                     ElsCommandCode;     /* 0Ch */
+    SGE_SIMPLE_UNION        SGL;                /* 10h */
+} MSG_EXLINK_SERVICE_SEND_REQUEST, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REQUEST,
+  ExLinkServiceSendRequest_t, MPI_POINTER pExLinkServiceSendRequest_t;
+
+#define EX_LINK_SERVICE_SEND_DID_MASK           (0x00FFFFFF)
+#define EX_LINK_SERVICE_SEND_DID_SHIFT          (0)
+#define EX_LINK_SERVICE_SEND_MSGFLAGS_MASK      (0xFF000000)
+#define EX_LINK_SERVICE_SEND_MSGFLAGS_SHIFT     (24)
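+
+/*
+ * Illustrative sketch (not part of the original header): the request packs
+ * the destination D_ID and the message flags into one dword, e.g.
+ *
+ *     req->MsgFlags_Did = (did << EX_LINK_SERVICE_SEND_DID_SHIFT) |
+ *                         ((U32)msgflags << EX_LINK_SERVICE_SEND_MSGFLAGS_SHIFT);
+ *
+ * where "req", "did" and "msgflags" are hypothetical; did must already be a
+ * 24-bit FC address.
+ */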
+
+
+/* Extended Link Service Send Reply */
+typedef struct _MSG_EXLINK_SERVICE_SEND_REPLY
+{
+    U8                      Reserved;           /* 00h */
+    U8                      AliasIndex;         /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     ResponseLength;     /* 14h */
+} MSG_EXLINK_SERVICE_SEND_REPLY, MPI_POINTER PTR_MSG_EXLINK_SERVICE_SEND_REPLY,
+  ExLinkServiceSendReply_t, MPI_POINTER pExLinkServiceSendReply_t;
+
+/****************************************************************************/
+/* FC Abort messages                                                        */
+/****************************************************************************/
+
+typedef struct _MSG_FC_ABORT_REQUEST
+{
+    U8                      AbortFlags;                 /* 00h */
+    U8                      AbortType;                  /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     TransactionContextToAbort;  /* 0Ch */
+} MSG_FC_ABORT_REQUEST, MPI_POINTER PTR_MSG_FC_ABORT_REQUEST,
+  FcAbortRequest_t, MPI_POINTER pFcAbortRequest_t;
+
+#define FC_ABORT_FLAG_PORT_MASK                 (0x01)
+
+#define FC_ABORT_TYPE_ALL_FC_BUFFERS            (0x00)
+#define FC_ABORT_TYPE_EXACT_FC_BUFFER           (0x01)
+#define FC_ABORT_TYPE_CT_SEND_REQUEST           (0x02)
+#define FC_ABORT_TYPE_EXLINKSEND_REQUEST        (0x03)
+
+/* FC Abort Reply */
+typedef struct _MSG_FC_ABORT_REPLY
+{
+    U16                     Reserved;           /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_FC_ABORT_REPLY, MPI_POINTER PTR_MSG_FC_ABORT_REPLY,
+  FcAbortReply_t, MPI_POINTER pFcAbortReply_t;
+
+
+/****************************************************************************/
+/* FC Common Transport Send messages                                        */
+/****************************************************************************/
+
+typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REQUEST
+{
+    U8                      SendFlags;          /* 00h */
+    U8                      AliasIndex;         /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U32                     MsgFlags_Did;       /* 04h */
+    U32                     MsgContext;         /* 08h */
+    U16                     CTCommandCode;      /* 0Ch */
+    U8                      FsType;             /* 0Eh */
+    U8                      Reserved1;          /* 0Fh */
+    SGE_SIMPLE_UNION        SGL;                /* 10h */
+} MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
+ MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REQUEST,
+  FcCommonTransportSendRequest_t, MPI_POINTER pFcCommonTransportSendRequest_t;
+
+#define MPI_FC_CT_SEND_DID_MASK                 (0x00FFFFFF)
+#define MPI_FC_CT_SEND_DID_SHIFT                (0)
+#define MPI_FC_CT_SEND_MSGFLAGS_MASK            (0xFF000000)
+#define MPI_FC_CT_SEND_MSGFLAGS_SHIFT           (24)
+
+
+/* FC Common Transport Send Reply */
+typedef struct _MSG_FC_COMMON_TRANSPORT_SEND_REPLY
+{
+    U8                      Reserved;           /* 00h */
+    U8                      AliasIndex;         /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     ResponseLength;     /* 14h */
+} MSG_FC_COMMON_TRANSPORT_SEND_REPLY, MPI_POINTER PTR_MSG_FC_COMMON_TRANSPORT_SEND_REPLY,
+  FcCommonTransportSendReply_t, MPI_POINTER pFcCommonTransportSendReply_t;
+
+
+/****************************************************************************/
+/* FC Primitive Send messages                                               */
+/****************************************************************************/
+
+typedef struct _MSG_FC_PRIMITIVE_SEND_REQUEST
+{
+    U8                      SendFlags;          /* 00h */
+    U8                      Reserved;           /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      FcPrimitive[4];     /* 0Ch */
+} MSG_FC_PRIMITIVE_SEND_REQUEST, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REQUEST,
+  FcPrimitiveSendRequest_t, MPI_POINTER pFcPrimitiveSendRequest_t;
+
+#define MPI_FC_PRIM_SEND_FLAGS_PORT_MASK       (0x01)
+#define MPI_FC_PRIM_SEND_FLAGS_ML_RESET_LINK   (0x02)
+#define MPI_FC_PRIM_SEND_FLAGS_RESET_LINK      (0x04)
+#define MPI_FC_PRIM_SEND_FLAGS_STOP_SEND       (0x08)
+#define MPI_FC_PRIM_SEND_FLAGS_SEND_ONCE       (0x10)
+#define MPI_FC_PRIM_SEND_FLAGS_SEND_AROUND     (0x20)
+#define MPI_FC_PRIM_SEND_FLAGS_UNTIL_FULL      (0x40)
+#define MPI_FC_PRIM_SEND_FLAGS_FOREVER         (0x80)
+
+/* FC Primitive Send Reply */
+typedef struct _MSG_FC_PRIMITIVE_SEND_REPLY
+{
+    U8                      SendFlags;          /* 00h */
+    U8                      Reserved;           /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved1;          /* 04h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_FC_PRIMITIVE_SEND_REPLY, MPI_POINTER PTR_MSG_FC_PRIMITIVE_SEND_REPLY,
+  FcPrimitiveSendReply_t, MPI_POINTER pFcPrimitiveSendReply_t;
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_history.txt b/drivers/message/fusion/lsi/mpi_history.txt
new file mode 100644
index 0000000..0deb772
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_history.txt
@@ -0,0 +1,276 @@
+
+ ==============================
+ MPI Header File Change History
+ ==============================
+
+ Copyright (c) 2000-2001 LSI Logic Corporation.
+
+ ---------------------------------------
+ Header Set Release Version:    01.01.10
+ Header Set Release Date:       04-09-01
+ ---------------------------------------
+
+ Filename               Current version     Prior version
+ ----------             ---------------     -------------
+ mpi.h                  01.01.07            01.01.06
+ mpi_ioc.h              01.01.07            01.01.06
+ mpi_cnfg.h             01.01.11            01.01.10
+ mpi_init.h             01.01.05            01.01.04
+ mpi_targ.h             01.01.04            01.01.04
+ mpi_fc.h               01.01.07            01.01.06
+ mpi_lan.h              01.01.03            01.01.03
+ mpi_raid.h             01.01.02            01.01.02
+ mpi_type.h             01.01.02            01.01.02
+ mpi_history.txt        01.01.09            01.01.09
+
+
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+
+mpi.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH definition.
+ *  06-06-00  01.00.01  Update MPI_VERSION_MAJOR and MPI_VERSION_MINOR.
+ *  06-22-00  01.00.02  Added MPI_IOCSTATUS_LAN_ definitions.
+ *                      Removed LAN_SUSPEND function definition.
+ *                      Added MPI_MSGFLAGS_CONTINUATION_REPLY definition.
+ *  06-30-00  01.00.03  Added MPI_CONTEXT_REPLY_TYPE_LAN definition.
+ *                      Added MPI_GET/SET_CONTEXT_REPLY_TYPE macros.
+ *  07-27-00  01.00.04  Added MPI_FAULT_ definitions.
+ *                      Removed MPI_IOCSTATUS_MSG/DATA_XFER_ERROR definitions.
+ *                      Added MPI_IOCSTATUS_INTERNAL_ERROR definition.
+ *                      Added MPI_IOCSTATUS_TARGET_XFER_COUNT_MISMATCH.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  12-04-00  01.01.02  Added new function codes.
+ *  01-09-01  01.01.03  Added more definitions to the system interface section
+ *                      Added MPI_IOCSTATUS_TARGET_STS_DATA_NOT_SENT.
+ *  01-25-01  01.01.04  Changed MPI_VERSION_MINOR from 0x00 to 0x01.
+ *  02-20-01  01.01.05  Started using MPI_POINTER.
+ *                      Added defines for MPI_DIAG_PREVENT_IOC_BOOT and
+ *                      MPI_DIAG_CLEAR_FLASH_BAD_SIG.
+ *                      Obsoleted MPI_IOCSTATUS_TARGET_FC_ defines.
+ *  02-27-01  01.01.06  Removed MPI_HOST_INDEX_REGISTER define.
+ *                      Added function codes for RAID.
+ *  04-09-01  01.01.07  Added alternate define for MPI_DOORBELL_ACTIVE,
+ *                      MPI_DOORBELL_USED, to better match the spec.
+ *  --------------------------------------------------------------------------
+
+mpi_ioc.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added _MSG_IOC_INIT_REPLY structure.
+ *  06-06-00  01.00.01  Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
+ *  06-12-00  01.00.02  Added _MSG_PORT_ENABLE_REPLY structure.
+ *                      Added _MSG_EVENT_ACK_REPLY structure.
+ *                      Added _MSG_FW_DOWNLOAD_REPLY structure.
+ *                      Added _MSG_TOOLBOX_REPLY structure.
+ *  06-30-00  01.00.03  Added MaxLanBuckets to _PORT_FACT_REPLY structure.
+ *  07-27-00  01.00.04  Added _EVENT_DATA structure definitions for _SCSI,
+ *                      _LINK_STATUS, _LOOP_STATE and _LOGOUT.
+ *  08-11-00  01.00.05  Switched positions of MsgLength and Function fields in
+ *                      _MSG_EVENT_ACK_REPLY structure to match specification.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *                      Added a value for Manufacturer to WhoInit
+ *  12-04-00  01.01.02  Modified IOCFacts reply, added FWUpload messages, and
+ *                      removed toolbox message.
+ *  01-09-01  01.01.03  Added event enabled and disabled defines.
+ *                      Added structures for FwHeader and DataHeader.
+ *                      Added ImageType to FwUpload reply.
+ *  02-20-01  01.01.04  Started using MPI_POINTER.
+ *  02-27-01  01.01.05  Added event for RAID status change and its event data.
+ *                      Added IocNumber field to MSG_IOC_FACTS_REPLY.
+ *  03-27-01  01.01.06  Added defines for ProductId field of MPI_FW_HEADER.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.07  Added structure EVENT_DATA_EVENT_CHANGE.
+ *  --------------------------------------------------------------------------
+
+mpi_cnfg.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-08-00  01.00.02  Added _PAGEVERSION definitions for all pages.
+ *                      Added FcPhLowestVersion, FcPhHighestVersion, Reserved2
+ *                      fields to FC_DEVICE_0 page, updated the page version.
+ *                      Changed _FREE_RUNNING_CLOCK to _PACING_TRANSFERS in
+ *                      SCSI_PORT_0, SCSI_DEVICE_0 and SCSI_DEVICE_1 pages
+ *                      and updated the page versions.
+ *                      Added _RESPONSE_ID_MASK definition to SCSI_PORT_1
+ *                      page and updated the page version.
+ *                      Added Information field and _INFO_PARAMS_NEGOTIATED
+ *                      definition to SCSI_DEVICE_0 page.
+ *  06-22-00  01.00.03  Removed batch controls from LAN_0 page and updated the
+ *                      page version.
+ *                      Added BucketsRemaining to LAN_1 page, redefined the
+ *                      state values, and updated the page version.
+ *                      Revised bus width definitions in SCSI_PORT_0,
+ *                      SCSI_DEVICE_0 and SCSI_DEVICE_1 pages.
+ *  06-30-00  01.00.04  Added MaxReplySize to LAN_1 page and updated the page
+ *                      version.
+ *                      Moved FC_DEVICE_0 PageAddress description to spec.
+ *  07-27-00  01.00.05  Corrected the SubsystemVendorID and SubsystemID field
+ *                      widths in IOC_0 page and updated the page version.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *                      Added Manufacturing pages, IO Unit Page 2, SCSI SPI
+ *                      Port Page 2, FC Port Page 4, FC Port Page 5
+ *  12-04-00  01.01.03  Config page changes to match MPI rev 1.00.01.
+ *  12-05-00  01.01.04  Modified config page actions.
+ *  01-09-01  01.01.05  Added defines for page address formats.
+ *                      Data size for Manufacturing pages 2 and 3 no longer
+ *                      defined here.
+ *                      Io Unit Page 2 size is fixed at 4 adapters and some
+ *                      flags were changed.
+ *                      SCSI Port Page 2 Device Settings modified.
+ *                      New fields added to FC Port Page 0 and some flags
+ *                      cleaned up.
+ *                      Removed impedance flash from FC Port Page 1.
+ *                      Added FC Port pages 6 and 7.
+ *  01-25-01  01.01.06  Added MaxInitiators field to FcPortPage0.
+ *  01-29-01  01.01.07  Changed some defines to make them 32 character unique.
+ *                      Added some LinkType defines for FcPortPage0.
+ *  02-20-01  01.01.08  Started using MPI_POINTER.
+ *  02-27-01  01.01.09  Replaced MPI_CONFIG_PAGETYPE_SCSI_LUN with
+ *                      MPI_CONFIG_PAGETYPE_RAID_VOLUME.
+ *                      Added definitions and structures for IOC Page 2 and
+ *                      RAID Volume Page 2.
+ *  03-27-01  01.01.10  Added CONFIG_PAGE_FC_PORT_8 and CONFIG_PAGE_FC_PORT_9.
+ *                      CONFIG_PAGE_FC_PORT_3 now supports persistent by DID.
+ *                      Added VendorId and ProductRevLevel fields to
+ *                      RAIDVOL2_IM_PHYS_ID struct.
+ *                      Modified values for MPI_FCPORTPAGE0_FLAGS_ATTACH_
+ *                      defines to make them compatible to MPI version 1.0.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.11  Added some new defines for the PageAddress field and
+ *                      removed some obsolete ones.
+ *                      Added IO Unit Page 3.
+ *                      Modified defines for Scsi Port Page 2.
+ *                      Modified RAID Volume Pages.
+ *  --------------------------------------------------------------------------
+
+mpi_init.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-08-00  01.00.02  Added MPI_SCSI_RSP_INFO_ definitions.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  12-04-00  01.01.02  Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
+ *  02-20-01  01.01.03  Started using MPI_POINTER.
+ *  03-27-01  01.01.04  Added structure offset comments.
+ *  04-10-01  01.01.05  Added new MsgFlag for MSG_SCSI_TASK_MGMT.
+ *  --------------------------------------------------------------------------
+
+mpi_targ.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-22-00  01.00.02  Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
+ *                      Corrected DECSRIPTOR typo to DESCRIPTOR.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *                      Modified target mode to use IoIndex instead of
+ *                      HostIndex and IocIndex. Added Alias.
+ *  01-09-01  01.01.02  Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
+ *                      and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
+ *  02-20-01  01.01.03  Started using MPI_POINTER.
+ *                      Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
+ *                      MPI_TARGET_FCP_CMD_BUFFER.
+ *  03-27-01  01.01.04  Added structure offset comments.
+ *  --------------------------------------------------------------------------
+
+mpi_fc.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-12-00  01.00.02  Added _MSG_FC_ABORT_REPLY structure.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  12-04-00  01.01.02  Added messages for Common Transport Send and
+ *                      Primitive Send.
+ *  01-09-01  01.01.03  Modified some of the new flags to have an MPI prefix
+ *                      and modified the FcPrimitiveSend flags.
+ *  01-25-01  01.01.04  Move InitiatorIndex in LinkServiceRsp reply to a larger
+ *                      field.
+ *                      Added FC_ABORT_TYPE_CT_SEND_REQUEST and
+ *                      FC_ABORT_TYPE_EXLINKSEND_REQUEST for FcAbort request.
+ *                      Added MPI_FC_PRIM_SEND_FLAGS_STOP_SEND.
+ *  02-20-01  01.01.05  Started using MPI_POINTER.
+ *  03-27-01  01.01.06  Added Flags field to MSG_LINK_SERVICE_BUFFER_POST_REPLY
+ *                      and defined MPI_LS_BUF_POST_REPLY_FLAG_NO_RSP_NEEDED.
+ *                      Added MPI_FC_PRIM_SEND_FLAGS_RESET_LINK define.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.07  Added RspLength field to MSG_LINK_SERVICE_RSP_REQUEST.
+ *  --------------------------------------------------------------------------
+
+mpi_lan.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added LANStatus field to _MSG_LAN_SEND_REPLY.
+ *                      Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
+ *                      Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-12-00  01.00.02  Added MPI_ to BUCKETSTATUS_ definitions.
+ *  06-22-00  01.00.03  Major changes to match new LAN definition in 1.0 spec.
+ *  06-30-00  01.00.04  Added Context Reply definitions per revised proposal.
+ *                      Changed transaction context usage to bucket/buffer.
+ *  07-05-00  01.00.05  Moved LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition
+ *                      to the lan private header file.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  02-20-01  01.01.02  Started using MPI_POINTER.
+ *  03-27-01  01.01.03  Added structure offset comments.
+ *  --------------------------------------------------------------------------
+
+mpi_raid.h
+ *  02-27-01  01.01.01  Original release for this file.
+ *  03-27-01  01.01.02  Added structure offset comments.
+ *  --------------------------------------------------------------------------
+
+mpi_type.h
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  02-20-01  01.01.02  Added define and ifdef for MPI_POINTER.
+ *  --------------------------------------------------------------------------
+
+mpi_history.txt         Parts list history
+
+Filename    01.01.10
+----------  --------
+mpi.h       01.01.07
+mpi_ioc.h   01.01.07
+mpi_cnfg.h  01.01.11
+mpi_init.h  01.01.05
+mpi_targ.h  01.01.04
+mpi_fc.h    01.01.07
+mpi_lan.h   01.01.03
+mpi_raid.h  01.01.02
+mpi_type.h  01.01.02
+
+Filename    01.01.09   01.01.08   01.01.07   01.01.06   01.01.05   01.01.04
+----------  --------   --------   --------   --------   --------   --------
+mpi.h       01.01.06   01.01.06   01.01.05   01.01.04   01.01.04   01.01.03
+mpi_ioc.h   01.01.06   01.01.05   01.01.04   01.01.03   01.01.03   01.01.03
+mpi_cnfg.h  01.01.10   01.01.09   01.01.08   01.01.07   01.01.06   01.01.05
+mpi_init.h  01.01.04   01.01.03   01.01.03   01.01.02   01.01.02   01.01.02
+mpi_targ.h  01.01.04   01.01.03   01.01.03   01.01.02   01.01.02   01.01.02
+mpi_fc.h    01.01.06   01.01.05   01.01.05   01.01.04   01.01.04   01.01.03
+mpi_lan.h   01.01.03   01.01.02   01.01.02   01.01.01   01.01.01   01.01.01
+mpi_raid.h  01.01.02   01.01.01
+mpi_type.h  01.01.02   01.01.02   01.01.02   01.01.01   01.01.01   01.01.01
+
+Filename    01.01.03   01.01.02   01.01.01   01.00.07   01.00.06   01.00.05
+----------  --------   --------   --------   --------   --------   --------
+mpi.h       01.01.02   01.01.02   01.01.01   01.00.04   01.00.04   01.00.03
+mpi_ioc.h   01.01.02   01.01.02   01.01.01   01.00.05   01.00.04   01.00.03
+mpi_cnfg.h  01.01.04   01.01.03   01.01.01   01.00.05   01.00.05   01.00.04
+mpi_init.h  01.01.02   01.01.02   01.01.01   01.00.02   01.00.02   01.00.02
+mpi_targ.h  01.01.01   01.01.01   01.01.01   01.00.02   01.00.02   01.00.02
+mpi_fc.h    01.01.02   01.01.02   01.01.01   01.00.02   01.00.02   01.00.02
+mpi_lan.h   01.01.01   01.01.01   01.01.01   01.00.05   01.00.05   01.00.05
+mpi_type.h  01.01.01   01.01.01   01.01.01   01.00.01   01.00.01   01.00.01
+
+Filename     01.00.04   01.00.03   01.00.02   01.00.01   00.10.02   00.10.01
+----------   --------   --------   --------   --------   --------   --------
+mpi.h        01.00.02   01.00.01   01.00.01   01.00.01   00.10.02   00.10.01
+mpi_ioc.h    01.00.02   01.00.02   01.00.01   01.00.01   00.10.02   00.10.01
+mpi_cnfg.h   01.00.03   01.00.02   01.00.02   01.00.01   00.10.01   00.10.01
+mpi_init.h   01.00.02   01.00.02   01.00.02   01.00.01   00.10.02   00.10.01
+mpi_targ.h   01.00.02   01.00.01   01.00.01   01.00.01   00.10.01   00.10.01
+mpi_fc.h     01.00.02   01.00.02   01.00.01   01.00.01   00.10.01   00.10.01
+mpi_lan.h    01.00.03   01.00.02   01.00.01   01.00.01   00.10.02   00.10.01
+mpi_type.h   01.00.01   01.00.01   01.00.01   01.00.01   00.10.01   00.10.01
+
+
+ *  --------------------------------------------------------------------------
+
diff --git a/drivers/message/fusion/lsi/mpi_inb.h b/drivers/message/fusion/lsi/mpi_inb.h
new file mode 100644
index 0000000..dae29fb
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_inb.h
@@ -0,0 +1,220 @@
+/*
+ *  Copyright (c) 2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_inb.h
+ *          Title:  MPI Inband structures and definitions
+ *  Creation Date:  September 30, 2003
+ *
+ *    mpi_inb.h Version:  01.03.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  ??-??-??  01.03.01  Original release.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_INB_H
+#define MPI_INB_H
+
+/******************************************************************************
+*
+*        I n b a n d    M e s s a g e s
+*
+*******************************************************************************/
+
+
+/****************************************************************************/
+/* Inband Buffer Post Request                                               */
+/****************************************************************************/
+
+typedef struct _MSG_INBAND_BUFFER_POST_REQUEST
+{
+    U8                      Reserved1;          /* 00h */
+    U8                      BufferCount;        /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved4;          /* 0Ch */
+    SGE_TRANS_SIMPLE_UNION  SGL;                /* 10h */
+} MSG_INBAND_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REQUEST,
+  MpiInbandBufferPostRequest_t , MPI_POINTER pMpiInbandBufferPostRequest_t;
+
+
+typedef struct _WWN_FC_FORMAT
+{
+    U64                     NodeName;           /* 00h */
+    U64                     PortName;           /* 08h */
+} WWN_FC_FORMAT, MPI_POINTER PTR_WWN_FC_FORMAT,
+  WwnFcFormat_t, MPI_POINTER pWwnFcFormat_t;
+
+typedef struct _WWN_SAS_FORMAT
+{
+    U64                     WorldWideID;        /* 00h */
+    U32                     Reserved1;          /* 08h */
+    U32                     Reserved2;          /* 0Ch */
+} WWN_SAS_FORMAT, MPI_POINTER PTR_WWN_SAS_FORMAT,
+  WwnSasFormat_t, MPI_POINTER pWwnSasFormat_t;
+
+typedef union _WWN_INBAND_FORMAT
+{
+    WWN_FC_FORMAT           Fc;
+    WWN_SAS_FORMAT          Sas;
+} WWN_INBAND_FORMAT, MPI_POINTER PTR_WWN_INBAND_FORMAT,
+  WwnInbandFormat, MPI_POINTER pWwnInbandFormat;
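
The WWN union above overlays an FC node/port name pair and a SAS world wide ID in the same 16-byte field. A minimal sketch of filling it for each transport; the helper names are illustrative only.

#include "mpi.h"      /* U32/U64 via mpi_type.h */
#include "mpi_inb.h"

static void example_set_fc_wwn(WWN_INBAND_FORMAT *wwn, U64 node, U64 port)
{
    wwn->Fc.NodeName = node;
    wwn->Fc.PortName = port;
}

static void example_set_sas_wwn(WWN_INBAND_FORMAT *wwn, U64 wwid)
{
    wwn->Sas.WorldWideID = wwid;
    wwn->Sas.Reserved1 = 0;
    wwn->Sas.Reserved2 = 0;
}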
+
+
+/* Inband Buffer Post reply message */
+
+typedef struct _MSG_INBAND_BUFFER_POST_REPLY
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved4;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     TransferLength;     /* 14h */
+    U32                     TransactionContext; /* 18h */
+    WWN_INBAND_FORMAT       Wwn;                /* 1Ch */
+    U32                     IOCIdentifier[4];   /* 2Ch */
+} MSG_INBAND_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_INBAND_BUFFER_POST_REPLY,
+  MpiInbandBufferPostReply_t, MPI_POINTER pMpiInbandBufferPostReply_t;
+
+
+/****************************************************************************/
+/* Inband Send Request                                                      */
+/****************************************************************************/
+
+typedef struct _MSG_INBAND_SEND_REQUEST
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved4;          /* 0Ch */
+    WWN_INBAND_FORMAT       Wwn;                /* 10h */
+    U32                     Reserved5;          /* 20h */
+    SGE_IO_UNION            SGL;                /* 24h */
+} MSG_INBAND_SEND_REQUEST, MPI_POINTER PTR_MSG_INBAND_SEND_REQUEST,
+  MpiInbandSendRequest_t , MPI_POINTER pMpiInbandSendRequest_t;
+
+
+/* Inband Send reply message */
+
+typedef struct _MSG_INBAND_SEND_REPLY
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved4;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     ResponseLength;     /* 14h */
+} MSG_INBAND_SEND_REPLY, MPI_POINTER PTR_MSG_INBAND_SEND_REPLY,
+  MpiInbandSendReply_t, MPI_POINTER pMpiInbandSendReply_t;
+
+
+/****************************************************************************/
+/* Inband Response Request                                                  */
+/****************************************************************************/
+
+typedef struct _MSG_INBAND_RSP_REQUEST
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved4;          /* 0Ch */
+    WWN_INBAND_FORMAT       Wwn;                /* 10h */
+    U32                     IOCIdentifier[4];   /* 20h */
+    U32                     ResponseLength;     /* 30h */
+    SGE_IO_UNION            SGL;                /* 34h */
+} MSG_INBAND_RSP_REQUEST, MPI_POINTER PTR_MSG_INBAND_RSP_REQUEST,
+  MpiInbandRspRequest_t , MPI_POINTER pMpiInbandRspRequest_t;
+
+
+/* Inband Response reply message */
+
+typedef struct _MSG_INBAND_RSP_REPLY
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved4;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_INBAND_RSP_REPLY, MPI_POINTER PTR_MSG_INBAND_RSP_REPLY,
+  MpiInbandRspReply_t, MPI_POINTER pMpiInbandRspReply_t;
+
+
+/****************************************************************************/
+/* Inband Abort Request                                                     */
+/****************************************************************************/
+
+typedef struct _MSG_INBAND_ABORT_REQUEST
+{
+    U8                      Reserved1;          /* 00h */
+    U8                      AbortType;          /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved4;          /* 0Ch */
+    U32                     ContextToAbort;     /* 10h */
+} MSG_INBAND_ABORT_REQUEST, MPI_POINTER PTR_MSG_INBAND_ABORT_REQUEST,
+  MpiInbandAbortRequest_t , MPI_POINTER pMpiInbandAbortRequest_t;
+
+#define MPI_INBAND_ABORT_TYPE_ALL_BUFFERS       (0x00)
+#define MPI_INBAND_ABORT_TYPE_EXACT_BUFFER      (0x01)
+#define MPI_INBAND_ABORT_TYPE_SEND_REQUEST      (0x02)
+#define MPI_INBAND_ABORT_TYPE_RESPONSE_REQUEST  (0x03)
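
A sketch of how the AbortType values above would be used when cancelling previously posted inband buffers. The helper is hypothetical, and the inband-abort function code (defined in mpi.h) is passed in rather than hard-coded here.

#include <string.h>
#include "mpi.h"
#include "mpi_inb.h"

static void example_fill_inband_abort(MpiInbandAbortRequest_t *req,
                                      U8 inband_abort_function, /* from mpi.h */
                                      U32 msg_context)
{
    memset(req, 0, sizeof(*req));
    req->Function   = inband_abort_function;
    req->MsgContext = msg_context;
    req->AbortType  = MPI_INBAND_ABORT_TYPE_ALL_BUFFERS;
    /* ContextToAbort matters only for the EXACT_BUFFER, SEND_REQUEST and
     * RESPONSE_REQUEST abort types; it is ignored for ALL_BUFFERS. */
    req->ContextToAbort = 0;
}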
+
+
+/* Inband Abort reply message */
+
+typedef struct _MSG_INBAND_ABORT_REPLY
+{
+    U8                      Reserved1;          /* 00h */
+    U8                      AbortType;          /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved4;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_INBAND_ABORT_REPLY, MPI_POINTER PTR_MSG_INBAND_ABORT_REPLY,
+  MpiInbandAbortReply_t, MPI_POINTER pMpiInbandAbortReply_t;
+
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_init.h b/drivers/message/fusion/lsi/mpi_init.h
new file mode 100644
index 0000000..b3c95fd
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_init.h
@@ -0,0 +1,362 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_init.h
+ *          Title:  MPI initiator mode messages and structures
+ *  Creation Date:  June 8, 2000
+ *
+ *    mpi_init.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added SenseBufferLength to _MSG_SCSI_IO_REPLY.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-08-00  01.00.02  Added MPI_SCSI_RSP_INFO_ definitions.
+ *  11-02-00  01.01.01  Original release for post 1.0 work.
+ *  12-04-00  01.01.02  Added MPI_SCSIIO_CONTROL_NO_DISCONNECT.
+ *  02-20-01  01.01.03  Started using MPI_POINTER.
+ *  03-27-01  01.01.04  Added structure offset comments.
+ *  04-10-01  01.01.05  Added new MsgFlag for MSG_SCSI_TASK_MGMT.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  08-29-01  01.02.02  Added MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET.
+ *                      Added MPI_SCSI_STATE_QUEUE_TAG_REJECTED for
+ *                      MSG_SCSI_IO_REPLY.
+ *  09-28-01  01.02.03  Added structures and defines for SCSI Enclosure
+ *                      Processor messages.
+ *  10-04-01  01.02.04  Added defines for SEP request Action field.
+ *  05-31-02  01.02.05  Added MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR define
+ *                      for SCSI IO requests.
+ *  11-15-02  01.02.06  Added special extended SCSI Status defines for FCP.
+ *  06-26-03  01.02.07  Added MPI_SCSI_STATUS_FCPEXT_UNASSIGNED define.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_INIT_H
+#define MPI_INIT_H
+
+
+/*****************************************************************************
+*
+*               S C S I    I n i t i a t o r    M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  SCSI IO messages and associated structures                              */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_IO_REQUEST
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      CDBLength;          /* 04h */
+    U8                      SenseBufferLength;  /* 05h */
+    U8                      Reserved;           /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      LUN[8];             /* 0Ch */
+    U32                     Control;            /* 14h */
+    U8                      CDB[16];            /* 18h */
+    U32                     DataLength;         /* 28h */
+    U32                     SenseBufferLowAddr; /* 2Ch */
+    SGE_IO_UNION            SGL;                /* 30h */
+} MSG_SCSI_IO_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_REQUEST,
+  SCSIIORequest_t, MPI_POINTER pSCSIIORequest_t;
+
+
+/* SCSI IO MsgFlags bits */
+
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH              (0x01)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32           (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64           (0x01)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOCATION           (0x02)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST           (0x00)
+#define MPI_SCSIIO_MSGFLGS_SENSE_LOC_IOC            (0x02)
+#define MPI_SCSIIO_MSGFLGS_CMD_DETERMINES_DATA_DIR  (0x04)
+#define MPI_SCSIIO_MSGFLGS_EEDP_TYPE_MASK           (0xE0)
+#define MPI_SCSIIO_MSGFLGS_EEDP_NONE                (0x00)
+#define MPI_SCSIIO_MSGFLGS_EEDP_RDPROTECT_T10       (0x20)
+#define MPI_SCSIIO_MSGFLGS_EEDP_VRPROTECT_T10       (0x40)
+#define MPI_SCSIIO_MSGFLGS_EEDP_WRPROTECT_T10       (0x60)
+#define MPI_SCSIIO_MSGFLGS_EEDP_520_READ_MODE1      (0x20)
+#define MPI_SCSIIO_MSGFLGS_EEDP_520_WRITE_MODE1     (0x40)
+#define MPI_SCSIIO_MSGFLGS_EEDP_8_9_READ_MODE1      (0x60)
+#define MPI_SCSIIO_MSGFLGS_EEDP_8_9_WRITE_MODE1     (0x80)
+
+
+/* SCSI IO LUN fields */
+
+#define MPI_SCSIIO_LUN_FIRST_LEVEL_ADDRESSING   (0x0000FFFF)
+#define MPI_SCSIIO_LUN_SECOND_LEVEL_ADDRESSING  (0xFFFF0000)
+#define MPI_SCSIIO_LUN_THIRD_LEVEL_ADDRESSING   (0x0000FFFF)
+#define MPI_SCSIIO_LUN_FOURTH_LEVEL_ADDRESSING  (0xFFFF0000)
+#define MPI_SCSIIO_LUN_LEVEL_1_WORD             (0xFF00)
+#define MPI_SCSIIO_LUN_LEVEL_1_DWORD            (0x0000FF00)
+
+/* SCSI IO Control bits */
+
+#define MPI_SCSIIO_CONTROL_DATADIRECTION_MASK   (0x03000000)
+#define MPI_SCSIIO_CONTROL_NODATATRANSFER       (0x00000000)
+#define MPI_SCSIIO_CONTROL_WRITE                (0x01000000)
+#define MPI_SCSIIO_CONTROL_READ                 (0x02000000)
+
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_MASK       (0x3C000000)
+#define MPI_SCSIIO_CONTROL_ADDCDBLEN_SHIFT      (26)
+
+#define MPI_SCSIIO_CONTROL_TASKATTRIBUTE_MASK   (0x00000700)
+#define MPI_SCSIIO_CONTROL_SIMPLEQ              (0x00000000)
+#define MPI_SCSIIO_CONTROL_HEADOFQ              (0x00000100)
+#define MPI_SCSIIO_CONTROL_ORDEREDQ             (0x00000200)
+#define MPI_SCSIIO_CONTROL_ACAQ                 (0x00000400)
+#define MPI_SCSIIO_CONTROL_UNTAGGED             (0x00000500)
+#define MPI_SCSIIO_CONTROL_NO_DISCONNECT        (0x00000700)
+
+#define MPI_SCSIIO_CONTROL_TASKMANAGE_MASK      (0x00FF0000)
+#define MPI_SCSIIO_CONTROL_OBSOLETE             (0x00800000)
+#define MPI_SCSIIO_CONTROL_CLEAR_ACA_RSV        (0x00400000)
+#define MPI_SCSIIO_CONTROL_TARGET_RESET         (0x00200000)
+#define MPI_SCSIIO_CONTROL_LUN_RESET_RSV        (0x00100000)
+#define MPI_SCSIIO_CONTROL_RESERVED             (0x00080000)
+#define MPI_SCSIIO_CONTROL_CLR_TASK_SET_RSV     (0x00040000)
+#define MPI_SCSIIO_CONTROL_ABORT_TASK_SET       (0x00020000)
+#define MPI_SCSIIO_CONTROL_RESERVED2            (0x00010000)
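
To show how the MsgFlags and Control encodings above combine, here is a minimal sketch of building a simple-queue read request with a host-resident 32-bit sense buffer. The helper name is illustrative; MPI_FUNCTION_SCSI_IO_REQUEST is assumed to come from mpi.h, and LUN/SGL setup is omitted.

#include <string.h>
#include "mpi.h"
#include "mpi_init.h"

static void example_fill_scsi_read(SCSIIORequest_t *req, U32 msg_context,
                                   U8 target, U8 bus, const U8 cdb10[10],
                                   U32 data_len, U32 sense_low_addr)
{
    memset(req, 0, sizeof(*req));
    req->Function           = MPI_FUNCTION_SCSI_IO_REQUEST;  /* assumed, from mpi.h */
    req->MsgContext         = msg_context;
    req->TargetID           = target;
    req->Bus                = bus;
    req->CDBLength          = 10;
    req->SenseBufferLength  = 18;
    req->MsgFlags           = MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32 |
                              MPI_SCSIIO_MSGFLGS_SENSE_LOC_HOST;
    req->Control            = MPI_SCSIIO_CONTROL_READ |
                              MPI_SCSIIO_CONTROL_SIMPLEQ;
    req->DataLength         = data_len;
    req->SenseBufferLowAddr = sense_low_addr;
    memcpy(req->CDB, cdb10, 10);
    /* req->LUN[] and req->SGL are filled in by the caller. */
}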
+
+
+/* SCSI IO reply structure */
+typedef struct _MSG_SCSI_IO_REPLY
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      CDBLength;          /* 04h */
+    U8                      SenseBufferLength;  /* 05h */
+    U8                      Reserved;           /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      SCSIStatus;         /* 0Ch */
+    U8                      SCSIState;          /* 0Dh */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     TransferCount;      /* 14h */
+    U32                     SenseCount;         /* 18h */
+    U32                     ResponseInfo;       /* 1Ch */
+} MSG_SCSI_IO_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_REPLY,
+  SCSIIOReply_t, MPI_POINTER pSCSIIOReply_t;
+
+
+/* SCSI IO Reply SCSIStatus values (SAM-2 status codes) */
+
+#define MPI_SCSI_STATUS_SUCCESS                 (0x00)
+#define MPI_SCSI_STATUS_CHECK_CONDITION         (0x02)
+#define MPI_SCSI_STATUS_CONDITION_MET           (0x04)
+#define MPI_SCSI_STATUS_BUSY                    (0x08)
+#define MPI_SCSI_STATUS_INTERMEDIATE            (0x10)
+#define MPI_SCSI_STATUS_INTERMEDIATE_CONDMET    (0x14)
+#define MPI_SCSI_STATUS_RESERVATION_CONFLICT    (0x18)
+#define MPI_SCSI_STATUS_COMMAND_TERMINATED      (0x22)
+#define MPI_SCSI_STATUS_TASK_SET_FULL           (0x28)
+#define MPI_SCSI_STATUS_ACA_ACTIVE              (0x30)
+
+#define MPI_SCSI_STATUS_FCPEXT_DEVICE_LOGGED_OUT    (0x80)
+#define MPI_SCSI_STATUS_FCPEXT_NO_LINK              (0x81)
+#define MPI_SCSI_STATUS_FCPEXT_UNASSIGNED           (0x82)
+
+
+/* SCSI IO Reply SCSIState values */
+
+#define MPI_SCSI_STATE_AUTOSENSE_VALID          (0x01)
+#define MPI_SCSI_STATE_AUTOSENSE_FAILED         (0x02)
+#define MPI_SCSI_STATE_NO_SCSI_STATUS           (0x04)
+#define MPI_SCSI_STATE_TERMINATED               (0x08)
+#define MPI_SCSI_STATE_RESPONSE_INFO_VALID      (0x10)
+#define MPI_SCSI_STATE_QUEUE_TAG_REJECTED       (0x20)
+
+/* SCSI IO Reply ResponseInfo values */
+/* (FCP-1 RSP_CODE values and SPI-3 Packetized Failure codes) */
+
+#define MPI_SCSI_RSP_INFO_FUNCTION_COMPLETE     (0x00000000)
+#define MPI_SCSI_RSP_INFO_FCP_BURST_LEN_ERROR   (0x01000000)
+#define MPI_SCSI_RSP_INFO_CMND_FIELDS_INVALID   (0x02000000)
+#define MPI_SCSI_RSP_INFO_FCP_DATA_RO_ERROR     (0x03000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_UNSUPPORTED (0x04000000)
+#define MPI_SCSI_RSP_INFO_TASK_MGMT_FAILED      (0x05000000)
+#define MPI_SCSI_RSP_INFO_SPI_LQ_INVALID_TYPE   (0x06000000)
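
A short sketch of the checks a completion path would make against the SCSIStatus and SCSIState values defined above; the function is illustrative only.

#include "mpi.h"
#include "mpi_init.h"

/* Returns nonzero when the reply carries usable autosense data. */
static int example_reply_has_sense(const SCSIIOReply_t *reply)
{
    if (reply->SCSIStatus != MPI_SCSI_STATUS_CHECK_CONDITION)
        return 0;
    if (reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED)
        return 0;
    return (reply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) != 0;
}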
+
+
+/****************************************************************************/
+/*  SCSI IO 32 Request message structure                                    */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_IO32_REQUEST
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      CDBLength;          /* 04h */
+    U8                      SenseBufferLength;  /* 05h */
+    U8                      Reserved;           /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      LUN[8];             /* 0Ch */
+    U32                     Control;            /* 14h */
+    U8                      CDB[32];            /* 18h */
+    U32                     DataLength;         /* 38h */
+    U32                     SenseBufferLowAddr; /* 3Ch */
+    SGE_IO_UNION            SGL;                /* 40h */
+} MSG_SCSI_IO32_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO32_REQUEST,
+  SCSIIO32Request_t, MPI_POINTER pSCSIIO32Request_t;
+
+/* SCSI IO 32 uses the same defines as above for SCSI IO */
+
+
+/****************************************************************************/
+/*  SCSI Task Management messages                                           */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_TASK_MGMT
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Reserved;           /* 04h */
+    U8                      TaskType;           /* 05h */
+    U8                      Reserved1;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      LUN[8];             /* 0Ch */
+    U32                     Reserved2[7];       /* 14h */
+    U32                     TaskMsgContext;     /* 30h */
+} MSG_SCSI_TASK_MGMT, MPI_POINTER PTR_SCSI_TASK_MGMT,
+  SCSITaskMgmt_t, MPI_POINTER pSCSITaskMgmt_t;
+
+/* TaskType values */
+
+#define MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK            (0x01)
+#define MPI_SCSITASKMGMT_TASKTYPE_ABRT_TASK_SET         (0x02)
+#define MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET          (0x03)
+#define MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS             (0x04)
+#define MPI_SCSITASKMGMT_TASKTYPE_LOGICAL_UNIT_RESET    (0x05)
+#define MPI_SCSITASKMGMT_TASKTYPE_CLEAR_TASK_SET        (0x06)
+
+/* MsgFlags bits */
+#define MPI_SCSITASKMGMT_MSGFLAGS_TARGET_RESET_OPTION   (0x00)
+#define MPI_SCSITASKMGMT_MSGFLAGS_LIP_RESET_OPTION      (0x02)
+#define MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION (0x04)
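
A minimal sketch of a target-reset task management frame using the TaskType values above; the helper is hypothetical and MPI_FUNCTION_SCSI_TASK_MGMT is assumed to come from mpi.h.

#include <string.h>
#include "mpi.h"
#include "mpi_init.h"

static void example_fill_target_reset(SCSITaskMgmt_t *tm, U32 msg_context,
                                      U8 target, U8 bus)
{
    memset(tm, 0, sizeof(*tm));
    tm->Function   = MPI_FUNCTION_SCSI_TASK_MGMT;   /* assumed, from mpi.h */
    tm->MsgContext = msg_context;
    tm->TargetID   = target;
    tm->Bus        = bus;
    tm->TaskType   = MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET;
    /* TaskMsgContext is only meaningful for ABORT_TASK, where it names the
     * MsgContext of the I/O to be aborted. */
}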
+
+/* SCSI Task Management Reply */
+typedef struct _MSG_SCSI_TASK_MGMT_REPLY
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Reserved;           /* 04h */
+    U8                      TaskType;           /* 05h */
+    U8                      Reserved1;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      Reserved2[2];       /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     TerminationCount;   /* 14h */
+} MSG_SCSI_TASK_MGMT_REPLY, MPI_POINTER PTR_MSG_SCSI_TASK_MGMT_REPLY,
+  SCSITaskMgmtReply_t, MPI_POINTER pSCSITaskMgmtReply_t;
+
+
+/****************************************************************************/
+/*  SCSI Enclosure Processor messages                                       */
+/****************************************************************************/
+
+typedef struct _MSG_SEP_REQUEST
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Action;             /* 04h */
+    U8                      Reserved1;          /* 05h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     SlotStatus;         /* 0Ch */
+} MSG_SEP_REQUEST, MPI_POINTER PTR_MSG_SEP_REQUEST,
+  SEPRequest_t, MPI_POINTER pSEPRequest_t;
+
+/* Action defines */
+#define MPI_SEP_REQ_ACTION_WRITE_STATUS                 (0x00)
+#define MPI_SEP_REQ_ACTION_READ_STATUS                  (0x01)
+
+/* SlotStatus bits for MSG_SEP_REQUEST */
+#define MPI_SEP_REQ_SLOTSTATUS_NO_ERROR                 (0x00000001)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_FAULTY               (0x00000002)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_REBUILDING           (0x00000004)
+#define MPI_SEP_REQ_SLOTSTATUS_IN_FAILED_ARRAY          (0x00000008)
+#define MPI_SEP_REQ_SLOTSTATUS_IN_CRITICAL_ARRAY        (0x00000010)
+#define MPI_SEP_REQ_SLOTSTATUS_PARITY_CHECK             (0x00000020)
+#define MPI_SEP_REQ_SLOTSTATUS_PREDICTED_FAULT          (0x00000040)
+#define MPI_SEP_REQ_SLOTSTATUS_UNCONFIGURED             (0x00000080)
+#define MPI_SEP_REQ_SLOTSTATUS_HOT_SPARE                (0x00000100)
+#define MPI_SEP_REQ_SLOTSTATUS_REBUILD_STOPPED          (0x00000200)
+#define MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST         (0x00020000)
+#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_REMOVE           (0x00040000)
+#define MPI_SEP_REQ_SLOTSTATUS_REQUEST_INSERT           (0x00080000)
+#define MPI_SEP_REQ_SLOTSTATUS_DO_NOT_MOVE              (0x00400000)
+#define MPI_SEP_REQ_SLOTSTATUS_B_ENABLE_BYPASS          (0x04000000)
+#define MPI_SEP_REQ_SLOTSTATUS_A_ENABLE_BYPASS          (0x08000000)
+#define MPI_SEP_REQ_SLOTSTATUS_DEV_OFF                  (0x10000000)
+#define MPI_SEP_REQ_SLOTSTATUS_SWAP_RESET               (0x80000000)
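
For instance, turning on a slot's identify indicator uses a write-status action with the matching SlotStatus bit. This is a sketch only: the helper is hypothetical and the SEP function code from mpi.h is passed in by the caller.

#include <string.h>
#include "mpi.h"
#include "mpi_init.h"

static void example_fill_sep_identify(SEPRequest_t *sep, U32 msg_context,
                                      U8 sep_function,  /* SEP code from mpi.h */
                                      U8 target, U8 bus)
{
    memset(sep, 0, sizeof(*sep));
    sep->Function   = sep_function;
    sep->MsgContext = msg_context;
    sep->TargetID   = target;
    sep->Bus        = bus;
    sep->Action     = MPI_SEP_REQ_ACTION_WRITE_STATUS;
    sep->SlotStatus = MPI_SEP_REQ_SLOTSTATUS_IDENTIFY_REQUEST;
}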
+
+
+typedef struct _MSG_SEP_REPLY
+{
+    U8                      TargetID;           /* 00h */
+    U8                      Bus;                /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Action;             /* 04h */
+    U8                      Reserved1;          /* 05h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     SlotStatus;         /* 14h */
+} MSG_SEP_REPLY, MPI_POINTER PTR_MSG_SEP_REPLY,
+  SEPReply_t, MPI_POINTER pSEPReply_t;
+
+/* SlotStatus bits for MSG_SEP_REPLY */
+#define MPI_SEP_REPLY_SLOTSTATUS_NO_ERROR               (0x00000001)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_FAULTY             (0x00000002)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_REBUILDING         (0x00000004)
+#define MPI_SEP_REPLY_SLOTSTATUS_IN_FAILED_ARRAY        (0x00000008)
+#define MPI_SEP_REPLY_SLOTSTATUS_IN_CRITICAL_ARRAY      (0x00000010)
+#define MPI_SEP_REPLY_SLOTSTATUS_PARITY_CHECK           (0x00000020)
+#define MPI_SEP_REPLY_SLOTSTATUS_PREDICTED_FAULT        (0x00000040)
+#define MPI_SEP_REPLY_SLOTSTATUS_UNCONFIGURED           (0x00000080)
+#define MPI_SEP_REPLY_SLOTSTATUS_HOT_SPARE              (0x00000100)
+#define MPI_SEP_REPLY_SLOTSTATUS_REBUILD_STOPPED        (0x00000200)
+#define MPI_SEP_REPLY_SLOTSTATUS_REPORT                 (0x00010000)
+#define MPI_SEP_REPLY_SLOTSTATUS_IDENTIFY_REQUEST       (0x00020000)
+#define MPI_SEP_REPLY_SLOTSTATUS_REMOVE_READY           (0x00040000)
+#define MPI_SEP_REPLY_SLOTSTATUS_INSERT_READY           (0x00080000)
+#define MPI_SEP_REPLY_SLOTSTATUS_DO_NOT_REMOVE          (0x00400000)
+#define MPI_SEP_REPLY_SLOTSTATUS_B_BYPASS_ENABLED       (0x01000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_A_BYPASS_ENABLED       (0x02000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_B_ENABLE_BYPASS        (0x04000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_A_ENABLE_BYPASS        (0x08000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_DEV_OFF                (0x10000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_FAULT_SENSED           (0x40000000)
+#define MPI_SEP_REPLY_SLOTSTATUS_SWAPPED                (0x80000000)
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_ioc.h b/drivers/message/fusion/lsi/mpi_ioc.h
new file mode 100644
index 0000000..82445d1
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_ioc.h
@@ -0,0 +1,770 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_ioc.h
+ *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
+ *  Creation Date:  August 11, 2000
+ *
+ *    mpi_ioc.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added _MSG_IOC_INIT_REPLY structure.
+ *  06-06-00  01.00.01  Added CurReplyFrameSize field to _MSG_IOC_FACTS_REPLY.
+ *  06-12-00  01.00.02  Added _MSG_PORT_ENABLE_REPLY structure.
+ *                      Added _MSG_EVENT_ACK_REPLY structure.
+ *                      Added _MSG_FW_DOWNLOAD_REPLY structure.
+ *                      Added _MSG_TOOLBOX_REPLY structure.
+ *  06-30-00  01.00.03  Added MaxLanBuckets to _PORT_FACT_REPLY structure.
+ *  07-27-00  01.00.04  Added _EVENT_DATA structure definitions for _SCSI,
+ *                      _LINK_STATUS, _LOOP_STATE and _LOGOUT.
+ *  08-11-00  01.00.05  Switched positions of MsgLength and Function fields in
+ *                      _MSG_EVENT_ACK_REPLY structure to match specification.
+ *  11-02-00  01.01.01  Original release for post 1.0 work.
+ *                      Added a value for Manufacturer to WhoInit.
+ *  12-04-00  01.01.02  Modified IOCFacts reply, added FWUpload messages, and
+ *                      removed toolbox message.
+ *  01-09-01  01.01.03  Added event enabled and disabled defines.
+ *                      Added structures for FwHeader and DataHeader.
+ *                      Added ImageType to FwUpload reply.
+ *  02-20-01  01.01.04  Started using MPI_POINTER.
+ *  02-27-01  01.01.05  Added event for RAID status change and its event data.
+ *                      Added IocNumber field to MSG_IOC_FACTS_REPLY.
+ *  03-27-01  01.01.06  Added defines for ProductId field of MPI_FW_HEADER.
+ *                      Added structure offset comments.
+ *  04-09-01  01.01.07  Added structure EVENT_DATA_EVENT_CHANGE.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *                      New format for FWVersion and ProductId in
+ *                      MSG_IOC_FACTS_REPLY and MPI_FW_HEADER.
+ *  08-31-01  01.02.02  Added event MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE and
+ *                      related structure and defines.
+ *                      Added event MPI_EVENT_ON_BUS_TIMER_EXPIRED.
+ *                      Added MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE.
+ *                      Replaced a reserved field in MSG_IOC_FACTS_REPLY with
+ *                      IOCExceptions and changed DataImageSize to reserved.
+ *                      Added MPI_FW_DOWNLOAD_ITYPE_NVSTORE_DATA and
+ *                      MPI_FW_UPLOAD_ITYPE_NVDATA.
+ *  09-28-01  01.02.03  Modified Event Data for Integrated RAID.
+ *  11-01-01  01.02.04  Added defines for MPI_EXT_IMAGE_HEADER ImageType field.
+ *  03-14-02  01.02.05  Added HeaderVersion field to MSG_IOC_FACTS_REPLY.
+ *  05-31-02  01.02.06  Added define for
+ *                      MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID.
+ *                      Added AliasIndex to EVENT_DATA_LOGOUT structure.
+ *  04-01-03  01.02.07  Added defines for MPI_FW_HEADER_SIGNATURE_.
+ *  06-26-03  01.02.08  Added new values to the product family defines.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_IOC_H
+#define MPI_IOC_H
+
+
+/*****************************************************************************
+*
+*               I O C    M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  IOCInit message                                                         */
+/****************************************************************************/
+
+typedef struct _MSG_IOC_INIT
+{
+    U8                      WhoInit;                    /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Flags;                      /* 04h */
+    U8                      MaxDevices;                 /* 05h */
+    U8                      MaxBuses;                   /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     ReplyFrameSize;             /* 0Ch */
+    U8                      Reserved1[2];               /* 0Eh */
+    U32                     HostMfaHighAddr;            /* 10h */
+    U32                     SenseBufferHighAddr;        /* 14h */
+    U32                     ReplyFifoHostSignalingAddr; /* 18h */
+} MSG_IOC_INIT, MPI_POINTER PTR_MSG_IOC_INIT,
+  IOCInit_t, MPI_POINTER pIOCInit_t;
+
+/* WhoInit values */
+#define MPI_WHOINIT_NO_ONE                          (0x00)
+#define MPI_WHOINIT_SYSTEM_BIOS                     (0x01)
+#define MPI_WHOINIT_ROM_BIOS                        (0x02)
+#define MPI_WHOINIT_PCI_PEER                        (0x03)
+#define MPI_WHOINIT_HOST_DRIVER                     (0x04)
+#define MPI_WHOINIT_MANUFACTURER                    (0x05)
+
+/* Flags values */
+#define MPI_IOCINIT_FLAGS_DISCARD_FW_IMAGE          (0x01)
+#define MPI_IOCINIT_FLAGS_REPLY_FIFO_HOST_SIGNAL    (0x02)
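
A sketch of how a host driver would populate IOCInit during bring-up, with MaxDevices/MaxBuses echoed from an earlier IOCFacts reply. The helper is illustrative and MPI_FUNCTION_IOC_INIT is assumed to come from mpi.h.

#include <string.h>
#include "mpi.h"
#include "mpi_ioc.h"

static void example_fill_ioc_init(IOCInit_t *init, U32 msg_context,
                                  U8 max_devices, U8 max_buses,
                                  U16 reply_frame_size,
                                  U32 host_mfa_high, U32 sense_high)
{
    memset(init, 0, sizeof(*init));
    init->Function            = MPI_FUNCTION_IOC_INIT;   /* assumed, from mpi.h */
    init->MsgContext          = msg_context;
    init->WhoInit             = MPI_WHOINIT_HOST_DRIVER;
    init->MaxDevices          = max_devices;   /* typically from IOCFacts */
    init->MaxBuses            = max_buses;
    init->ReplyFrameSize      = reply_frame_size;
    init->HostMfaHighAddr     = host_mfa_high;
    init->SenseBufferHighAddr = sense_high;
}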
+
+typedef struct _MSG_IOC_INIT_REPLY
+{
+    U8                      WhoInit;                    /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Flags;                      /* 04h */
+    U8                      MaxDevices;                 /* 05h */
+    U8                      MaxBuses;                   /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_IOC_INIT_REPLY, MPI_POINTER PTR_MSG_IOC_INIT_REPLY,
+  IOCInitReply_t, MPI_POINTER pIOCInitReply_t;
+
+
+
+/****************************************************************************/
+/*  IOC Facts message                                                       */
+/****************************************************************************/
+
+typedef struct _MSG_IOC_FACTS
+{
+    U8                      Reserved[2];                /* 00h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+} MSG_IOC_FACTS, MPI_POINTER PTR_IOC_FACTS,
+  IOCFacts_t, MPI_POINTER pIOCFacts_t;
+
+typedef struct _MPI_FW_VERSION_STRUCT
+{
+    U8                      Dev;                        /* 00h */
+    U8                      Unit;                       /* 01h */
+    U8                      Minor;                      /* 02h */
+    U8                      Major;                      /* 03h */
+} MPI_FW_VERSION_STRUCT;
+
+typedef union _MPI_FW_VERSION
+{
+    MPI_FW_VERSION_STRUCT   Struct;
+    U32                     Word;
+} MPI_FW_VERSION;
+
+/* IOC Facts Reply */
+typedef struct _MSG_IOC_FACTS_REPLY
+{
+    U16                     MsgVersion;                 /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     HeaderVersion;              /* 04h */
+    U8                      IOCNumber;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     IOCExceptions;              /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U8                      MaxChainDepth;              /* 14h */
+    U8                      WhoInit;                    /* 15h */
+    U8                      BlockSize;                  /* 16h */
+    U8                      Flags;                      /* 17h */
+    U16                     ReplyQueueDepth;            /* 18h */
+    U16                     RequestFrameSize;           /* 1Ah */
+    U16                     Reserved_0101_FWVersion;    /* 1Ch */ /* obsolete 16-bit FWVersion */
+    U16                     ProductID;                  /* 1Eh */
+    U32                     CurrentHostMfaHighAddr;     /* 20h */
+    U16                     GlobalCredits;              /* 24h */
+    U8                      NumberOfPorts;              /* 26h */
+    U8                      EventState;                 /* 27h */
+    U32                     CurrentSenseBufferHighAddr; /* 28h */
+    U16                     CurReplyFrameSize;          /* 2Ch */
+    U8                      MaxDevices;                 /* 2Eh */
+    U8                      MaxBuses;                   /* 2Fh */
+    U32                     FWImageSize;                /* 30h */
+    U32                     IOCCapabilities;            /* 34h */
+    MPI_FW_VERSION          FWVersion;                  /* 38h */
+    U16                     HighPriorityQueueDepth;     /* 3Ch */
+    U16                     Reserved2;                  /* 3Eh */
+} MSG_IOC_FACTS_REPLY, MPI_POINTER PTR_MSG_IOC_FACTS_REPLY,
+  IOCFactsReply_t, MPI_POINTER pIOCFactsReply_t;
+
+#define MPI_IOCFACTS_MSGVERSION_MAJOR_MASK          (0xFF00)
+#define MPI_IOCFACTS_MSGVERSION_MINOR_MASK          (0x00FF)
+
+#define MPI_IOCFACTS_HEADERVERSION_UNIT_MASK        (0xFF00)
+#define MPI_IOCFACTS_HEADERVERSION_DEV_MASK         (0x00FF)
+
+#define MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL    (0x0001)
+#define MPI_IOCFACTS_EXCEPT_RAID_CONFIG_INVALID     (0x0002)
+#define MPI_IOCFACTS_EXCEPT_FW_CHECKSUM_FAIL        (0x0004)
+#define MPI_IOCFACTS_EXCEPT_PERSISTENT_TABLE_FULL   (0x0008)
+
+#define MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT         (0x01)
+
+#define MPI_IOCFACTS_EVENTSTATE_DISABLED            (0x00)
+#define MPI_IOCFACTS_EVENTSTATE_ENABLED             (0x01)
+
+#define MPI_IOCFACTS_CAPABILITY_HIGH_PRI_Q          (0x00000001)
+#define MPI_IOCFACTS_CAPABILITY_REPLY_HOST_SIGNAL   (0x00000002)
+#define MPI_IOCFACTS_CAPABILITY_QUEUE_FULL_HANDLING (0x00000004)
+#define MPI_IOCFACTS_CAPABILITY_DIAG_TRACE_BUFFER   (0x00000008)
+#define MPI_IOCFACTS_CAPABILITY_SNAPSHOT_BUFFER     (0x00000010)
+#define MPI_IOCFACTS_CAPABILITY_EXTENDED_BUFFER     (0x00000020)
+#define MPI_IOCFACTS_CAPABILITY_EEDP                (0x00000040)
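
The version words in this reply are packed bit fields; here is a sketch of unpacking them with the masks above and the MPI_FW_VERSION union (the print helper is illustrative only).

#include <stdio.h>
#include "mpi.h"
#include "mpi_ioc.h"

static void example_print_ioc_facts(const IOCFactsReply_t *facts)
{
    unsigned mpi_major = (facts->MsgVersion & MPI_IOCFACTS_MSGVERSION_MAJOR_MASK) >> 8;
    unsigned mpi_minor =  facts->MsgVersion & MPI_IOCFACTS_MSGVERSION_MINOR_MASK;

    printf("MPI %u.%u, FW %u.%u.%u.%u, %u port(s), %u global credits\n",
           mpi_major, mpi_minor,
           (unsigned)facts->FWVersion.Struct.Major,
           (unsigned)facts->FWVersion.Struct.Minor,
           (unsigned)facts->FWVersion.Struct.Unit,
           (unsigned)facts->FWVersion.Struct.Dev,
           (unsigned)facts->NumberOfPorts,
           (unsigned)facts->GlobalCredits);

    if (facts->IOCCapabilities & MPI_IOCFACTS_CAPABILITY_EEDP)
        printf("end-to-end data protection supported\n");
}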
+
+
+
+/*****************************************************************************
+*
+*               P o r t    M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  Port Facts message and Reply                                            */
+/****************************************************************************/
+
+typedef struct _MSG_PORT_FACTS
+{
+     U8                     Reserved[2];                /* 00h */
+     U8                     ChainOffset;                /* 02h */
+     U8                     Function;                   /* 03h */
+     U8                     Reserved1[2];               /* 04h */
+     U8                     PortNumber;                 /* 06h */
+     U8                     MsgFlags;                   /* 07h */
+     U32                    MsgContext;                 /* 08h */
+} MSG_PORT_FACTS, MPI_POINTER PTR_MSG_PORT_FACTS,
+  PortFacts_t, MPI_POINTER pPortFacts_t;
+
+typedef struct _MSG_PORT_FACTS_REPLY
+{
+     U16                    Reserved;                   /* 00h */
+     U8                     MsgLength;                  /* 02h */
+     U8                     Function;                   /* 03h */
+     U16                    Reserved1;                  /* 04h */
+     U8                     PortNumber;                 /* 06h */
+     U8                     MsgFlags;                   /* 07h */
+     U32                    MsgContext;                 /* 08h */
+     U16                    Reserved2;                  /* 0Ch */
+     U16                    IOCStatus;                  /* 0Eh */
+     U32                    IOCLogInfo;                 /* 10h */
+     U8                     Reserved3;                  /* 14h */
+     U8                     PortType;                   /* 15h */
+     U16                    MaxDevices;                 /* 16h */
+     U16                    PortSCSIID;                 /* 18h */
+     U16                    ProtocolFlags;              /* 1Ah */
+     U16                    MaxPostedCmdBuffers;        /* 1Ch */
+     U16                    MaxPersistentIDs;           /* 1Eh */
+     U16                    MaxLanBuckets;              /* 20h */
+     U16                    Reserved4;                  /* 22h */
+     U32                    Reserved5;                  /* 24h */
+} MSG_PORT_FACTS_REPLY, MPI_POINTER PTR_MSG_PORT_FACTS_REPLY,
+  PortFactsReply_t, MPI_POINTER pPortFactsReply_t;
+
+
+/* PortType values */
+
+#define MPI_PORTFACTS_PORTTYPE_INACTIVE         (0x00)
+#define MPI_PORTFACTS_PORTTYPE_SCSI             (0x01)
+#define MPI_PORTFACTS_PORTTYPE_FC               (0x10)
+#define MPI_PORTFACTS_PORTTYPE_ISCSI            (0x20)
+#define MPI_PORTFACTS_PORTTYPE_SAS              (0x30)
+
+/* ProtocolFlags values */
+
+#define MPI_PORTFACTS_PROTOCOL_LOGBUSADDR       (0x01)
+#define MPI_PORTFACTS_PROTOCOL_LAN              (0x02)
+#define MPI_PORTFACTS_PROTOCOL_TARGET           (0x04)
+#define MPI_PORTFACTS_PROTOCOL_INITIATOR        (0x08)
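+
+/*
+ * Illustrative sketch: ProtocolFlags in MSG_PORT_FACTS_REPLY is a bit mask of
+ * the services a port can provide, so individual capabilities are tested with
+ * the defines above.  The example_* helper name and the MPI_EXAMPLE_SNIPPETS
+ * guard are hypothetical.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static int example_port_supports_lan(const MSG_PORT_FACTS_REPLY *pf)
+{
+    return (pf->ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) != 0;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */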
+
+
+/****************************************************************************/
+/*  Port Enable Message                                                     */
+/****************************************************************************/
+
+typedef struct _MSG_PORT_ENABLE
+{
+    U8                      Reserved[2];                /* 00h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[2];               /* 04h */
+    U8                      PortNumber;                 /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+} MSG_PORT_ENABLE, MPI_POINTER PTR_MSG_PORT_ENABLE,
+  PortEnable_t, MPI_POINTER pPortEnable_t;
+
+typedef struct _MSG_PORT_ENABLE_REPLY
+{
+    U8                      Reserved[2];                /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[2];               /* 04h */
+    U8                      PortNumber;                 /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_PORT_ENABLE_REPLY, MPI_POINTER PTR_MSG_PORT_ENABLE_REPLY,
+  PortEnableReply_t, MPI_POINTER pPortEnableReply_t;
+
+
+/*****************************************************************************
+*
+*               E v e n t    M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  Event Notification messages                                             */
+/****************************************************************************/
+
+typedef struct _MSG_EVENT_NOTIFY
+{
+    U8                      Switch;                     /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+} MSG_EVENT_NOTIFY, MPI_POINTER PTR_MSG_EVENT_NOTIFY,
+  EventNotification_t, MPI_POINTER pEventNotification_t;
+
+/* Event Notification Reply */
+
+typedef struct _MSG_EVENT_NOTIFY_REPLY
+{
+     U16                    EventDataLength;            /* 00h */
+     U8                     MsgLength;                  /* 02h */
+     U8                     Function;                   /* 03h */
+     U8                     Reserved1[2];               /* 04h */
+     U8                     AckRequired;                /* 06h */
+     U8                     MsgFlags;                   /* 07h */
+     U32                    MsgContext;                 /* 08h */
+     U8                     Reserved2[2];               /* 0Ch */
+     U16                    IOCStatus;                  /* 0Eh */
+     U32                    IOCLogInfo;                 /* 10h */
+     U32                    Event;                      /* 14h */
+     U32                    EventContext;               /* 18h */
+     U32                    Data[1];                    /* 1Ch */
+} MSG_EVENT_NOTIFY_REPLY, MPI_POINTER PTR_MSG_EVENT_NOTIFY_REPLY,
+  EventNotificationReply_t, MPI_POINTER pEventNotificationReply_t;
+
+/* Event Acknowledge */
+
+typedef struct _MSG_EVENT_ACK
+{
+    U8                      Reserved[2];                /* 00h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     Event;                      /* 0Ch */
+    U32                     EventContext;               /* 10h */
+} MSG_EVENT_ACK, MPI_POINTER PTR_MSG_EVENT_ACK,
+  EventAck_t, MPI_POINTER pEventAck_t;
+
+typedef struct _MSG_EVENT_ACK_REPLY
+{
+    U8                      Reserved[2];                /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_EVENT_ACK_REPLY, MPI_POINTER PTR_MSG_EVENT_ACK_REPLY,
+  EventAckReply_t, MPI_POINTER pEventAckReply_t;
+
+/* Switch */
+
+#define MPI_EVENT_NOTIFICATION_SWITCH_OFF   (0x00)
+#define MPI_EVENT_NOTIFICATION_SWITCH_ON    (0x01)
+
+/* Event */
+
+#define MPI_EVENT_NONE                      (0x00000000)
+#define MPI_EVENT_LOG_DATA                  (0x00000001)
+#define MPI_EVENT_STATE_CHANGE              (0x00000002)
+#define MPI_EVENT_UNIT_ATTENTION            (0x00000003)
+#define MPI_EVENT_IOC_BUS_RESET             (0x00000004)
+#define MPI_EVENT_EXT_BUS_RESET             (0x00000005)
+#define MPI_EVENT_RESCAN                    (0x00000006)
+#define MPI_EVENT_LINK_STATUS_CHANGE        (0x00000007)
+#define MPI_EVENT_LOOP_STATE_CHANGE         (0x00000008)
+#define MPI_EVENT_LOGOUT                    (0x00000009)
+#define MPI_EVENT_EVENT_CHANGE              (0x0000000A)
+#define MPI_EVENT_INTEGRATED_RAID           (0x0000000B)
+#define MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE (0x0000000C)
+#define MPI_EVENT_ON_BUS_TIMER_EXPIRED      (0x0000000D)
+#define MPI_EVENT_QUEUE_FULL                (0x0000000E)
+#define MPI_EVENT_SAS_DEVICE_STATUS_CHANGE  (0x0000000F)
+#define MPI_EVENT_SAS_SES                   (0x00000010)
+#define MPI_EVENT_PERSISTENT_TABLE_FULL     (0x00000011)
+
+/* AckRequired field values */
+
+#define MPI_EVENT_NOTIFICATION_ACK_NOT_REQUIRED (0x00)
+#define MPI_EVENT_NOTIFICATION_ACK_REQUIRED     (0x01)
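+
+/*
+ * Illustrative sketch of the acknowledgement handshake: when an event
+ * notification reply arrives with AckRequired set, the host copies Event and
+ * EventContext into a MSG_EVENT_ACK request (the Function code for EventAck
+ * comes from mpi.h and is set elsewhere).  The example_* helper name and the
+ * MPI_EXAMPLE_SNIPPETS guard are hypothetical; byte ordering is ignored.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static int example_event_needs_ack(const MSG_EVENT_NOTIFY_REPLY *reply,
+                                   MSG_EVENT_ACK *ack)
+{
+    if (reply->AckRequired != MPI_EVENT_NOTIFICATION_ACK_REQUIRED)
+        return 0;
+    ack->Event        = reply->Event;        /* identifies the event        */
+    ack->EventContext = reply->EventContext; /* lets the IOC match the ack  */
+    return 1;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */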
+
+/* EventChange Event data */
+
+typedef struct _EVENT_DATA_EVENT_CHANGE
+{
+    U8                      EventState;                 /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U16                     Reserved1;                  /* 02h */
+} EVENT_DATA_EVENT_CHANGE, MPI_POINTER PTR_EVENT_DATA_EVENT_CHANGE,
+  EventDataEventChange_t, MPI_POINTER pEventDataEventChange_t;
+
+/* SCSI Event data for Port, Bus and Device forms */
+
+typedef struct _EVENT_DATA_SCSI
+{
+    U8                      TargetID;                   /* 00h */
+    U8                      BusPort;                    /* 01h */
+    U16                     Reserved;                   /* 02h */
+} EVENT_DATA_SCSI, MPI_POINTER PTR_EVENT_DATA_SCSI,
+  EventDataScsi_t, MPI_POINTER pEventDataScsi_t;
+
+/* SCSI Device Status Change Event data */
+
+typedef struct _EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE
+{
+    U8                      TargetID;                   /* 00h */
+    U8                      Bus;                        /* 01h */
+    U8                      ReasonCode;                 /* 02h */
+    U8                      LUN;                        /* 03h */
+    U8                      ASC;                        /* 04h */
+    U8                      ASCQ;                       /* 05h */
+    U16                     Reserved;                   /* 06h */
+} EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
+  MPI_POINTER PTR_EVENT_DATA_SCSI_DEVICE_STATUS_CHANGE,
+  MpiEventDataScsiDeviceStatusChange_t,
+  MPI_POINTER pMpiEventDataScsiDeviceStatusChange_t;
+
+/* MPI SCSI Device Status Change Event data ReasonCode values */
+#define MPI_EVENT_SCSI_DEV_STAT_RC_ADDED                (0x03)
+#define MPI_EVENT_SCSI_DEV_STAT_RC_NOT_RESPONDING       (0x04)
+#define MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA           (0x05)
+
+/* SAS Device Status Change Event data */
+
+typedef struct _EVENT_DATA_SAS_DEVICE_STATUS_CHANGE
+{
+    U8                      TargetID;                   /* 00h */
+    U8                      Bus;                        /* 01h */
+    U8                      ReasonCode;                 /* 02h */
+    U8                      Reserved;                   /* 03h */
+    U8                      ASC;                        /* 04h */
+    U8                      ASCQ;                       /* 05h */
+    U16                     DevHandle;                  /* 06h */
+    U32                     DeviceInfo;                 /* 08h */
+} EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+  MPI_POINTER PTR_EVENT_DATA_SAS_DEVICE_STATUS_CHANGE,
+  MpiEventDataSasDeviceStatusChange_t,
+  MPI_POINTER pMpiEventDataSasDeviceStatusChange_t;
+
+/* MPI SAS Device Status Change Event data ReasonCode values */
+#define MPI_EVENT_SAS_DEV_STAT_RC_ADDED                 (0x03)
+#define MPI_EVENT_SAS_DEV_STAT_RC_NOT_RESPONDING        (0x04)
+#define MPI_EVENT_SAS_DEV_STAT_RC_SMART_DATA            (0x05)
+#define MPI_EVENT_SAS_DEV_STAT_RC_NO_PERSIST_ADDED      (0x06)
+
+/* SCSI Event data for Queue Full event */
+
+typedef struct _EVENT_DATA_QUEUE_FULL
+{
+    U8                      TargetID;                   /* 00h */
+    U8                      Bus;                        /* 01h */
+    U16                     CurrentDepth;               /* 02h */
+} EVENT_DATA_QUEUE_FULL, MPI_POINTER PTR_EVENT_DATA_QUEUE_FULL,
+  EventDataQueueFull_t, MPI_POINTER pEventDataQueueFull_t;
+
+/* MPI Link Status Change Event data */
+
+typedef struct _EVENT_DATA_LINK_STATUS
+{
+    U8                      State;                      /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U16                     Reserved1;                  /* 02h */
+    U8                      Reserved2;                  /* 04h */
+    U8                      Port;                       /* 05h */
+    U16                     Reserved3;                  /* 06h */
+} EVENT_DATA_LINK_STATUS, MPI_POINTER PTR_EVENT_DATA_LINK_STATUS,
+  EventDataLinkStatus_t, MPI_POINTER pEventDataLinkStatus_t;
+
+#define MPI_EVENT_LINK_STATUS_FAILURE       (0x00000000)
+#define MPI_EVENT_LINK_STATUS_ACTIVE        (0x00000001)
+
+/* MPI Loop State Change Event data */
+
+typedef struct _EVENT_DATA_LOOP_STATE
+{
+    U8                      Character4;                 /* 00h */
+    U8                      Character3;                 /* 01h */
+    U8                      Type;                       /* 02h */
+    U8                      Reserved;                   /* 03h */
+    U8                      Reserved1;                  /* 04h */
+    U8                      Port;                       /* 05h */
+    U16                     Reserved2;                  /* 06h */
+} EVENT_DATA_LOOP_STATE, MPI_POINTER PTR_EVENT_DATA_LOOP_STATE,
+  EventDataLoopState_t, MPI_POINTER pEventDataLoopState_t;
+
+#define MPI_EVENT_LOOP_STATE_CHANGE_LIP     (0x0001)
+#define MPI_EVENT_LOOP_STATE_CHANGE_LPE     (0x0002)
+#define MPI_EVENT_LOOP_STATE_CHANGE_LPB     (0x0003)
+
+/* MPI LOGOUT Event data */
+
+typedef struct _EVENT_DATA_LOGOUT
+{
+    U32                     NPortID;                    /* 00h */
+    U8                      AliasIndex;                 /* 04h */
+    U8                      Port;                       /* 05h */
+    U16                     Reserved1;                  /* 06h */
+} EVENT_DATA_LOGOUT, MPI_POINTER PTR_EVENT_DATA_LOGOUT,
+  EventDataLogout_t, MPI_POINTER pEventDataLogout_t;
+
+#define MPI_EVENT_LOGOUT_ALL_ALIASES        (0xFF)
+
+
+/* MPI Integrated RAID Event data */
+
+typedef struct _EVENT_DATA_RAID
+{
+    U8                      VolumeID;                   /* 00h */
+    U8                      VolumeBus;                  /* 01h */
+    U8                      ReasonCode;                 /* 02h */
+    U8                      PhysDiskNum;                /* 03h */
+    U8                      ASC;                        /* 04h */
+    U8                      ASCQ;                       /* 05h */
+    U16                     Reserved;                   /* 06h */
+    U32                     SettingsStatus;             /* 08h */
+} EVENT_DATA_RAID, MPI_POINTER PTR_EVENT_DATA_RAID,
+  MpiEventDataRaid_t, MPI_POINTER pMpiEventDataRaid_t;
+
+/* MPI Integrated RAID Event data ReasonCode values */
+#define MPI_EVENT_RAID_RC_VOLUME_CREATED                (0x00)
+#define MPI_EVENT_RAID_RC_VOLUME_DELETED                (0x01)
+#define MPI_EVENT_RAID_RC_VOLUME_SETTINGS_CHANGED       (0x02)
+#define MPI_EVENT_RAID_RC_VOLUME_STATUS_CHANGED         (0x03)
+#define MPI_EVENT_RAID_RC_VOLUME_PHYSDISK_CHANGED       (0x04)
+#define MPI_EVENT_RAID_RC_PHYSDISK_CREATED              (0x05)
+#define MPI_EVENT_RAID_RC_PHYSDISK_DELETED              (0x06)
+#define MPI_EVENT_RAID_RC_PHYSDISK_SETTINGS_CHANGED     (0x07)
+#define MPI_EVENT_RAID_RC_PHYSDISK_STATUS_CHANGED       (0x08)
+#define MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED             (0x09)
+#define MPI_EVENT_RAID_RC_SMART_DATA                    (0x0A)
+#define MPI_EVENT_RAID_RC_REPLACE_ACTION_STARTED        (0x0B)
+
+
+/*****************************************************************************
+*
+*               F i r m w a r e    L o a d    M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/*  Firmware Download message and associated structures                     */
+/****************************************************************************/
+
+typedef struct _MSG_FW_DOWNLOAD
+{
+    U8                      ImageType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    SGE_MPI_UNION           SGL;                        /* 0Ch */
+} MSG_FW_DOWNLOAD, MPI_POINTER PTR_MSG_FW_DOWNLOAD,
+  FWDownload_t, MPI_POINTER pFWDownload_t;
+
+#define MPI_FW_DOWNLOAD_ITYPE_RESERVED      (0x00)
+#define MPI_FW_DOWNLOAD_ITYPE_FW            (0x01)
+#define MPI_FW_DOWNLOAD_ITYPE_BIOS          (0x02)
+#define MPI_FW_DOWNLOAD_ITYPE_NVDATA        (0x03)
+#define MPI_FW_DOWNLOAD_ITYPE_BOOTLOADER    (0x04)
+
+
+typedef struct _FWDownloadTCSGE
+{
+    U8                      Reserved;                   /* 00h */
+    U8                      ContextSize;                /* 01h */
+    U8                      DetailsLength;              /* 02h */
+    U8                      Flags;                      /* 03h */
+    U32                     Reserved_0100_Checksum;     /* 04h */ /* obsolete Checksum */
+    U32                     ImageOffset;                /* 08h */
+    U32                     ImageSize;                  /* 0Ch */
+} FW_DOWNLOAD_TCSGE, MPI_POINTER PTR_FW_DOWNLOAD_TCSGE,
+  FWDownloadTCSGE_t, MPI_POINTER pFWDownloadTCSGE_t;
+
+/* Firmware Download reply */
+typedef struct _MSG_FW_DOWNLOAD_REPLY
+{
+    U8                      ImageType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_FW_DOWNLOAD_REPLY, MPI_POINTER PTR_MSG_FW_DOWNLOAD_REPLY,
+  FWDownloadReply_t, MPI_POINTER pFWDownloadReply_t;
+
+
+/****************************************************************************/
+/*  Firmware Upload message and associated structures                       */
+/****************************************************************************/
+
+typedef struct _MSG_FW_UPLOAD
+{
+    U8                      ImageType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    SGE_MPI_UNION           SGL;                        /* 0Ch */
+} MSG_FW_UPLOAD, MPI_POINTER PTR_MSG_FW_UPLOAD,
+  FWUpload_t, MPI_POINTER pFWUpload_t;
+
+#define MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM      (0x00)
+#define MPI_FW_UPLOAD_ITYPE_FW_FLASH        (0x01)
+#define MPI_FW_UPLOAD_ITYPE_BIOS_FLASH      (0x02)
+#define MPI_FW_UPLOAD_ITYPE_NVDATA          (0x03)
+#define MPI_FW_UPLOAD_ITYPE_BOOTLOADER      (0x04)
+
+typedef struct _FWUploadTCSGE
+{
+    U8                      Reserved;                   /* 00h */
+    U8                      ContextSize;                /* 01h */
+    U8                      DetailsLength;              /* 02h */
+    U8                      Flags;                      /* 03h */
+    U32                     Reserved1;                  /* 04h */
+    U32                     ImageOffset;                /* 08h */
+    U32                     ImageSize;                  /* 0Ch */
+} FW_UPLOAD_TCSGE, MPI_POINTER PTR_FW_UPLOAD_TCSGE,
+  FWUploadTCSGE_t, MPI_POINTER pFWUploadTCSGE_t;
+
+/* Firmware Upload reply */
+typedef struct _MSG_FW_UPLOAD_REPLY
+{
+    U8                      ImageType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      Reserved1[3];               /* 04h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     ActualImageSize;            /* 14h */
+} MSG_FW_UPLOAD_REPLY, MPI_POINTER PTR_MSG_FW_UPLOAD_REPLY,
+  FWUploadReply_t, MPI_POINTER pFWUploadReply_t;
+
+
+typedef struct _MPI_FW_HEADER
+{
+    U32                     ArmBranchInstruction0;      /* 00h */
+    U32                     Signature0;                 /* 04h */
+    U32                     Signature1;                 /* 08h */
+    U32                     Signature2;                 /* 0Ch */
+    U32                     ArmBranchInstruction1;      /* 10h */
+    U32                     ArmBranchInstruction2;      /* 14h */
+    U32                     Reserved;                   /* 18h */
+    U32                     Checksum;                   /* 1Ch */
+    U16                     VendorId;                   /* 20h */
+    U16                     ProductId;                  /* 22h */
+    MPI_FW_VERSION          FWVersion;                  /* 24h */
+    U32                     SeqCodeVersion;             /* 28h */
+    U32                     ImageSize;                  /* 2Ch */
+    U32                     NextImageHeaderOffset;      /* 30h */
+    U32                     LoadStartAddress;           /* 34h */
+    U32                     IopResetVectorValue;        /* 38h */
+    U32                     IopResetRegAddr;            /* 3Ch */
+    U32                     VersionNameWhat;            /* 40h */
+    U8                      VersionName[32];            /* 44h */
+    U32                     VendorNameWhat;             /* 64h */
+    U8                      VendorName[32];             /* 68h */
+} MPI_FW_HEADER, MPI_POINTER PTR_MPI_FW_HEADER,
+  MpiFwHeader_t, MPI_POINTER pMpiFwHeader_t;
+
+#define MPI_FW_HEADER_WHAT_SIGNATURE        (0x29232840)
+
+/* defines for using the ProductId field */
+#define MPI_FW_HEADER_PID_TYPE_MASK             (0xF000)
+#define MPI_FW_HEADER_PID_TYPE_SCSI             (0x0000)
+#define MPI_FW_HEADER_PID_TYPE_FC               (0x1000)
+#define MPI_FW_HEADER_PID_TYPE_SAS              (0x2000)
+
+#define MPI_FW_HEADER_SIGNATURE_0               (0x5AEAA55A)
+#define MPI_FW_HEADER_SIGNATURE_1               (0xA55AEAA5)
+#define MPI_FW_HEADER_SIGNATURE_2               (0x5AA55AEA)
+
+#define MPI_FW_HEADER_PID_PROD_MASK                     (0x0F00)
+#define MPI_FW_HEADER_PID_PROD_INITIATOR_SCSI           (0x0100)
+#define MPI_FW_HEADER_PID_PROD_TARGET_INITIATOR_SCSI    (0x0200)
+#define MPI_FW_HEADER_PID_PROD_TARGET_SCSI              (0x0300)
+#define MPI_FW_HEADER_PID_PROD_IM_SCSI                  (0x0400)
+#define MPI_FW_HEADER_PID_PROD_IS_SCSI                  (0x0500)
+#define MPI_FW_HEADER_PID_PROD_CTX_SCSI                 (0x0600)
+
+#define MPI_FW_HEADER_PID_FAMILY_MASK           (0x00FF)
+/* SCSI */
+#define MPI_FW_HEADER_PID_FAMILY_1030A0_SCSI    (0x0001)
+#define MPI_FW_HEADER_PID_FAMILY_1030B0_SCSI    (0x0002)
+#define MPI_FW_HEADER_PID_FAMILY_1030B1_SCSI    (0x0003)
+#define MPI_FW_HEADER_PID_FAMILY_1030C0_SCSI    (0x0004)
+#define MPI_FW_HEADER_PID_FAMILY_1020A0_SCSI    (0x0005)
+#define MPI_FW_HEADER_PID_FAMILY_1020B0_SCSI    (0x0006)
+#define MPI_FW_HEADER_PID_FAMILY_1020B1_SCSI    (0x0007)
+#define MPI_FW_HEADER_PID_FAMILY_1020C0_SCSI    (0x0008)
+#define MPI_FW_HEADER_PID_FAMILY_1035A0_SCSI    (0x0009)
+#define MPI_FW_HEADER_PID_FAMILY_1035B0_SCSI    (0x000A)
+#define MPI_FW_HEADER_PID_FAMILY_1030TA0_SCSI   (0x000B)
+#define MPI_FW_HEADER_PID_FAMILY_1020TA0_SCSI   (0x000C)
+/* Fibre Channel */
+#define MPI_FW_HEADER_PID_FAMILY_909_FC         (0x0000)
+#define MPI_FW_HEADER_PID_FAMILY_919_FC         (0x0001)
+#define MPI_FW_HEADER_PID_FAMILY_919X_FC        (0x0002)
+#define MPI_FW_HEADER_PID_FAMILY_919XL_FC       (0x0003)
+#define MPI_FW_HEADER_PID_FAMILY_949_FC         (0x0004)
+#define MPI_FW_HEADER_PID_FAMILY_959_FC         (0x0005)
+/* SAS */
+#define MPI_FW_HEADER_PID_FAMILY_1064_SAS       (0x0001)
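+
+/*
+ * Illustrative sketch: the ProductId field of MPI_FW_HEADER is decoded with
+ * the type, product and family masks above.  Because the family codes overlap
+ * between SCSI, FC and SAS, the type bits must be examined before the family
+ * bits are interpreted.  The example_* helper name and the
+ * MPI_EXAMPLE_SNIPPETS guard are hypothetical.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static int example_fw_image_is_sas(const MPI_FW_HEADER *hdr)
+{
+    return (hdr->ProductId & MPI_FW_HEADER_PID_TYPE_MASK) ==
+            MPI_FW_HEADER_PID_TYPE_SAS;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */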
+
+typedef struct _MPI_EXT_IMAGE_HEADER
+{
+    U8                      ImageType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U16                     Reserved1;                  /* 02h */
+    U32                     Checksum;                   /* 04h */
+    U32                     ImageSize;                  /* 08h */
+    U32                     NextImageHeaderOffset;      /* 0Ch */
+    U32                     LoadStartAddress;           /* 10h */
+    U32                     Reserved2;                  /* 14h */
+} MPI_EXT_IMAGE_HEADER, MPI_POINTER PTR_MPI_EXT_IMAGE_HEADER,
+  MpiExtImageHeader_t, MPI_POINTER pMpiExtImageHeader_t;
+
+/* defines for the ImageType field */
+#define MPI_EXT_IMAGE_TYPE_UNSPECIFIED          (0x00)
+#define MPI_EXT_IMAGE_TYPE_FW                   (0x01)
+#define MPI_EXT_IMAGE_TYPE_NVDATA               (0x03)
+#define MPI_EXT_IMAGE_TYPE_BOOTLOADER           (0x04)
+
+#endif
diff --git a/drivers/message/fusion/lsi/mpi_lan.h b/drivers/message/fusion/lsi/mpi_lan.h
new file mode 100644
index 0000000..3ced127
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_lan.h
@@ -0,0 +1,212 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_lan.h
+ *          Title:  MPI LAN messages and structures
+ *  Creation Date:  June 30, 2000
+ *
+ *    mpi_lan.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  05-24-00  00.10.02  Added LANStatus field to _MSG_LAN_SEND_REPLY.
+ *                      Added LANStatus field to _MSG_LAN_RECEIVE_POST_REPLY.
+ *                      Moved ListCount field in _MSG_LAN_RECEIVE_POST_REPLY.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-12-00  01.00.02  Added MPI_ to BUCKETSTATUS_ definitions.
+ *  06-22-00  01.00.03  Major changes to match new LAN definition in 1.0 spec.
+ *  06-30-00  01.00.04  Added Context Reply definitions per revised proposal.
+ *                      Changed transaction context usage to bucket/buffer.
+ *  07-05-00  01.00.05  Moved LAN_RECEIVE_POST_BUCKET_CONTEXT_MASK definition
+ *                      to the LAN private header file.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  02-20-01  01.01.02  Started using MPI_POINTER.
+ *  03-27-01  01.01.03  Added structure offset comments.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_LAN_H
+#define MPI_LAN_H
+
+
+/******************************************************************************
+*
+*               L A N    M e s s a g e s
+*
+*******************************************************************************/
+
+/* LANSend messages */
+
+typedef struct _MSG_LAN_SEND_REQUEST
+{
+    U16                     Reserved;           /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    SGE_MPI_UNION           SG_List[1];         /* 0Ch */
+} MSG_LAN_SEND_REQUEST, MPI_POINTER PTR_MSG_LAN_SEND_REQUEST,
+  LANSendRequest_t, MPI_POINTER pLANSendRequest_t;
+
+
+typedef struct _MSG_LAN_SEND_REPLY
+{
+    U16                     Reserved;           /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Reserved2;          /* 04h */
+    U8                      NumberOfContexts;   /* 05h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     BufferContext;      /* 14h */
+} MSG_LAN_SEND_REPLY, MPI_POINTER PTR_MSG_LAN_SEND_REPLY,
+  LANSendReply_t, MPI_POINTER pLANSendReply_t;
+
+
+/* LANReceivePost */
+
+typedef struct _MSG_LAN_RECEIVE_POST_REQUEST
+{
+    U16                     Reserved;           /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     BucketCount;        /* 0Ch */
+    SGE_MPI_UNION           SG_List[1];         /* 10h */
+} MSG_LAN_RECEIVE_POST_REQUEST, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REQUEST,
+  LANReceivePostRequest_t, MPI_POINTER pLANReceivePostRequest_t;
+
+
+typedef struct _MSG_LAN_RECEIVE_POST_REPLY
+{
+    U16                     Reserved;           /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      Reserved2;          /* 04h */
+    U8                      NumberOfContexts;   /* 05h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     BucketsRemaining;   /* 14h */
+    U32                     PacketOffset;       /* 18h */
+    U32                     PacketLength;       /* 1Ch */
+    U32                     BucketContext[1];   /* 20h */
+} MSG_LAN_RECEIVE_POST_REPLY, MPI_POINTER PTR_MSG_LAN_RECEIVE_POST_REPLY,
+  LANReceivePostReply_t, MPI_POINTER pLANReceivePostReply_t;
+
+
+/* LANReset */
+
+typedef struct _MSG_LAN_RESET_REQUEST
+{
+    U16                     Reserved;           /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+} MSG_LAN_RESET_REQUEST, MPI_POINTER PTR_MSG_LAN_RESET_REQUEST,
+  LANResetRequest_t, MPI_POINTER pLANResetRequest_t;
+
+
+typedef struct _MSG_LAN_RESET_REPLY
+{
+    U16                     Reserved;           /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      PortNumber;         /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved3;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_LAN_RESET_REPLY, MPI_POINTER PTR_MSG_LAN_RESET_REPLY,
+  LANResetReply_t, MPI_POINTER pLANResetReply_t;
+
+
+/****************************************************************************/
+/* LAN Context Reply defines and macros                                     */
+/****************************************************************************/
+
+#define LAN_REPLY_PACKET_LENGTH_MASK            (0x0000FFFF)
+#define LAN_REPLY_PACKET_LENGTH_SHIFT           (0)
+#define LAN_REPLY_BUCKET_CONTEXT_MASK           (0x07FF0000)
+#define LAN_REPLY_BUCKET_CONTEXT_SHIFT          (16)
+#define LAN_REPLY_BUFFER_CONTEXT_MASK           (0x07FFFFFF)
+#define LAN_REPLY_BUFFER_CONTEXT_SHIFT          (0)
+#define LAN_REPLY_FORM_MASK                     (0x18000000)
+#define LAN_REPLY_FORM_RECEIVE_SINGLE           (0x00)
+#define LAN_REPLY_FORM_RECEIVE_MULTIPLE         (0x01)
+#define LAN_REPLY_FORM_SEND_SINGLE              (0x02)
+#define LAN_REPLY_FORM_MESSAGE_CONTEXT          (0x03)
+#define LAN_REPLY_FORM_SHIFT                    (27)
+
+#define GET_LAN_PACKET_LENGTH(x)    (((x) & LAN_REPLY_PACKET_LENGTH_MASK)   \
+                                        >> LAN_REPLY_PACKET_LENGTH_SHIFT)
+
+#define SET_LAN_PACKET_LENGTH(x, lth)                                       \
+            ((x) = ((x) & ~LAN_REPLY_PACKET_LENGTH_MASK) |                  \
+                            (((lth) << LAN_REPLY_PACKET_LENGTH_SHIFT) &     \
+                                        LAN_REPLY_PACKET_LENGTH_MASK))
+
+#define GET_LAN_BUCKET_CONTEXT(x)   (((x) & LAN_REPLY_BUCKET_CONTEXT_MASK)  \
+                                        >> LAN_REPLY_BUCKET_CONTEXT_SHIFT)
+
+#define SET_LAN_BUCKET_CONTEXT(x, ctx)                                      \
+            ((x) = ((x) & ~LAN_REPLY_BUCKET_CONTEXT_MASK) |                 \
+                            (((ctx) << LAN_REPLY_BUCKET_CONTEXT_SHIFT) &    \
+                                        LAN_REPLY_BUCKET_CONTEXT_MASK))
+
+#define GET_LAN_BUFFER_CONTEXT(x)   (((x) & LAN_REPLY_BUFFER_CONTEXT_MASK)  \
+                                        >> LAN_REPLY_BUFFER_CONTEXT_SHIFT)
+
+#define SET_LAN_BUFFER_CONTEXT(x, ctx)                                      \
+            ((x) = ((x) & ~LAN_REPLY_BUFFER_CONTEXT_MASK) |                 \
+                            (((ctx) << LAN_REPLY_BUFFER_CONTEXT_SHIFT) &    \
+                                        LAN_REPLY_BUFFER_CONTEXT_MASK))
+
+#define GET_LAN_FORM(x)             (((x) & LAN_REPLY_FORM_MASK)            \
+                                        >> LAN_REPLY_FORM_SHIFT)
+
+#define SET_LAN_FORM(x, frm)                                                \
+            ((x) = ((x) & ~LAN_REPLY_FORM_MASK) |                           \
+                            (((frm) << LAN_REPLY_FORM_SHIFT) &              \
+                                        LAN_REPLY_FORM_MASK))
+
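+/*
+ * Illustrative sketch: decoding a single-bucket receive context with the
+ * accessor macros above.  The example_* helper name and the
+ * MPI_EXAMPLE_SNIPPETS guard are hypothetical.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static int example_decode_single_receive(U32 ctx, U32 *bucket, U32 *length)
+{
+    if (GET_LAN_FORM(ctx) != LAN_REPLY_FORM_RECEIVE_SINGLE)
+        return 0;
+    *bucket = GET_LAN_BUCKET_CONTEXT(ctx); /* which posted bucket was used  */
+    *length = GET_LAN_PACKET_LENGTH(ctx);  /* bytes placed in that bucket   */
+    return 1;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */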
+
+/****************************************************************************/
+/* LAN Current Device State defines                                         */
+/****************************************************************************/
+
+#define MPI_LAN_DEVICE_STATE_RESET                     (0x00)
+#define MPI_LAN_DEVICE_STATE_OPERATIONAL               (0x01)
+
+
+/****************************************************************************/
+/* LAN Loopback defines                                                     */
+/****************************************************************************/
+
+#define MPI_LAN_TX_MODES_ENABLE_LOOPBACK_SUPPRESSION   (0x01)
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_raid.h b/drivers/message/fusion/lsi/mpi_raid.h
new file mode 100644
index 0000000..9580a9d
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_raid.h
@@ -0,0 +1,232 @@
+/*
+ *  Copyright (c) 2001-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_raid.h
+ *          Title:  MPI RAID message and structures
+ *  Creation Date:  February 27, 2001
+ *
+ *    mpi_raid.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  02-27-01  01.01.01  Original release for this file.
+ *  03-27-01  01.01.02  Added structure offset comments.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  09-28-01  01.02.02  Major rework for MPI v1.2 Integrated RAID changes.
+ *  10-04-01  01.02.03  Added ActionData defines for
+ *                      MPI_RAID_ACTION_DELETE_VOLUME action.
+ *  11-01-01  01.02.04  Added define for MPI_RAID_ACTION_ADATA_DO_NOT_SYNC.
+ *  03-14-02  01.02.05  Added define for MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT.
+ *  05-07-02  01.02.06  Added define for MPI_RAID_ACTION_ACTIVATE_VOLUME,
+ *                      MPI_RAID_ACTION_INACTIVATE_VOLUME, and
+ *                      MPI_RAID_ACTION_ADATA_INACTIVATE_ALL.
+ *  07-12-02  01.02.07  Added structures for Mailbox request and reply.
+ *  11-15-02  01.02.08  Added missing MsgContext field to MSG_MAILBOX_REQUEST.
+ *  04-01-03  01.02.09  New action data option flag for
+ *                      MPI_RAID_ACTION_DELETE_VOLUME.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_RAID_H
+#define MPI_RAID_H
+
+
+/******************************************************************************
+*
+*        R A I D    M e s s a g e s
+*
+*******************************************************************************/
+
+
+/****************************************************************************/
+/* RAID Action Request                                                      */
+/****************************************************************************/
+
+typedef struct _MSG_RAID_ACTION
+{
+    U8                      Action;             /* 00h */
+    U8                      Reserved1;          /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      VolumeID;           /* 04h */
+    U8                      VolumeBus;          /* 05h */
+    U8                      PhysDiskNum;        /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved2;          /* 0Ch */
+    U32                     ActionDataWord;     /* 10h */
+    SGE_SIMPLE_UNION        ActionDataSGE;      /* 14h */
+} MSG_RAID_ACTION_REQUEST, MPI_POINTER PTR_MSG_RAID_ACTION_REQUEST,
+  MpiRaidActionRequest_t, MPI_POINTER pMpiRaidActionRequest_t;
+
+
+/* RAID Action request Action values */
+
+#define MPI_RAID_ACTION_STATUS                      (0x00)
+#define MPI_RAID_ACTION_INDICATOR_STRUCT            (0x01)
+#define MPI_RAID_ACTION_CREATE_VOLUME               (0x02)
+#define MPI_RAID_ACTION_DELETE_VOLUME               (0x03)
+#define MPI_RAID_ACTION_DISABLE_VOLUME              (0x04)
+#define MPI_RAID_ACTION_ENABLE_VOLUME               (0x05)
+#define MPI_RAID_ACTION_QUIESCE_PHYS_IO             (0x06)
+#define MPI_RAID_ACTION_ENABLE_PHYS_IO              (0x07)
+#define MPI_RAID_ACTION_CHANGE_VOLUME_SETTINGS      (0x08)
+#define MPI_RAID_ACTION_PHYSDISK_OFFLINE            (0x0A)
+#define MPI_RAID_ACTION_PHYSDISK_ONLINE             (0x0B)
+#define MPI_RAID_ACTION_CHANGE_PHYSDISK_SETTINGS    (0x0C)
+#define MPI_RAID_ACTION_CREATE_PHYSDISK             (0x0D)
+#define MPI_RAID_ACTION_DELETE_PHYSDISK             (0x0E)
+#define MPI_RAID_ACTION_FAIL_PHYSDISK               (0x0F)
+#define MPI_RAID_ACTION_REPLACE_PHYSDISK            (0x10)
+#define MPI_RAID_ACTION_ACTIVATE_VOLUME             (0x11)
+#define MPI_RAID_ACTION_INACTIVATE_VOLUME           (0x12)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_CREATE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_DO_NOT_SYNC           (0x00000001)
+#define MPI_RAID_ACTION_ADATA_LOW_LEVEL_INIT        (0x00000002)
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_DELETE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_KEEP_PHYS_DISKS       (0x00000000)
+#define MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS        (0x00000001)
+
+#define MPI_RAID_ACTION_ADATA_KEEP_LBA0             (0x00000000)
+#define MPI_RAID_ACTION_ADATA_ZERO_LBA0             (0x00000002)
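+
+/*
+ * Illustrative sketch: the two DELETE_VOLUME option bits occupy separate bit
+ * positions and are OR-ed together in ActionDataWord.  Only the fields
+ * relevant to the example are shown; the example_* helper name and the
+ * MPI_EXAMPLE_SNIPPETS guard are hypothetical.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static void example_request_volume_delete(MSG_RAID_ACTION_REQUEST *req)
+{
+    req->Action         = MPI_RAID_ACTION_DELETE_VOLUME;
+    req->ActionDataWord = MPI_RAID_ACTION_ADATA_DEL_PHYS_DISKS |
+                          MPI_RAID_ACTION_ADATA_ZERO_LBA0;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */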
+
+/* ActionDataWord defines for use with MPI_RAID_ACTION_ACTIVATE_VOLUME action */
+#define MPI_RAID_ACTION_ADATA_INACTIVATE_ALL        (0x00000001)
+
+
+/* RAID Action reply message */
+
+typedef struct _MSG_RAID_ACTION_REPLY
+{
+    U8                      Action;             /* 00h */
+    U8                      Reserved;           /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      VolumeID;           /* 04h */
+    U8                      VolumeBus;          /* 05h */
+    U8                      PhysDiskNum;        /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     ActionStatus;       /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     VolumeStatus;       /* 14h */
+    U32                     ActionData;         /* 18h */
+} MSG_RAID_ACTION_REPLY, MPI_POINTER PTR_MSG_RAID_ACTION_REPLY,
+  MpiRaidActionReply_t, MPI_POINTER pMpiRaidActionReply_t;
+
+
+/* RAID Volume reply ActionStatus values */
+
+#define MPI_RAID_ACTION_ASTATUS_SUCCESS             (0x0000)
+#define MPI_RAID_ACTION_ASTATUS_INVALID_ACTION      (0x0001)
+#define MPI_RAID_ACTION_ASTATUS_FAILURE             (0x0002)
+#define MPI_RAID_ACTION_ASTATUS_IN_PROGRESS         (0x0003)
+
+
+/* RAID Volume reply RAID Volume Indicator structure */
+
+typedef struct _MPI_RAID_VOL_INDICATOR
+{
+    U64                     TotalBlocks;        /* 00h */
+    U64                     BlocksRemaining;    /* 08h */
+} MPI_RAID_VOL_INDICATOR, MPI_POINTER PTR_MPI_RAID_VOL_INDICATOR,
+  MpiRaidVolIndicator_t, MPI_POINTER pMpiRaidVolIndicator_t;
+
+
+/****************************************************************************/
+/* SCSI IO RAID Passthrough Request                                         */
+/****************************************************************************/
+
+typedef struct _MSG_SCSI_IO_RAID_PT_REQUEST
+{
+    U8                      PhysDiskNum;        /* 00h */
+    U8                      Reserved1;          /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      CDBLength;          /* 04h */
+    U8                      SenseBufferLength;  /* 05h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      LUN[8];             /* 0Ch */
+    U32                     Control;            /* 14h */
+    U8                      CDB[16];            /* 18h */
+    U32                     DataLength;         /* 28h */
+    U32                     SenseBufferLowAddr; /* 2Ch */
+    SGE_IO_UNION            SGL;                /* 30h */
+} MSG_SCSI_IO_RAID_PT_REQUEST, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REQUEST,
+  SCSIIORaidPassthroughRequest_t, MPI_POINTER pSCSIIORaidPassthroughRequest_t;
+
+
+/* SCSI IO RAID Passthrough reply structure */
+
+typedef struct _MSG_SCSI_IO_RAID_PT_REPLY
+{
+    U8                      PhysDiskNum;        /* 00h */
+    U8                      Reserved1;          /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U8                      CDBLength;          /* 04h */
+    U8                      SenseBufferLength;  /* 05h */
+    U8                      Reserved2;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      SCSIStatus;         /* 0Ch */
+    U8                      SCSIState;          /* 0Dh */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     TransferCount;      /* 14h */
+    U32                     SenseCount;         /* 18h */
+    U32                     ResponseInfo;       /* 1Ch */
+} MSG_SCSI_IO_RAID_PT_REPLY, MPI_POINTER PTR_MSG_SCSI_IO_RAID_PT_REPLY,
+  SCSIIORaidPassthroughReply_t, MPI_POINTER pSCSIIORaidPassthroughReply_t;
+
+
+/****************************************************************************/
+/* Mailbox request structure */
+/****************************************************************************/
+
+typedef struct _MSG_MAILBOX_REQUEST
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      Command[10];        /* 0Ch */
+    U16                     Reserved4;          /* 16h */
+    SGE_IO_UNION            SGL;                /* 18h */
+} MSG_MAILBOX_REQUEST, MPI_POINTER PTR_MSG_MAILBOX_REQUEST,
+  MailboxRequest_t, MPI_POINTER pMailboxRequest_t;
+
+
+/* Mailbox reply structure */
+typedef struct _MSG_MAILBOX_REPLY
+{
+    U16                     Reserved1;          /* 00h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     MailboxStatus;      /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     Reserved4;          /* 14h */
+} MSG_MAILBOX_REPLY, MPI_POINTER PTR_MSG_MAILBOX_REPLY,
+  MailboxReply_t, MPI_POINTER pMailboxReply_t;
+
+#endif
+
+
+
diff --git a/drivers/message/fusion/lsi/mpi_sas.h b/drivers/message/fusion/lsi/mpi_sas.h
new file mode 100644
index 0000000..cb878f9
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_sas.h
@@ -0,0 +1,181 @@
+/*
+ *  Copyright (c) 2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_sas.h
+ *          Title:  MPI Serial Attached SCSI structures and definitions
+ *  Creation Date:  April 23, 2003
+ *
+ *    mpi_sas.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  xx-yy-zz  01.05.01  Original release.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_SAS_H
+#define MPI_SAS_H
+
+/*****************************************************************************
+*
+*        S e r i a l    A t t a c h e d    S C S I     M e s s a g e s
+*
+*****************************************************************************/
+
+/****************************************************************************/
+/* Serial Management Protocol Passthrough Request                           */
+/****************************************************************************/
+
+typedef struct _MSG_SMP_PASSTHROUGH_REQUEST
+{
+    U8                      PassthroughFlags;   /* 00h */
+    U8                      PhysicalPort;       /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     RequestDataLength;  /* 04h */
+    U8                      ConnectionRate;     /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U32                     Reserved1;          /* 0Ch */
+    U64                     SASAddress;         /* 10h */
+    U32                     Reserved2;          /* 18h */
+    U32                     Reserved3;          /* 1Ch */
+    SGE_SIMPLE_UNION        SGL;                /* 20h */
+} MSG_SMP_PASSTHROUGH_REQUEST, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REQUEST,
+  SmpPassthroughRequest_t, MPI_POINTER pSmpPassthroughRequest_t;
+
+#define MPI_SMP_PT_REQ_PT_FLAGS_IMMEDIATE       (0x80)
+
+#define MPI_SMP_PT_REQ_CONNECT_RATE_NEGOTIATED  (0x00)
+#define MPI_SMP_PT_REQ_CONNECT_RATE_1_5         (0x08)
+#define MPI_SMP_PT_REQ_CONNECT_RATE_3_0         (0x09)
+
+
+/* Serial Management Protocol Passthrough Reply */
+typedef struct _MSG_SMP_PASSTHROUGH_REPLY
+{
+    U8                      PassthroughFlags;   /* 00h */
+    U8                      PhysicalPort;       /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     ResponseDataLength; /* 04h */
+    U8                      Reserved1;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      Reserved2;          /* 0Ch */
+    U8                      SASStatus;          /* 0Dh */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+    U32                     Reserved3;          /* 14h */
+    U8                      ResponseData[4];    /* 18h */
+} MSG_SMP_PASSTHROUGH_REPLY, MPI_POINTER PTR_MSG_SMP_PASSTHROUGH_REPLY,
+  SmpPassthroughReply_t, MPI_POINTER pSmpPassthroughReply_t;
+
+#define MPI_SMP_PT_REPLY_PT_FLAGS_IMMEDIATE     (0x80)
+
+/* values for the SASStatus field */
+#define MPI_SASSTATUS_SUCCESS                           (0x00)
+#define MPI_SASSTATUS_UNKNOWN_ERROR                     (0x01)
+#define MPI_SASSTATUS_INVALID_FRAME                     (0x02)
+#define MPI_SASSTATUS_UTC_BAD_DEST                      (0x03)
+#define MPI_SASSTATUS_UTC_BREAK_RECEIVED                (0x04)
+#define MPI_SASSTATUS_UTC_CONNECT_RATE_NOT_SUPPORTED    (0x05)
+#define MPI_SASSTATUS_UTC_PORT_LAYER_REQUEST            (0x06)
+#define MPI_SASSTATUS_UTC_PROTOCOL_NOT_SUPPORTED        (0x07)
+#define MPI_SASSTATUS_UTC_STP_RESOURCES_BUSY            (0x08)
+#define MPI_SASSTATUS_UTC_WRONG_DESTINATION             (0x09)
+#define MPI_SASSTATUS_SHORT_INFORMATION_UNIT            (0x0A)
+#define MPI_SASSTATUS_LONG_INFORMATION_UNIT             (0x0B)
+#define MPI_SASSTATUS_XFER_RDY_INCORRECT_WRITE_DATA     (0x0C)
+#define MPI_SASSTATUS_XFER_RDY_REQUEST_OFFSET_ERROR     (0x0D)
+#define MPI_SASSTATUS_XFER_RDY_NOT_EXPECTED             (0x0E)
+#define MPI_SASSTATUS_DATA_INCORRECT_DATA_LENGTH        (0x0F)
+#define MPI_SASSTATUS_DATA_TOO_MUCH_READ_DATA           (0x10)
+#define MPI_SASSTATUS_DATA_OFFSET_ERROR                 (0x11)
+#define MPI_SASSTATUS_SDSF_NAK_RECEIVED                 (0x12)
+#define MPI_SASSTATUS_SDSF_CONNECTION_FAILED            (0x13)
+#define MPI_SASSTATUS_INITIATOR_RESPONSE_TIMEOUT        (0x14)
+
+
+/*
+ * Values for the SAS DeviceInfo field used in SAS Device Status Change Event
+ * data and SAS IO Unit Configuration pages.
+ */
+#define MPI_SAS_DEVICE_INFO_ATAPI_DEVICE        (0x00002000)
+#define MPI_SAS_DEVICE_INFO_LSI_DEVICE          (0x00001000)
+#define MPI_SAS_DEVICE_INFO_DIRECT_ATTACH       (0x00000800)
+#define MPI_SAS_DEVICE_INFO_SSP_TARGET          (0x00000400)
+#define MPI_SAS_DEVICE_INFO_STP_TARGET          (0x00000200)
+#define MPI_SAS_DEVICE_INFO_SMP_TARGET          (0x00000100)
+#define MPI_SAS_DEVICE_INFO_SATA_DEVICE         (0x00000080)
+#define MPI_SAS_DEVICE_INFO_SSP_INITIATOR       (0x00000040)
+#define MPI_SAS_DEVICE_INFO_STP_INITIATOR       (0x00000020)
+#define MPI_SAS_DEVICE_INFO_SMP_INITIATOR       (0x00000010)
+#define MPI_SAS_DEVICE_INFO_SATA_HOST           (0x00000008)
+
+#define MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE    (0x00000007)
+#define MPI_SAS_DEVICE_INFO_NO_DEVICE           (0x00000000)
+#define MPI_SAS_DEVICE_INFO_END_DEVICE          (0x00000001)
+#define MPI_SAS_DEVICE_INFO_EDGE_EXPANDER       (0x00000002)
+#define MPI_SAS_DEVICE_INFO_FANOUT_EXPANDER     (0x00000003)
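+
+/*
+ * Illustrative sketch: the low three bits of DeviceInfo encode the attached
+ * device type, while the higher bits are protocol and attachment flags.  The
+ * example_* helper name and the MPI_EXAMPLE_SNIPPETS guard are hypothetical.
+ */
+#ifdef MPI_EXAMPLE_SNIPPETS
+static int example_sas_is_end_device(U32 device_info)
+{
+    return (device_info & MPI_SAS_DEVICE_INFO_MASK_DEVICE_TYPE) ==
+            MPI_SAS_DEVICE_INFO_END_DEVICE;
+}
+#endif /* MPI_EXAMPLE_SNIPPETS */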
+
+
+/****************************************************************************/
+/* SAS IO Unit Control Request                                              */
+/****************************************************************************/
+
+typedef struct _MSG_SAS_IOUNIT_CONTROL_REQUEST
+{
+    U8                      Operation;          /* 00h */
+    U8                      Reserved1;          /* 01h */
+    U8                      ChainOffset;        /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U8                      TargetID;           /* 0Ch */
+    U8                      Bus;                /* 0Dh */
+    U8                      PhyNum;             /* 0Eh */
+    U8                      Reserved4;          /* 0Fh */
+    U32                     Reserved5;          /* 10h */
+    U64                     SASAddress;         /* 14h */
+    U32                     Reserved6;          /* 1Ch */
+} MSG_SAS_IOUNIT_CONTROL_REQUEST, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REQUEST,
+  SasIoUnitControlRequest_t, MPI_POINTER pSasIoUnitControlRequest_t;
+
+/* values for the Operation field */
+#define MPI_SAS_OP_CLEAR_NOT_PRESENT             (0x01)
+#define MPI_SAS_OP_CLEAR_ALL                     (0x02)
+#define MPI_SAS_OP_MAP                           (0x03)
+#define MPI_SAS_OP_MOVE                          (0x04)
+#define MPI_SAS_OP_CLEAR                         (0x05)
+#define MPI_SAS_OP_PHY_LINK_RESET                (0x06)
+#define MPI_SAS_OP_PHY_HARD_RESET                (0x07)
+#define MPI_SAS_OP_PHY_CLEAR_ERROR_LOG           (0x08)
+
+
+/* SAS IO Unit Control Reply */
+typedef struct _MSG_SAS_IOUNIT_CONTROL_REPLY
+{
+    U8                      Operation;          /* 00h */
+    U8                      Reserved1;          /* 01h */
+    U8                      MsgLength;          /* 02h */
+    U8                      Function;           /* 03h */
+    U16                     Reserved2;          /* 04h */
+    U8                      Reserved3;          /* 06h */
+    U8                      MsgFlags;           /* 07h */
+    U32                     MsgContext;         /* 08h */
+    U16                     Reserved4;          /* 0Ch */
+    U16                     IOCStatus;          /* 0Eh */
+    U32                     IOCLogInfo;         /* 10h */
+} MSG_SAS_IOUNIT_CONTROL_REPLY, MPI_POINTER PTR_MSG_SAS_IOUNIT_CONTROL_REPLY,
+  SasIoUnitControlReply_t, MPI_POINTER pSasIoUnitControlReply_t;
+
+#endif
+
+
diff --git a/drivers/message/fusion/lsi/mpi_targ.h b/drivers/message/fusion/lsi/mpi_targ.h
new file mode 100644
index 0000000..804dc85
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_targ.h
@@ -0,0 +1,435 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_targ.h
+ *          Title:  MPI Target mode messages and structures
+ *  Creation Date:  June 22, 2000
+ *
+ *    mpi_targ.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  06-22-00  01.00.02  Added _MSG_TARGET_CMD_BUFFER_POST_REPLY structure.
+ *                      Corrected DECSRIPTOR typo to DESCRIPTOR.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *                      Modified target mode to use IoIndex instead of
+ *                      HostIndex and IocIndex. Added Alias.
+ *  01-09-01  01.01.02  Added defines for TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER
+ *                      and TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER.
+ *  02-20-01  01.01.03  Started using MPI_POINTER.
+ *                      Added structures for MPI_TARGET_SCSI_SPI_CMD_BUFFER and
+ *                      MPI_TARGET_FCP_CMD_BUFFER.
+ *  03-27-01  01.01.04  Added structure offset comments.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  09-28-01  01.02.02  Added structure for MPI_TARGET_SCSI_SPI_STATUS_IU.
+ *                      Added PriorityReason field to some replies and
+ *                      defined more PriorityReason codes.
+ *                      Added some defines to support the previous
+ *                      version of MPI.
+ *  10-04-01  01.02.03  Added PriorityReason to MSG_TARGET_ERROR_REPLY.
+ *  11-01-01  01.02.04  Added define for TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY.
+ *  03-14-02  01.02.05  Modified MPI_TARGET_FCP_RSP_BUFFER to get the proper
+ *                      byte ordering.
+ *  05-31-02  01.02.06  Modified TARGET_MODE_REPLY_ALIAS_MASK to only include
+ *                      one bit.
+ *                      Added AliasIndex field to MPI_TARGET_FCP_CMD_BUFFER.
+ *  09-16-02  01.02.07  Added flags for confirmed completion.
+ *                      Added PRIORITY_REASON_TARGET_BUSY.
+ *  11-15-02  01.02.08  Added AliasID field to MPI_TARGET_SCSI_SPI_CMD_BUFFER.
+ *  04-01-03  01.02.09  Added OptionalOxid field to MPI_TARGET_FCP_CMD_BUFFER.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TARG_H
+#define MPI_TARG_H
+
+
+/******************************************************************************
+*
+*        S C S I    T a r g e t    M e s s a g e s
+*
+*******************************************************************************/
+
+typedef struct _CMD_BUFFER_DESCRIPTOR
+{
+    U16                     IoIndex;                    /* 00h */
+    U16                     Reserved;                   /* 02h */
+    union                                               /* 04h */
+    {
+        U32                 PhysicalAddress32;
+        U64                 PhysicalAddress64;
+    } u;
+} CMD_BUFFER_DESCRIPTOR, MPI_POINTER PTR_CMD_BUFFER_DESCRIPTOR,
+  CmdBufferDescriptor_t, MPI_POINTER pCmdBufferDescriptor_t;
+
+
+/****************************************************************************/
+/* Target Command Buffer Post Request                                       */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_REQUEST
+{
+    U8                      BufferPostFlags;            /* 00h */
+    U8                      BufferCount;                /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      BufferLength;               /* 04h */
+    U8                      Reserved;                   /* 05h */
+    U8                      Reserved1;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    CMD_BUFFER_DESCRIPTOR   Buffer[1];                  /* 0Ch */
+} MSG_TARGET_CMD_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REQUEST,
+  TargetCmdBufferPostRequest_t, MPI_POINTER pTargetCmdBufferPostRequest_t;
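+
+/*
+ * Buffer[1] above is the usual MPI single-element-array idiom for a
+ * variable-length tail; the number of valid CMD_BUFFER_DESCRIPTOR entries
+ * is given by the BufferCount field of the request.
+ */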
+
+#define CMD_BUFFER_POST_FLAGS_PORT_MASK         (0x01)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_MASK    (0x80)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_32      (0)
+#define CMD_BUFFER_POST_FLAGS_ADDR_MODE_64      (1)
+#define CMD_BUFFER_POST_FLAGS_64_BIT_ADDR       (0x80)
+
+#define CMD_BUFFER_POST_IO_INDEX_MASK           (0x00003FFF)
+#define CMD_BUFFER_POST_IO_INDEX_MASK_0100      (0x000003FF) /* obsolete */
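+
+/*
+ * Illustrative sketch of the addressing-mode flag (hypothetical request
+ * pointer "req"): a target-mode driver posting buffers described with
+ * 64-bit physical addresses would set
+ *
+ *     req->BufferPostFlags |= CMD_BUFFER_POST_FLAGS_64_BIT_ADDR;
+ *
+ * and fill u.PhysicalAddress64 in each CMD_BUFFER_DESCRIPTOR; with the
+ * bit clear, u.PhysicalAddress32 is used instead.
+ */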
+
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_REPLY
+{
+    U8                      BufferPostFlags;            /* 00h */
+    U8                      BufferCount;                /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U8                      BufferLength;               /* 04h */
+    U8                      Reserved;                   /* 05h */
+    U8                      Reserved1;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved2;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_TARGET_CMD_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_REPLY,
+  TargetCmdBufferPostReply_t, MPI_POINTER pTargetCmdBufferPostReply_t;
+
+/* the following structure is obsolete as of MPI v1.2 */
+typedef struct _MSG_PRIORITY_CMD_RECEIVED_REPLY
+{
+    U16                     Reserved;                   /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      PriorityReason;             /* 0Ch */
+    U8                      Reserved3;                  /* 0Dh */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     ReplyWord;                  /* 14h */
+} MSG_PRIORITY_CMD_RECEIVED_REPLY, MPI_POINTER PTR_MSG_PRIORITY_CMD_RECEIVED_REPLY,
+  PriorityCommandReceivedReply_t, MPI_POINTER pPriorityCommandReceivedReply_t;
+
+#define PRIORITY_REASON_NO_DISCONNECT           (0x00)
+#define PRIORITY_REASON_SCSI_TASK_MANAGEMENT    (0x01)
+#define PRIORITY_REASON_CMD_PARITY_ERR          (0x02)
+#define PRIORITY_REASON_MSG_OUT_PARITY_ERR      (0x03)
+#define PRIORITY_REASON_LQ_CRC_ERR              (0x04)
+#define PRIORITY_REASON_CMD_CRC_ERR             (0x05)
+#define PRIORITY_REASON_PROTOCOL_ERR            (0x06)
+#define PRIORITY_REASON_DATA_OUT_PARITY_ERR     (0x07)
+#define PRIORITY_REASON_DATA_OUT_CRC_ERR        (0x08)
+#define PRIORITY_REASON_TARGET_BUSY             (0x09)
+#define PRIORITY_REASON_UNKNOWN                 (0xFF)
+
+
+typedef struct _MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY
+{
+    U16                     Reserved;                   /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      PriorityReason;             /* 0Ch */
+    U8                      Reserved3;                  /* 0Dh */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     ReplyWord;                  /* 14h */
+} MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
+  MPI_POINTER PTR_MSG_TARGET_CMD_BUFFER_POST_ERROR_REPLY,
+  TargetCmdBufferPostErrorReply_t, MPI_POINTER pTargetCmdBufferPostErrorReply_t;
+
+
+typedef struct _MPI_TARGET_FCP_CMD_BUFFER
+{
+    U8      FcpLun[8];                                  /* 00h */
+    U8      FcpCntl[4];                                 /* 08h */
+    U8      FcpCdb[16];                                 /* 0Ch */
+    U32     FcpDl;                                      /* 1Ch */
+    U8      AliasIndex;                                 /* 20h */
+    U8      Reserved1;                                  /* 21h */
+    U16     OptionalOxid;                               /* 22h */
+} MPI_TARGET_FCP_CMD_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_CMD_BUFFER,
+  MpiTargetFcpCmdBuffer, MPI_POINTER pMpiTargetFcpCmdBuffer;
+
+
+typedef struct _MPI_TARGET_SCSI_SPI_CMD_BUFFER
+{
+    /* SPI L_Q information unit */
+    U8      L_QType;                                    /* 00h */
+    U8      Reserved;                                   /* 01h */
+    U16     Tag;                                        /* 02h */
+    U8      LogicalUnitNumber[8];                       /* 04h */
+    U32     DataLength;                                 /* 0Ch */
+    /* SPI command information unit */
+    U8      ReservedFirstByteOfCommandIU;               /* 10h */
+    U8      TaskAttribute;                              /* 11h */
+    U8      TaskManagementFlags;                        /* 12h */
+    U8      AdditionalCDBLength;                        /* 13h */
+    U8      CDB[16];                                    /* 14h */
+    /* Alias ID */
+    U8      AliasID;                                    /* 24h */
+    U8      Reserved1;                                  /* 25h */
+    U16     Reserved2;                                  /* 26h */
+} MPI_TARGET_SCSI_SPI_CMD_BUFFER,
+  MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_CMD_BUFFER,
+  MpiTargetScsiSpiCmdBuffer, MPI_POINTER pMpiTargetScsiSpiCmdBuffer;
+
+
+/****************************************************************************/
+/* Target Assist Request                                                    */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_ASSIST_REQUEST
+{
+    U8                      StatusCode;                 /* 00h */
+    U8                      TargetAssistFlags;          /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     QueueTag;                   /* 04h */
+    U8                      Reserved;                   /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     ReplyWord;                  /* 0Ch */
+    U8                      LUN[8];                     /* 10h */
+    U32                     RelativeOffset;             /* 18h */
+    U32                     DataLength;                 /* 1Ch */
+    SGE_IO_UNION            SGL[1];                     /* 20h */
+} MSG_TARGET_ASSIST_REQUEST, MPI_POINTER PTR_MSG_TARGET_ASSIST_REQUEST,
+  TargetAssistRequest_t, MPI_POINTER pTargetAssistRequest_t;
+
+#define TARGET_ASSIST_FLAGS_DATA_DIRECTION          (0x01)
+#define TARGET_ASSIST_FLAGS_AUTO_STATUS             (0x02)
+#define TARGET_ASSIST_FLAGS_HIGH_PRIORITY           (0x04)
+#define TARGET_ASSIST_FLAGS_CONFIRMED               (0x08)
+#define TARGET_ASSIST_FLAGS_REPOST_CMD_BUFFER       (0x80)
+
+
+typedef struct _MSG_TARGET_ERROR_REPLY
+{
+    U16                     Reserved;                   /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      PriorityReason;             /* 0Ch */
+    U8                      Reserved3;                  /* 0Dh */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     ReplyWord;                  /* 14h */
+    U32                     TransferCount;              /* 18h */
+} MSG_TARGET_ERROR_REPLY, MPI_POINTER PTR_MSG_TARGET_ERROR_REPLY,
+  TargetErrorReply_t, MPI_POINTER pTargetErrorReply_t;
+
+
+/****************************************************************************/
+/* Target Status Send Request                                               */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_STATUS_SEND_REQUEST
+{
+    U8                      StatusCode;                 /* 00h */
+    U8                      StatusFlags;                /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     QueueTag;                   /* 04h */
+    U8                      Reserved;                   /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     ReplyWord;                  /* 0Ch */
+    U8                      LUN[8];                     /* 10h */
+    SGE_SIMPLE_UNION        StatusDataSGE;              /* 18h */
+} MSG_TARGET_STATUS_SEND_REQUEST, MPI_POINTER PTR_MSG_TARGET_STATUS_SEND_REQUEST,
+  TargetStatusSendRequest_t, MPI_POINTER pTargetStatusSendRequest_t;
+
+#define TARGET_STATUS_SEND_FLAGS_AUTO_GOOD_STATUS   (0x01)
+#define TARGET_STATUS_SEND_FLAGS_HIGH_PRIORITY      (0x04)
+#define TARGET_STATUS_SEND_FLAGS_CONFIRMED          (0x08)
+#define TARGET_STATUS_SEND_FLAGS_REPOST_CMD_BUFFER  (0x80)
+
+/*
+ * NOTE: FCP_RSP data is big-endian. When used on a little-endian system, this
+ * structure properly orders the bytes.
+ */
+typedef struct _MPI_TARGET_FCP_RSP_BUFFER
+{
+    U8      Reserved0[8];                               /* 00h */
+    U8      Reserved1[2];                               /* 08h */
+    U8      FcpFlags;                                   /* 0Ah */
+    U8      FcpStatus;                                  /* 0Bh */
+    U32     FcpResid;                                   /* 0Ch */
+    U32     FcpSenseLength;                             /* 10h */
+    U32     FcpResponseLength;                          /* 14h */
+    U8      FcpResponseData[8];                         /* 18h */
+    U8      FcpSenseData[32]; /* Pad to 64 bytes */     /* 20h */
+} MPI_TARGET_FCP_RSP_BUFFER, MPI_POINTER PTR_MPI_TARGET_FCP_RSP_BUFFER,
+  MpiTargetFcpRspBuffer, MPI_POINTER pMpiTargetFcpRspBuffer;
+
+/*
+ * NOTE: The SPI status IU is big-endian. When used on a little-endian system,
+ * this structure properly orders the bytes.
+ */
+typedef struct _MPI_TARGET_SCSI_SPI_STATUS_IU
+{
+    U8      Reserved0;                                  /* 00h */
+    U8      Reserved1;                                  /* 01h */
+    U8      Valid;                                      /* 02h */
+    U8      Status;                                     /* 03h */
+    U32     SenseDataListLength;                        /* 04h */
+    U32     PktFailuresListLength;                      /* 08h */
+    U8      SenseData[52]; /* Pad the IU to 64 bytes */ /* 0Ch */
+} MPI_TARGET_SCSI_SPI_STATUS_IU, MPI_POINTER PTR_MPI_TARGET_SCSI_SPI_STATUS_IU,
+  TargetScsiSpiStatusIU_t, MPI_POINTER pTargetScsiSpiStatusIU_t;
+
+/****************************************************************************/
+/* Target Mode Abort Request                                                */
+/****************************************************************************/
+
+typedef struct _MSG_TARGET_MODE_ABORT_REQUEST
+{
+    U8                      AbortType;                  /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     ReplyWord;                  /* 0Ch */
+    U32                     MsgContextToAbort;          /* 10h */
+} MSG_TARGET_MODE_ABORT, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT,
+  TargetModeAbort_t, MPI_POINTER pTargetModeAbort_t;
+
+#define TARGET_MODE_ABORT_TYPE_ALL_CMD_BUFFERS      (0x00)
+#define TARGET_MODE_ABORT_TYPE_ALL_IO               (0x01)
+#define TARGET_MODE_ABORT_TYPE_EXACT_IO             (0x02)
+#define TARGET_MODE_ABORT_TYPE_EXACT_IO_REQUEST     (0x03)
+
+/* Target Mode Abort Reply */
+
+typedef struct _MSG_TARGET_MODE_ABORT_REPLY
+{
+    U16                     Reserved;                   /* 00h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved3;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     AbortCount;                 /* 14h */
+} MSG_TARGET_MODE_ABORT_REPLY, MPI_POINTER PTR_MSG_TARGET_MODE_ABORT_REPLY,
+  TargetModeAbortReply_t, MPI_POINTER pTargetModeAbortReply_t;
+
+
+/****************************************************************************/
+/* Target Mode Context Reply                                                */
+/****************************************************************************/
+
+#define TARGET_MODE_REPLY_IO_INDEX_MASK         (0x00003FFF)
+#define TARGET_MODE_REPLY_IO_INDEX_SHIFT        (0)
+#define TARGET_MODE_REPLY_INITIATOR_INDEX_MASK  (0x03FFC000)
+#define TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT (14)
+#define TARGET_MODE_REPLY_ALIAS_MASK            (0x04000000)
+#define TARGET_MODE_REPLY_ALIAS_SHIFT           (26)
+#define TARGET_MODE_REPLY_PORT_MASK             (0x10000000)
+#define TARGET_MODE_REPLY_PORT_SHIFT            (28)
+
+
+#define GET_IO_INDEX(x)     (((x) & TARGET_MODE_REPLY_IO_INDEX_MASK)           \
+                                    >> TARGET_MODE_REPLY_IO_INDEX_SHIFT)
+
+#define SET_IO_INDEX(t, i)                                                     \
+            ((t) = ((t) & ~TARGET_MODE_REPLY_IO_INDEX_MASK) |                  \
+                              (((i) << TARGET_MODE_REPLY_IO_INDEX_SHIFT) &     \
+                                             TARGET_MODE_REPLY_IO_INDEX_MASK))
+
+#define GET_INITIATOR_INDEX(x) (((x) & TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) \
+                                   >> TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT)
+
+#define SET_INITIATOR_INDEX(t, ii)                                             \
+        ((t) = ((t) & ~TARGET_MODE_REPLY_INITIATOR_INDEX_MASK) |               \
+                        (((ii) << TARGET_MODE_REPLY_INITIATOR_INDEX_SHIFT) &   \
+                                      TARGET_MODE_REPLY_INITIATOR_INDEX_MASK))
+
+#define GET_ALIAS(x) (((x) & TARGET_MODE_REPLY_ALIAS_MASK)                     \
+                                               >> TARGET_MODE_REPLY_ALIAS_SHIFT)
+
+#define SET_ALIAS(t, a)  ((t) = ((t) & ~TARGET_MODE_REPLY_ALIAS_MASK) |        \
+                                    (((a) << TARGET_MODE_REPLY_ALIAS_SHIFT) &  \
+                                                 TARGET_MODE_REPLY_ALIAS_MASK))
+
+#define GET_PORT(x) (((x) & TARGET_MODE_REPLY_PORT_MASK)                       \
+                                               >> TARGET_MODE_REPLY_PORT_SHIFT)
+
+#define SET_PORT(t, p)  ((t) = ((t) & ~TARGET_MODE_REPLY_PORT_MASK) |          \
+                                    (((p) << TARGET_MODE_REPLY_PORT_SHIFT) &   \
+                                                  TARGET_MODE_REPLY_PORT_MASK))
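+
+/*
+ * Illustrative sketch of decoding a target-mode context reply word with the
+ * accessors above ("reply_word" is a hypothetical U32):
+ *
+ *     U32 io_index   = GET_IO_INDEX(reply_word);
+ *     U32 init_index = GET_INITIATOR_INDEX(reply_word);
+ *     U32 alias      = GET_ALIAS(reply_word);
+ *     U32 port       = GET_PORT(reply_word);
+ */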
+
+/* the following obsolete values are for MPI v1.0 support */
+#define TARGET_MODE_REPLY_0100_MASK_HOST_INDEX       (0x000003FF)
+#define TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX      (0)
+#define TARGET_MODE_REPLY_0100_MASK_IOC_INDEX        (0x001FF800)
+#define TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX       (11)
+#define TARGET_MODE_REPLY_0100_PORT_MASK             (0x00400000)
+#define TARGET_MODE_REPLY_0100_PORT_SHIFT            (22)
+#define TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX  (0x1F800000)
+#define TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX (23)
+
+#define GET_HOST_INDEX_0100(x) (((x) & TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) \
+                                  >> TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX)
+
+#define SET_HOST_INDEX_0100(t, hi)                                             \
+            ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_HOST_INDEX) |           \
+                         (((hi) << TARGET_MODE_REPLY_0100_SHIFT_HOST_INDEX) &  \
+                                      TARGET_MODE_REPLY_0100_MASK_HOST_INDEX))
+
+#define GET_IOC_INDEX_0100(x)   (((x) & TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) \
+                                  >> TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX)
+
+#define SET_IOC_INDEX_0100(t, ii)                                              \
+            ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_IOC_INDEX) |            \
+                        (((ii) << TARGET_MODE_REPLY_0100_SHIFT_IOC_INDEX) &    \
+                                     TARGET_MODE_REPLY_0100_MASK_IOC_INDEX))
+
+#define GET_INITIATOR_INDEX_0100(x)                                            \
+            (((x) & TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX)               \
+                              >> TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX)
+
+#define SET_INITIATOR_INDEX_0100(t, ii)                                        \
+        ((t) = ((t) & ~TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX) |          \
+                   (((ii) << TARGET_MODE_REPLY_0100_SHIFT_INITIATOR_INDEX) &   \
+                                TARGET_MODE_REPLY_0100_MASK_INITIATOR_INDEX))
+
+
+#endif
+
diff --git a/drivers/message/fusion/lsi/mpi_tool.h b/drivers/message/fusion/lsi/mpi_tool.h
new file mode 100644
index 0000000..536d197
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_tool.h
@@ -0,0 +1,305 @@
+/*
+ *  Copyright (c) 2001-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_tool.h
+ *          Title:  MPI Toolbox structures and definitions
+ *  Creation Date:  July 30, 2001
+ *
+ *    mpi_tool.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  08-08-01  01.02.01  Original release.
+ *  08-29-01  01.02.02  Added DIAG_DATA_UPLOAD_HEADER and related defines.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TOOL_H
+#define MPI_TOOL_H
+
+#define MPI_TOOLBOX_CLEAN_TOOL                      (0x00)
+#define MPI_TOOLBOX_MEMORY_MOVE_TOOL                (0x01)
+#define MPI_TOOLBOX_DIAG_DATA_UPLOAD_TOOL           (0x02)
+#define MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL           (0x03)
+#define MPI_TOOLBOX_FC_MANAGEMENT_TOOL              (0x04)
+
+
+/****************************************************************************/
+/* Toolbox reply                                                            */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_REPLY
+{
+    U8                      Tool;                       /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved3;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_TOOLBOX_REPLY, MPI_POINTER PTR_MSG_TOOLBOX_REPLY,
+  ToolboxReply_t, MPI_POINTER pToolboxReply_t;
+
+
+/****************************************************************************/
+/* Toolbox Clean Tool request                                               */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_CLEAN_REQUEST
+{
+    U8                      Tool;                       /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     Flags;                      /* 0Ch */
+} MSG_TOOLBOX_CLEAN_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_CLEAN_REQUEST,
+  ToolboxCleanRequest_t, MPI_POINTER pToolboxCleanRequest_t;
+
+#define MPI_TOOLBOX_CLEAN_NVSRAM                    (0x00000001)
+#define MPI_TOOLBOX_CLEAN_SEEPROM                   (0x00000002)
+#define MPI_TOOLBOX_CLEAN_FLASH                     (0x00000004)
+#define MPI_TOOLBOX_CLEAN_BOOTLOADER                (0x04000000)
+#define MPI_TOOLBOX_CLEAN_FW_BACKUP                 (0x08000000)
+#define MPI_TOOLBOX_CLEAN_FW_CURRENT                (0x10000000)
+#define MPI_TOOLBOX_CLEAN_OTHER_PERSIST_PAGES       (0x20000000)
+#define MPI_TOOLBOX_CLEAN_PERSIST_MANUFACT_PAGES    (0x40000000)
+#define MPI_TOOLBOX_CLEAN_BOOT_SERVICES             (0x80000000)
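+
+/*
+ * Illustrative sketch: the Flags field of the clean request is a bitmask of
+ * the regions to clear, so a hypothetical caller wiping NVSRAM and SEEPROM
+ * would use (endianness handling omitted):
+ *
+ *     cleanReq->Flags = MPI_TOOLBOX_CLEAN_NVSRAM | MPI_TOOLBOX_CLEAN_SEEPROM;
+ */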
+
+
+/****************************************************************************/
+/* Toolbox Memory Move request                                              */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_MEM_MOVE_REQUEST
+{
+    U8                      Tool;                       /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    SGE_SIMPLE_UNION        SGL;                        /* 0Ch */
+} MSG_TOOLBOX_MEM_MOVE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_MEM_MOVE_REQUEST,
+  ToolboxMemMoveRequest_t, MPI_POINTER pToolboxMemMoveRequest_t;
+
+
+/****************************************************************************/
+/* Toolbox Diagnostic Data Upload request                                   */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST
+{
+    U8                      Tool;                       /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     Flags;                      /* 0Ch */
+    U32                     Reserved3;                  /* 10h */
+    SGE_SIMPLE_UNION        SGL;                        /* 14h */
+} MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_DIAG_DATA_UPLOAD_REQUEST,
+  ToolboxDiagDataUploadRequest_t, MPI_POINTER pToolboxDiagDataUploadRequest_t;
+
+typedef struct _DIAG_DATA_UPLOAD_HEADER
+{
+    U32                     DiagDataLength;             /* 00h */
+    U8                      FormatCode;                 /* 04h */
+    U8                      Reserved;                   /* 05h */
+    U16                     Reserved1;                  /* 06h */
+} DIAG_DATA_UPLOAD_HEADER, MPI_POINTER PTR_DIAG_DATA_UPLOAD_HEADER,
+  DiagDataUploadHeader_t, MPI_POINTER pDiagDataUploadHeader_t;
+
+#define MPI_TB_DIAG_FORMAT_SCSI_PRINTF_1            (0x01)
+#define MPI_TB_DIAG_FORMAT_SCSI_2                   (0x02)
+#define MPI_TB_DIAG_FORMAT_SCSI_3                   (0x03)
+#define MPI_TB_DIAG_FORMAT_FC_TRACE_1               (0x04)
+
+
+/****************************************************************************/
+/* Toolbox ISTWI Read Write request                                         */
+/****************************************************************************/
+
+typedef struct _MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST
+{
+    U8                      Tool;                       /* 00h */
+    U8                      Reserved;                   /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U8                      Flags;                      /* 0Ch */
+    U8                      BusNum;                     /* 0Dh */
+    U16                     Reserved3;                  /* 0Eh */
+    U8                      NumAddressBytes;            /* 10h */
+    U8                      Reserved4;                  /* 11h */
+    U16                     DataLength;                 /* 12h */
+    U8                      DeviceAddr;                 /* 14h */
+    U8                      Addr1;                      /* 15h */
+    U8                      Addr2;                      /* 16h */
+    U8                      Addr3;                      /* 17h */
+    U32                     Reserved5;                  /* 18h */
+    SGE_SIMPLE_UNION        SGL;                        /* 1Ch */
+} MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_ISTWI_READ_WRITE_REQUEST,
+  ToolboxIstwiReadWriteRequest_t, MPI_POINTER pToolboxIstwiReadWriteRequest_t;
+
+#define MPI_TB_ISTWI_FLAGS_WRITE                    (0x00)
+#define MPI_TB_ISTWI_FLAGS_READ                     (0x01)
+
+
+/****************************************************************************/
+/* Toolbox FC Management request                                            */
+/****************************************************************************/
+
+/* ActionInfo for Bus and TargetId */
+typedef struct _MPI_TB_FC_MANAGE_BUS_TID_AI
+{
+    U16                     Reserved;                   /* 00h */
+    U8                      Bus;                        /* 02h */
+    U8                      TargetId;                   /* 03h */
+} MPI_TB_FC_MANAGE_BUS_TID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_BUS_TID_AI,
+  MpiTbFcManageBusTidAi_t, MPI_POINTER pMpiTbFcManageBusTidAi_t;
+
+/* ActionInfo for port identifier */
+typedef struct _MPI_TB_FC_MANAGE_PID_AI
+{
+    U32                     PortIdentifier;             /* 00h */
+} MPI_TB_FC_MANAGE_PID_AI, MPI_POINTER PTR_MPI_TB_FC_MANAGE_PID_AI,
+  MpiTbFcManagePidAi_t, MPI_POINTER pMpiTbFcManagePidAi_t;
+
+/* union of ActionInfo */
+typedef union _MPI_TB_FC_MANAGE_AI_UNION
+{
+    MPI_TB_FC_MANAGE_BUS_TID_AI     BusTid;
+    MPI_TB_FC_MANAGE_PID_AI         Port;
+} MPI_TB_FC_MANAGE_AI_UNION, MPI_POINTER PTR_MPI_TB_FC_MANAGE_AI_UNION,
+  MpiTbFcManageAiUnion_t, MPI_POINTER pMpiTbFcManageAiUnion_t;
+
+typedef struct _MSG_TOOLBOX_FC_MANAGE_REQUEST
+{
+    U8                          Tool;                   /* 00h */
+    U8                          Reserved;               /* 01h */
+    U8                          ChainOffset;            /* 02h */
+    U8                          Function;               /* 03h */
+    U16                         Reserved1;              /* 04h */
+    U8                          Reserved2;              /* 06h */
+    U8                          MsgFlags;               /* 07h */
+    U32                         MsgContext;             /* 08h */
+    U8                          Action;                 /* 0Ch */
+    U8                          Reserved3;              /* 0Dh */
+    U16                         Reserved4;              /* 0Eh */
+    MPI_TB_FC_MANAGE_AI_UNION   ActionInfo;             /* 10h */
+} MSG_TOOLBOX_FC_MANAGE_REQUEST, MPI_POINTER PTR_MSG_TOOLBOX_FC_MANAGE_REQUEST,
+  ToolboxFcManageRequest_t, MPI_POINTER pToolboxFcManageRequest_t;
+
+/* defines for the Action field */
+#define MPI_TB_FC_MANAGE_ACTION_DISC_ALL            (0x00)
+#define MPI_TB_FC_MANAGE_ACTION_DISC_PID            (0x01)
+#define MPI_TB_FC_MANAGE_ACTION_DISC_BUS_TID        (0x02)
+
+
+/****************************************************************************/
+/* Diagnostic Buffer Post request                                           */
+/****************************************************************************/
+
+typedef struct _MSG_DIAG_BUFFER_POST_REQUEST
+{
+    U8                      TraceLevel;                 /* 00h */
+    U8                      BufferType;                 /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved1;                  /* 04h */
+    U8                      Reserved2;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U32                     ExtendedType;               /* 0Ch */
+    U32                     BufferLength;               /* 10h */
+    U32                     ProductSpecific[4];         /* 14h */
+    U32                     Reserved3;                  /* 24h */
+    SGE_SIMPLE_UNION        SGL;                        /* 28h */
+} MSG_DIAG_BUFFER_POST_REQUEST, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REQUEST,
+  DiagBufferPostRequest_t, MPI_POINTER pDiagBufferPostRequest_t;
+
+#define MPI_DIAG_BUF_TYPE_TRACE                     (0x00)
+#define MPI_DIAG_BUF_TYPE_SNAPSHOT                  (0x01)
+#define MPI_DIAG_BUF_TYPE_EXTENDED                  (0x02)
+
+#define MPI_DIAG_EXTENDED_QTAG                      (0x00000001)
+
+
+/* Diagnostic Buffer Post reply */
+typedef struct _MSG_DIAG_BUFFER_POST_REPLY
+{
+    U8                      Reserved1;                  /* 00h */
+    U8                      BufferType;                 /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved2;                  /* 04h */
+    U8                      Reserved3;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved4;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+    U32                     TransferLength;             /* 14h */
+} MSG_DIAG_BUFFER_POST_REPLY, MPI_POINTER PTR_MSG_DIAG_BUFFER_POST_REPLY,
+  DiagBufferPostReply_t, MPI_POINTER pDiagBufferPostReply_t;
+
+
+/****************************************************************************/
+/* Diagnostic Release request                                               */
+/****************************************************************************/
+
+typedef struct _MSG_DIAG_RELEASE_REQUEST
+{
+    U8                      Reserved1;                  /* 00h */
+    U8                      BufferType;                 /* 01h */
+    U8                      ChainOffset;                /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved2;                  /* 04h */
+    U8                      Reserved3;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+} MSG_DIAG_RELEASE_REQUEST, MPI_POINTER PTR_MSG_DIAG_RELEASE_REQUEST,
+  DiagReleaseRequest_t, MPI_POINTER pDiagReleaseRequest_t;
+
+
+/* Diagnostic Release reply */
+typedef struct _MSG_DIAG_RELEASE_REPLY
+{
+    U8                      Reserved1;                  /* 00h */
+    U8                      BufferType;                 /* 01h */
+    U8                      MsgLength;                  /* 02h */
+    U8                      Function;                   /* 03h */
+    U16                     Reserved2;                  /* 04h */
+    U8                      Reserved3;                  /* 06h */
+    U8                      MsgFlags;                   /* 07h */
+    U32                     MsgContext;                 /* 08h */
+    U16                     Reserved4;                  /* 0Ch */
+    U16                     IOCStatus;                  /* 0Eh */
+    U32                     IOCLogInfo;                 /* 10h */
+} MSG_DIAG_RELEASE_REPLY, MPI_POINTER PTR_MSG_DIAG_RELEASE_REPLY,
+  DiagReleaseReply_t, MPI_POINTER pDiagReleaseReply_t;
+
+
+#endif
+
+
diff --git a/drivers/message/fusion/lsi/mpi_type.h b/drivers/message/fusion/lsi/mpi_type.h
new file mode 100644
index 0000000..239328a
--- /dev/null
+++ b/drivers/message/fusion/lsi/mpi_type.h
@@ -0,0 +1,86 @@
+/*
+ *  Copyright (c) 2000-2003 LSI Logic Corporation.
+ *
+ *
+ *           Name:  mpi_type.h
+ *          Title:  MPI Basic type definitions
+ *  Creation Date:  June 6, 2000
+ *
+ *    mpi_type.h Version:  01.05.xx
+ *
+ *  Version History
+ *  ---------------
+ *
+ *  Date      Version   Description
+ *  --------  --------  ------------------------------------------------------
+ *  05-08-00  00.10.01  Original release for 0.10 spec dated 4/26/2000.
+ *  06-06-00  01.00.01  Update version number for 1.0 release.
+ *  11-02-00  01.01.01  Original release for post 1.0 work
+ *  02-20-01  01.01.02  Added define and ifdef for MPI_POINTER.
+ *  08-08-01  01.02.01  Original release for v1.2 work.
+ *  --------------------------------------------------------------------------
+ */
+
+#ifndef MPI_TYPE_H
+#define MPI_TYPE_H
+
+
+/*******************************************************************************
+ * Define MPI_POINTER if it hasn't already been defined. By default MPI_POINTER
+ * is defined to be a near pointer. MPI_POINTER can be defined as a far pointer
+ * by defining MPI_POINTER as "far *" before this header file is included.
+ */
+#ifndef MPI_POINTER
+#define MPI_POINTER     *
+#endif
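+
+/*
+ * Illustrative sketch: an environment needing far pointers would override the
+ * default before including any MPI header, e.g.
+ *
+ *     #define MPI_POINTER     far *
+ *     #include "mpi_type.h"
+ *
+ * The Linux driver relies on the default near pointer above.
+ */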
+
+
+/*****************************************************************************
+*
+*               B a s i c    T y p e s
+*
+*****************************************************************************/
+
+typedef signed   char   S8;
+typedef unsigned char   U8;
+typedef signed   short  S16;
+typedef unsigned short  U16;
+
+
+typedef int32_t   S32;
+typedef u_int32_t U32;
+
+/*
+ *  The U64/S64 structures below hard-code a Low-word-first layout; as
+ *  written they are only correct on little-endian systems.
+ */
+
+typedef struct _S64
+{
+    U32          Low;
+    S32          High;
+} S64;
+
+typedef struct _U64
+{
+    U32          Low;
+    U32          High;
+} U64;
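+
+/*
+ * Illustrative sketch, assuming the little-endian layout noted above: a
+ * hypothetical helper mapping a native 64-bit value onto the Low/High pair
+ * would look like
+ *
+ *     static inline void mpi_set_u64(U64 *d, unsigned long long v)
+ *     {
+ *         d->Low  = (U32)(v & 0xFFFFFFFF);
+ *         d->High = (U32)(v >> 32);
+ *     }
+ */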
+
+
+/****************************************************************************/
+/*  Pointers                                                                */
+/****************************************************************************/
+
+typedef S8      *PS8;
+typedef U8      *PU8;
+typedef S16     *PS16;
+typedef U16     *PU16;
+typedef S32     *PS32;
+typedef U32     *PU32;
+typedef S64     *PS64;
+typedef U64     *PU64;
+
+
+#endif
+
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
new file mode 100644
index 0000000..942cc1c
--- /dev/null
+++ b/drivers/message/fusion/mptbase.c
@@ -0,0 +1,5946 @@
+/*
+ *  linux/drivers/message/fusion/mptbase.c
+ *      High performance SCSI + LAN / Fibre Channel device drivers.
+ *      This is the Fusion MPT base driver which supports multiple
+ *      (SCSI + LAN) specialized protocol drivers.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      There are lots of people not mentioned below that deserve credit
+ *      and thanks but won't get it here - sorry in advance that you
+ *      got overlooked.
+ *
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      A special thanks to Noah Romer (LSI Logic) for tons of work
+ *      and tough debugging on the LAN driver, especially early on;-)
+ *      And to Roger Hickerson (LSI Logic) for tirelessly supporting
+ *      this driver project.
+ *
+ *      A special thanks to Pamela Delaney (LSI Logic) for tons of work
+ *      and countless enhancements while adding support for the 1030
+ *      chip family.  Pam has been instrumental in the development of
+ *      the 2.xx.xx series fusion drivers, and her contributions are
+ *      far too numerous to hope to list in one place.
+ *
+ *      All manner of help from Stephen Shirron (LSI Logic):
+ *      low-level FC analysis, debug + various fixes in FCxx firmware,
+ *      initial port to alpha platform, various driver code optimizations,
+ *      being a faithful sounding board on all sorts of issues & ideas,
+ *      etc.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      Special thanks goes to the I2O LAN driver people at the
+ *      University of Helsinki, who, unbeknownst to them, provided
+ *      the inspiration and initial structure for this driver.
+ *
+ *      A really huge debt of gratitude is owed to Eddie C. Dost
+ *      for gobs of hard work fixing and optimizing LAN code.
+ *      THANK YOU!
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Originally By: Steven J. Ralston
+ *  (mailto:sjralston1@netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptbase.c,v 1.126 2002/12/16 15:28:45 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/config.h>
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>		/* needed for in_interrupt() proto */
+#include <asm/io.h>
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif
+#ifdef __sparc__
+#include <asm/irq.h>			/* needed for __irq_itoa() proto */
+#endif
+
+#include "mptbase.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME		"Fusion MPT base driver"
+#define my_VERSION	MPT_LINUX_VERSION_COMMON
+#define MYNAM		"mptbase"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+
+/*
+ *  cmd line parameters
+ */
+#ifdef MFCNT
+static int mfcounter = 0;
+#define PRINT_MF_COUNT 20000
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Public data...
+ */
+int mpt_lan_index = -1;
+int mpt_stm_index = -1;
+
+struct proc_dir_entry *mpt_proc_root_dir;
+
+#define WHOINIT_UNKNOWN		0xAA
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Private data...
+ */
+					/* Adapter link list */
+LIST_HEAD(ioc_list);
+					/* Callback lookup table */
+static MPT_CALLBACK		 MptCallbacks[MPT_MAX_PROTOCOL_DRIVERS];
+					/* Protocol driver class lookup table */
+static int			 MptDriverClass[MPT_MAX_PROTOCOL_DRIVERS];
+					/* Event handler lookup table */
+static MPT_EVHANDLER		 MptEvHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+					/* Reset handler lookup table */
+static MPT_RESETHANDLER		 MptResetHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+static struct mpt_pci_driver 	*MptDeviceDriverHandlers[MPT_MAX_PROTOCOL_DRIVERS];
+
+static int	mpt_base_index = -1;
+static int	last_drv_idx = -1;
+
+static DECLARE_WAIT_QUEUE_HEAD(mpt_waitq);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Forward protos...
+ */
+static irqreturn_t mpt_interrupt(int irq, void *bus_id, struct pt_regs *r);
+static int	mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+static int	mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes,
+			u32 *req, int replyBytes, u16 *u16reply, int maxwait,
+			int sleepFlag);
+static int	mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag);
+static void	mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev);
+static void	mpt_adapter_disable(MPT_ADAPTER *ioc);
+static void	mpt_adapter_dispose(MPT_ADAPTER *ioc);
+
+static void	MptDisplayIocCapabilities(MPT_ADAPTER *ioc);
+static int	MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag);
+//static u32	mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
+static int	GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason);
+static int	GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
+static int	SendIocInit(MPT_ADAPTER *ioc, int sleepFlag);
+static int	SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag);
+static int	mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag);
+static int	mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag);
+static int	mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
+static int	KickStart(MPT_ADAPTER *ioc, int ignore, int sleepFlag);
+static int	SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag);
+static int	PrimeIocFifos(MPT_ADAPTER *ioc);
+static int	WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int	WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int	WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag);
+static int	GetLanConfigPages(MPT_ADAPTER *ioc);
+static int	GetFcPortPage0(MPT_ADAPTER *ioc, int portnum);
+static int	GetIoUnitPage2(MPT_ADAPTER *ioc);
+static int	mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum);
+static int	mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum);
+static void 	mpt_read_ioc_pg_1(MPT_ADAPTER *ioc);
+static void 	mpt_read_ioc_pg_4(MPT_ADAPTER *ioc);
+static void	mpt_timer_expired(unsigned long data);
+static int	SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch);
+static int	SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp);
+
+#ifdef CONFIG_PROC_FS
+static int	procmpt_summary_read(char *buf, char **start, off_t offset,
+				int request, int *eof, void *data);
+static int	procmpt_version_read(char *buf, char **start, off_t offset,
+				int request, int *eof, void *data);
+static int	procmpt_iocinfo_read(char *buf, char **start, off_t offset,
+				int request, int *eof, void *data);
+#endif
+static void	mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc);
+
+//int		mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+static int	ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply, int *evHandlers);
+static void	mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf);
+static void	mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info);
+static void	mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info);
+
+/* module entry point */
+static int  __devinit mptbase_probe (struct pci_dev *, const struct pci_device_id *);
+static void __devexit mptbase_remove(struct pci_dev *);
+static void mptbase_shutdown(struct device * );
+static int  __init    fusion_init  (void);
+static void __exit    fusion_exit  (void);
+
+/****************************************************************************
+ * Supported hardware
+ */
+
+static struct pci_device_id mptbase_pci_table[] = {
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC909,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC929X,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_FC919X,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_53C1030,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{ PCI_VENDOR_ID_LSI_LOGIC, PCI_DEVICE_ID_LSI_1030_53C1035,
+		PCI_ANY_ID, PCI_ANY_ID },
+	{0}	/* Terminating entry */
+};
+MODULE_DEVICE_TABLE(pci, mptbase_pci_table);
+
+#define CHIPREG_READ32(addr) 		readl_relaxed(addr)
+#define CHIPREG_READ32_dmasync(addr)	readl(addr)
+#define CHIPREG_WRITE32(addr,val) 	writel(val, addr)
+#define CHIPREG_PIO_WRITE32(addr,val)	outl(val, (unsigned long)addr)
+#define CHIPREG_PIO_READ32(addr) 	inl((unsigned long)addr)
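+
+/* The _dmasync variant uses readl(), which is ordered with respect to DMA,
+ * while the plain accessor uses the cheaper readl_relaxed(); the PIO
+ * variants cover the port-I/O access path.
+ */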
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_interrupt - MPT adapter (IOC) specific interrupt handler.
+ *	@irq: irq number (not used)
+ *	@bus_id: bus identifier cookie == pointer to MPT_ADAPTER structure
+ *	@r: pt_regs pointer (not used)
+ *
+ *	This routine is registered via the request_irq() kernel API call,
+ *	and handles all interrupts generated from a specific MPT adapter
+ *	(also referred to as an IO Controller or IOC).
+ *	This routine must clear the interrupt from the adapter and does
+ *	so by reading the reply FIFO.  Multiple replies may be processed
+ *	per single call to this routine; up to MPT_MAX_REPLIES_PER_ISR
+ *	which is currently set to 32 in mptbase.h.
+ *
+ *	This routine handles register-level access of the adapter but
+ *	dispatches (calls) a protocol-specific callback routine to handle
+ *	the protocol-specific details of the MPT request completion.
+ */
+static irqreturn_t
+mpt_interrupt(int irq, void *bus_id, struct pt_regs *r)
+{
+	MPT_ADAPTER	*ioc;
+	MPT_FRAME_HDR	*mf;
+	MPT_FRAME_HDR	*mr;
+	u32		 pa;
+	int		 req_idx;
+	int		 cb_idx;
+	int		 type;
+	int		 freeme;
+
+	ioc = (MPT_ADAPTER *)bus_id;
+
+	/*
+	 *  Drain the reply FIFO!
+	 *
+	 * NOTES: I've seen up to 10 replies processed in this loop, so far...
+	 * Update: I've seen up to 9182 replies processed in this loop! ??
+	 * Update: Limit ourselves to processing max of N replies
+	 *	(bottom of loop).
+	 */
+	while (1) {
+
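+		/* A reply FIFO read of all ones means the FIFO is empty. */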
+		if ((pa = CHIPREG_READ32_dmasync(&ioc->chip->ReplyFifo)) == 0xFFFFFFFF)
+			return IRQ_HANDLED;
+
+		cb_idx = 0;
+		freeme = 0;
+
+		/*
+		 *  Check for non-TURBO reply!
+		 */
+		if (pa & MPI_ADDRESS_REPLY_A_BIT) {
+			u32 reply_dma_low;
+			u16 ioc_stat;
+
+			/* Non-TURBO (address) reply: recover the reply frame
+			 *  DMA address via left shift 1, which also gets rid
+			 *  of MPI_ADDRESS_REPLY_A_BIT.
+			 */
+
+			/* Map DMA address of reply header to cpu address.
+			 * pa is 32 bits - but the dma address may be 32 or 64 bits;
+			 * compute the offset based only on the low addresses.
+			 */
+			reply_dma_low = (pa = (pa << 1));
+			mr = (MPT_FRAME_HDR *)((u8 *)ioc->reply_frames +
+					 (reply_dma_low - ioc->reply_frames_low_dma));
+
+			req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
+			cb_idx = mr->u.frame.hwhdr.msgctxu.fld.cb_idx;
+			mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+
+			dmfprintk((MYIOC_s_INFO_FMT "Got non-TURBO reply=%p req_idx=%x\n",
+					ioc->name, mr, req_idx));
+			DBG_DUMP_REPLY_FRAME(mr)
+
+			/* NEW!  20010301 -sralston
+			 *  Check/log IOC log info
+			 */
+			ioc_stat = le16_to_cpu(mr->u.reply.IOCStatus);
+			if (ioc_stat & MPI_IOCSTATUS_FLAG_LOG_INFO_AVAILABLE) {
+				u32	 log_info = le32_to_cpu(mr->u.reply.IOCLogInfo);
+				if (ioc->bus_type == FC)
+					mpt_fc_log_info(ioc, log_info);
+				else if (ioc->bus_type == SCSI)
+					mpt_sp_log_info(ioc, log_info);
+			}
+			if (ioc_stat & MPI_IOCSTATUS_MASK) {
+				if (ioc->bus_type == SCSI)
+					mpt_sp_ioc_info(ioc, (u32)ioc_stat, mf);
+			}
+		} else {
+			/*
+			 *  Process turbo (context) reply...
+			 */
+			dmfprintk((MYIOC_s_INFO_FMT "Got TURBO reply req_idx=%08x\n", ioc->name, pa));
+			type = (pa >> MPI_CONTEXT_REPLY_TYPE_SHIFT);
+			if (type == MPI_CONTEXT_REPLY_TYPE_SCSI_TARGET) {
+				cb_idx = mpt_stm_index;
+				mf = NULL;
+				mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+			} else if (type == MPI_CONTEXT_REPLY_TYPE_LAN) {
+				cb_idx = mpt_lan_index;
+				/*
+				 * BUG FIX!  20001218 -sralston
+				 *  Blind set of mf to NULL here was fatal
+				 *  after lan_reply says "freeme"
+				 *  The fix is combined with an optimization: an
+				 *  explicit check for the case where lan_reply
+				 *  would just return 1 and do nothing else.
+				 *  For that case skip the callback, but set up
+				 *  the proper mf value first here:-)
+				 */
+				if ((pa & 0x58000000) == 0x58000000) {
+					req_idx = pa & 0x0000FFFF;
+					mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+					freeme = 1;
+					/*
+					 *  IMPORTANT!  Invalidate the callback!
+					 */
+					cb_idx = 0;
+				} else {
+					mf = NULL;
+				}
+				mr = (MPT_FRAME_HDR *) CAST_U32_TO_PTR(pa);
+			} else {
+				req_idx = pa & 0x0000FFFF;
+				cb_idx = (pa & 0x00FF0000) >> 16;
+				mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+				mr = NULL;
+			}
+			pa = 0;					/* No reply flush! */
+		}
+
+#ifdef MPT_DEBUG_IRQ
+		if (ioc->bus_type == SCSI) {
+			/* Verify mf, mr are reasonable.
+			 */
+			if ((mf) && ((mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))
+				|| (mf < ioc->req_frames)) ) {
+				printk(MYIOC_s_WARN_FMT
+					"mpt_interrupt: Invalid mf (%p) req_idx (%d)!\n", ioc->name, (void *)mf, req_idx);
+				cb_idx = 0;
+				pa = 0;
+				freeme = 0;
+			}
+			if ((pa) && (mr) && ((mr >= MPT_INDEX_2_RFPTR(ioc, ioc->req_depth))
+				|| (mr < ioc->reply_frames)) ) {
+				printk(MYIOC_s_WARN_FMT
+					"mpt_interrupt: Invalid rf (%p)!\n", ioc->name, (void *)mr);
+				cb_idx = 0;
+				pa = 0;
+				freeme = 0;
+			}
+			if (cb_idx > (MPT_MAX_PROTOCOL_DRIVERS-1)) {
+				printk(MYIOC_s_WARN_FMT
+					"mpt_interrupt: Invalid cb_idx (%d)!\n", ioc->name, cb_idx);
+				cb_idx = 0;
+				pa = 0;
+				freeme = 0;
+			}
+		}
+#endif
+
+		/*  Check for (valid) IO callback!  */
+		if (cb_idx) {
+			/*  Do the callback!  */
+			freeme = (*(MptCallbacks[cb_idx]))(ioc, mf, mr);
+		}
+
+		if (pa) {
+			/*  Flush (non-TURBO) reply with a WRITE!  */
+			CHIPREG_WRITE32(&ioc->chip->ReplyFifo, pa);
+		}
+
+		if (freeme) {
+			unsigned long flags;
+
+			/*  Put Request back on FreeQ!  */
+			spin_lock_irqsave(&ioc->FreeQlock, flags);
+			list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+#ifdef MFCNT
+			ioc->mfcnt--;
+#endif
+			spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+		}
+
+		mb();
+	}	/* drain reply FIFO */
+
+	return IRQ_HANDLED;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_base_reply - MPT base driver's callback routine; all base driver
+ *	"internal" request/reply processing is routed here.
+ *	Currently used for EventNotification and EventAck handling.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: Pointer to original MPT request frame
+ *	@reply: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ *	Returns 1 indicating original alloc'd request frame ptr
+ *	should be freed, or 0 if it shouldn't.
+ */
+static int
+mpt_base_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+{
+	int freereq = 1;
+	u8 func;
+
+	dprintk((MYIOC_s_INFO_FMT "mpt_base_reply() called\n", ioc->name));
+
+	if ((mf == NULL) ||
+	    (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
+		printk(MYIOC_s_ERR_FMT "NULL or BAD request frame ptr! (=%p)\n",
+				ioc->name, (void *)mf);
+		return 1;
+	}
+
+	if (reply == NULL) {
+		dprintk((MYIOC_s_ERR_FMT "Unexpected NULL Event (turbo?) reply!\n",
+				ioc->name));
+		return 1;
+	}
+
+	if (!(reply->u.hdr.MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)) {
+		dmfprintk((KERN_INFO MYNAM ": Original request frame (@%p) header\n", mf));
+		DBG_DUMP_REQUEST_FRAME_HDR(mf)
+	}
+
+	func = reply->u.hdr.Function;
+	dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, Function=%02Xh\n",
+			ioc->name, func));
+
+	if (func == MPI_FUNCTION_EVENT_NOTIFICATION) {
+		EventNotificationReply_t *pEvReply = (EventNotificationReply_t *) reply;
+		int evHandlers = 0;
+		int results;
+
+		results = ProcessEventNotification(ioc, pEvReply, &evHandlers);
+		if (results != evHandlers) {
+			/* CHECKME! Any special handling needed here? */
+			devtprintk((MYIOC_s_WARN_FMT "Called %d event handlers, sum results = %d\n",
+					ioc->name, evHandlers, results));
+		}
+
+		/*
+		 *	Hmmm...  It seems that EventNotificationReply is an exception
+		 *	to the rule of one reply per request.
+		 */
+		if (pEvReply->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY)
+			freereq = 0;
+
+#ifdef CONFIG_PROC_FS
+//		LogEvent(ioc, pEvReply);
+#endif
+
+	} else if (func == MPI_FUNCTION_EVENT_ACK) {
+		dprintk((MYIOC_s_INFO_FMT "mpt_base_reply, EventAck reply received\n",
+				ioc->name));
+	} else if (func == MPI_FUNCTION_CONFIG ||
+		   func == MPI_FUNCTION_TOOLBOX) {
+		CONFIGPARMS *pCfg;
+		unsigned long flags;
+
+		dcprintk((MYIOC_s_INFO_FMT "config_complete (mf=%p,mr=%p)\n",
+				ioc->name, mf, reply));
+
+		pCfg = * ((CONFIGPARMS **)((u8 *) mf + ioc->req_sz - sizeof(void *)));
+
+		if (pCfg) {
+			/* disable timer and remove from linked list */
+			del_timer(&pCfg->timer);
+
+			spin_lock_irqsave(&ioc->FreeQlock, flags);
+			list_del(&pCfg->linkage);
+			spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+			/*
+			 *	If IOC Status is SUCCESS, save the header
+			 *	and set the status code to GOOD.
+			 */
+			pCfg->status = MPT_CONFIG_ERROR;
+			if (reply) {
+				ConfigReply_t	*pReply = (ConfigReply_t *)reply;
+				u16		 status;
+
+				status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+				dcprintk((KERN_NOTICE "  IOCStatus=%04xh, IOCLogInfo=%08xh\n",
+				     status, le32_to_cpu(pReply->IOCLogInfo)));
+
+				pCfg->status = status;
+				if (status == MPI_IOCSTATUS_SUCCESS) {
+					pCfg->hdr->PageVersion = pReply->Header.PageVersion;
+					pCfg->hdr->PageLength = pReply->Header.PageLength;
+					pCfg->hdr->PageNumber = pReply->Header.PageNumber;
+					pCfg->hdr->PageType = pReply->Header.PageType;
+				}
+			}
+
+			/*
+			 *	Wake up the original calling thread
+			 */
+			pCfg->wait_done = 1;
+			wake_up(&mpt_waitq);
+		}
+	} else {
+		printk(MYIOC_s_ERR_FMT "Unexpected msg function (=%02Xh) reply received!\n",
+				ioc->name, func);
+	}
+
+	/*
+	 *	Conditionally tell caller to free the original
+	 *	EventNotification/EventAck/unexpected request frame!
+	 */
+	return freereq;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_register - Register protocol-specific main callback handler.
+ *	@cbfunc: callback function pointer
+ *	@dclass: Protocol driver's class (%MPT_DRIVER_CLASS enum value)
+ *
+ *	This routine is called by a protocol-specific driver (SCSI host,
+ *	LAN, SCSI target) to register its reply callback routine.  Each
+ *	protocol-specific driver must do this before it will be able to
+ *	use any IOC resources, such as obtaining request frames.
+ *
+ *	NOTES: The SCSI protocol driver currently calls this routine thrice
+ *	in order to register separate callbacks; one for "normal" SCSI IO;
+ *	one for MptScsiTaskMgmt requests; one for Scan/DV requests.
+ *
+ *	Returns a positive integer valued "handle" in the
+ *	range (and S.O.D. order) {N,...,7,6,5,...,1} if successful.
+ *	Any non-positive return value (including zero!) should be considered
+ *	an error by the caller.
+ */
+int
+mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass)
+{
+	int i;
+
+	last_drv_idx = -1;
+
+	/*
+	 *  Search for empty callback slot in this order: {N,...,7,6,5,...,1}
+	 *  (slot/handle 0 is reserved!)
+	 */
+	for (i = MPT_MAX_PROTOCOL_DRIVERS-1; i; i--) {
+		if (MptCallbacks[i] == NULL) {
+			MptCallbacks[i] = cbfunc;
+			MptDriverClass[i] = dclass;
+			MptEvHandlers[i] = NULL;
+			last_drv_idx = i;
+			break;
+		}
+	}
+
+	return last_drv_idx;
+}
+
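+/*
+ *	Illustrative usage sketch (hypothetical code, not part of mptbase):
+ *	a protocol driver registers its reply callback at load time and keeps
+ *	the returned handle for later request-frame calls.  The mydrv_* names
+ *	are assumptions, and MPTUNKNOWN_DRIVER stands in for the driver's real
+ *	MPT_DRIVER_CLASS value.
+ */
+#if 0
+static int mydrv_cb_idx;
+
+static int
+mydrv_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+{
+	/* ... process the reply frame ... */
+	return 1;	/* nonzero => base driver puts mf back on the FreeQ */
+}
+
+static int __init
+mydrv_init(void)
+{
+	mydrv_cb_idx = mpt_register(mydrv_reply, MPTUNKNOWN_DRIVER);
+	if (mydrv_cb_idx <= 0)
+		return -EBUSY;	/* non-positive handle (including 0) is an error */
+	return 0;
+}
+
+static void __exit
+mydrv_exit(void)
+{
+	mpt_deregister(mydrv_cb_idx);
+}
+#endif
+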
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_deregister - Deregister a protocol driver's resources.
+ *	@cb_idx: previously registered callback handle
+ *
+ *	Each protocol-specific driver should call this routine when its
+ *	module is unloaded.
+ */
+void
+mpt_deregister(int cb_idx)
+{
+	if ((cb_idx >= 0) && (cb_idx < MPT_MAX_PROTOCOL_DRIVERS)) {
+		MptCallbacks[cb_idx] = NULL;
+		MptDriverClass[cb_idx] = MPTUNKNOWN_DRIVER;
+		MptEvHandlers[cb_idx] = NULL;
+
+		last_drv_idx++;
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_event_register - Register protocol-specific event callback
+ *	handler.
+ *	@cb_idx: previously registered (via mpt_register) callback handle
+ *	@ev_cbfunc: callback function
+ *
+ *	This routine can be called by one or more protocol-specific drivers
+ *	if/when they choose to be notified of MPT events.
+ *
+ *	Returns 0 for success.
+ */
+int
+mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc)
+{
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+		return -1;
+
+	MptEvHandlers[cb_idx] = ev_cbfunc;
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_event_deregister - Deregister protocol-specific event callback
+ *	handler.
+ *	@cb_idx: previously registered callback handle
+ *
+ *	Each protocol-specific driver should call this routine
+ *	when it does not (or can no longer) handle events,
+ *	or when its module is unloaded.
+ */
+void
+mpt_event_deregister(int cb_idx)
+{
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+		return;
+
+	MptEvHandlers[cb_idx] = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_reset_register - Register protocol-specific IOC reset handler.
+ *	@cb_idx: previously registered (via mpt_register) callback handle
+ *	@reset_func: reset function
+ *
+ *	This routine can be called by one or more protocol-specific drivers
+ *	if/when they choose to be notified of IOC resets.
+ *
+ *	Returns 0 for success.
+ */
+int
+mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func)
+{
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+		return -1;
+
+	MptResetHandlers[cb_idx] = reset_func;
+	return 0;
+}
+
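+/*
+ *	Illustrative sketch (hypothetical, not part of mptbase): a reset
+ *	handler as invoked from mpt_do_ioc_recovery() with MPT_IOC_POST_RESET;
+ *	the mydrv_* names are assumptions and the int return value is summed
+ *	by the caller.
+ */
+#if 0
+static int
+mydrv_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+	if (reset_phase == MPT_IOC_POST_RESET) {
+		/* ... re-issue any internal requests lost across the reset ... */
+	}
+	return 1;
+}
+
+/* registered, once a callback handle exists, via:
+ *	mpt_reset_register(mydrv_cb_idx, mydrv_ioc_reset);
+ */
+#endif
+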
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_reset_deregister - Deregister protocol-specific IOC reset handler.
+ *	@cb_idx: previously registered callback handle
+ *
+ *	Each protocol-specific driver should call this routine
+ *	when it does not (or can no longer) handle IOC reset notifications,
+ *	or when its module is unloaded.
+ */
+void
+mpt_reset_deregister(int cb_idx)
+{
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+		return;
+
+	MptResetHandlers[cb_idx] = NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_device_driver_register - Register device driver hooks
+ */
+int
+mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx)
+{
+	MPT_ADAPTER	*ioc;
+	int 		error=0;
+
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS) {
+		error= -EINVAL;
+		return error;
+	}
+
+	MptDeviceDriverHandlers[cb_idx] = dd_cbfunc;
+
+	/* call per pci device probe entry point */
+	list_for_each_entry(ioc, &ioc_list, list) {
+		if(dd_cbfunc->probe) {
+			error = dd_cbfunc->probe(ioc->pcidev,
+			  ioc->pcidev->driver->id_table);
+			if(error != 0)
+				return error;
+  		}
+	 }
+
+	return error;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_device_driver_deregister - DeRegister device driver hooks
+ */
+void
+mpt_device_driver_deregister(int cb_idx)
+{
+	struct mpt_pci_driver *dd_cbfunc;
+	MPT_ADAPTER	*ioc;
+
+	if (cb_idx < 1 || cb_idx >= MPT_MAX_PROTOCOL_DRIVERS)
+		return;
+
+	dd_cbfunc = MptDeviceDriverHandlers[cb_idx];
+
+	list_for_each_entry(ioc, &ioc_list, list) {
+		if (dd_cbfunc->remove)
+			dd_cbfunc->remove(ioc->pcidev);
+	}
+	
+	MptDeviceDriverHandlers[cb_idx] = NULL;
+}
+
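+/*
+ *	Illustrative sketch (hypothetical, not part of mptbase): the hook
+ *	table a driver hands to mpt_device_driver_register().  Only the probe
+ *	and remove hooks are filled in; the other members used in this file
+ *	(shutdown/suspend/resume) default to NULL.  The mydrv_* names are
+ *	assumptions.
+ */
+#if 0
+static int  mydrv_probe(struct pci_dev *pdev, const struct pci_device_id *id);
+static void mydrv_remove(struct pci_dev *pdev);
+
+static struct mpt_pci_driver mydrv_mpt_hooks = {
+	.probe	= mydrv_probe,
+	.remove	= mydrv_remove,
+};
+
+/*	mpt_device_driver_register(&mydrv_mpt_hooks, mydrv_cb_idx);	*/
+#endif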
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_get_msg_frame - Obtain an MPT request frame from the pool (of 1024)
+ *	allocated per MPT adapter.
+ *	@handle: Handle of registered MPT protocol driver
+ *	@ioc: Pointer to MPT adapter structure
+ *
+ *	Returns pointer to an MPT request frame or %NULL if none are available
+ *	or IOC is not active.
+ */
+MPT_FRAME_HDR*
+mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc)
+{
+	MPT_FRAME_HDR *mf;
+	unsigned long flags;
+	u16	 req_idx;	/* Request index */
+
+	/* validate handle and ioc identifier */
+
+#ifdef MFCNT
+	if (!ioc->active)
+		printk(KERN_WARNING "IOC Not Active! mpt_get_msg_frame returning NULL!\n");
+#endif
+
+	/* If interrupts are not attached, do not return a request frame */
+	if (!ioc->active)
+		return NULL;
+
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	if (!list_empty(&ioc->FreeQ)) {
+		int req_offset;
+
+		mf = list_entry(ioc->FreeQ.next, MPT_FRAME_HDR,
+				u.frame.linkage.list);
+		list_del(&mf->u.frame.linkage.list);
+		mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle;	/* byte */
+		req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
+								/* u16! */
+		req_idx = cpu_to_le16(req_offset / ioc->req_sz);
+		mf->u.frame.hwhdr.msgctxu.fld.req_idx = req_idx;
+		mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
+		ioc->RequestNB[req_idx] = ioc->NB_for_64_byte_frame; /* Default, will be changed if necessary in SG generation */
+#ifdef MFCNT
+		ioc->mfcnt++;
+#endif
+	}
+	else
+		mf = NULL;
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+#ifdef MFCNT
+	if (mf == NULL)
+		printk(KERN_WARNING "IOC Active. No free Msg Frames! Count 0x%x Max 0x%x\n", ioc->mfcnt, ioc->req_depth);
+	mfcounter++;
+	if (mfcounter == PRINT_MF_COUNT)
+		printk(KERN_INFO "MF Count 0x%x Max 0x%x \n", ioc->mfcnt, ioc->req_depth);
+#endif
+
+	dmfprintk((KERN_INFO MYNAM ": %s: mpt_get_msg_frame(%d,%d), got mf=%p\n",
+			ioc->name, handle, ioc->id, mf));
+	return mf;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_put_msg_frame - Send a protocol-specific MPT request frame
+ *	to an IOC.
+ *	@handle: Handle of registered MPT protocol driver
+ *	@ioc: Pointer to MPT adapter structure
+ *	@mf: Pointer to MPT request frame
+ *
+ *	This routine posts an MPT request frame to the request post FIFO of a
+ *	specific MPT adapter.
+ */
+void
+mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+	u32 mf_dma_addr;
+	int req_offset;
+	u16	 req_idx;	/* Request index */
+
+	/* ensure values are reset properly! */
+	mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle;		/* byte */
+	req_offset = (u8 *)mf - (u8 *)ioc->req_frames;
+								/* u16! */
+	req_idx = cpu_to_le16(req_offset / ioc->req_sz);
+	mf->u.frame.hwhdr.msgctxu.fld.req_idx = req_idx;
+	mf->u.frame.hwhdr.msgctxu.fld.rsvd = 0;
+
+#ifdef MPT_DEBUG_MSG_FRAME
+	{
+		u32	*m = mf->u.frame.hwhdr.__hdr;
+		int	 ii, n;
+
+		printk(KERN_INFO MYNAM ": %s: About to Put msg frame @ %p:\n" KERN_INFO " ",
+				ioc->name, m);
+		n = ioc->req_sz/4 - 1;
+		while (m[n] == 0)
+			n--;
+		for (ii=0; ii<=n; ii++) {
+			if (ii && ((ii%8)==0))
+				printk("\n" KERN_INFO " ");
+			printk(" %08x", le32_to_cpu(m[ii]));
+		}
+		printk("\n");
+	}
+#endif
+
+	mf_dma_addr = (ioc->req_frames_low_dma + req_offset) | ioc->RequestNB[req_idx];  
+	dsgprintk((MYIOC_s_INFO_FMT "mf_dma_addr=%x req_idx=%d RequestNB=%x\n", ioc->name, mf_dma_addr, req_idx, ioc->RequestNB[req_idx]));
+	CHIPREG_WRITE32(&ioc->chip->RequestFifo, mf_dma_addr);
+}
+
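+/*
+ *	Illustrative sketch (hypothetical, not part of mptbase): the usual
+ *	request-frame cycle from a protocol driver's point of view, where
+ *	mydrv_cb_idx is the handle returned by mpt_register() and the
+ *	request-building step is left as a placeholder.
+ */
+#if 0
+static int
+mydrv_send_request(MPT_ADAPTER *ioc)
+{
+	MPT_FRAME_HDR *mf;
+
+	mf = mpt_get_msg_frame(mydrv_cb_idx, ioc);
+	if (mf == NULL)
+		return -EAGAIN;		/* FreeQ empty or IOC not active */
+
+	/* ... build the MPI request inside mf (up to ioc->req_sz bytes) ... */
+
+	/* mf comes back to the FreeQ via the reply callback path; use
+	 * mpt_free_msg_frame(ioc, mf) instead if the request is abandoned
+	 * before posting.
+	 */
+	mpt_put_msg_frame(mydrv_cb_idx, ioc, mf);
+	return 0;
+}
+#endif
+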
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_free_msg_frame - Place MPT request frame back on FreeQ.
+ *	@ioc: Pointer to MPT adapter structure
+ *	@mf: Pointer to MPT request frame
+ *
+ *	This routine places an MPT request frame back on the MPT adapter's
+ *	FreeQ.
+ */
+void
+mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf)
+{
+	unsigned long flags;
+
+	/*  Put Request back on FreeQ!  */
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+#ifdef MFCNT
+	ioc->mfcnt--;
+#endif
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_add_sge - Place a simple SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@flagslength: SGE flags and data transfer length
+ *	@dma_addr: Physical address
+ *
+ *	This routine writes a single simple scatter-gather element (SGE) at
+ *	the given virtual address, using the 64-bit SGE format when
+ *	dma_addr_t is 64 bits wide and the 32-bit format otherwise.
+ */
+void
+mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+		u32 tmp = dma_addr & 0xFFFFFFFF;
+
+		pSge->FlagsLength = cpu_to_le32(flagslength);
+		pSge->Address.Low = cpu_to_le32(tmp);
+		tmp = (u32) ((u64)dma_addr >> 32);
+		pSge->Address.High = cpu_to_le32(tmp);
+
+	} else {
+		SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+		pSge->FlagsLength = cpu_to_le32(flagslength);
+		pSge->Address = cpu_to_le32(dma_addr);
+	}
+}
+
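+/*
+ *	Illustrative sketch (hypothetical, not part of mptbase): building a
+ *	flagslength word for mpt_add_sge().  The layout assumed here is the
+ *	MPI simple-SGE one (flags in the upper 8 bits, transfer length in the
+ *	lower 24 bits); psge points into the request frame and sge_flags, len
+ *	and buf_dma are example parameters.
+ */
+#if 0
+static void
+mydrv_add_data_sge(char *psge, u8 sge_flags, u32 len, dma_addr_t buf_dma)
+{
+	u32 flagslength = ((u32)sge_flags << 24) | (len & 0x00FFFFFF);
+
+	mpt_add_sge(psge, flagslength, buf_dma);
+}
+#endif
+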
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_send_handshake_request - Send MPT request via doorbell
+ *	handshake method.
+ *	@handle: Handle of registered MPT protocol driver
+ *	@ioc: Pointer to MPT adapter structure
+ *	@reqBytes: Size of the request in bytes
+ *	@req: Pointer to MPT request frame
+ *	@sleepFlag: Use schedule if CAN_SLEEP else use udelay.
+ *
+ *	This routine is used exclusively to send MptScsiTaskMgmt
+ *	requests since they are required to be sent via doorbell handshake.
+ *
+ *	NOTE: It is the caller's responsibility to byte-swap fields in the
+ *	request which are greater than 1 byte in size.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+int
+mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag)
+{
+	int		 r = 0;
+	u8	*req_as_bytes;
+	int	 ii;
+
+	/* State is known to be good upon entering
+	 * this function so issue the bus reset
+	 * request.
+	 */
+
+	/*
+	 * Emulate what mpt_put_msg_frame() does with respect to
+	 * sanity-setting cb_idx/req_idx.  But ONLY if this request
+	 * is in proper (pre-alloc'd) request buffer range...
+	 */
+	ii = MFPTR_2_MPT_INDEX(ioc,(MPT_FRAME_HDR*)req);
+	if (reqBytes >= 12 && ii >= 0 && ii < ioc->req_depth) {
+		MPT_FRAME_HDR *mf = (MPT_FRAME_HDR*)req;
+		mf->u.frame.hwhdr.msgctxu.fld.req_idx = cpu_to_le16(ii);
+		mf->u.frame.hwhdr.msgctxu.fld.cb_idx = handle;
+	}
+
+	/* Make sure there are no doorbells */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	
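+	/* Start the handshake: the doorbell word carries the HANDSHAKE
+	 * function code plus the request length in 32-bit dwords.
+	 */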
+	CHIPREG_WRITE32(&ioc->chip->Doorbell,
+			((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
+			 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
+
+	/* Wait for IOC doorbell int */
+	if ((ii = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0) {
+		return ii;
+	}
+
+	/* Read doorbell and check for active bit */
+	if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
+		return -5;
+
+	dhsprintk((KERN_INFO MYNAM ": %s: mpt_send_handshake_request start, WaitCnt=%d\n",
+			ioc->name, ii));
+
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+		return -2;
+	}
+		
+	/* Send request via doorbell handshake */
+	req_as_bytes = (u8 *) req;
+	for (ii = 0; ii < reqBytes/4; ii++) {
+		u32 word;
+
+		word = ((req_as_bytes[(ii*4) + 0] <<  0) |
+			(req_as_bytes[(ii*4) + 1] <<  8) |
+			(req_as_bytes[(ii*4) + 2] << 16) |
+			(req_as_bytes[(ii*4) + 3] << 24));
+		CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
+		if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0) {
+			r = -3;
+			break;
+		}
+	}
+
+	if (r >= 0 && WaitForDoorbellInt(ioc, 10, sleepFlag) >= 0)
+		r = 0;
+	else
+		r = -4;
+
+	/* Make sure there are no doorbells */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	
+	return r;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_verify_adapter - Given a unique IOC identifier, set pointer to
+ *	the associated MPT adapter structure.
+ *	@iocid: IOC unique identifier (integer)
+ *	@iocpp: Pointer to pointer to IOC adapter
+ *
+ *	Returns iocid and sets iocpp.
+ */
+int
+mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp)
+{
+	MPT_ADAPTER *ioc;
+
+	list_for_each_entry(ioc,&ioc_list,list) {
+		if (ioc->id == iocid) {
+			*iocpp =ioc;
+			return iocid;
+		} 
+	}
+	
+	*iocpp = NULL;
+	return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptbase_probe - Install a PCI intelligent MPT adapter.
+ *	@pdev: Pointer to pci_dev structure
+ *
+ *	This routine performs all the steps necessary to bring the IOC of
+ *	a MPT adapter to a OPERATIONAL state.  This includes registering
+ *	an MPT adapter to an OPERATIONAL state.  This includes registering
+ *	and reply memory pools.
+ *
+ *	This routine also pre-fetches the LAN MAC address of a Fibre Channel
+ *	MPT adapter.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ *
+ *	TODO: Add support for polled controllers
+ */
+static int __devinit
+mptbase_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	MPT_ADAPTER	*ioc;
+	u8		__iomem *mem;
+	unsigned long	 mem_phys;
+	unsigned long	 port;
+	u32		 msize;
+	u32		 psize;
+	int		 ii;
+	int		 r = -ENODEV;
+	u64		 mask = 0xffffffffffffffffULL;
+	u8		 revision;
+	u8		 pcixcmd;
+	static int	 mpt_ids = 0;
+#ifdef CONFIG_PROC_FS
+	struct proc_dir_entry *dent, *ent;
+#endif
+
+	if (pci_enable_device(pdev))
+		return r;
+	
+	dinitprintk((KERN_WARNING MYNAM ": mpt_adapter_install\n"));
+	
+	if (!pci_set_dma_mask(pdev, mask)) {
+		dprintk((KERN_INFO MYNAM
+			": 64 BIT PCI BUS DMA ADDRESSING SUPPORTED\n"));
+	} else if (pci_set_dma_mask(pdev, (u64) 0xffffffff)) {
+		printk(KERN_WARNING MYNAM ": 32 BIT PCI BUS DMA ADDRESSING NOT SUPPORTED\n");
+		return r;
+	}
+
+	if (!pci_set_consistent_dma_mask(pdev, mask))
+		dprintk((KERN_INFO MYNAM
+			": Using 64 bit consistent mask\n"));
+	else
+		dprintk((KERN_INFO MYNAM
+			": Not using 64 bit consistent mask\n"));
+
+	ioc = kmalloc(sizeof(MPT_ADAPTER), GFP_ATOMIC);
+	if (ioc == NULL) {
+		printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
+		return -ENOMEM;
+	}
+	memset(ioc, 0, sizeof(MPT_ADAPTER));
+	ioc->alloc_total = sizeof(MPT_ADAPTER);
+	ioc->req_sz = MPT_DEFAULT_FRAME_SIZE;		/* avoid div by zero! */
+	ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
+	
+	ioc->pcidev = pdev;
+	ioc->diagPending = 0;
+	spin_lock_init(&ioc->diagLock);
+
+	/* Initialize the event logging.
+	 */
+	ioc->eventTypes = 0;	/* None */
+	ioc->eventContext = 0;
+	ioc->eventLogSize = 0;
+	ioc->events = NULL;
+
+#ifdef MFCNT
+	ioc->mfcnt = 0;
+#endif
+
+	ioc->cached_fw = NULL;
+
+	/* Initialize SCSI Config Data structure
+	 */
+	memset(&ioc->spi_data, 0, sizeof(ScsiCfgData));
+
+	/* Initialize the running configQ head.
+	 */
+	INIT_LIST_HEAD(&ioc->configQ);
+
+	/* Find lookup slot. */
+	INIT_LIST_HEAD(&ioc->list);
+	ioc->id = mpt_ids++;
+	
+	mem_phys = msize = 0;
+	port = psize = 0;
+	for (ii=0; ii < DEVICE_COUNT_RESOURCE; ii++) {
+		if (pci_resource_flags(pdev, ii) & PCI_BASE_ADDRESS_SPACE_IO) {
+			/* Get I/O space! */
+			port = pci_resource_start(pdev, ii);
+			psize = pci_resource_len(pdev,ii);
+		} else {
+			/* Get memmap */
+			mem_phys = pci_resource_start(pdev, ii);
+			msize = pci_resource_len(pdev,ii);
+			break;
+		}
+	}
+	ioc->mem_size = msize;
+
+	if (ii == DEVICE_COUNT_RESOURCE) {
+		printk(KERN_ERR MYNAM ": ERROR - MPT adapter has no memory regions defined!\n");
+		kfree(ioc);
+		return -EINVAL;
+	}
+
+	dinitprintk((KERN_INFO MYNAM ": MPT adapter @ %lx, msize=%d bytes\n", mem_phys, msize));
+	dinitprintk((KERN_INFO MYNAM ": (port i/o @ %lx, psize=%d bytes)\n", port, psize));
+
+	mem = NULL;
+	/* Get logical ptr for PciMem0 space */
+	/*mem = ioremap(mem_phys, msize);*/
+	mem = ioremap(mem_phys, 0x100);
+	if (mem == NULL) {
+		printk(KERN_ERR MYNAM ": ERROR - Unable to map adapter memory!\n");
+		kfree(ioc);
+		return -EINVAL;
+	}
+	ioc->memmap = mem;
+	dinitprintk((KERN_INFO MYNAM ": mem = %p, mem_phys = %lx\n", mem, mem_phys));
+
+	dinitprintk((KERN_INFO MYNAM ": facts @ %p, pfacts[0] @ %p\n",
+			&ioc->facts, &ioc->pfacts[0]));
+
+	ioc->mem_phys = mem_phys;
+	ioc->chip = (SYSIF_REGS __iomem *)mem;
+
+	/* Save Port IO values in case we need to do downloadboot */
+	{
+		u8 *pmem = (u8*)port;
+		ioc->pio_mem_phys = port;
+		ioc->pio_chip = (SYSIF_REGS __iomem *)pmem;
+	}
+
+	if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC909) {
+		ioc->prod_name = "LSIFC909";
+		ioc->bus_type = FC;
+	}
+	if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929) {
+		ioc->prod_name = "LSIFC929";
+		ioc->bus_type = FC;
+	}
+	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919) {
+		ioc->prod_name = "LSIFC919";
+		ioc->bus_type = FC;
+	}
+	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC929X) {
+		pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+		ioc->bus_type = FC;
+		if (revision < XL_929) {
+			ioc->prod_name = "LSIFC929X";
+			/* 929X Chip Fix. Set Split transactions level
+		 	* for PCIX. Set MOST bits to zero.
+		 	*/
+			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+			pcixcmd &= 0x8F;
+			pci_write_config_byte(pdev, 0x6a, pcixcmd);
+		} else {
+			ioc->prod_name = "LSIFC929XL";
+			/* 929XL Chip Fix. Set MMRBC to 0x08.
+		 	*/
+			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+			pcixcmd |= 0x08;
+			pci_write_config_byte(pdev, 0x6a, pcixcmd);
+		}
+	}
+	else if (pdev->device == MPI_MANUFACTPAGE_DEVICEID_FC919X) {
+		ioc->prod_name = "LSIFC919X";
+		ioc->bus_type = FC;
+		/* 919X Chip Fix. Set Split transactions level
+		 * for PCIX. Set MOST bits to zero.
+		 */
+		pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+		pcixcmd &= 0x8F;
+		pci_write_config_byte(pdev, 0x6a, pcixcmd);
+	}
+	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_53C1030) {
+		ioc->prod_name = "LSI53C1030";
+		ioc->bus_type = SCSI;
+		/* 1030 Chip Fix. Disable Split transactions
+		 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
+		 */
+		pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+		if (revision < C0_1030) {
+			pci_read_config_byte(pdev, 0x6a, &pcixcmd);
+			pcixcmd &= 0x8F;
+			pci_write_config_byte(pdev, 0x6a, pcixcmd);
+		}
+	}
+	else if (pdev->device == MPI_MANUFACTPAGE_DEVID_1030_53C1035) {
+		ioc->prod_name = "LSI53C1035";
+		ioc->bus_type = SCSI;
+	}
+
+	sprintf(ioc->name, "ioc%d", ioc->id);
+
+	spin_lock_init(&ioc->FreeQlock);
+
+	/* Disable all! */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+	ioc->active = 0;
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	/* Set lookup ptr. */
+	list_add_tail(&ioc->list, &ioc_list);
+
+	ioc->pci_irq = -1;
+	if (pdev->irq) {
+		r = request_irq(pdev->irq, mpt_interrupt, SA_SHIRQ, ioc->name, ioc);
+
+		if (r < 0) {
+#ifndef __sparc__
+			printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %d!\n",
+					ioc->name, pdev->irq);
+#else
+			printk(MYIOC_s_ERR_FMT "Unable to allocate interrupt %s!\n",
+					ioc->name, __irq_itoa(pdev->irq));
+#endif
+			list_del(&ioc->list);
+			iounmap(mem);
+			kfree(ioc);
+			return -EBUSY;
+		}
+
+		ioc->pci_irq = pdev->irq;
+
+		pci_set_master(pdev);			/* ?? */
+		pci_set_drvdata(pdev, ioc);
+
+#ifndef __sparc__
+		dprintk((KERN_INFO MYNAM ": %s installed at interrupt %d\n", ioc->name, pdev->irq));
+#else
+		dprintk((KERN_INFO MYNAM ": %s installed at interrupt %s\n", ioc->name, __irq_itoa(pdev->irq)));
+#endif
+	}
+
+	/* NEW!  20010220 -sralston
+	 * Check for "bound ports" (929, 929X, 1030, 1035) to reduce redundant resets.
+	 */
+	mpt_detect_bound_ports(ioc, pdev);
+
+	if ((r = mpt_do_ioc_recovery(ioc,
+	  MPT_HOSTEVENT_IOC_BRINGUP, CAN_SLEEP)) != 0) {
+		printk(KERN_WARNING MYNAM
+		  ": WARNING - %s did not initialize properly! (%d)\n",
+		  ioc->name, r);
+
+		list_del(&ioc->list);
+		free_irq(ioc->pci_irq, ioc);
+		iounmap(mem);
+		kfree(ioc);
+		pci_set_drvdata(pdev, NULL);
+		return r;
+	}
+
+	/* call per device driver probe entry point */
+	for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
+		if(MptDeviceDriverHandlers[ii] &&
+		  MptDeviceDriverHandlers[ii]->probe) {
+			MptDeviceDriverHandlers[ii]->probe(pdev,id);
+		}
+	}
+
+#ifdef CONFIG_PROC_FS
+	/*
+	 *  Create "/proc/mpt/iocN" subdirectory entry for each MPT adapter.
+	 */
+	dent = proc_mkdir(ioc->name, mpt_proc_root_dir);
+	if (dent) {
+		ent = create_proc_entry("info", S_IFREG|S_IRUGO, dent);
+		if (ent) {
+			ent->read_proc = procmpt_iocinfo_read;
+			ent->data = ioc;
+		}
+		ent = create_proc_entry("summary", S_IFREG|S_IRUGO, dent);
+		if (ent) {
+			ent->read_proc = procmpt_summary_read;
+			ent->data = ioc;
+		}
+	}
+#endif
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptbase_remove - Remove a PCI intelligent MPT adapter.
+ *	@pdev: Pointer to pci_dev structure
+ *
+ */
+
+static void __devexit
+mptbase_remove(struct pci_dev *pdev)
+{
+	MPT_ADAPTER 	*ioc = pci_get_drvdata(pdev);
+	char pname[32];
+	int ii;
+
+	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/summary", ioc->name);
+	remove_proc_entry(pname, NULL);
+	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s/info", ioc->name);
+	remove_proc_entry(pname, NULL);
+	sprintf(pname, MPT_PROCFS_MPTBASEDIR "/%s", ioc->name);
+	remove_proc_entry(pname, NULL);
+	
+	/* call per device driver remove entry point */
+	for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
+		if(MptDeviceDriverHandlers[ii] &&
+		  MptDeviceDriverHandlers[ii]->remove) {
+			MptDeviceDriverHandlers[ii]->remove(pdev);
+		}
+	}
+	
+	/* Disable interrupts! */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+
+	ioc->active = 0;
+	synchronize_irq(pdev->irq);
+
+	/* Clear any lingering interrupt */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	CHIPREG_READ32(&ioc->chip->IntStatus);
+
+	mpt_adapter_dispose(ioc);
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptbase_shutdown - Fusion MPT base driver shutdown routine.
+ *
+ */
+static void
+mptbase_shutdown(struct device * dev)
+{
+	int ii;
+
+	/* call per device driver shutdown entry point */
+	for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
+		if(MptDeviceDriverHandlers[ii] &&
+		  MptDeviceDriverHandlers[ii]->shutdown) {
+			MptDeviceDriverHandlers[ii]->shutdown(dev);
+		}
+	}
+
+}
+
+
+/**************************************************************************
+ * Power Management
+ */
+#ifdef CONFIG_PM
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptbase_suspend - Fusion MPT base driver suspend routine.
+ *
+ *
+ */
+static int
+mptbase_suspend(struct pci_dev *pdev, u32 state)
+{
+	u32 device_state;
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+	int ii;
+
+	switch(state)
+	{
+		case 1: /* S1 */
+			device_state=1; /* D1 */
+			break;
+		case 3: /* S3 */
+		case 4: /* S4 */
+			device_state=3; /* D3 */
+			break;
+		default:
+			return -EAGAIN /*FIXME*/;
+			break;
+	}
+
+	printk(MYIOC_s_INFO_FMT
+	"pci-suspend: pdev=0x%p, slot=%s, Entering operating state [D%d]\n",
+		ioc->name, pdev, pci_name(pdev), device_state);
+
+	/* call per device driver suspend entry point */
+	for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
+		if(MptDeviceDriverHandlers[ii] &&
+		  MptDeviceDriverHandlers[ii]->suspend) {
+			MptDeviceDriverHandlers[ii]->suspend(pdev, state);
+		}
+	}
+
+	pci_save_state(pdev);
+
+	/* put ioc into READY_STATE */
+	if(SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, CAN_SLEEP)) {
+		printk(MYIOC_s_ERR_FMT
+		"pci-suspend:  IOC msg unit reset failed!\n", ioc->name);
+	}
+
+	/* disable interrupts */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+	ioc->active = 0;
+
+	/* Clear any lingering interrupt */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, device_state);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptbase_resume - Fusion MPT base driver resume routine.
+ *
+ *
+ */
+static int
+mptbase_resume(struct pci_dev *pdev)
+{
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+	u32 device_state = pdev->current_state;
+	int recovery_state;
+	int ii;
+
+	printk(MYIOC_s_INFO_FMT
+	"pci-resume: pdev=0x%p, slot=%s, Previous operating state [D%d]\n",
+		ioc->name, pdev, pci_name(pdev), device_state);
+
+	pci_set_power_state(pdev, 0);
+	pci_restore_state(pdev);
+	pci_enable_device(pdev);
+
+	/* enable interrupts */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM));
+	ioc->active = 1;
+
+	/* F/W not running */
+	if(!CHIPREG_READ32(&ioc->chip->Doorbell)) {
+		/* enable domain validation flags */
+		for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+			ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_NEED_DV;
+		}
+	}
+
+	printk(MYIOC_s_INFO_FMT
+		"pci-resume: ioc-state=0x%x,doorbell=0x%x\n",
+		ioc->name,
+		(mpt_GetIocState(ioc, 1) >> MPI_IOC_STATE_SHIFT),
+		CHIPREG_READ32(&ioc->chip->Doorbell));
+
+	/* bring ioc to operational state */
+	if ((recovery_state = mpt_do_ioc_recovery(ioc,
+	    MPT_HOSTEVENT_IOC_RECOVER, CAN_SLEEP)) != 0) {
+		printk(MYIOC_s_INFO_FMT
+			"pci-resume: Cannot recover, error:[%x]\n",
+			ioc->name, recovery_state);
+	} else {
+		printk(MYIOC_s_INFO_FMT
+			"pci-resume: success\n", ioc->name);
+	}
+
+	/* call per device driver resume entry point */
+	for(ii=0; ii<MPT_MAX_PROTOCOL_DRIVERS; ii++) {
+		if(MptDeviceDriverHandlers[ii] &&
+		  MptDeviceDriverHandlers[ii]->resume) {
+			MptDeviceDriverHandlers[ii]->resume(pdev);
+		}
+	}
+
+	return 0;
+}
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_do_ioc_recovery - Initialize or recover MPT adapter.
+ *	@ioc: Pointer to MPT adapter structure
+ *	@reason: Event word / reason
+ *	@sleepFlag: Use schedule if CAN_SLEEP else use udelay.
+ *
+ *	This routine performs all the steps necessary to bring the IOC
+ *	to an OPERATIONAL state.
+ *
+ *	This routine also pre-fetches the LAN MAC address of a Fibre Channel
+ *	MPT adapter.
+ *
+ *	Returns:
+ *		 0 for success
+ *		-1 if failed to get board READY
+ *		-2 if READY but IOCFacts Failed
+ *		-3 if READY but PrimeIOCFifos Failed
+ *		-4 if READY but IOCInit Failed
+ */
+static int
+mpt_do_ioc_recovery(MPT_ADAPTER *ioc, u32 reason, int sleepFlag)
+{
+	int	 hard_reset_done = 0;
+	int	 alt_ioc_ready = 0;
+	int	 hard;
+	int	 rc=0;
+	int	 ii;
+	int	 handlers;
+	int	 ret = 0;
+	int	 reset_alt_ioc_active = 0;
+
+	printk(KERN_INFO MYNAM ": Initiating %s %s\n",
+			ioc->name, reason==MPT_HOSTEVENT_IOC_BRINGUP ? "bringup" : "recovery");
+
+	/* Disable reply interrupts (also blocks FreeQ) */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+	ioc->active = 0;
+
+	if (ioc->alt_ioc) {
+		if (ioc->alt_ioc->active)
+			reset_alt_ioc_active = 1;
+
+		/* Disable alt-IOC's reply interrupts (and FreeQ) for a bit ... */
+		CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, 0xFFFFFFFF);
+		ioc->alt_ioc->active = 0;
+	}
+
+	hard = 1;
+	if (reason == MPT_HOSTEVENT_IOC_BRINGUP)
+		hard = 0;
+
+	if ((hard_reset_done = MakeIocReady(ioc, hard, sleepFlag)) < 0) {
+		if (hard_reset_done == -4) {
+			printk(KERN_WARNING MYNAM ": %s Owned by PEER..skipping!\n",
+					ioc->name);
+
+			if (reset_alt_ioc_active && ioc->alt_ioc) {
+				/* (re)Enable alt-IOC! (reply interrupt, FreeQ) */
+				dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
+						ioc->alt_ioc->name));
+				CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
+				ioc->alt_ioc->active = 1;
+			}
+
+		} else {
+			printk(KERN_WARNING MYNAM ": %s NOT READY WARNING!\n",
+					ioc->name);
+		}
+		return -1;
+	}
+
+	/* hard_reset_done = 0 if a soft reset was performed
+	 * and 1 if a hard reset was performed.
+	 */
+	if (hard_reset_done && reset_alt_ioc_active && ioc->alt_ioc) {
+		if ((rc = MakeIocReady(ioc->alt_ioc, 0, sleepFlag)) == 0)
+			alt_ioc_ready = 1;
+		else
+			printk(KERN_WARNING MYNAM
+					": alt-%s: Not ready WARNING!\n",
+					ioc->alt_ioc->name);
+	}
+
+	for (ii=0; ii<5; ii++) {
+		/* Get IOC facts! Allow 5 retries */
+		if ((rc = GetIocFacts(ioc, sleepFlag, reason)) == 0)
+			break;
+	}
+	
+
+	if (ii == 5) {
+		dinitprintk((MYIOC_s_INFO_FMT "Retry IocFacts failed rc=%x\n", ioc->name, rc));
+		ret = -2;
+	} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+		MptDisplayIocCapabilities(ioc);
+	}
+	
+	if (alt_ioc_ready) {
+		if ((rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason)) != 0) {
+			dinitprintk((MYIOC_s_INFO_FMT "Initial Alt IocFacts failed rc=%x\n", ioc->name, rc));
+			/* Retry - alt IOC was initialized once
+			 */
+			rc = GetIocFacts(ioc->alt_ioc, sleepFlag, reason);
+		}
+		if (rc) {
+			dinitprintk((MYIOC_s_INFO_FMT "Retry Alt IocFacts failed rc=%x\n", ioc->name, rc));
+			alt_ioc_ready = 0;
+			reset_alt_ioc_active = 0;
+		} else if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+			MptDisplayIocCapabilities(ioc->alt_ioc);
+		}
+	}
+
+	/* Prime reply & request queues!
+	 * (mucho alloc's) Must be done prior to
+	 * init as upper addresses are needed for init.
+	 * If fails, continue with alt-ioc processing
+	 */
+	if ((ret == 0) && ((rc = PrimeIocFifos(ioc)) != 0))
+		ret = -3;
+
+	/* May need to check/upload firmware & data here!
+	 * If fails, continue with alt-ioc processing
+	 */
+	if ((ret == 0) && ((rc = SendIocInit(ioc, sleepFlag)) != 0))
+		ret = -4;
+// NEW!
+	if (alt_ioc_ready && ((rc = PrimeIocFifos(ioc->alt_ioc)) != 0)) {
+		printk(KERN_WARNING MYNAM ": alt-%s: (%d) FIFO mgmt alloc WARNING!\n",
+				ioc->alt_ioc->name, rc);
+		alt_ioc_ready = 0;
+		reset_alt_ioc_active = 0;
+	}
+
+	if (alt_ioc_ready) {
+		if ((rc = SendIocInit(ioc->alt_ioc, sleepFlag)) != 0) {
+			alt_ioc_ready = 0;
+			reset_alt_ioc_active = 0;
+			printk(KERN_WARNING MYNAM
+				": alt-%s: (%d) init failure WARNING!\n",
+					ioc->alt_ioc->name, rc);
+		}
+	}
+
+	if (reason == MPT_HOSTEVENT_IOC_BRINGUP){
+		if (ioc->upload_fw) {
+			ddlprintk((MYIOC_s_INFO_FMT
+				"firmware upload required!\n", ioc->name));
+
+			/* Controller is not operational, cannot do upload
+			 */
+			if (ret == 0) {
+				rc = mpt_do_upload(ioc, sleepFlag);
+				if (rc != 0)
+					printk(KERN_WARNING MYNAM ": firmware upload failure!\n");
+			}
+		}
+	}
+
+	if (ret == 0) {
+		/* Enable! (reply interrupt) */
+		CHIPREG_WRITE32(&ioc->chip->IntMask, ~(MPI_HIM_RIM));
+		ioc->active = 1;
+	}
+
+	if (reset_alt_ioc_active && ioc->alt_ioc) {
+		/* (re)Enable alt-IOC! (reply interrupt) */
+		dprintk((KERN_INFO MYNAM ": alt-%s reply irq re-enabled\n",
+				ioc->alt_ioc->name));
+		CHIPREG_WRITE32(&ioc->alt_ioc->chip->IntMask, ~(MPI_HIM_RIM));
+		ioc->alt_ioc->active = 1;
+	}
+
+	/* NEW!  20010120 -sralston
+	 *  Enable MPT base driver management of EventNotification
+	 *  and EventAck handling.
+	 */
+	if ((ret == 0) && (!ioc->facts.EventState))
+		(void) SendEventNotification(ioc, 1);	/* 1=Enable EventNotification */
+
+	if (ioc->alt_ioc && alt_ioc_ready && !ioc->alt_ioc->facts.EventState)
+		(void) SendEventNotification(ioc->alt_ioc, 1);	/* 1=Enable EventNotification */
+
+	/* (Bugzilla:fibrebugs, #513)
+	 * Bug fix (part 2)!  20010905 -sralston
+	 *	Add additional "reason" check before call to GetLanConfigPages
+	 *	(combined with GetIoUnitPage2 call).  This prevents a somewhat
+	 *	recursive scenario; GetLanConfigPages times out, timer expired
+	 *	routine calls HardResetHandler, which calls into here again,
+	 *	and we try GetLanConfigPages again...
+	 */
+	if ((ret == 0) && (reason == MPT_HOSTEVENT_IOC_BRINGUP)) {
+		if (ioc->bus_type == FC) {
+			/*
+			 *  Pre-fetch FC port WWN and stuff...
+			 *  (FCPortPage0_t stuff)
+			 */
+			for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+				(void) GetFcPortPage0(ioc, ii);
+			}
+
+			if ((ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) &&
+			    (ioc->lan_cnfg_page0.Header.PageLength == 0)) {
+				/*
+				 *  Pre-fetch the ports LAN MAC address!
+				 *  (LANPage1_t stuff)
+				 */
+				(void) GetLanConfigPages(ioc);
+#ifdef MPT_DEBUG
+				{
+					u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+					dprintk((MYIOC_s_INFO_FMT "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+							ioc->name, a[5], a[4], a[3], a[2], a[1], a[0] ));
+				}
+#endif
+			}
+		} else {
+			/* Get NVRAM and adapter maximums from SPP 0 and 2
+			 */
+			mpt_GetScsiPortSettings(ioc, 0);
+
+			/* Get version and length of SDP 1
+			 */
+			mpt_readScsiDevicePageHeaders(ioc, 0);
+
+			/* Find IM volumes
+			 */
+			if (ioc->facts.MsgVersion >= 0x0102)
+				mpt_findImVolumes(ioc);
+
+			/* Check, and possibly reset, the coalescing value
+			 */
+			mpt_read_ioc_pg_1(ioc);
+
+			mpt_read_ioc_pg_4(ioc);
+		}
+
+		GetIoUnitPage2(ioc);
+	}
+
+	/*
+	 * Call each currently registered protocol IOC reset handler
+	 * with post-reset indication.
+	 * NOTE: If we're doing _IOC_BRINGUP, there can be no
+	 * MptResetHandlers[] registered yet.
+	 */
+	if (hard_reset_done) {
+		rc = handlers = 0;
+		for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
+			if ((ret == 0) && MptResetHandlers[ii]) {
+				dprintk((MYIOC_s_INFO_FMT "Calling IOC post_reset handler #%d\n",
+						ioc->name, ii));
+				rc += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_POST_RESET);
+				handlers++;
+			}
+
+			if (alt_ioc_ready && MptResetHandlers[ii]) {
+				dprintk((MYIOC_s_INFO_FMT "Calling alt-%s post_reset handler #%d\n",
+						ioc->name, ioc->alt_ioc->name, ii));
+				rc += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_POST_RESET);
+				handlers++;
+			}
+		}
+		/* FIXME?  Examine results here? */
+	}
+
+	return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_detect_bound_ports - Search for another IOC whose PCI
+ *	bus/dev_function matches this adapter's bus/dev_function (+/-1),
+ *	for a newly discovered 929, 929X, 1030 or 1035.
+ *	@ioc: Pointer to MPT adapter structure
+ *	@pdev: Pointer to (struct pci_dev) structure
+ *
+ *	If match on PCI dev_function +/-1 is found, bind the two MPT adapters
+ *	using alt_ioc pointer fields in their %MPT_ADAPTER structures.
+ */
+static void
+mpt_detect_bound_ports(MPT_ADAPTER *ioc, struct pci_dev *pdev)
+{
+	unsigned int match_lo, match_hi;
+	MPT_ADAPTER *ioc_srch;
+
+	match_lo = pdev->devfn-1;
+	match_hi = pdev->devfn+1;
+	dprintk((MYIOC_s_INFO_FMT "PCI bus/devfn=%x/%x, searching for devfn match on %x or %x\n",
+			ioc->name, pdev->bus->number, pdev->devfn, match_lo, match_hi));
+
+	list_for_each_entry(ioc_srch, &ioc_list, list) {
+		struct pci_dev *_pcidev = ioc_srch->pcidev;
+
+		if ((_pcidev->device == pdev->device) &&
+		    (_pcidev->bus->number == pdev->bus->number) &&
+		    (_pcidev->devfn == match_lo || _pcidev->devfn == match_hi) ) {
+			/* Paranoia checks */
+			if (ioc->alt_ioc != NULL) {
+				printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n",
+						ioc->name, ioc->alt_ioc->name);
+				break;
+			} else if (ioc_srch->alt_ioc != NULL) {
+				printk(KERN_WARNING MYNAM ": Oops, already bound (%s <==> %s)!\n",
+						ioc_srch->name, ioc_srch->alt_ioc->name);
+				break;
+			}
+			dprintk((KERN_INFO MYNAM ": FOUND! binding %s <==> %s\n",
+					ioc->name, ioc_srch->name));
+			ioc_srch->alt_ioc = ioc;
+			ioc->alt_ioc = ioc_srch;
+			break;
+		}
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_adapter_disable - Disable misbehaving MPT adapter.
+ *	@ioc: Pointer to MPT adapter structure
+ */
+static void
+mpt_adapter_disable(MPT_ADAPTER *ioc)
+{
+	int sz;
+	int ret;
+
+	if (ioc->cached_fw != NULL) {
+		ddlprintk((KERN_INFO MYNAM ": mpt_adapter_disable: Pushing FW onto adapter\n"));
+		if ((ret = mpt_downloadboot(ioc, NO_SLEEP)) < 0) {
+			printk(KERN_WARNING MYNAM
+				": firmware downloadboot failure (%d)!\n", ret);
+		}
+	}
+
+	/* Disable adapter interrupts! */
+	CHIPREG_WRITE32(&ioc->chip->IntMask, 0xFFFFFFFF);
+	ioc->active = 0;
+	/* Clear any lingering interrupt */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	if (ioc->alloc != NULL) {
+		sz = ioc->alloc_sz;
+		dexitprintk((KERN_INFO MYNAM ": %s.free  @ %p, sz=%d bytes\n",
+		 	ioc->name, ioc->alloc, ioc->alloc_sz));
+		pci_free_consistent(ioc->pcidev, sz,
+				ioc->alloc, ioc->alloc_dma);
+		ioc->reply_frames = NULL;
+		ioc->req_frames = NULL;
+		ioc->alloc = NULL;
+		ioc->alloc_total -= sz;
+	}
+
+	if (ioc->sense_buf_pool != NULL) {
+		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+		pci_free_consistent(ioc->pcidev, sz,
+				ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+		ioc->sense_buf_pool = NULL;
+		ioc->alloc_total -= sz;
+	}
+
+	if (ioc->events != NULL){
+		sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
+		kfree(ioc->events);
+		ioc->events = NULL;
+		ioc->alloc_total -= sz;
+	}
+
+	if (ioc->cached_fw != NULL) {
+		sz = ioc->facts.FWImageSize;
+		pci_free_consistent(ioc->pcidev, sz,
+			ioc->cached_fw, ioc->cached_fw_dma);
+		ioc->cached_fw = NULL;
+		ioc->alloc_total -= sz;
+	}
+
+	if (ioc->spi_data.nvram != NULL) {
+		kfree(ioc->spi_data.nvram);
+		ioc->spi_data.nvram = NULL;
+	}
+
+	if (ioc->spi_data.pIocPg3 != NULL) {
+		kfree(ioc->spi_data.pIocPg3);
+		ioc->spi_data.pIocPg3 = NULL;
+	}
+
+	if (ioc->spi_data.pIocPg4 != NULL) {
+		sz = ioc->spi_data.IocPg4Sz;
+		pci_free_consistent(ioc->pcidev, sz, 
+			ioc->spi_data.pIocPg4,
+			ioc->spi_data.IocPg4_dma);
+		ioc->spi_data.pIocPg4 = NULL;
+		ioc->alloc_total -= sz;
+	}
+
+	if (ioc->ReqToChain != NULL) {
+		kfree(ioc->ReqToChain);
+		kfree(ioc->RequestNB);
+		ioc->ReqToChain = NULL;
+	}
+
+	if (ioc->ChainToChain != NULL) {
+		kfree(ioc->ChainToChain);
+		ioc->ChainToChain = NULL;
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_adapter_dispose - Free all resources associated with an MPT
+ *	adapter.
+ *	@ioc: Pointer to MPT adapter structure
+ *
+ *	This routine unregisters h/w resources and frees all alloc'd memory
+ *	associated with a MPT adapter structure.
+ */
+static void
+mpt_adapter_dispose(MPT_ADAPTER *ioc)
+{
+	if (ioc != NULL) {
+		int sz_first, sz_last;
+
+		sz_first = ioc->alloc_total;
+
+		mpt_adapter_disable(ioc);
+
+		if (ioc->pci_irq != -1) {
+			free_irq(ioc->pci_irq, ioc);
+			ioc->pci_irq = -1;
+		}
+
+		if (ioc->memmap != NULL)
+			iounmap(ioc->memmap);
+
+#if defined(CONFIG_MTRR) && 0
+		if (ioc->mtrr_reg > 0) {
+			mtrr_del(ioc->mtrr_reg, 0, 0);
+			dprintk((KERN_INFO MYNAM ": %s: MTRR region de-registered\n", ioc->name));
+		}
+#endif
+
+		/*  Zap the adapter lookup ptr!  */
+		list_del(&ioc->list);
+
+		sz_last = ioc->alloc_total;
+		dprintk((KERN_INFO MYNAM ": %s: free'd %d of %d bytes\n",
+				ioc->name, sz_first-sz_last+(int)sizeof(*ioc), sz_first));
+		kfree(ioc);
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	MptDisplayIocCapabilities - Display IOC's capabilities.
+ *	@ioc: Pointer to MPT adapter structure
+ */
+static void
+MptDisplayIocCapabilities(MPT_ADAPTER *ioc)
+{
+	int i = 0;
+
+	printk(KERN_INFO "%s: ", ioc->name);
+	if (ioc->prod_name && strlen(ioc->prod_name) > 3)
+		printk("%s: ", ioc->prod_name+3);
+	printk("Capabilities={");
+
+	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_INITIATOR) {
+		printk("Initiator");
+		i++;
+	}
+
+	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
+		printk("%sTarget", i ? "," : "");
+		i++;
+	}
+
+	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
+		printk("%sLAN", i ? "," : "");
+		i++;
+	}
+
+#if 0
+	/*
+	 *  This would probably evoke more questions than it's worth
+	 */
+	if (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_TARGET) {
+		printk("%sLogBusAddr", i ? "," : "");
+		i++;
+	}
+#endif
+
+	printk("}\n");
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	MakeIocReady - Get IOC to a READY state, using KickStart if needed.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@force: Force hard KickStart of IOC
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	Returns:
+ *		 1 - DIAG reset and READY
+ *		 0 - READY initially OR soft reset and READY
+ *		-1 - Any failure on KickStart
+ *		-2 - Msg Unit Reset Failed
+ *		-3 - IO Unit Reset Failed
+ *		-4 - IOC owned by a PEER
+ */
+static int
+MakeIocReady(MPT_ADAPTER *ioc, int force, int sleepFlag)
+{
+	u32	 ioc_state;
+	int	 statefault = 0;
+	int	 cntdn;
+	int	 hard_reset_done = 0;
+	int	 r;
+	int	 ii;
+	int	 whoinit;
+
+	/* Get current [raw] IOC state  */
+	ioc_state = mpt_GetIocState(ioc, 0);
+	dhsprintk((KERN_INFO MYNAM "::MakeIocReady, %s [raw] state=%08x\n", ioc->name, ioc_state));
+
+	/*
+	 *	Check to see if IOC got left/stuck in doorbell handshake
+	 *	grip of death.  If so, hard reset the IOC.
+	 */
+	if (ioc_state & MPI_DOORBELL_ACTIVE) {
+		statefault = 1;
+		printk(MYIOC_s_WARN_FMT "Unexpected doorbell active!\n",
+				ioc->name);
+	}
+
+	/* Is it already READY? */
+	if (!statefault && (ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_READY) 
+		return 0;
+
+	/*
+	 *	Check to see if IOC is in FAULT state.
+	 */
+	if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_FAULT) {
+		statefault = 2;
+		printk(MYIOC_s_WARN_FMT "IOC is in FAULT state!!!\n",
+				ioc->name);
+		printk(KERN_WARNING "           FAULT code = %04xh\n",
+				ioc_state & MPI_DOORBELL_DATA_MASK);
+	}
+
+	/*
+	 *	Hmmm...  Did it get left operational?
+	 */
+	if ((ioc_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL) {
+		dinitprintk((MYIOC_s_WARN_FMT "IOC operational unexpected\n",
+				ioc->name));
+
+		/* Check WhoInit.
+		 * If PCI Peer, exit.
+		 * Else, if no fault conditions are present, issue a MessageUnitReset
+		 * Else, fall through to KickStart case
+		 */
+		whoinit = (ioc_state & MPI_DOORBELL_WHO_INIT_MASK) >> MPI_DOORBELL_WHO_INIT_SHIFT;
+		dprintk((KERN_WARNING MYNAM
+			": whoinit 0x%x statefault %d force %d\n",
+			whoinit, statefault, force));
+		if (whoinit == MPI_WHOINIT_PCI_PEER)
+			return -4;
+		else {
+			if ((statefault == 0 ) && (force == 0)) {
+				if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) == 0)
+					return 0;
+			}
+			statefault = 3;
+		}
+	}
+
+	hard_reset_done = KickStart(ioc, statefault||force, sleepFlag);
+	if (hard_reset_done < 0)
+		return -1;
+
+	/*
+	 *  Loop here waiting for IOC to come READY.
+	 */
+	ii = 0;
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15;	/* 15 seconds */
+
+	while ((ioc_state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
+		if (ioc_state == MPI_IOC_STATE_OPERATIONAL) {
+			/*
+			 *  BIOS or previous driver load left IOC in OP state.
+			 *  Reset messaging FIFOs.
+			 */
+			if ((r = SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag)) != 0) {
+				printk(MYIOC_s_ERR_FMT "IOC msg unit reset failed!\n", ioc->name);
+				return -2;
+			}
+		} else if (ioc_state == MPI_IOC_STATE_RESET) {
+			/*
+			 *  Something is wrong.  Try to get IOC back
+			 *  to a known state.
+			 */
+			if ((r = SendIocReset(ioc, MPI_FUNCTION_IO_UNIT_RESET, sleepFlag)) != 0) {
+				printk(MYIOC_s_ERR_FMT "IO unit reset failed!\n", ioc->name);
+				return -3;
+			}
+		}
+
+		ii++; cntdn--;
+		if (!cntdn) {
+			printk(MYIOC_s_ERR_FMT "Wait IOC_READY state timeout(%d)!\n",
+					ioc->name, (int)((ii+5)/HZ));
+			return -ETIME;
+		}
+
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible(1);
+		} else {
+			mdelay (1);	/* 1 msec delay */
+		}
+
+	}
+
+	if (statefault < 3) {
+		printk(MYIOC_s_INFO_FMT "Recovered from %s\n",
+				ioc->name,
+				statefault==1 ? "stuck handshake" : "IOC FAULT");
+	}
+
+	return hard_reset_done;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_GetIocState - Get the current state of an MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@cooked: Request raw or cooked IOC state
+ *
+ *	Returns all IOC Doorbell register bits if cooked==0, else just the
+ *	Doorbell bits in MPI_IOC_STATE_MASK.
+ */
+u32
+mpt_GetIocState(MPT_ADAPTER *ioc, int cooked)
+{
+	u32 s, sc;
+
+	/*  Get!  */
+	s = CHIPREG_READ32(&ioc->chip->Doorbell);
+//	dprintk((MYIOC_s_INFO_FMT "raw state = %08x\n", ioc->name, s));
+	sc = s & MPI_IOC_STATE_MASK;
+
+	/*  Save!  */
+	ioc->last_state = sc;
+
+	return cooked ? sc : s;
+}
+
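+/*
+ *	Illustrative sketch (hypothetical, not part of mptbase): reading the
+ *	cooked state to test for FAULT, then the raw doorbell value for the
+ *	fault code, mirroring the checks made in MakeIocReady() above.
+ */
+#if 0
+static void
+mydrv_check_fault(MPT_ADAPTER *ioc)
+{
+	u32 cooked = mpt_GetIocState(ioc, 1);
+
+	if (cooked == MPI_IOC_STATE_FAULT) {
+		u32 raw = mpt_GetIocState(ioc, 0);
+		printk(MYIOC_s_WARN_FMT "FAULT code = %04xh\n",
+				ioc->name, raw & MPI_DOORBELL_DATA_MASK);
+	}
+}
+#endif
+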
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	GetIocFacts - Send IOCFacts request to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Specifies whether the process can sleep
+ *	@reason: If recovery, only update facts.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+GetIocFacts(MPT_ADAPTER *ioc, int sleepFlag, int reason)
+{
+	IOCFacts_t		 get_facts;
+	IOCFactsReply_t		*facts;
+	int			 r;
+	int			 req_sz;
+	int			 reply_sz;
+	int			 sz;
+	u32			 status, vv;
+	u8			 shiftFactor=1;
+
+	/* IOC *must* NOT be in RESET state! */
+	if (ioc->last_state == MPI_IOC_STATE_RESET) {
+		printk(KERN_ERR MYNAM ": ERROR - Can't get IOCFacts, %s NOT READY! (%08x)\n",
+				ioc->name,
+				ioc->last_state );
+		return -44;
+	}
+
+	facts = &ioc->facts;
+
+	/* Destination (reply area)... */
+	reply_sz = sizeof(*facts);
+	memset(facts, 0, reply_sz);
+
+	/* Request area (get_facts on the stack right now!) */
+	req_sz = sizeof(get_facts);
+	memset(&get_facts, 0, req_sz);
+
+	get_facts.Function = MPI_FUNCTION_IOC_FACTS;
+	/* Assert: All other get_facts fields are zero! */
+
+	dinitprintk((MYIOC_s_INFO_FMT 
+	    "Sending get IocFacts request req_sz=%d reply_sz=%d\n", 
+	    ioc->name, req_sz, reply_sz));
+
+	/* No non-zero fields in the get_facts request are greater than
+	 * 1 byte in size, so we can just fire it off as is.
+	 */
+	r = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_facts,
+			reply_sz, (u16*)facts, 5 /*seconds*/, sleepFlag);
+	if (r != 0)
+		return r;
+
+	/*
+	 * Now byte swap (GRRR) the necessary fields before any further
+	 * inspection of reply contents.
+	 *
+	 * But need to do some sanity checks on MsgLength (byte) field
+	 * to make sure we don't zero IOC's req_sz!
+	 */
+	/* Did we get a valid reply? */
+	if (facts->MsgLength > offsetof(IOCFactsReply_t, RequestFrameSize)/sizeof(u32)) {
+		if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+			/*
+			 * If we haven't been here before, save off the first WhoInit value
+			 */
+			if (ioc->FirstWhoInit == WHOINIT_UNKNOWN)
+				ioc->FirstWhoInit = facts->WhoInit;
+		}
+
+		facts->MsgVersion = le16_to_cpu(facts->MsgVersion);
+		facts->MsgContext = le32_to_cpu(facts->MsgContext);
+		facts->IOCExceptions = le16_to_cpu(facts->IOCExceptions);
+		facts->IOCStatus = le16_to_cpu(facts->IOCStatus);
+		facts->IOCLogInfo = le32_to_cpu(facts->IOCLogInfo);
+		status = facts->IOCStatus & MPI_IOCSTATUS_MASK;
+		/* CHECKME! IOCStatus, IOCLogInfo */
+
+		facts->ReplyQueueDepth = le16_to_cpu(facts->ReplyQueueDepth);
+		facts->RequestFrameSize = le16_to_cpu(facts->RequestFrameSize);
+
+		/*
+		 * FC f/w version changed between 1.1 and 1.2
+		 *	Old: u16{Major(4),Minor(4),SubMinor(8)}
+		 *	New: u32{Major(8),Minor(8),Unit(8),Dev(8)}
+		 */
+		if (facts->MsgVersion < 0x0102) {
+			/*
+			 *	Handle old FC f/w style, convert to new...
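+			 *	e.g. (hypothetical value) oldv = 0x1234, i.e. old
+			 *	{Major=1, Minor=2, SubMinor=0x34}, becomes
+			 *	FWVersion.Word = 0x01023400.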
+			 */
+			u16	 oldv = le16_to_cpu(facts->Reserved_0101_FWVersion);
+			facts->FWVersion.Word =
+					((oldv<<12) & 0xFF000000) |
+					((oldv<<8)  & 0x000FFF00);
+		} else
+			facts->FWVersion.Word = le32_to_cpu(facts->FWVersion.Word);
+
+		facts->ProductID = le16_to_cpu(facts->ProductID);
+		facts->CurrentHostMfaHighAddr =
+				le32_to_cpu(facts->CurrentHostMfaHighAddr);
+		facts->GlobalCredits = le16_to_cpu(facts->GlobalCredits);
+		facts->CurrentSenseBufferHighAddr =
+				le32_to_cpu(facts->CurrentSenseBufferHighAddr);
+		facts->CurReplyFrameSize =
+				le16_to_cpu(facts->CurReplyFrameSize);
+
+		/*
+		 * Handle NEW (!) IOCFactsReply fields in MPI-1.01.xx
+		 * Older MPI-1.00.xx struct had 13 dwords, and enlarged
+		 * to 14 in MPI-1.01.0x.
+		 */
+		if (facts->MsgLength >= (offsetof(IOCFactsReply_t,FWImageSize) + 7)/4 &&
+		    facts->MsgVersion > 0x0100) {
+			facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
+		}
+
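+		/* Round FWImageSize up to the next multiple of 4 bytes */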
+		sz = facts->FWImageSize;
+		if ( sz & 0x01 )
+			sz += 1;
+		if ( sz & 0x02 )
+			sz += 2;
+		facts->FWImageSize = sz;
+		
+		if (!facts->RequestFrameSize) {
+			/*  Something is wrong!  */
+			printk(MYIOC_s_ERR_FMT "IOC reported invalid 0 request size!\n",
+					ioc->name);
+			return -55;
+		}
+
+		r = sz = le32_to_cpu(facts->BlockSize);
+		vv = ((63 / (sz * 4)) + 1) & 0x03;
+		ioc->NB_for_64_byte_frame = vv;
+		while ( sz )
+		{
+			shiftFactor++;
+			sz = sz >> 1;
+		}
+		ioc->NBShiftFactor  = shiftFactor;
+		dinitprintk((MYIOC_s_INFO_FMT "NB_for_64_byte_frame=%x NBShiftFactor=%x BlockSize=%x\n",
+					ioc->name, vv, shiftFactor, r));
+    
+		if (reason == MPT_HOSTEVENT_IOC_BRINGUP) {
+			/*
+			 * Set values for this IOC's request & reply frame sizes,
+			 * and request & reply queue depths...
+			 */
+			ioc->req_sz = min(MPT_DEFAULT_FRAME_SIZE, facts->RequestFrameSize * 4);
+			ioc->req_depth = min_t(int, MPT_MAX_REQ_DEPTH, facts->GlobalCredits);
+			ioc->reply_sz = MPT_REPLY_FRAME_SIZE;
+			ioc->reply_depth = min_t(int, MPT_DEFAULT_REPLY_DEPTH, facts->ReplyQueueDepth);
+
+			dinitprintk((MYIOC_s_INFO_FMT "reply_sz=%3d, reply_depth=%4d\n",
+				ioc->name, ioc->reply_sz, ioc->reply_depth));
+			dinitprintk((MYIOC_s_INFO_FMT "req_sz  =%3d, req_depth  =%4d\n",
+				ioc->name, ioc->req_sz, ioc->req_depth));
+
+			/* Get port facts! */
+			if ( (r = GetPortFacts(ioc, 0, sleepFlag)) != 0 )
+				return r;
+		}
+	} else {
+		printk(MYIOC_s_ERR_FMT 
+		     "Invalid IOC facts reply, msgLength=%d offsetof=%zd!\n",
+		     ioc->name, facts->MsgLength, (offsetof(IOCFactsReply_t,
+		     RequestFrameSize)/sizeof(u32)));
+		return -66;
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	GetPortFacts - Send PortFacts request to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@portnum: Port number
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+GetPortFacts(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
+{
+	PortFacts_t		 get_pfacts;
+	PortFactsReply_t	*pfacts;
+	int			 ii;
+	int			 req_sz;
+	int			 reply_sz;
+
+	/* IOC *must* NOT be in RESET state! */
+	if (ioc->last_state == MPI_IOC_STATE_RESET) {
+		printk(KERN_ERR MYNAM ": ERROR - Can't get PortFacts, %s NOT READY! (%08x)\n",
+				ioc->name,
+				ioc->last_state );
+		return -4;
+	}
+
+	pfacts = &ioc->pfacts[portnum];
+
+	/* Destination (reply area)...  */
+	reply_sz = sizeof(*pfacts);
+	memset(pfacts, 0, reply_sz);
+
+	/* Request area (get_pfacts on the stack right now!) */
+	req_sz = sizeof(get_pfacts);
+	memset(&get_pfacts, 0, req_sz);
+
+	get_pfacts.Function = MPI_FUNCTION_PORT_FACTS;
+	get_pfacts.PortNumber = portnum;
+	/* Assert: All other get_pfacts fields are zero! */
+
+	dinitprintk((MYIOC_s_INFO_FMT "Sending get PortFacts(%d) request\n",
+			ioc->name, portnum));
+
+	/* No non-zero fields in the get_pfacts request are greater than
+	 * 1 byte in size, so we can just fire it off as is.
+	 */
+	ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&get_pfacts,
+				reply_sz, (u16*)pfacts, 5 /*seconds*/, sleepFlag);
+	if (ii != 0)
+		return ii;
+
+	/* Did we get a valid reply? */
+
+	/* Now byte swap the necessary fields in the response. */
+	pfacts->MsgContext = le32_to_cpu(pfacts->MsgContext);
+	pfacts->IOCStatus = le16_to_cpu(pfacts->IOCStatus);
+	pfacts->IOCLogInfo = le32_to_cpu(pfacts->IOCLogInfo);
+	pfacts->MaxDevices = le16_to_cpu(pfacts->MaxDevices);
+	pfacts->PortSCSIID = le16_to_cpu(pfacts->PortSCSIID);
+	pfacts->ProtocolFlags = le16_to_cpu(pfacts->ProtocolFlags);
+	pfacts->MaxPostedCmdBuffers = le16_to_cpu(pfacts->MaxPostedCmdBuffers);
+	pfacts->MaxPersistentIDs = le16_to_cpu(pfacts->MaxPersistentIDs);
+	pfacts->MaxLanBuckets = le16_to_cpu(pfacts->MaxLanBuckets);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	SendIocInit - Send IOCInit request to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	Send IOCInit followed by PortEnable to bring IOC to OPERATIONAL state.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+SendIocInit(MPT_ADAPTER *ioc, int sleepFlag)
+{
+	IOCInit_t		 ioc_init;
+	MPIDefaultReply_t	 init_reply;
+	u32			 state;
+	int			 r;
+	int			 count;
+	int			 cntdn;
+
+	memset(&ioc_init, 0, sizeof(ioc_init));
+	memset(&init_reply, 0, sizeof(init_reply));
+
+	ioc_init.WhoInit = MPI_WHOINIT_HOST_DRIVER;
+	ioc_init.Function = MPI_FUNCTION_IOC_INIT;
+
+	/* If we are in a recovery mode and we uploaded the FW image,
+	 * then this pointer is not NULL. Skip the upload a second time.
+	 * Set this flag if cached_fw set for either IOC.
+	 */
+	if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
+		ioc->upload_fw = 1;
+	else
+		ioc->upload_fw = 0;
+	ddlprintk((MYIOC_s_INFO_FMT "upload_fw %d facts.Flags=%x\n",
+		   ioc->name, ioc->upload_fw, ioc->facts.Flags));
+
+	if (ioc->bus_type == FC)
+		ioc_init.MaxDevices = MPT_MAX_FC_DEVICES;
+	else
+		ioc_init.MaxDevices = MPT_MAX_SCSI_DEVICES;
+	
+	ioc_init.MaxBuses = MPT_MAX_BUS;
+
+	ioc_init.ReplyFrameSize = cpu_to_le16(ioc->reply_sz);	/* in BYTES */
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		/* Save the upper 32-bits of the request
+		 * (reply) and sense buffers.
+		 */
+		ioc_init.HostMfaHighAddr = cpu_to_le32((u32)((u64)ioc->alloc_dma >> 32));
+		ioc_init.SenseBufferHighAddr = cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
+	} else {
+		/* Force 32-bit addressing */
+		ioc_init.HostMfaHighAddr = cpu_to_le32(0);
+		ioc_init.SenseBufferHighAddr = cpu_to_le32(0);
+	}
+		
+	ioc->facts.CurrentHostMfaHighAddr = ioc_init.HostMfaHighAddr;
+	ioc->facts.CurrentSenseBufferHighAddr = ioc_init.SenseBufferHighAddr;
+
+	dhsprintk((MYIOC_s_INFO_FMT "Sending IOCInit (req @ %p)\n",
+			ioc->name, &ioc_init));
+
+	r = mpt_handshake_req_reply_wait(ioc, sizeof(IOCInit_t), (u32*)&ioc_init,
+				sizeof(MPIDefaultReply_t), (u16*)&init_reply, 10 /*seconds*/, sleepFlag);
+	if (r != 0)
+		return r;
+
+	/* No need to byte swap the multibyte fields in the reply
+	 * since we don't even look at its contents.
+	 */
+
+	dhsprintk((MYIOC_s_INFO_FMT "Sending PortEnable (req @ %p)\n",
+			ioc->name, &ioc_init));
+	
+	if ((r = SendPortEnable(ioc, 0, sleepFlag)) != 0)
+		return r;
+
+	/* YIKES!  SUPER IMPORTANT!!!
+	 *  Poll IocState until _OPERATIONAL while IOC is doing
+	 *  LoopInit and TargetDiscovery!
+	 */
+	count = 0;
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 60;	/* 60 seconds */
+	state = mpt_GetIocState(ioc, 1);
+	while (state != MPI_IOC_STATE_OPERATIONAL && --cntdn) {
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible(1);
+		} else {
+			mdelay(1);
+		}
+
+		if (!cntdn) {
+			printk(MYIOC_s_ERR_FMT "Wait IOC_OP state timeout(%d)!\n",
+					ioc->name, (int)((count+5)/HZ));
+			return -9;
+		}
+
+		state = mpt_GetIocState(ioc, 1);
+		count++;
+	}
+	dhsprintk((MYIOC_s_INFO_FMT "INFO - Wait IOC_OPERATIONAL state (cnt=%d)\n",
+			ioc->name, count));
+
+	return r;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	SendPortEnable - Send PortEnable request to MPT adapter port.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@portnum: Port number to enable
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	Send PortEnable to bring IOC to OPERATIONAL state.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+SendPortEnable(MPT_ADAPTER *ioc, int portnum, int sleepFlag)
+{
+	PortEnable_t		 port_enable;
+	MPIDefaultReply_t	 reply_buf;
+	int	 ii;
+	int	 req_sz;
+	int	 reply_sz;
+
+	/*  Destination...  */
+	reply_sz = sizeof(MPIDefaultReply_t);
+	memset(&reply_buf, 0, reply_sz);
+
+	req_sz = sizeof(PortEnable_t);
+	memset(&port_enable, 0, req_sz);
+
+	port_enable.Function = MPI_FUNCTION_PORT_ENABLE;
+	port_enable.PortNumber = portnum;
+/*	port_enable.ChainOffset = 0;		*/
+/*	port_enable.MsgFlags = 0;		*/
+/*	port_enable.MsgContext = 0;		*/
+
+	dinitprintk((MYIOC_s_INFO_FMT "Sending Port(%d)Enable (req @ %p)\n",
+			ioc->name, portnum, &port_enable));
+
+	/* RAID FW may take a long time to enable
+	 */
+	if (ioc->bus_type == FC) {
+		ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+				reply_sz, (u16*)&reply_buf, 65 /*seconds*/, sleepFlag);
+	} else {
+		ii = mpt_handshake_req_reply_wait(ioc, req_sz, (u32*)&port_enable,
+				reply_sz, (u16*)&reply_buf, 300 /*seconds*/, sleepFlag);
+	}
+
+	if (ii != 0)
+		return ii;
+
+	/* We do not even look at the reply, so we need not
+	 * swap the multi-byte fields.
+	 */
+
+	return 0;
+}
+
+/*
+ *	mpt_alloc_fw_memory - Allocate memory for caching the FW image
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@size: Total FW image size in bytes
+ */
+void
+mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size)
+{
+	if (ioc->cached_fw)
+		return;  /* use already allocated memory */
+	if (ioc->alt_ioc && ioc->alt_ioc->cached_fw) {
+		ioc->cached_fw = ioc->alt_ioc->cached_fw;  /* use alt_ioc's memory */
+		ioc->cached_fw_dma = ioc->alt_ioc->cached_fw_dma;
+	} else {
+		if ( (ioc->cached_fw = pci_alloc_consistent(ioc->pcidev, size, &ioc->cached_fw_dma) ) )
+			ioc->alloc_total += size;
+	}
+}
+/*
+ *	mpt_free_fw_memory - Free the cached FW image buffer
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Frees the DMA buffer allocated by mpt_alloc_fw_memory() and clears
+ *	the ioc->cached_fw pointer.
+ */
+void
+mpt_free_fw_memory(MPT_ADAPTER *ioc)
+{
+	int sz;
+
+	sz = ioc->facts.FWImageSize;
+	dinitprintk((KERN_WARNING MYNAM "free_fw_memory: FW Image  @ %p[%p], sz=%d[%x] bytes\n",
+		 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
+	pci_free_consistent(ioc->pcidev, sz,
+			ioc->cached_fw, ioc->cached_fw_dma);
+	ioc->cached_fw = NULL;
+
+	return;
+}
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_do_upload - Construct and Send FWUpload request to MPT adapter port.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	Returns 0 for success, >0 for handshake failure,
+ *		<0 for fw upload failure.
+ *
+ *	Remark: If there is a bound IOC and a successful FWUpload was
+ *	already performed on it, the second image is discarded and its
+ *	memory is freed. Both channels must upload to prevent the IOC
+ *	from running in degraded mode.
+ */
+static int
+mpt_do_upload(MPT_ADAPTER *ioc, int sleepFlag)
+{
+	u8			 request[ioc->req_sz];
+	u8			 reply[sizeof(FWUploadReply_t)];
+	FWUpload_t		*prequest;
+	FWUploadReply_t		*preply;
+	FWUploadTCSGE_t		*ptcsge;
+	int			 sgeoffset;
+	u32			 flagsLength;
+	int			 ii, sz, reply_sz;
+	int			 cmdStatus;
+
+	/* If the image size is 0, we are done.
+	 */
+	if ((sz = ioc->facts.FWImageSize) == 0)
+		return 0;
+
+	mpt_alloc_fw_memory(ioc, sz);
+
+	dinitprintk((KERN_WARNING MYNAM ": FW Image  @ %p[%p], sz=%d[%x] bytes\n",
+		 ioc->cached_fw, (void *)(ulong)ioc->cached_fw_dma, sz, sz));
+	
+	if (ioc->cached_fw == NULL) {
+		/* Major Failure.
+		 */
+		return -ENOMEM;
+	}
+
+	prequest = (FWUpload_t *)&request;
+	preply = (FWUploadReply_t *)&reply;
+
+	/*  Destination...  */
+	memset(prequest, 0, ioc->req_sz);
+
+	reply_sz = sizeof(reply);
+	memset(preply, 0, reply_sz);
+
+	prequest->ImageType = MPI_FW_UPLOAD_ITYPE_FW_IOC_MEM;
+	prequest->Function = MPI_FUNCTION_FW_UPLOAD;
+
+	ptcsge = (FWUploadTCSGE_t *) &prequest->SGL;
+	ptcsge->DetailsLength = 12;
+	ptcsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
+	ptcsge->ImageSize = cpu_to_le32(sz);
+
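+	/* The simple SGE for the host buffer is placed immediately after
+	 * the transaction context element embedded in the request's SGL.
+	 */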
+	sgeoffset = sizeof(FWUpload_t) - sizeof(SGE_MPI_UNION) + sizeof(FWUploadTCSGE_t);
+
+	flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | sz;
+	mpt_add_sge(&request[sgeoffset], flagsLength, ioc->cached_fw_dma);
+
+	sgeoffset += sizeof(u32) + sizeof(dma_addr_t);
+	dinitprintk((KERN_WARNING MYNAM "Sending FW Upload (req @ %p) sgeoffset=%d \n",
+			prequest, sgeoffset));
+	DBG_DUMP_FW_REQUEST_FRAME(prequest)
+
+	ii = mpt_handshake_req_reply_wait(ioc, sgeoffset, (u32*)prequest,
+				reply_sz, (u16*)preply, 65 /*seconds*/, sleepFlag);
+
+	dinitprintk((KERN_WARNING MYNAM "FW Upload completed rc=%x \n", ii));
+
+	cmdStatus = -EFAULT;
+	if (ii == 0) {
+		/* Handshake transfer was complete and successful.
+		 * Check the Reply Frame.
+		 */
+		int status, transfer_sz;
+		status = le16_to_cpu(preply->IOCStatus);
+		if (status == MPI_IOCSTATUS_SUCCESS) {
+			transfer_sz = le32_to_cpu(preply->ActualImageSize);
+			if (transfer_sz == sz)
+				cmdStatus = 0;
+		}
+	}
+	dinitprintk((MYIOC_s_INFO_FMT ": do_upload status %d \n",
+			ioc->name, cmdStatus));
+
+	
+	if (cmdStatus) {
+
+		ddlprintk((MYIOC_s_INFO_FMT ": fw upload failed, freeing image \n",
+			ioc->name));
+		mpt_free_fw_memory(ioc);
+	}
+
+	return cmdStatus;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_downloadboot - DownloadBoot code
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	FwDownloadBoot requires Programmed IO access.
+ *
+ *	Returns 0 for success
+ *		-1 FW Image size is 0
+ *		-2 No valid cached_fw Pointer
+ *		<0 for other firmware download failure.
+ */
+static int
+mpt_downloadboot(MPT_ADAPTER *ioc, int sleepFlag)
+{
+	MpiFwHeader_t		*pFwHeader;
+	MpiExtImageHeader_t	*pExtImage;
+	u32			 fwSize;
+	u32			 diag0val;
+	int			 count;
+	u32			*ptrFw;
+	u32			 diagRwData;
+	u32			 nextImage;
+	u32			 load_addr;
+	u32 			 ioc_state=0;
+
+	ddlprintk((MYIOC_s_INFO_FMT "downloadboot: fw size 0x%x, ioc FW Ptr %p\n",
+				ioc->name, ioc->facts.FWImageSize, ioc->cached_fw));
+
+	if ( ioc->facts.FWImageSize == 0 )
+		return -1;
+
+	if (ioc->cached_fw == NULL)
+		return -2;
+
+	/* prevent a second downloadboot and memory free with alt_ioc */
+	if (ioc->alt_ioc && ioc->alt_ioc->cached_fw)
+		ioc->alt_ioc->cached_fw = NULL;
+	
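+	/* Write the key sequence to the WriteSequence register to enable
+	 * access to the Diagnostic register.
+	 */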
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
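+	/* Hold off the IOC boot and disable its ARM processor while the
+	 * new firmware image is written.
+	 */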
+	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM));
+
+	/* wait 1 msec */
+	if (sleepFlag == CAN_SLEEP) {
+		msleep_interruptible(1);
+	} else {
+		mdelay (1);
+	}
+
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
+
+	for (count = 0; count < 30; count ++) {
+		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+		if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
+			ddlprintk((MYIOC_s_INFO_FMT "RESET_ADAPTER cleared, count=%d\n",
+				ioc->name, count));
+			break;
+		}
+		/* wait 1 sec */
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible (1000);
+		} else {
+			mdelay (1000);
+		}
+	}
+
+	if ( count == 30 ) {
+		ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! Unable to RESET_ADAPTER diag0val=%x\n",
+		ioc->name, diag0val));
+		return -3;
+	}
+
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+	/* Set the DiagRwEn and Disable ARM bits */
+	CHIPREG_WRITE32(&ioc->chip->Diagnostic, (MPI_DIAG_RW_ENABLE | MPI_DIAG_DISABLE_ARM));
+
+	pFwHeader = (MpiFwHeader_t *) ioc->cached_fw;
+	fwSize = (pFwHeader->ImageSize + 3)/4;
+	ptrFw = (u32 *) pFwHeader;
+
+	/* Write the LoadStartAddress to the DiagRw Address Register
+	 * using Programmed IO
+	 */
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->LoadStartAddress);
+	ddlprintk((MYIOC_s_INFO_FMT "LoadStart addr written 0x%x \n",
+		ioc->name, pFwHeader->LoadStartAddress));
+
+	ddlprintk((MYIOC_s_INFO_FMT "Write FW Image: 0x%x bytes @ %p\n",
+				ioc->name, fwSize*4, ptrFw));
+	while (fwSize--) {
+		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
+	}
+
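+	/* Download any extended images chained after the primary image. */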
+	nextImage = pFwHeader->NextImageHeaderOffset;
+	while (nextImage) {
+		pExtImage = (MpiExtImageHeader_t *) ((char *)pFwHeader + nextImage);
+
+		load_addr = pExtImage->LoadStartAddress;
+
+		fwSize = (pExtImage->ImageSize + 3) >> 2;
+		ptrFw = (u32 *)pExtImage;
+
+		ddlprintk((MYIOC_s_INFO_FMT "Write Ext Image: 0x%x bytes @ %p load_addr=%x\n",
+						ioc->name, fwSize*4, ptrFw, load_addr));
+		CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, load_addr);
+
+		while (fwSize--) {
+			CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, *ptrFw++);
+		}
+		nextImage = pExtImage->NextImageHeaderOffset;
+	}
+
+	/* Write the IopResetVectorRegAddr */
+	ddlprintk((MYIOC_s_INFO_FMT "Write IopResetVector Addr=%x! \n", ioc->name, 	pFwHeader->IopResetRegAddr));
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, pFwHeader->IopResetRegAddr);
+
+	/* Write the IopResetVectorValue */
+	ddlprintk((MYIOC_s_INFO_FMT "Write IopResetVector Value=%x! \n", ioc->name, pFwHeader->IopResetVectorValue));
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, pFwHeader->IopResetVectorValue);
+
+	/* Clear the internal flash bad bit - autoincrementing register,
+	 * so must do two writes.
+	 */
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+	diagRwData = CHIPREG_PIO_READ32(&ioc->pio_chip->DiagRwData);
+	diagRwData |= 0x4000000;
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwAddress, 0x3F000000);
+	CHIPREG_PIO_WRITE32(&ioc->pio_chip->DiagRwData, diagRwData);
+
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	ddlprintk((MYIOC_s_INFO_FMT "downloadboot diag0val=%x, turning off PREVENT_IOC_BOOT, DISABLE_ARM\n",
+		ioc->name, diag0val));
+	diag0val &= ~(MPI_DIAG_PREVENT_IOC_BOOT | MPI_DIAG_DISABLE_ARM);
+	ddlprintk((MYIOC_s_INFO_FMT "downloadboot now diag0val=%x\n",
+		ioc->name, diag0val));
+	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
+
+	/* Write 0xFF to reset the sequencer */
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+
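+	/* Poll until the IOC reports READY, then re-issue IOCInit. */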
+	for (count=0; count<HZ*20; count++) {
+		if ((ioc_state = mpt_GetIocState(ioc, 0)) & MPI_IOC_STATE_READY) {
+			ddlprintk((MYIOC_s_INFO_FMT "downloadboot successful! (count=%d) IocState=%x\n",
+					ioc->name, count, ioc_state));
+			if ((SendIocInit(ioc, sleepFlag)) != 0) {
+				ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit failed\n",
+					ioc->name));
+				return -EFAULT;
+			}
+			ddlprintk((MYIOC_s_INFO_FMT "downloadboot: SendIocInit successful\n",
+					ioc->name));
+			return 0;
+		}
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible (10);
+		} else {
+			mdelay (10);
+		}
+	}
+	ddlprintk((MYIOC_s_INFO_FMT "downloadboot failed! IocState=%x\n",
+		ioc->name, ioc_state));
+	return -EFAULT;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	KickStart - Perform hard reset of MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@force: Force hard reset
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	This routine places MPT adapter in diagnostic mode via the
+ *	WriteSequence register, and then performs a hard reset of adapter
+ *	via the Diagnostic register.
+ *
+ *	Inputs:   sleepFlag - CAN_SLEEP (non-interrupt thread)
+ *			or NO_SLEEP (interrupt thread, use mdelay)
+ *		  force - 1 if doorbell active, board in fault state,
+ *				board operational, IOC_RECOVERY, or
+ *				IOC_BRINGUP with an alt_ioc present;
+ *			  0 otherwise
+ *
+ *	Returns:
+ *		 1 - hard reset, READY	
+ *		 0 - no reset due to History bit, READY	
+ *		-1 - no reset due to History bit but not READY	
+ *		     OR reset but failed to come READY
+ *		-2 - no reset, could not enter DIAG mode
+ *		-3 - reset but bad FW bit
+ */
+static int
+KickStart(MPT_ADAPTER *ioc, int force, int sleepFlag)
+{
+	int hard_reset_done = 0;
+	u32 ioc_state=0;
+	int cnt,cntdn;
+
+	dinitprintk((KERN_WARNING MYNAM ": KickStarting %s!\n", ioc->name));
+	if (ioc->bus_type == SCSI) {
+		/* Always issue a Msg Unit Reset first. This will clear some
+		 * SCSI bus hang conditions.
+		 */
+		SendIocReset(ioc, MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET, sleepFlag);
+
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible (1000);
+		} else {
+			mdelay (1000);
+		}
+	}
+
+	hard_reset_done = mpt_diag_reset(ioc, force, sleepFlag);
+	if (hard_reset_done < 0)
+		return hard_reset_done;
+
+	dinitprintk((MYIOC_s_INFO_FMT "Diagnostic reset successful!\n",
+			ioc->name));
+
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 2;	/* 2 seconds */
+	for (cnt=0; cnt<cntdn; cnt++) {
+		ioc_state = mpt_GetIocState(ioc, 1);
+		if ((ioc_state == MPI_IOC_STATE_READY) || (ioc_state == MPI_IOC_STATE_OPERATIONAL)) {
+			dinitprintk((MYIOC_s_INFO_FMT "KickStart successful! (cnt=%d)\n",
+ 					ioc->name, cnt));
+			return hard_reset_done;
+		}
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible (10);
+		} else {
+			mdelay (10);
+		}
+	}
+
+	printk(MYIOC_s_ERR_FMT "Failed to come READY after reset! IocState=%x\n",
+			ioc->name, ioc_state);
+	return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_diag_reset - Perform hard reset of the adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@ignore: If set, ignore the reset history bit and always perform
+ *		the reset; if clear, skip the reset when the bit is set
+ *	@sleepflag: CAN_SLEEP if called in a non-interrupt thread,
+ *		else set to NO_SLEEP (use mdelay instead)
+ *
+ *	This routine places the adapter in diagnostic mode via the
+ *	WriteSequence register and then performs a hard reset of adapter
+ *	via the Diagnostic register. Adapter should be in ready state
+ *	upon successful completion.
+ *
+ *	Returns:  1  hard reset successful
+ *		  0  no reset performed because reset history bit set
+ *		 -2  enabling diagnostic mode failed
+ *		 -3  diagnostic reset failed
+ */
+static int
+mpt_diag_reset(MPT_ADAPTER *ioc, int ignore, int sleepFlag)
+{
+	u32 diag0val;
+	u32 doorbell;
+	int hard_reset_done = 0;
+	int count = 0;
+#ifdef MPT_DEBUG
+	u32 diag1val = 0;
+#endif
+
+	/* Clear any existing interrupts */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	/* Use "Diagnostic reset" method! (only thing available!) */
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+
+#ifdef MPT_DEBUG
+	if (ioc->alt_ioc)
+		diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+	dprintk((MYIOC_s_INFO_FMT "DbG1: diag0=%08x, diag1=%08x\n",
+			ioc->name, diag0val, diag1val));
+#endif
+
+	/* Do the reset if we are told to ignore the reset history
+	 * or if the reset history is 0
+	 */
+	if (ignore || !(diag0val & MPI_DIAG_RESET_HISTORY)) {
+		while ((diag0val & MPI_DIAG_DRWE) == 0) {
+			/* Write magic sequence to WriteSequence register
+			 * Loop until in diagnostic mode
+			 */
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+			CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+			/* wait 100 msec */
+			if (sleepFlag == CAN_SLEEP) {
+				msleep_interruptible (100);
+			} else {
+				mdelay (100);
+			}
+
+			count++;
+			if (count > 20) {
+				printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
+						ioc->name, diag0val);
+				return -2;
+
+			}
+
+			diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+
+			dprintk((MYIOC_s_INFO_FMT "Wrote magic DiagWriteEn sequence (%x)\n",
+					ioc->name, diag0val));
+		}
+
+#ifdef MPT_DEBUG
+		if (ioc->alt_ioc)
+			diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+		dprintk((MYIOC_s_INFO_FMT "DbG2: diag0=%08x, diag1=%08x\n",
+				ioc->name, diag0val, diag1val));
+#endif
+		/*
+		 * Disable the ARM (Bug fix)
+		 *
+		 */
+		CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_DISABLE_ARM);
+		mdelay (1);
+
+		/*
+		 * Now hit the reset bit in the Diagnostic register
+		 * (THE BIG HAMMER!) (Clears DRWE bit).
+		 */
+		CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val | MPI_DIAG_RESET_ADAPTER);
+		hard_reset_done = 1;
+		dprintk((MYIOC_s_INFO_FMT "Diagnostic reset performed\n",
+				ioc->name));
+
+		/*
+		 * Call each currently registered protocol IOC reset handler
+		 * with pre-reset indication.
+		 * NOTE: If we're doing _IOC_BRINGUP, there can be no
+		 * MptResetHandlers[] registered yet.
+		 */
+		{
+			int	 ii;
+			int	 r = 0;
+
+			for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
+				if (MptResetHandlers[ii]) {
+					dprintk((MYIOC_s_INFO_FMT "Calling IOC pre_reset handler #%d\n",
+							ioc->name, ii));
+					r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_PRE_RESET);
+					if (ioc->alt_ioc) {
+						dprintk((MYIOC_s_INFO_FMT "Calling alt-%s pre_reset handler #%d\n",
+								ioc->name, ioc->alt_ioc->name, ii));
+						r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_PRE_RESET);
+					}
+				}
+			}
+			/* FIXME?  Examine results here? */
+		}
+
+		if (ioc->cached_fw) {
+			/* If the DownloadBoot operation fails, the
+			 * IOC will be left unusable. This is a fatal error
+			 * case.  _diag_reset will return < 0
+			 */
+			for (count = 0; count < 30; count ++) {
+				diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+				if (!(diag0val & MPI_DIAG_RESET_ADAPTER)) {
+					break;
+				}
+
+				/* wait 1 sec */
+				if (sleepFlag == CAN_SLEEP) {
+					ssleep(1);
+				} else {
+					mdelay (1000);
+				}
+			}
+			if ((count = mpt_downloadboot(ioc, sleepFlag)) < 0) {
+				printk(KERN_WARNING MYNAM
+					": firmware downloadboot failure (%d)!\n", count);
+			}
+
+		} else {
+			/* Wait for FW to reload and for board
+			 * to go to the READY state.
+			 * Maximum wait is 60 seconds.
+			 * If it times out, no error is returned here;
+			 * the calling program checks the state again.
+			 */
+			for (count = 0; count < 60; count ++) {
+				doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
+				doorbell &= MPI_IOC_STATE_MASK;
+
+				if (doorbell == MPI_IOC_STATE_READY) {
+					break;
+				}
+
+				/* wait 1 sec */
+				if (sleepFlag == CAN_SLEEP) {
+					msleep_interruptible (1000);
+				} else {
+					mdelay (1000);
+				}
+			}
+		}
+	}
+
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+#ifdef MPT_DEBUG
+	if (ioc->alt_ioc)
+		diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+	dprintk((MYIOC_s_INFO_FMT "DbG3: diag0=%08x, diag1=%08x\n",
+		ioc->name, diag0val, diag1val));
+#endif
+
+	/* Clear RESET_HISTORY bit!  Place board in the
+	 * diagnostic mode to update the diag register.
+	 */
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	count = 0;
+	while ((diag0val & MPI_DIAG_DRWE) == 0) {
+		/* Write magic sequence to WriteSequence register
+		 * Loop until in diagnostic mode
+		 */
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFF);
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_1ST_KEY_VALUE);
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_2ND_KEY_VALUE);
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_3RD_KEY_VALUE);
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_4TH_KEY_VALUE);
+		CHIPREG_WRITE32(&ioc->chip->WriteSequence, MPI_WRSEQ_5TH_KEY_VALUE);
+
+		/* wait 100 msec */
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible (100);
+		} else {
+			mdelay (100);
+		}
+
+		count++;
+		if (count > 20) {
+			printk(MYIOC_s_ERR_FMT "Enable Diagnostic mode FAILED! (%02xh)\n",
+					ioc->name, diag0val);
+			break;
+		}
+		diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	}
+	diag0val &= ~MPI_DIAG_RESET_HISTORY;
+	CHIPREG_WRITE32(&ioc->chip->Diagnostic, diag0val);
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	if (diag0val & MPI_DIAG_RESET_HISTORY) {
+		printk(MYIOC_s_WARN_FMT "ResetHistory bit failed to clear!\n",
+				ioc->name);
+	}
+
+	/* Disable Diagnostic Mode
+	 */
+	CHIPREG_WRITE32(&ioc->chip->WriteSequence, 0xFFFFFFFF);
+
+	/* Check FW reload status flags.
+	 */
+	diag0val = CHIPREG_READ32(&ioc->chip->Diagnostic);
+	if (diag0val & (MPI_DIAG_FLASH_BAD_SIG | MPI_DIAG_RESET_ADAPTER | MPI_DIAG_DISABLE_ARM)) {
+		printk(MYIOC_s_ERR_FMT "Diagnostic reset FAILED! (%02xh)\n",
+				ioc->name, diag0val);
+		return -3;
+	}
+
+#ifdef MPT_DEBUG
+	if (ioc->alt_ioc)
+		diag1val = CHIPREG_READ32(&ioc->alt_ioc->chip->Diagnostic);
+	dprintk((MYIOC_s_INFO_FMT "DbG4: diag0=%08x, diag1=%08x\n",
+			ioc->name, diag0val, diag1val));
+#endif
+
+	/*
+	 * Reset flag that says we've enabled event notification
+	 */
+	ioc->facts.EventState = 0;
+
+	if (ioc->alt_ioc)
+		ioc->alt_ioc->facts.EventState = 0;
+
+	return hard_reset_done;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	SendIocReset - Send IOCReset request to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@reset_type: reset type, expected values are
+ *	%MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET or %MPI_FUNCTION_IO_UNIT_RESET
+ *
+ *	Send IOCReset request to the MPT adapter.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+SendIocReset(MPT_ADAPTER *ioc, u8 reset_type, int sleepFlag)
+{
+	int r;
+	u32 state;
+	int cntdn, count;
+
+	drsprintk((KERN_WARNING MYNAM ": %s: Sending IOC reset(0x%02x)!\n",
+			ioc->name, reset_type));
+	CHIPREG_WRITE32(&ioc->chip->Doorbell, reset_type<<MPI_DOORBELL_FUNCTION_SHIFT);
+	if ((r = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+		return r;
+
+	/* FW ACK'd request, wait for READY state
+	 */
+	count = 0;
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * 15;	/* 15 seconds */
+
+	while ((state = mpt_GetIocState(ioc, 1)) != MPI_IOC_STATE_READY) {
+		cntdn--;
+		count++;
+		if (!cntdn) {
+			if (sleepFlag != CAN_SLEEP)
+				count *= 10;
+
+			printk(KERN_ERR MYNAM ": %s: ERROR - Wait IOC_READY state timeout(%d)!\n",
+					ioc->name, (int)((count+5)/HZ));
+			return -ETIME;
+		}
+
+		if (sleepFlag == CAN_SLEEP) {
+			msleep_interruptible(1);
+		} else {
+			mdelay (1);	/* 1 msec delay */
+		}
+	}
+
+	/* TODO!
+	 *  Cleanup all event stuff for this IOC; re-issue EventNotification
+	 *  request if needed.
+	 */
+	if (ioc->facts.Function)
+		ioc->facts.EventState = 0;
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	initChainBuffers - Allocate memory for and initialize
+ *	chain buffers and chain buffer control arrays.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Returns the number of chain buffers on success, -1 on allocation failure.
+ */
+static int
+initChainBuffers(MPT_ADAPTER *ioc)
+{
+	u8		*mem;
+	int		sz, ii, num_chain;
+	int 		scale, num_sge, numSGE;
+
+	/* ReqToChain size must equal the req_depth
+	 * index = req_idx
+	 */
+	if (ioc->ReqToChain == NULL) {
+		sz = ioc->req_depth * sizeof(int);
+		mem = kmalloc(sz, GFP_ATOMIC);
+		if (mem == NULL)
+			return -1;
+
+		ioc->ReqToChain = (int *) mem;
+		dinitprintk((KERN_INFO MYNAM ": %s ReqToChain alloc  @ %p, sz=%d bytes\n",
+			 	ioc->name, mem, sz));
+		mem = kmalloc(sz, GFP_ATOMIC);
+		if (mem == NULL)
+			return -1;
+
+		ioc->RequestNB = (int *) mem;
+		dinitprintk((KERN_INFO MYNAM ": %s RequestNB alloc  @ %p, sz=%d bytes\n",
+			 	ioc->name, mem, sz));
+	}
+	for (ii = 0; ii < ioc->req_depth; ii++) {
+		ioc->ReqToChain[ii] = MPT_HOST_NO_CHAIN;
+	}
+
+	/* ChainToChain size must equal the total number
+	 * of chain buffers to be allocated.
+	 * index = chain_idx
+	 *
+	 * Calculate the number of chain buffers needed (plus 1) per I/O,
+	 * then multiply by the maximum number of simultaneous cmds.
+	 *
+	 * num_sge = num sge in request frame + last chain buffer
+	 * scale = num sge per chain buffer if no chain element
+	 */
+	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
+	if (sizeof(dma_addr_t) == sizeof(u64))
+		num_sge =  scale + (ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+	else
+		num_sge =  1+ scale + (ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		numSGE = (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
+			(ioc->req_sz - 60) / (sizeof(dma_addr_t) + sizeof(u32));
+	} else {
+		numSGE = 1 + (scale - 1) * (ioc->facts.MaxChainDepth-1) + scale +
+			(ioc->req_sz - 64) / (sizeof(dma_addr_t) + sizeof(u32));
+	}
+	dinitprintk((KERN_INFO MYNAM ": %s num_sge=%d numSGE=%d\n",
+		ioc->name, num_sge, numSGE));
+
+	if ( numSGE > MPT_SCSI_SG_DEPTH	)
+		numSGE = MPT_SCSI_SG_DEPTH;
+
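+	/* Each additional chain buffer contributes (scale - 1) usable SGEs;
+	 * count how many are needed to reach numSGE.
+	 */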
+	num_chain = 1;
+	while (numSGE - num_sge > 0) {
+		num_chain++;
+		num_sge += (scale - 1);
+	}
+	num_chain++;
+
+	dinitprintk((KERN_INFO MYNAM ": %s Now numSGE=%d num_sge=%d num_chain=%d\n",
+		ioc->name, numSGE, num_sge, num_chain));
+
+	if (ioc->bus_type == SCSI)
+		num_chain *= MPT_SCSI_CAN_QUEUE;
+	else
+		num_chain *= MPT_FC_CAN_QUEUE;
+
+	ioc->num_chain = num_chain;
+
+	sz = num_chain * sizeof(int);
+	if (ioc->ChainToChain == NULL) {
+		mem = kmalloc(sz, GFP_ATOMIC);
+		if (mem == NULL)
+			return -1;
+
+		ioc->ChainToChain = (int *) mem;
+		dinitprintk((KERN_INFO MYNAM ": %s ChainToChain alloc @ %p, sz=%d bytes\n",
+			 	ioc->name, mem, sz));
+	} else {
+		mem = (u8 *) ioc->ChainToChain;
+	}
+	memset(mem, 0xFF, sz);
+	return num_chain;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	PrimeIocFifos - Initialize IOC request and reply FIFOs.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	This routine allocates memory for the MPT reply and request frame
+ *	pools (if necessary), and primes the IOC reply FIFO with
+ *	reply frames.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+PrimeIocFifos(MPT_ADAPTER *ioc)
+{
+	MPT_FRAME_HDR *mf;
+	unsigned long flags;
+	dma_addr_t alloc_dma;
+	u8 *mem;
+	int i, reply_sz, sz, total_size, num_chain;
+
+	/*  Prime reply FIFO...  */
+
+	if (ioc->reply_frames == NULL) {
+		if ( (num_chain = initChainBuffers(ioc)) < 0)
+			return -1;
+
+		total_size = reply_sz = (ioc->reply_sz * ioc->reply_depth);
+		dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffer sz=%d bytes, ReplyDepth=%d\n",
+			 	ioc->name, ioc->reply_sz, ioc->reply_depth));
+		dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffer sz=%d[%x] bytes\n",
+			 	ioc->name, reply_sz, reply_sz));
+
+		sz = (ioc->req_sz * ioc->req_depth);
+		dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffer sz=%d bytes, RequestDepth=%d\n",
+			 	ioc->name, ioc->req_sz, ioc->req_depth));
+		dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffer sz=%d[%x] bytes\n",
+			 	ioc->name, sz, sz));
+		total_size += sz;
+
+		sz = num_chain * ioc->req_sz; /* chain buffer pool size */
+		dinitprintk((KERN_INFO MYNAM ": %s.ChainBuffer sz=%d bytes, ChainDepth=%d\n",
+			 	ioc->name, ioc->req_sz, num_chain));
+		dinitprintk((KERN_INFO MYNAM ": %s.ChainBuffer sz=%d[%x] bytes num_chain=%d\n",
+			 	ioc->name, sz, sz, num_chain));
+
+		total_size += sz;
+		mem = pci_alloc_consistent(ioc->pcidev, total_size, &alloc_dma);
+		if (mem == NULL) {
+			printk(MYIOC_s_ERR_FMT "Unable to allocate Reply, Request, Chain Buffers!\n",
+				ioc->name);
+			goto out_fail;
+		}
+
+		dinitprintk((KERN_INFO MYNAM ": %s.Total alloc @ %p[%p], sz=%d[%x] bytes\n",
+			 	ioc->name, mem, (void *)(ulong)alloc_dma, total_size, total_size));
+
+		memset(mem, 0, total_size);
+		ioc->alloc_total += total_size;
+		ioc->alloc = mem;
+		ioc->alloc_dma = alloc_dma;
+		ioc->alloc_sz = total_size;
+		ioc->reply_frames = (MPT_FRAME_HDR *) mem;
+		ioc->reply_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
+
+		alloc_dma += reply_sz;
+		mem += reply_sz;
+
+		/*  Request FIFO - WE manage this!  */
+
+		ioc->req_frames = (MPT_FRAME_HDR *) mem;
+		ioc->req_frames_dma = alloc_dma;
+
+		dinitprintk((KERN_INFO MYNAM ": %s.RequestBuffers @ %p[%p]\n",
+			 	ioc->name, mem, (void *)(ulong)alloc_dma));
+
+		ioc->req_frames_low_dma = (u32) (alloc_dma & 0xFFFFFFFF);
+
+#if defined(CONFIG_MTRR) && 0
+		/*
+		 *  Enable Write Combining MTRR for IOC's memory region.
+		 *  (at least as much as we can; "size and base must be
+		 *  multiples of 4 kiB")
+		 */
+		ioc->mtrr_reg = mtrr_add(ioc->req_frames_dma,
+					 sz,
+					 MTRR_TYPE_WRCOMB, 1);
+		dprintk((MYIOC_s_INFO_FMT "MTRR region registered (base:size=%08x:%x)\n",
+				ioc->name, ioc->req_frames_dma, sz));
+#endif
+
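+		/* Step past the request frame pool; the chain buffer pool
+		 * begins immediately after it.
+		 */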
+		for (i = 0; i < ioc->req_depth; i++) {
+			alloc_dma += ioc->req_sz;
+			mem += ioc->req_sz;
+		}
+
+		ioc->ChainBuffer = mem;
+		ioc->ChainBufferDMA = alloc_dma;
+
+		dinitprintk((KERN_INFO MYNAM " :%s.ChainBuffers @ %p(%p)\n",
+			ioc->name, ioc->ChainBuffer, (void *)(ulong)ioc->ChainBufferDMA));
+
+		/* Initialize the free chain Q.
+	 	*/
+
+		INIT_LIST_HEAD(&ioc->FreeChainQ);
+
+		/* Post the chain buffers to the FreeChainQ.
+	 	*/
+		mem = (u8 *)ioc->ChainBuffer;
+		for (i=0; i < num_chain; i++) {
+			mf = (MPT_FRAME_HDR *) mem;
+			list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeChainQ);
+			mem += ioc->req_sz;
+		}
+
+		/* Initialize Request frames linked list
+		 */
+		alloc_dma = ioc->req_frames_dma;
+		mem = (u8 *) ioc->req_frames;
+
+		spin_lock_irqsave(&ioc->FreeQlock, flags);
+		INIT_LIST_HEAD(&ioc->FreeQ);
+		for (i = 0; i < ioc->req_depth; i++) {
+			mf = (MPT_FRAME_HDR *) mem;
+
+			/*  Queue REQUESTs *internally*!  */
+			list_add_tail(&mf->u.frame.linkage.list, &ioc->FreeQ);
+
+			mem += ioc->req_sz;
+		}
+		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
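+		/* Allocate one sense buffer per outstanding request frame. */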
+		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+		ioc->sense_buf_pool =
+			pci_alloc_consistent(ioc->pcidev, sz, &ioc->sense_buf_pool_dma);
+		if (ioc->sense_buf_pool == NULL) {
+			printk(MYIOC_s_ERR_FMT "Unable to allocate Sense Buffers!\n",
+				ioc->name);
+			goto out_fail;
+		}
+
+		ioc->sense_buf_low_dma = (u32) (ioc->sense_buf_pool_dma & 0xFFFFFFFF);
+		ioc->alloc_total += sz;
+		dinitprintk((KERN_INFO MYNAM ": %s.SenseBuffers @ %p[%p]\n",
+ 			ioc->name, ioc->sense_buf_pool, (void *)(ulong)ioc->sense_buf_pool_dma));
+
+	}
+
+	/* Post Reply frames to FIFO
+	 */
+	alloc_dma = ioc->alloc_dma;
+	dinitprintk((KERN_INFO MYNAM ": %s.ReplyBuffers @ %p[%p]\n",
+	 	ioc->name, ioc->reply_frames, (void *)(ulong)alloc_dma));
+
+	for (i = 0; i < ioc->reply_depth; i++) {
+		/*  Write each address to the IOC!  */
+		CHIPREG_WRITE32(&ioc->chip->ReplyFifo, alloc_dma);
+		alloc_dma += ioc->reply_sz;
+	}
+
+	return 0;
+
+out_fail:
+	if (ioc->alloc != NULL) {
+		sz = ioc->alloc_sz;
+		pci_free_consistent(ioc->pcidev,
+				sz,
+				ioc->alloc, ioc->alloc_dma);
+		ioc->reply_frames = NULL;
+		ioc->req_frames = NULL;
+		ioc->alloc_total -= sz;
+	}
+	if (ioc->sense_buf_pool != NULL) {
+		sz = (ioc->req_depth * MPT_SENSE_BUFFER_ALLOC);
+		pci_free_consistent(ioc->pcidev,
+				sz,
+				ioc->sense_buf_pool, ioc->sense_buf_pool_dma);
+		ioc->sense_buf_pool = NULL;
+	}
+	return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_handshake_req_reply_wait - Send MPT request to and receive reply
+ *	from IOC via doorbell handshake method.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@reqBytes: Size of the request in bytes
+ *	@req: Pointer to MPT request frame
+ *	@replyBytes: Expected size of the reply in bytes
+ *	@u16reply: Pointer to area where reply should be written
+ *	@maxwait: Max wait time for a reply (in seconds)
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	NOTES: It is the caller's responsibility to byte-swap fields in the
+ *	request which are greater than 1 byte in size.  It is also the
+ *	caller's responsibility to byte-swap response fields which are
+ *	greater than 1 byte in size.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+mpt_handshake_req_reply_wait(MPT_ADAPTER *ioc, int reqBytes, u32 *req,
+				int replyBytes, u16 *u16reply, int maxwait, int sleepFlag)
+{
+	MPIDefaultReply_t *mptReply;
+	int failcnt = 0;
+	int t;
+
+	/*
+	 * Get ready to cache a handshake reply
+	 */
+	ioc->hs_reply_idx = 0;
+	mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
+	mptReply->MsgLength = 0;
+
+	/*
+	 * Make sure there are no doorbells (WRITE 0 to IntStatus reg),
+	 * then tell IOC that we want to handshake a request of N words.
+	 * (WRITE u32val to Doorbell reg).
+	 */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	CHIPREG_WRITE32(&ioc->chip->Doorbell,
+			((MPI_FUNCTION_HANDSHAKE<<MPI_DOORBELL_FUNCTION_SHIFT) |
+			 ((reqBytes/4)<<MPI_DOORBELL_ADD_DWORDS_SHIFT)));
+
+	/*
+	 * Wait for IOC's doorbell handshake int
+	 */
+	if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+		failcnt++;
+
+	dhsprintk((MYIOC_s_INFO_FMT "HandShake request start reqBytes=%d, WaitCnt=%d%s\n",
+			ioc->name, reqBytes, t, failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
+
+	/* Read doorbell and check for active bit */
+	if (!(CHIPREG_READ32(&ioc->chip->Doorbell) & MPI_DOORBELL_ACTIVE))
+			return -1;
+
+	/*
+	 * Clear doorbell int (WRITE 0 to IntStatus reg),
+	 * then wait for IOC to ACKnowledge that it's ready for
+	 * our handshake request.
+	 */
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	if (!failcnt && (t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+		failcnt++;
+
+	if (!failcnt) {
+		int	 ii;
+		u8	*req_as_bytes = (u8 *) req;
+
+		/*
+		 * Stuff request words via doorbell handshake,
+		 * with ACK from IOC for each.
+		 */
+		for (ii = 0; !failcnt && ii < reqBytes/4; ii++) {
+			u32 word = ((req_as_bytes[(ii*4) + 0] <<  0) |
+				    (req_as_bytes[(ii*4) + 1] <<  8) |
+				    (req_as_bytes[(ii*4) + 2] << 16) |
+				    (req_as_bytes[(ii*4) + 3] << 24));
+
+			CHIPREG_WRITE32(&ioc->chip->Doorbell, word);
+			if ((t = WaitForDoorbellAck(ioc, 5, sleepFlag)) < 0)
+				failcnt++;
+		}
+
+		dhsprintk((KERN_INFO MYNAM ": Handshake request frame (@%p) header\n", req));
+		DBG_DUMP_REQUEST_FRAME_HDR(req)
+
+		dhsprintk((MYIOC_s_INFO_FMT "HandShake request post done, WaitCnt=%d%s\n",
+				ioc->name, t, failcnt ? " - MISSING DOORBELL ACK!" : ""));
+
+		/*
+		 * Wait for completion of doorbell handshake reply from the IOC
+		 */
+		if (!failcnt && (t = WaitForDoorbellReply(ioc, maxwait, sleepFlag)) < 0)
+			failcnt++;
+		
+		dhsprintk((MYIOC_s_INFO_FMT "HandShake reply count=%d%s\n",
+				ioc->name, t, failcnt ? " - MISSING DOORBELL REPLY!" : ""));
+
+		/*
+		 * Copy out the cached reply...
+		 */
+		for (ii=0; ii < min(replyBytes/2,mptReply->MsgLength*2); ii++)
+			u16reply[ii] = ioc->hs_reply[ii];
+	} else {
+		return -99;
+	}
+
+	return -failcnt;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	WaitForDoorbellAck - Wait for IOC to clear the IOP_DOORBELL_STATUS bit
+ *	in its IntStatus register.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@howlong: How long to wait (in seconds)
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	This routine waits (up to ~2 seconds max) for IOC doorbell
+ *	handshake ACKnowledge.
+ *
+ *	Returns a negative value on failure, else wait loop count.
+ */
+static int
+WaitForDoorbellAck(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+	int cntdn;
+	int count = 0;
+	u32 intstat=0;
+
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+
+	if (sleepFlag == CAN_SLEEP) {
+		while (--cntdn) {
+			intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+			if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
+				break;
+			msleep_interruptible (1);
+			count++;
+		}
+	} else {
+		while (--cntdn) {
+			intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+			if (! (intstat & MPI_HIS_IOP_DOORBELL_STATUS))
+				break;
+			mdelay (1);
+			count++;
+		}
+	}
+
+	if (cntdn) {
+		dprintk((MYIOC_s_INFO_FMT "WaitForDoorbell ACK (count=%d)\n",
+				ioc->name, count));
+		return count;
+	}
+
+	printk(MYIOC_s_ERR_FMT "Doorbell ACK timeout (count=%d), IntStatus=%x!\n",
+			ioc->name, count, intstat);
+	return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	WaitForDoorbellInt - Wait for IOC to set the HIS_DOORBELL_INTERRUPT bit
+ *	in its IntStatus register.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@howlong: How long to wait (in seconds)
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	This routine waits (up to ~2 seconds max) for IOC doorbell interrupt.
+ *
+ *	Returns a negative value on failure, else wait loop count.
+ */
+static int
+WaitForDoorbellInt(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+	int cntdn;
+	int count = 0;
+	u32 intstat=0;
+
+	cntdn = ((sleepFlag == CAN_SLEEP) ? HZ : 1000) * howlong;
+	if (sleepFlag == CAN_SLEEP) {
+		while (--cntdn) {
+			intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+			if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
+				break;
+			msleep_interruptible(1);
+			count++;
+		}
+	} else {
+		while (--cntdn) {
+			intstat = CHIPREG_READ32(&ioc->chip->IntStatus);
+			if (intstat & MPI_HIS_DOORBELL_INTERRUPT)
+				break;
+			mdelay(1);
+			count++;
+		}
+	}
+
+	if (cntdn) {
+		dprintk((MYIOC_s_INFO_FMT "WaitForDoorbell INT (cnt=%d) howlong=%d\n",
+				ioc->name, count, howlong));
+		return count;
+	}
+
+	printk(MYIOC_s_ERR_FMT "Doorbell INT timeout (count=%d), IntStatus=%x!\n",
+			ioc->name, count, intstat);
+	return -1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	WaitForDoorbellReply - Wait for and capture an IOC handshake reply.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@howlong: How long to wait (in seconds)
+ *	@sleepFlag: Specifies whether the process can sleep
+ *
+ *	This routine polls the IOC for a handshake reply, 16 bits at a time.
+ *	The reply is cached to an IOC private area large enough to hold a
+ *	maximum of 128 bytes of reply data.
+ *
+ *	Returns a negative value on failure, else size of reply in WORDS.
+ */
+static int
+WaitForDoorbellReply(MPT_ADAPTER *ioc, int howlong, int sleepFlag)
+{
+	int u16cnt = 0;
+	int failcnt = 0;
+	int t;
+	u16 *hs_reply = ioc->hs_reply;
+	volatile MPIDefaultReply_t *mptReply = (MPIDefaultReply_t *) ioc->hs_reply;
+	u16 hword;
+
+	hs_reply[0] = hs_reply[1] = hs_reply[7] = 0;
+
+	/*
+	 * Get first two u16's so we can look at IOC's intended reply MsgLength
+	 */
+	u16cnt=0;
+	if ((t = WaitForDoorbellInt(ioc, howlong, sleepFlag)) < 0) {
+		failcnt++;
+	} else {
+		hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+		CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+			failcnt++;
+		else {
+			hs_reply[u16cnt++] = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+			CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+		}
+	}
+
+	dhsprintk((MYIOC_s_INFO_FMT "WaitCnt=%d First handshake reply word=%08x%s\n",
+			ioc->name, t, le32_to_cpu(*(u32 *)hs_reply), 
+			failcnt ? " - MISSING DOORBELL HANDSHAKE!" : ""));
+
+	/*
+	 * If no error (and IOC said MsgLength is > 0), piece together
+	 * reply 16 bits at a time.
+	 */
+	for (u16cnt=2; !failcnt && u16cnt < (2 * mptReply->MsgLength); u16cnt++) {
+		if ((t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+			failcnt++;
+		hword = le16_to_cpu(CHIPREG_READ32(&ioc->chip->Doorbell) & 0x0000FFFF);
+		/* don't overflow our IOC hs_reply[] buffer! */
+		if (u16cnt < sizeof(ioc->hs_reply) / sizeof(ioc->hs_reply[0]))
+			hs_reply[u16cnt] = hword;
+		CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+	}
+
+	if (!failcnt && (t = WaitForDoorbellInt(ioc, 5, sleepFlag)) < 0)
+		failcnt++;
+	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
+
+	if (failcnt) {
+		printk(MYIOC_s_ERR_FMT "Handshake reply failure!\n",
+				ioc->name);
+		return -failcnt;
+	}
+#if 0
+	else if (u16cnt != (2 * mptReply->MsgLength)) {
+		return -101;
+	}
+	else if ((mptReply->IOCStatus & MPI_IOCSTATUS_MASK) != MPI_IOCSTATUS_SUCCESS) {
+		return -102;
+	}
+#endif
+
+	dhsprintk((MYIOC_s_INFO_FMT "Got Handshake reply:\n", ioc->name));
+	DBG_DUMP_REPLY_FRAME(mptReply)
+
+	dhsprintk((MYIOC_s_INFO_FMT "WaitForDoorbell REPLY WaitCnt=%d (sz=%d)\n",
+			ioc->name, t, u16cnt/2));
+	return u16cnt/2;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	GetLanConfigPages - Fetch LANConfig pages.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Return: 0 for success
+ *		-ENOMEM if no memory available
+ *		-EPERM if not allowed due to ISR context
+ *		-EAGAIN if no msg frames currently available
+ *		-EFAULT for non-successful reply or no reply (timeout)
+ */
+static int
+GetLanConfigPages(MPT_ADAPTER *ioc)
+{
+	ConfigPageHeader_t	 hdr;
+	CONFIGPARMS		 cfg;
+	LANPage0_t		*ppage0_alloc;
+	dma_addr_t		 page0_dma;
+	LANPage1_t		*ppage1_alloc;
+	dma_addr_t		 page1_dma;
+	int			 rc = 0;
+	int			 data_sz;
+	int			 copy_sz;
+
+	/* Get LAN Page 0 header */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 0;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
+	cfg.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.pageAddr = 0;
+	cfg.timeout = 0;
+
+	if ((rc = mpt_config(ioc, &cfg)) != 0)
+		return rc;
+
+	if (hdr.PageLength > 0) {
+		data_sz = hdr.PageLength * 4;
+		ppage0_alloc = (LANPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+		rc = -ENOMEM;
+		if (ppage0_alloc) {
+			memset((u8 *)ppage0_alloc, 0, data_sz);
+			cfg.physAddr = page0_dma;
+			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+			if ((rc = mpt_config(ioc, &cfg)) == 0) {
+				/* save the data */
+				copy_sz = min_t(int, sizeof(LANPage0_t), data_sz);
+				memcpy(&ioc->lan_cnfg_page0, ppage0_alloc, copy_sz);
+
+			}
+
+			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+
+			/* FIXME!
+			 *	Normalize endianness of structure data,
+			 *	by byte-swapping all > 1 byte fields!
+			 */
+
+		}
+
+		if (rc)
+			return rc;
+	}
+
+	/* Get LAN Page 1 header */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 1;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_LAN;
+	cfg.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.pageAddr = 0;
+
+	if ((rc = mpt_config(ioc, &cfg)) != 0)
+		return rc;
+
+	if (hdr.PageLength == 0)
+		return 0;
+
+	data_sz = hdr.PageLength * 4;
+	rc = -ENOMEM;
+	ppage1_alloc = (LANPage1_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page1_dma);
+	if (ppage1_alloc) {
+		memset((u8 *)ppage1_alloc, 0, data_sz);
+		cfg.physAddr = page1_dma;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+		if ((rc = mpt_config(ioc, &cfg)) == 0) {
+			/* save the data */
+			copy_sz = min_t(int, sizeof(LANPage1_t), data_sz);
+			memcpy(&ioc->lan_cnfg_page1, ppage1_alloc, copy_sz);
+		}
+
+		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage1_alloc, page1_dma);
+
+		/* FIXME!
+		 *	Normalize endianness of structure data,
+		 *	by byte-swapping all > 1 byte fields!
+		 */
+
+	}
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	GetFcPortPage0 - Fetch FCPort config Page0.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@portnum: IOC Port number
+ *
+ *	Return: 0 for success
+ *		-ENOMEM if no memory available
+ *		-EPERM if not allowed due to ISR context
+ *		-EAGAIN if no msg frames currently available
+ *		-EFAULT for non-successful reply or no reply (timeout)
+ */
+static int
+GetFcPortPage0(MPT_ADAPTER *ioc, int portnum)
+{
+	ConfigPageHeader_t	 hdr;
+	CONFIGPARMS		 cfg;
+	FCPortPage0_t		*ppage0_alloc;
+	FCPortPage0_t		*pp0dest;
+	dma_addr_t		 page0_dma;
+	int			 data_sz;
+	int			 copy_sz;
+	int			 rc;
+
+	/* Get FCPort Page 0 header */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 0;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_FC_PORT;
+	cfg.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.pageAddr = portnum;
+	cfg.timeout = 0;
+
+	if ((rc = mpt_config(ioc, &cfg)) != 0)
+		return rc;
+
+	if (hdr.PageLength == 0)
+		return 0;
+
+	data_sz = hdr.PageLength * 4;
+	rc = -ENOMEM;
+	ppage0_alloc = (FCPortPage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page0_dma);
+	if (ppage0_alloc) {
+		memset((u8 *)ppage0_alloc, 0, data_sz);
+		cfg.physAddr = page0_dma;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+		if ((rc = mpt_config(ioc, &cfg)) == 0) {
+			/* save the data */
+			pp0dest = &ioc->fc_port_page0[portnum];
+			copy_sz = min_t(int, sizeof(FCPortPage0_t), data_sz);
+			memcpy(pp0dest, ppage0_alloc, copy_sz);
+
+			/*
+			 *	Normalize endianness of structure data,
+			 *	by byte-swapping all > 1 byte fields!
+			 */
+			pp0dest->Flags = le32_to_cpu(pp0dest->Flags);
+			pp0dest->PortIdentifier = le32_to_cpu(pp0dest->PortIdentifier);
+			pp0dest->WWNN.Low = le32_to_cpu(pp0dest->WWNN.Low);
+			pp0dest->WWNN.High = le32_to_cpu(pp0dest->WWNN.High);
+			pp0dest->WWPN.Low = le32_to_cpu(pp0dest->WWPN.Low);
+			pp0dest->WWPN.High = le32_to_cpu(pp0dest->WWPN.High);
+			pp0dest->SupportedServiceClass = le32_to_cpu(pp0dest->SupportedServiceClass);
+			pp0dest->SupportedSpeeds = le32_to_cpu(pp0dest->SupportedSpeeds);
+			pp0dest->CurrentSpeed = le32_to_cpu(pp0dest->CurrentSpeed);
+			pp0dest->MaxFrameSize = le32_to_cpu(pp0dest->MaxFrameSize);
+			pp0dest->FabricWWNN.Low = le32_to_cpu(pp0dest->FabricWWNN.Low);
+			pp0dest->FabricWWNN.High = le32_to_cpu(pp0dest->FabricWWNN.High);
+			pp0dest->FabricWWPN.Low = le32_to_cpu(pp0dest->FabricWWPN.Low);
+			pp0dest->FabricWWPN.High = le32_to_cpu(pp0dest->FabricWWPN.High);
+			pp0dest->DiscoveredPortsCount = le32_to_cpu(pp0dest->DiscoveredPortsCount);
+			pp0dest->MaxInitiators = le32_to_cpu(pp0dest->MaxInitiators);
+
+		}
+
+		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage0_alloc, page0_dma);
+	}
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	GetIoUnitPage2 - Retrieve BIOS version and boot order information.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *
+ *	Returns: 0 for success
+ *		-ENOMEM if no memory available
+ *		-EPERM if not allowed due to ISR context
+ *		-EAGAIN if no msg frames currently available
+ *		-EFAULT for non-successful reply or no reply (timeout)
+ */
+static int
+GetIoUnitPage2(MPT_ADAPTER *ioc)
+{
+	ConfigPageHeader_t	 hdr;
+	CONFIGPARMS		 cfg;
+	IOUnitPage2_t		*ppage_alloc;
+	dma_addr_t		 page_dma;
+	int			 data_sz;
+	int			 rc;
+
+	/* Get the page header */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 2;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_IO_UNIT;
+	cfg.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.pageAddr = 0;
+	cfg.timeout = 0;
+
+	if ((rc = mpt_config(ioc, &cfg)) != 0)
+		return rc;
+
+	if (hdr.PageLength == 0)
+		return 0;
+
+	/* Read the config page */
+	data_sz = hdr.PageLength * 4;
+	rc = -ENOMEM;
+	ppage_alloc = (IOUnitPage2_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+	if (ppage_alloc) {
+		memset((u8 *)ppage_alloc, 0, data_sz);
+		cfg.physAddr = page_dma;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+		/* If Good, save data */
+		if ((rc = mpt_config(ioc, &cfg)) == 0)
+			ioc->biosVersion = le32_to_cpu(ppage_alloc->BiosVersion);
+
+		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) ppage_alloc, page_dma);
+	}
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mpt_GetScsiPortSettings - read SCSI Port Page 0 and 2
+ *	@ioc: Pointer to an Adapter Structure
+ *	@portnum: IOC port number
+ *
+ *	Return: -EFAULT if read of config page header fails
+ *			or if no nvram
+ *	If read of SCSI Port Page 0 fails,
+ *		NVRAM = MPT_HOST_NVRAM_INVALID  (0xFFFFFFFF)
+ *		Adapter settings: async, narrow
+ *		Return 1
+ *	If read of SCSI Port Page 2 fails,
+ *		Adapter settings valid
+ *		NVRAM = MPT_HOST_NVRAM_INVALID  (0xFFFFFFFF)
+ *		Return 1
+ *	Else
+ *		Both valid
+ *		Return 0
+ *	CHECK - what type of locking mechanisms should be used????
+ */
+static int
+mpt_GetScsiPortSettings(MPT_ADAPTER *ioc, int portnum)
+{
+	u8			*pbuf;
+	dma_addr_t		 buf_dma;
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+	int			 ii;
+	int			 data, rc = 0;
+
+	/* Allocate memory
+	 */
+	if (!ioc->spi_data.nvram) {
+		int	 sz;
+		u8	*mem;
+		sz = MPT_MAX_SCSI_DEVICES * sizeof(int);
+		mem = kmalloc(sz, GFP_ATOMIC);
+		if (mem == NULL)
+			return -EFAULT;
+
+		ioc->spi_data.nvram = (int *) mem;
+
+		dprintk((MYIOC_s_INFO_FMT "SCSI device NVRAM settings @ %p, sz=%d\n",
+			ioc->name, ioc->spi_data.nvram, sz));
+	}
+
+	/* Invalidate NVRAM information
+	 */
+	for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+		ioc->spi_data.nvram[ii] = MPT_HOST_NVRAM_INVALID;
+	}
+
+	/* Read SPP0 header, allocate memory, then read page.
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 0;
+	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = portnum;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;	/* use default */
+	if (mpt_config(ioc, &cfg) != 0)
+		 return -EFAULT;
+
+	if (header.PageLength > 0) {
+		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+		if (pbuf) {
+			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+			cfg.physAddr = buf_dma;
+			if (mpt_config(ioc, &cfg) != 0) {
+				ioc->spi_data.maxBusWidth = MPT_NARROW;
+				ioc->spi_data.maxSyncOffset = 0;
+				ioc->spi_data.minSyncFactor = MPT_ASYNC;
+				ioc->spi_data.busType = MPT_HOST_BUS_UNKNOWN;
+				rc = 1;
+			} else {
+				/* Save the Port Page 0 data
+				 */
+				SCSIPortPage0_t  *pPP0 = (SCSIPortPage0_t  *) pbuf;
+				pPP0->Capabilities = le32_to_cpu(pPP0->Capabilities);
+				pPP0->PhysicalInterface = le32_to_cpu(pPP0->PhysicalInterface);
+
+				if ( (pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_QAS) == 0 ) {
+					ioc->spi_data.noQas |= MPT_TARGET_NO_NEGO_QAS;
+					dinitprintk((KERN_INFO MYNAM " :%s noQas due to Capabilities=%x\n",
+						ioc->name, pPP0->Capabilities));
+				}
+				ioc->spi_data.maxBusWidth = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_WIDE ? 1 : 0;
+				data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MAX_SYNC_OFFSET_MASK;
+				if (data) {
+					ioc->spi_data.maxSyncOffset = (u8) (data >> 16);
+					data = pPP0->Capabilities & MPI_SCSIPORTPAGE0_CAP_MIN_SYNC_PERIOD_MASK;
+					ioc->spi_data.minSyncFactor = (u8) (data >> 8);
+				} else {
+					ioc->spi_data.maxSyncOffset = 0;
+					ioc->spi_data.minSyncFactor = MPT_ASYNC;
+				}
+
+				ioc->spi_data.busType = pPP0->PhysicalInterface & MPI_SCSIPORTPAGE0_PHY_SIGNAL_TYPE_MASK;
+
+				/* Update the minSyncFactor based on bus type.
+				 */
+				if ((ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_HVD) ||
+					(ioc->spi_data.busType == MPI_SCSIPORTPAGE0_PHY_SIGNAL_SE))  {
+
+					if (ioc->spi_data.minSyncFactor < MPT_ULTRA)
+						ioc->spi_data.minSyncFactor = MPT_ULTRA;
+				}
+			}
+			if (pbuf) {
+				pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+			}
+		}
+	}
+
+	/* SCSI Port Page 2 - Read the header then the page.
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 2;
+	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_PORT;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = portnum;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		return -EFAULT;
+
+	if (header.PageLength > 0) {
+		/* Allocate memory and read SCSI Port Page 2
+		 */
+		pbuf = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &buf_dma);
+		if (pbuf) {
+			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_NVRAM;
+			cfg.physAddr = buf_dma;
+			if (mpt_config(ioc, &cfg) != 0) {
+				/* Nvram data is left with INVALID mark
+				 */
+				rc = 1;
+			} else {
+				SCSIPortPage2_t *pPP2 = (SCSIPortPage2_t  *) pbuf;
+				MpiDeviceInfo_t	*pdevice = NULL;
+
+				/* Save the Port Page 2 data
+				 * (reformat into a 32bit quantity)
+				 */
+				data = le32_to_cpu(pPP2->PortFlags) & MPI_SCSIPORTPAGE2_PORT_FLAGS_DV_MASK;
+				ioc->spi_data.PortFlags = data;
+				for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+					pdevice = &pPP2->DeviceSettings[ii];
+					data = (le16_to_cpu(pdevice->DeviceFlags) << 16) |
+						(pdevice->SyncFactor << 8) | pdevice->Timeout;
+					ioc->spi_data.nvram[ii] = data;
+				}
+			}
+
+			pci_free_consistent(ioc->pcidev, header.PageLength * 4, pbuf, buf_dma);
+		}
+	}
+
+	/* Update Adapter limits with those from NVRAM
+	 * Comment: Don't need to do this. Target performance
+	 * parameters will never exceed the adapter's limits.
+	 */
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mpt_readScsiDevicePageHeaders - save version and length of SDP0 and SDP1
+ *	@ioc: Pointer to an Adapter Structure
+ *	@portnum: IOC port number
+ *
+ *	Return: -EFAULT if read of config page header fails
+ *		or 0 if success.
+ */
+static int
+mpt_readScsiDevicePageHeaders(MPT_ADAPTER *ioc, int portnum)
+{
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+
+	/* Read the SCSI Device Page 1 header
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 1;
+	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = portnum;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		 return -EFAULT;
+
+	ioc->spi_data.sdp1version = cfg.hdr->PageVersion;
+	ioc->spi_data.sdp1length = cfg.hdr->PageLength;
+
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 0;
+	header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+	if (mpt_config(ioc, &cfg) != 0)
+		 return -EFAULT;
+
+	ioc->spi_data.sdp0version = cfg.hdr->PageVersion;
+	ioc->spi_data.sdp0length = cfg.hdr->PageLength;
+
+	dcprintk((MYIOC_s_INFO_FMT "Headers: 0: version %d length %d\n",
+			ioc->name, ioc->spi_data.sdp0version, ioc->spi_data.sdp0length));
+
+	dcprintk((MYIOC_s_INFO_FMT "Headers: 1: version %d length %d\n",
+			ioc->name, ioc->spi_data.sdp1version, ioc->spi_data.sdp1length));
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_findImVolumes - Identify IDs of hidden disks and RAID Volumes
+ *	@ioc: Pointer to an Adapter Structure
+ *
+ *	Return:
+ *	0 on success
+ *	-EFAULT if read of the config page header fails or the page length is zero
+ *	-ENOMEM if pci_alloc_consistent fails
+ */
+int
+mpt_findImVolumes(MPT_ADAPTER *ioc)
+{
+	IOCPage2_t		*pIoc2;
+	u8			*mem;
+	ConfigPageIoc2RaidVol_t	*pIocRv;
+	dma_addr_t		 ioc2_dma;
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+	int			 jj;
+	int			 rc = 0;
+	int			 iocpage2sz;
+	u8			 nVols, nPhys;
+	u8			 vid, vbus, vioc;
+
+	/* Read IOCP2 header then the page.
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 2;
+	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = 0;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		 return -EFAULT;
+
+	if (header.PageLength == 0)
+		return -EFAULT;
+
+	iocpage2sz = header.PageLength * 4;
+	pIoc2 = pci_alloc_consistent(ioc->pcidev, iocpage2sz, &ioc2_dma);
+	if (!pIoc2)
+		return -ENOMEM;
+
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	cfg.physAddr = ioc2_dma;
+	if (mpt_config(ioc, &cfg) != 0)
+		goto done_and_free;
+
+	if ( (mem = (u8 *)ioc->spi_data.pIocPg2) == NULL ) {
+		mem = kmalloc(iocpage2sz, GFP_ATOMIC);
+		if (mem) {
+			ioc->spi_data.pIocPg2 = (IOCPage2_t *) mem;
+		} else {
+			goto done_and_free;
+		}
+	}
+	memcpy(mem, (u8 *)pIoc2, iocpage2sz);
+
+	/* Identify RAID Volume Id's */
+	nVols = pIoc2->NumActiveVolumes;
+	if ( nVols == 0) {
+		/* No RAID Volume.
+		 */
+		goto done_and_free;
+	} else {
+		/* At least 1 RAID Volume
+		 */
+		pIocRv = pIoc2->RaidVolume;
+		ioc->spi_data.isRaid = 0;
+		for (jj = 0; jj < nVols; jj++, pIocRv++) {
+			vid = pIocRv->VolumeID;
+			vbus = pIocRv->VolumeBus;
+			vioc = pIocRv->VolumeIOC;
+
+			/* find the match
+			 */
+			if (vbus == 0) {
+				ioc->spi_data.isRaid |= (1 << vid);
+			} else {
+				/* Error! Always bus 0
+				 */
+			}
+		}
+	}
+
+	/* Identify Hidden Physical Disk Id's */
+	nPhys = pIoc2->NumActivePhysDisks;
+	if (nPhys == 0) {
+		/* No physical disks.
+		 */
+	} else {
+		mpt_read_ioc_pg_3(ioc);
+	}
+
+done_and_free:
+	pci_free_consistent(ioc->pcidev, iocpage2sz, pIoc2, ioc2_dma);
+
+	return rc;
+}
+
+int
+mpt_read_ioc_pg_3(MPT_ADAPTER *ioc)
+{
+	IOCPage3_t		*pIoc3;
+	u8			*mem;
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+	dma_addr_t		 ioc3_dma;
+	int			 iocpage3sz = 0;
+
+	/* Free the old page
+	 */
+	if (ioc->spi_data.pIocPg3) {
+		kfree(ioc->spi_data.pIocPg3);
+		ioc->spi_data.pIocPg3 = NULL;
+	}
+
+	/* There is at least one physical disk.
+	 * Read and save IOC Page 3
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 3;
+	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = 0;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		return 0;
+
+	if (header.PageLength == 0)
+		return 0;
+
+	/* Read Header good, alloc memory
+	 */
+	iocpage3sz = header.PageLength * 4;
+	pIoc3 = pci_alloc_consistent(ioc->pcidev, iocpage3sz, &ioc3_dma);
+	if (!pIoc3)
+		return 0;
+
+	/* Read the Page and save the data
+	 * into malloc'd memory.
+	 */
+	cfg.physAddr = ioc3_dma;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	if (mpt_config(ioc, &cfg) == 0) {
+		mem = kmalloc(iocpage3sz, GFP_ATOMIC);
+		if (mem) {
+			memcpy(mem, (u8 *)pIoc3, iocpage3sz);
+			ioc->spi_data.pIocPg3 = (IOCPage3_t *) mem;
+		}
+	}
+
+	pci_free_consistent(ioc->pcidev, iocpage3sz, pIoc3, ioc3_dma);
+
+	return 0;
+}
+
+static void
+mpt_read_ioc_pg_4(MPT_ADAPTER *ioc)
+{
+	IOCPage4_t		*pIoc4;
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+	dma_addr_t		 ioc4_dma;
+	int			 iocpage4sz;
+
+	/* Read and save IOC Page 4
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 4;
+	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = 0;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		return;
+
+	if (header.PageLength == 0)
+		return;
+
+	if ( (pIoc4 = ioc->spi_data.pIocPg4) == NULL ) {
+		iocpage4sz = (header.PageLength + 4) * 4; /* Allow 4 additional SEP's */
+		pIoc4 = pci_alloc_consistent(ioc->pcidev, iocpage4sz, &ioc4_dma);
+		if (!pIoc4)
+			return;
+	} else {
+		ioc4_dma = ioc->spi_data.IocPg4_dma;
+		iocpage4sz = ioc->spi_data.IocPg4Sz;
+	}
+
+	/* Read the Page into dma memory.
+	 */
+	cfg.physAddr = ioc4_dma;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	if (mpt_config(ioc, &cfg) == 0) {
+		ioc->spi_data.pIocPg4 = (IOCPage4_t *) pIoc4;
+		ioc->spi_data.IocPg4_dma = ioc4_dma;
+		ioc->spi_data.IocPg4Sz = iocpage4sz;
+	} else {
+		pci_free_consistent(ioc->pcidev, iocpage4sz, pIoc4, ioc4_dma);
+		ioc->spi_data.pIocPg4 = NULL;
+	}
+}
+
+static void
+mpt_read_ioc_pg_1(MPT_ADAPTER *ioc)
+{
+	IOCPage1_t		*pIoc1;
+	CONFIGPARMS		 cfg;
+	ConfigPageHeader_t	 header;
+	dma_addr_t		 ioc1_dma;
+	int			 iocpage1sz = 0;
+	u32			 tmp;
+
+	/* Check the Coalescing Timeout in IOC Page 1
+	 */
+	header.PageVersion = 0;
+	header.PageLength = 0;
+	header.PageNumber = 1;
+	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+	cfg.hdr = &header;
+	cfg.physAddr = -1;
+	cfg.pageAddr = 0;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	if (mpt_config(ioc, &cfg) != 0)
+		return;
+
+	if (header.PageLength == 0)
+		return;
+
+	/* Read Header good, alloc memory
+	 */
+	iocpage1sz = header.PageLength * 4;
+	pIoc1 = pci_alloc_consistent(ioc->pcidev, iocpage1sz, &ioc1_dma);
+	if (!pIoc1)
+		return;
+
+	/* Read the Page and check coalescing timeout
+	 */
+	cfg.physAddr = ioc1_dma;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+	if (mpt_config(ioc, &cfg) == 0) {
+
+		tmp = le32_to_cpu(pIoc1->Flags) & MPI_IOCPAGE1_REPLY_COALESCING;
+		if (tmp == MPI_IOCPAGE1_REPLY_COALESCING) {
+			tmp = le32_to_cpu(pIoc1->CoalescingTimeout);
+
+			dprintk((MYIOC_s_INFO_FMT "Coalescing Enabled Timeout = %d\n",
+					ioc->name, tmp));
+
+			if (tmp > MPT_COALESCING_TIMEOUT) {
+				pIoc1->CoalescingTimeout = cpu_to_le32(MPT_COALESCING_TIMEOUT);
+
+				/* Write NVRAM and current
+				 */
+				cfg.dir = 1;
+				cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+				if (mpt_config(ioc, &cfg) == 0) {
+					dprintk((MYIOC_s_INFO_FMT "Reset Current Coalescing Timeout to = %d\n",
+							ioc->name, MPT_COALESCING_TIMEOUT));
+
+					cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_NVRAM;
+					if (mpt_config(ioc, &cfg) == 0) {
+						dprintk((MYIOC_s_INFO_FMT "Reset NVRAM Coalescing Timeout to = %d\n",
+								ioc->name, MPT_COALESCING_TIMEOUT));
+					} else {
+						dprintk((MYIOC_s_INFO_FMT "Reset NVRAM Coalescing Timeout Failed\n",
+									ioc->name));
+					}
+
+				} else {
+					dprintk((MYIOC_s_WARN_FMT "Reset of Current Coalescing Timeout Failed!\n",
+								ioc->name));
+				}
+			}
+
+		} else {
+			dprintk((MYIOC_s_WARN_FMT "Coalescing Disabled\n", ioc->name));
+		}
+	}
+
+	pci_free_consistent(ioc->pcidev, iocpage1sz, pIoc1, ioc1_dma);
+
+	return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	SendEventNotification - Send EventNotification (on or off) request
+ *	to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@EvSwitch: Event switch flags
+ */
+static int
+SendEventNotification(MPT_ADAPTER *ioc, u8 EvSwitch)
+{
+	EventNotification_t	*evnp;
+
+	evnp = (EventNotification_t *) mpt_get_msg_frame(mpt_base_index, ioc);
+	if (evnp == NULL) {
+		dprintk((MYIOC_s_WARN_FMT "Unable to allocate event request frame!\n",
+				ioc->name));
+		return 0;
+	}
+	memset(evnp, 0, sizeof(*evnp));
+
+	dprintk((MYIOC_s_INFO_FMT "Sending EventNotification(%d)\n", ioc->name, EvSwitch));
+
+	evnp->Function = MPI_FUNCTION_EVENT_NOTIFICATION;
+	evnp->ChainOffset = 0;
+	evnp->MsgFlags = 0;
+	evnp->Switch = EvSwitch;
+
+	mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)evnp);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	SendEventAck - Send EventAck request to MPT adapter.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@evnp: Pointer to original EventNotification request
+ */
+static int
+SendEventAck(MPT_ADAPTER *ioc, EventNotificationReply_t *evnp)
+{
+	EventAck_t	*pAck;
+
+	if ((pAck = (EventAck_t *) mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+		printk(MYIOC_s_WARN_FMT "Unable to allocate event ACK request frame!\n",
+				ioc->name);
+		return -1;
+	}
+	memset(pAck, 0, sizeof(*pAck));
+
+	dprintk((MYIOC_s_INFO_FMT "Sending EventAck\n", ioc->name));
+
+	pAck->Function     = MPI_FUNCTION_EVENT_ACK;
+	pAck->ChainOffset  = 0;
+	pAck->MsgFlags     = 0;
+	pAck->Event        = evnp->Event;
+	pAck->EventContext = evnp->EventContext;
+
+	mpt_put_msg_frame(mpt_base_index, ioc, (MPT_FRAME_HDR *)pAck);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_config - Generic function to issue config message
+ *	@ioc: Pointer to an adapter structure
+ *	@pCfg: Pointer to a configuration structure. Struct contains
+ *		action, page address, direction, physical address
+ *		and pointer to a configuration page header.
+ *		Page header is updated.
+ *
+ *	Returns 0 for success
+ *	-EPERM if not allowed due to ISR context
+ *	-EAGAIN if no msg frames currently available
+ *	-EFAULT for non-successful reply or no reply (timeout)
+ */
+int
+mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
+{
+	Config_t	*pReq;
+	MPT_FRAME_HDR	*mf;
+	unsigned long	 flags;
+	int		 ii, rc;
+	u32		 flagsLength;
+	int		 in_isr;
+
+	/* (Bugzilla:fibrebugs, #513)
+	 * Bug fix (part 1)!  20010905 -sralston
+	 *	Prevent calling wait_event() (below), if caller happens
+	 *	to be in ISR context, because that is fatal!
+	 */
+	in_isr = in_interrupt();
+	if (in_isr) {
+		dcprintk((MYIOC_s_WARN_FMT "Config request not allowed in ISR context!\n",
+				ioc->name));
+		return -EPERM;
+	}
+
+	/* Get and Populate a free Frame
+	 */
+	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+		dcprintk((MYIOC_s_WARN_FMT "mpt_config: no msg frames!\n",
+				ioc->name));
+		return -EAGAIN;
+	}
+	pReq = (Config_t *)mf;
+	pReq->Action = pCfg->action;
+	pReq->Reserved = 0;
+	pReq->ChainOffset = 0;
+	pReq->Function = MPI_FUNCTION_CONFIG;
+	pReq->ExtPageLength = 0;
+	pReq->ExtPageType = 0;
+	pReq->MsgFlags = 0;
+	for (ii=0; ii < 8; ii++)
+		pReq->Reserved2[ii] = 0;
+
+	pReq->Header.PageVersion = pCfg->hdr->PageVersion;
+	pReq->Header.PageLength = pCfg->hdr->PageLength;
+	pReq->Header.PageNumber = pCfg->hdr->PageNumber;
+	pReq->Header.PageType = (pCfg->hdr->PageType & MPI_CONFIG_PAGETYPE_MASK);
+	pReq->PageAddress = cpu_to_le32(pCfg->pageAddr);
+
+	/* Add a SGE to the config request.
+	 */
+	if (pCfg->dir)
+		flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
+	else
+		flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
+
+	flagsLength |= pCfg->hdr->PageLength * 4;
+
+	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, pCfg->physAddr);
+
+	dcprintk((MYIOC_s_INFO_FMT "Sending Config request type %d, page %d and action %d\n",
+		ioc->name, pReq->Header.PageType, pReq->Header.PageNumber, pReq->Action));
+
+	/* Append pCfg pointer to end of mf
+	 */
+	*((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) =  (void *) pCfg;
+
+	/* Initialize the timer
+	 */
+	init_timer(&pCfg->timer);
+	pCfg->timer.data = (unsigned long) ioc;
+	pCfg->timer.function = mpt_timer_expired;
+	pCfg->wait_done = 0;
+
+	/* Set the timer; ensure 10 second minimum */
+	if (pCfg->timeout < 10)
+		pCfg->timer.expires = jiffies + HZ*10;
+	else
+		pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
+
+	/* Add to end of Q, set timer and then issue this command */
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	list_add_tail(&pCfg->linkage, &ioc->configQ);
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	add_timer(&pCfg->timer);
+	mpt_put_msg_frame(mpt_base_index, ioc, mf);
+	wait_event(mpt_waitq, pCfg->wait_done);
+
+	/* mf has been freed - do not access */
+
+	rc = pCfg->status;
+
+	return rc;
+}
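+
+/*
+ *	Illustrative sketch of the usual mpt_config() calling pattern (kept in
+ *	a comment, not compiled): read the page header first to learn the page
+ *	length, then re-issue the request as a PAGE_READ into a DMA-able
+ *	buffer.  This mirrors the callers elsewhere in this file; the local
+ *	names "page" and "page_dma" are hypothetical.
+ *
+ *	CONFIGPARMS		cfg;
+ *	ConfigPageHeader_t	header;
+ *	dma_addr_t		page_dma;
+ *	void			*page;
+ *
+ *	header.PageVersion = 0;
+ *	header.PageLength = 0;
+ *	header.PageNumber = 2;
+ *	header.PageType = MPI_CONFIG_PAGETYPE_IOC;
+ *	cfg.hdr = &header;
+ *	cfg.physAddr = -1;
+ *	cfg.pageAddr = 0;
+ *	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+ *	cfg.dir = 0;
+ *	cfg.timeout = 0;
+ *	if (mpt_config(ioc, &cfg) != 0 || header.PageLength == 0)
+ *		return -EFAULT;
+ *
+ *	page = pci_alloc_consistent(ioc->pcidev, header.PageLength * 4, &page_dma);
+ *	if (!page)
+ *		return -ENOMEM;
+ *	cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+ *	cfg.physAddr = page_dma;
+ *	if (mpt_config(ioc, &cfg) == 0) {
+ *		(parse the returned page here)
+ *	}
+ *	pci_free_consistent(ioc->pcidev, header.PageLength * 4, page, page_dma);
+ */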
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_toolbox - Generic function to issue toolbox message
+ *	@ioc: Pointer to an adapter structure
+ *	@pCfg: Pointer to a toolbox structure. Struct contains
+ *		action, page address, direction, physical address
+ *		and pointer to a configuration page header.
+ *		Page header is updated.
+ *
+ *	Returns 0 for success
+ *	-EPERM if not allowed due to ISR context
+ *	-EAGAIN if no msg frames currently available
+ *	-EFAULT for non-successful reply or no reply (timeout)
+ */
+int
+mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
+{
+	ToolboxIstwiReadWriteRequest_t	*pReq;
+	MPT_FRAME_HDR	*mf;
+	struct pci_dev	*pdev;
+	unsigned long	 flags;
+	int		 rc;
+	u32		 flagsLength;
+	int		 in_isr;
+
+	/* (Bugzilla:fibrebugs, #513)
+	 * Bug fix (part 1)!  20010905 -sralston
+	 *	Prevent calling wait_event() (below), if caller happens
+	 *	to be in ISR context, because that is fatal!
+	 */
+	in_isr = in_interrupt();
+	if (in_isr) {
+		dcprintk((MYIOC_s_WARN_FMT "toolbox request not allowed in ISR context!\n",
+				ioc->name));
+		return -EPERM;
+	}
+
+	/* Get and Populate a free Frame
+	 */
+	if ((mf = mpt_get_msg_frame(mpt_base_index, ioc)) == NULL) {
+		dcprintk((MYIOC_s_WARN_FMT "mpt_toolbox: no msg frames!\n",
+				ioc->name));
+		return -EAGAIN;
+	}
+	pReq = (ToolboxIstwiReadWriteRequest_t	*)mf;
+	pReq->Tool = pCfg->action;
+	pReq->Reserved = 0;
+	pReq->ChainOffset = 0;
+	pReq->Function = MPI_FUNCTION_TOOLBOX;
+	pReq->Reserved1 = 0;
+	pReq->Reserved2 = 0;
+	pReq->MsgFlags = 0;
+	pReq->Flags = pCfg->dir;
+	pReq->BusNum = 0;
+	pReq->Reserved3 = 0;
+	pReq->NumAddressBytes = 0x01;
+	pReq->Reserved4 = 0;
+	pReq->DataLength = 0x04;
+	pdev = (struct pci_dev *) ioc->pcidev;
+	if (pdev->devfn & 1)
+		pReq->DeviceAddr = 0xB2;
+	else
+		pReq->DeviceAddr = 0xB0;
+	pReq->Addr1 = 0;
+	pReq->Addr2 = 0;
+	pReq->Addr3 = 0;
+	pReq->Reserved5 = 0;
+
+	/* Add a SGE to the config request.
+	 */
+
+	flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ | 4;
+
+	mpt_add_sge((char *)&pReq->SGL, flagsLength, pCfg->physAddr);
+
+	dcprintk((MYIOC_s_INFO_FMT "Sending Toolbox request, Tool=%x\n",
+		ioc->name, pReq->Tool));
+
+	/* Append pCfg pointer to end of mf
+	 */
+	*((void **) (((u8 *) mf) + (ioc->req_sz - sizeof(void *)))) =  (void *) pCfg;
+
+	/* Initialize the timer
+	 */
+	init_timer(&pCfg->timer);
+	pCfg->timer.data = (unsigned long) ioc;
+	pCfg->timer.function = mpt_timer_expired;
+	pCfg->wait_done = 0;
+
+	/* Set the timer; ensure 10 second minimum */
+	if (pCfg->timeout < 10)
+		pCfg->timer.expires = jiffies + HZ*10;
+	else
+		pCfg->timer.expires = jiffies + HZ*pCfg->timeout;
+
+	/* Add to end of Q, set timer and then issue this command */
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	list_add_tail(&pCfg->linkage, &ioc->configQ);
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	add_timer(&pCfg->timer);
+	mpt_put_msg_frame(mpt_base_index, ioc, mf);
+	wait_event(mpt_waitq, pCfg->wait_done);
+
+	/* mf has been freed - do not access */
+
+	rc = pCfg->status;
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_timer_expired - Callback for timer process.
+ *	Used only by the internal config functionality.
+ *	@data: Pointer to MPT_ADAPTER recast as an unsigned long
+ */
+static void
+mpt_timer_expired(unsigned long data)
+{
+	MPT_ADAPTER *ioc = (MPT_ADAPTER *) data;
+
+	dcprintk((MYIOC_s_WARN_FMT "mpt_timer_expired! \n", ioc->name));
+
+	/* Perform a FW reload */
+	if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0)
+		printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
+
+	/* No more processing.
+	 * Hard reset clean-up will wake up
+	 * process and free all resources.
+	 */
+	dcprintk((MYIOC_s_WARN_FMT "mpt_timer_expired complete!\n", ioc->name));
+
+	return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_ioc_reset - Base cleanup for hard reset
+ *	@ioc: Pointer to the adapter structure
+ *	@reset_phase: Indicates setup, pre- or post-reset phase
+ *
+ *	Remark: Frees resources associated with internally generated commands.
+ */
+static int
+mpt_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+	CONFIGPARMS *pCfg;
+	unsigned long flags;
+
+	dprintk((KERN_WARNING MYNAM
+			": IOC %s_reset routed to MPT base driver!\n",
+			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+	if (reset_phase == MPT_IOC_SETUP_RESET) {
+		;
+	} else if (reset_phase == MPT_IOC_PRE_RESET) {
+		/* If the internal config Q is not empty -
+		 * delete timer. MF resources will be freed when
+		 * the FIFO's are primed.
+		 */
+		spin_lock_irqsave(&ioc->FreeQlock, flags);
+		list_for_each_entry(pCfg, &ioc->configQ, linkage)
+			del_timer(&pCfg->timer);
+		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	} else {
+		CONFIGPARMS *pNext;
+
+		/* Search the configQ for internal commands.
+		 * Flush the Q, and wake up all suspended threads.
+		 */
+		spin_lock_irqsave(&ioc->FreeQlock, flags);
+		list_for_each_entry_safe(pCfg, pNext, &ioc->configQ, linkage) {
+			list_del(&pCfg->linkage);
+
+			pCfg->status = MPT_CONFIG_ERROR;
+			pCfg->wait_done = 1;
+			wake_up(&mpt_waitq);
+		}
+		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+	}
+
+	return 1;		/* currently means nothing really */
+}
+
+
+#ifdef CONFIG_PROC_FS		/* { */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procfs (%MPT_PROCFS_MPTBASEDIR/...) support stuff...
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procmpt_create - Create %MPT_PROCFS_MPTBASEDIR entries.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int
+procmpt_create(void)
+{
+	struct proc_dir_entry	*ent;
+
+	mpt_proc_root_dir = proc_mkdir(MPT_PROCFS_MPTBASEDIR, NULL);
+	if (mpt_proc_root_dir == NULL)
+		return -ENOTDIR;
+
+	ent = create_proc_entry("summary", S_IFREG|S_IRUGO, mpt_proc_root_dir);
+	if (ent)
+		ent->read_proc = procmpt_summary_read;
+
+	ent = create_proc_entry("version", S_IFREG|S_IRUGO, mpt_proc_root_dir);
+	if (ent)
+		ent->read_proc = procmpt_version_read;
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procmpt_destroy - Tear down %MPT_PROCFS_MPTBASEDIR entries.
+ */
+static void
+procmpt_destroy(void)
+{
+	remove_proc_entry("version", mpt_proc_root_dir);
+	remove_proc_entry("summary", mpt_proc_root_dir);
+	remove_proc_entry(MPT_PROCFS_MPTBASEDIR, NULL);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procmpt_summary_read - Handle read request from /proc/mpt/summary
+ *	or from /proc/mpt/iocN/summary.
+ *	@buf: Pointer to area to write information
+ *	@start: Pointer to start pointer
+ *	@offset: Offset to start writing
+ *	@request: Number of bytes requested by the reading process
+ *	@eof: Pointer to EOF integer
+ *	@data: Pointer to MPT_ADAPTER (NULL to summarize all adapters)
+ *
+ *	Returns number of characters written to process performing the read.
+ */
+static int
+procmpt_summary_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
+{
+	MPT_ADAPTER *ioc;
+	char *out = buf;
+	int len;
+
+	if (data) {
+		int more = 0;
+
+		ioc = data;
+		mpt_print_ioc_summary(ioc, out, &more, 0, 1);
+
+		out += more;
+	} else {
+		list_for_each_entry(ioc, &ioc_list, list) {
+			int	more = 0;
+
+			mpt_print_ioc_summary(ioc, out, &more, 0, 1);
+
+			out += more;
+			if ((out-buf) >= request)
+				break;
+		}
+	}
+
+	len = out - buf;
+
+	MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procmpt_version_read - Handle read request from /proc/mpt/version.
+ *	@buf: Pointer to area to write information
+ *	@start: Pointer to start pointer
+ *	@offset: Offset to start writing
+ *	@request: Number of bytes requested by the reading process
+ *	@eof: Pointer to EOF integer
+ *	@data: Pointer (unused)
+ *
+ *	Returns number of characters written to process performing the read.
+ */
+static int
+procmpt_version_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
+{
+	int	 ii;
+	int	 scsi, lan, ctl, targ, dmp;
+	char	*drvname;
+	int	 len;
+
+	len = sprintf(buf, "%s-%s\n", "mptlinux", MPT_LINUX_VERSION_COMMON);
+	len += sprintf(buf+len, "  Fusion MPT base driver\n");
+
+	scsi = lan = ctl = targ = dmp = 0;
+	for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
+		drvname = NULL;
+		if (MptCallbacks[ii]) {
+			switch (MptDriverClass[ii]) {
+			case MPTSCSIH_DRIVER:
+				if (!scsi++) drvname = "SCSI host";
+				break;
+			case MPTLAN_DRIVER:
+				if (!lan++) drvname = "LAN";
+				break;
+			case MPTSTM_DRIVER:
+				if (!targ++) drvname = "SCSI target";
+				break;
+			case MPTCTL_DRIVER:
+				if (!ctl++) drvname = "ioctl";
+				break;
+			}
+
+			if (drvname)
+				len += sprintf(buf+len, "  Fusion MPT %s driver\n", drvname);
+		}
+	}
+
+	MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	procmpt_iocinfo_read - Handle read request from /proc/mpt/iocN/info.
+ *	@buf: Pointer to area to write information
+ *	@start: Pointer to start pointer
+ *	@offset: Offset to start writing
+ *	@request: Number of bytes requested by the reading process
+ *	@eof: Pointer to EOF integer
+ *	@data: Pointer to MPT_ADAPTER
+ *
+ *	Returns number of characters written to process performing the read.
+ */
+static int
+procmpt_iocinfo_read(char *buf, char **start, off_t offset, int request, int *eof, void *data)
+{
+	MPT_ADAPTER	*ioc = data;
+	int		 len;
+	char		 expVer[32];
+	int		 sz;
+	int		 p;
+
+	mpt_get_fw_exp_ver(expVer, ioc);
+
+	len = sprintf(buf, "%s:", ioc->name);
+	if (ioc->facts.Flags & MPI_IOCFACTS_FLAGS_FW_DOWNLOAD_BOOT)
+		len += sprintf(buf+len, "  (f/w download boot flag set)");
+//	if (ioc->facts.IOCExceptions & MPI_IOCFACTS_EXCEPT_CONFIG_CHECKSUM_FAIL)
+//		len += sprintf(buf+len, "  CONFIG_CHECKSUM_FAIL!");
+
+	len += sprintf(buf+len, "\n  ProductID = 0x%04x (%s)\n",
+			ioc->facts.ProductID,
+			ioc->prod_name);
+	len += sprintf(buf+len, "  FWVersion = 0x%08x%s", ioc->facts.FWVersion.Word, expVer);
+	if (ioc->facts.FWImageSize)
+		len += sprintf(buf+len, " (fw_size=%d)", ioc->facts.FWImageSize);
+	len += sprintf(buf+len, "\n  MsgVersion = 0x%04x\n", ioc->facts.MsgVersion);
+	len += sprintf(buf+len, "  FirstWhoInit = 0x%02x\n", ioc->FirstWhoInit);
+	len += sprintf(buf+len, "  EventState = 0x%02x\n", ioc->facts.EventState);
+
+	len += sprintf(buf+len, "  CurrentHostMfaHighAddr = 0x%08x\n",
+			ioc->facts.CurrentHostMfaHighAddr);
+	len += sprintf(buf+len, "  CurrentSenseBufferHighAddr = 0x%08x\n",
+			ioc->facts.CurrentSenseBufferHighAddr);
+
+	len += sprintf(buf+len, "  MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
+	len += sprintf(buf+len, "  MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
+
+	len += sprintf(buf+len, "  RequestFrames @ 0x%p (Dma @ 0x%p)\n",
+					(void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
+	/*
+	 *  Rounding UP to nearest 4-kB boundary here...
+	 */
+	sz = (ioc->req_sz * ioc->req_depth) + 128;
+	sz = ((sz + 0x1000UL - 1UL) / 0x1000) * 0x1000;
+	len += sprintf(buf+len, "    {CurReqSz=%d} x {CurReqDepth=%d} = %d bytes ^= 0x%x\n",
+					ioc->req_sz, ioc->req_depth, ioc->req_sz*ioc->req_depth, sz);
+	len += sprintf(buf+len, "    {MaxReqSz=%d}   {MaxReqDepth=%d}\n",
+					4*ioc->facts.RequestFrameSize,
+					ioc->facts.GlobalCredits);
+
+	len += sprintf(buf+len, "  Frames   @ 0x%p (Dma @ 0x%p)\n",
+					(void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
+	sz = (ioc->reply_sz * ioc->reply_depth) + 128;
+	len += sprintf(buf+len, "    {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
+					ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
+	len += sprintf(buf+len, "    {MaxRepSz=%d}   {MaxRepDepth=%d}\n",
+					ioc->facts.CurReplyFrameSize,
+					ioc->facts.ReplyQueueDepth);
+
+	len += sprintf(buf+len, "  MaxDevices = %d\n",
+			(ioc->facts.MaxDevices==0) ? 255 : ioc->facts.MaxDevices);
+	len += sprintf(buf+len, "  MaxBuses = %d\n", ioc->facts.MaxBuses);
+
+	/* per-port info */
+	for (p=0; p < ioc->facts.NumberOfPorts; p++) {
+		len += sprintf(buf+len, "  PortNumber = %d (of %d)\n",
+				p+1,
+				ioc->facts.NumberOfPorts);
+		if (ioc->bus_type == FC) {
+			if (ioc->pfacts[p].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN) {
+				u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+				len += sprintf(buf+len, "    LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+						a[5], a[4], a[3], a[2], a[1], a[0]);
+			}
+			len += sprintf(buf+len, "    WWN = %08X%08X:%08X%08X\n",
+					ioc->fc_port_page0[p].WWNN.High,
+					ioc->fc_port_page0[p].WWNN.Low,
+					ioc->fc_port_page0[p].WWPN.High,
+					ioc->fc_port_page0[p].WWPN.Low);
+		}
+	}
+
+	MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len);
+}
+
+#endif		/* CONFIG_PROC_FS } */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static void
+mpt_get_fw_exp_ver(char *buf, MPT_ADAPTER *ioc)
+{
+	buf[0] ='\0';
+	if ((ioc->facts.FWVersion.Word >> 24) == 0x0E) {
+		sprintf(buf, " (Exp %02d%02d)",
+			(ioc->facts.FWVersion.Word >> 16) & 0x00FF,	/* Month */
+			(ioc->facts.FWVersion.Word >> 8) & 0x1F);	/* Day */
+
+		/* insider hack! */
+		if ((ioc->facts.FWVersion.Word >> 8) & 0x80)
+			strcat(buf, " [MDBG]");
+	}
+}
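+
+/*
+ *	Worked example of the decoding above, using a hypothetical
+ *	FWVersion.Word value of 0x0E0C1500:
+ *	  bits 31-24 = 0x0E -> experimental firmware, so expVer is filled in
+ *	  bits 23-16 = 0x0C -> month = 12
+ *	  bits 12-8  = 0x15 -> day = 21
+ *	  bit  15    = 0    -> no " [MDBG]" suffix
+ *	yielding the string " (Exp 1221)".
+ */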
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_print_ioc_summary - Write ASCII summary of IOC to a buffer.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@buffer: Pointer to buffer where IOC summary info should be written
+ *	@size: Pointer to number of bytes we wrote (set by this routine)
+ *	@len: Offset at which to start writing in buffer
+ *	@showlan: Display LAN stuff?
+ *
+ *	This routine writes (English-readable) ASCII text, which represents
+ *	a summary of IOC information, to a buffer.
+ */
+void
+mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buffer, int *size, int len, int showlan)
+{
+	char expVer[32];
+	int y;
+
+	mpt_get_fw_exp_ver(expVer, ioc);
+
+	/*
+	 *  Shorter summary of attached ioc's...
+	 */
+	y = sprintf(buffer+len, "%s: %s, %s%08xh%s, Ports=%d, MaxQ=%d",
+			ioc->name,
+			ioc->prod_name,
+			MPT_FW_REV_MAGIC_ID_STRING,	/* "FwRev=" or somesuch */
+			ioc->facts.FWVersion.Word,
+			expVer,
+			ioc->facts.NumberOfPorts,
+			ioc->req_depth);
+
+	if (showlan && (ioc->pfacts[0].ProtocolFlags & MPI_PORTFACTS_PROTOCOL_LAN)) {
+		u8 *a = (u8*)&ioc->lan_cnfg_page1.HardwareAddressLow;
+		y += sprintf(buffer+len+y, ", LanAddr=%02X:%02X:%02X:%02X:%02X:%02X",
+			a[5], a[4], a[3], a[2], a[1], a[0]);
+	}
+
+#ifndef __sparc__
+	y += sprintf(buffer+len+y, ", IRQ=%d", ioc->pci_irq);
+#else
+	y += sprintf(buffer+len+y, ", IRQ=%s", __irq_itoa(ioc->pci_irq));
+#endif
+
+	if (!ioc->active)
+		y += sprintf(buffer+len+y, " (disabled)");
+
+	y += sprintf(buffer+len+y, "\n");
+
+	*size = y;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	Reset Handling
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mpt_HardResetHandler - Generic hard reset handler.  Notifies the
+ *	registered protocol drivers of the pending reset, then performs
+ *	IOC recovery (diagnostic reset and firmware reload).
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@sleepFlag: Indicates if sleep or schedule must be called.
+ *
+ *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ *	or a non-interrupt thread.  In the former, must not call schedule().
+ *
+ *	Remark: A return of -1 is a FATAL error case, as it means a
+ *	FW reload/initialization failed.
+ *
+ *	Returns 0 for SUCCESS or -1 if FAILED.
+ */
+int
+mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
+{
+	int		 rc;
+	unsigned long	 flags;
+
+	dtmprintk((MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name));
+#ifdef MFCNT
+	printk(MYIOC_s_INFO_FMT "HardResetHandler Entered!\n", ioc->name);
+	printk("MF count 0x%x !\n", ioc->mfcnt);
+#endif
+
+	/* Reset the adapter. Prevent more than 1 call to
+	 * mpt_do_ioc_recovery at any instant in time.
+	 */
+	spin_lock_irqsave(&ioc->diagLock, flags);
+	if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)){
+		spin_unlock_irqrestore(&ioc->diagLock, flags);
+		return 0;
+	} else {
+		ioc->diagPending = 1;
+	}
+	spin_unlock_irqrestore(&ioc->diagLock, flags);
+
+	/* FIXME: If do_ioc_recovery fails, repeat....
+	 */
+
+	/* The SCSI driver needs to adjust timeouts on all current
+	 * commands prior to the diagnostic reset being issued.
+	 * Prevents timeouts occurring during a diagnostic reset...very bad.
+	 * For all other protocol drivers, this is a no-op.
+	 */
+	{
+		int	 ii;
+		int	 r = 0;
+
+		for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
+			if (MptResetHandlers[ii]) {
+				dtmprintk((MYIOC_s_INFO_FMT "Calling IOC reset_setup handler #%d\n",
+						ioc->name, ii));
+				r += (*(MptResetHandlers[ii]))(ioc, MPT_IOC_SETUP_RESET);
+				if (ioc->alt_ioc) {
+					dtmprintk((MYIOC_s_INFO_FMT "Calling alt-%s setup reset handler #%d\n",
+							ioc->name, ioc->alt_ioc->name, ii));
+					r += (*(MptResetHandlers[ii]))(ioc->alt_ioc, MPT_IOC_SETUP_RESET);
+				}
+			}
+		}
+	}
+
+	if ((rc = mpt_do_ioc_recovery(ioc, MPT_HOSTEVENT_IOC_RECOVER, sleepFlag)) != 0) {
+		printk(KERN_WARNING MYNAM ": WARNING - (%d) Cannot recover %s\n",
+			rc, ioc->name);
+	}
+	ioc->reload_fw = 0;
+	if (ioc->alt_ioc)
+		ioc->alt_ioc->reload_fw = 0;
+
+	spin_lock_irqsave(&ioc->diagLock, flags);
+	ioc->diagPending = 0;
+	if (ioc->alt_ioc)
+		ioc->alt_ioc->diagPending = 0;
+	spin_unlock_irqrestore(&ioc->diagLock, flags);
+
+	dtmprintk((MYIOC_s_INFO_FMT "HardResetHandler rc = %d!\n", ioc->name, rc));
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static char *
+EventDescriptionStr(u8 event, u32 evData0)
+{
+	char *ds;
+
+	switch(event) {
+	case MPI_EVENT_NONE:
+		ds = "None";
+		break;
+	case MPI_EVENT_LOG_DATA:
+		ds = "Log Data";
+		break;
+	case MPI_EVENT_STATE_CHANGE:
+		ds = "State Change";
+		break;
+	case MPI_EVENT_UNIT_ATTENTION:
+		ds = "Unit Attention";
+		break;
+	case MPI_EVENT_IOC_BUS_RESET:
+		ds = "IOC Bus Reset";
+		break;
+	case MPI_EVENT_EXT_BUS_RESET:
+		ds = "External Bus Reset";
+		break;
+	case MPI_EVENT_RESCAN:
+		ds = "Bus Rescan Event";
+		/* Ok, do we need to do anything here? As far as
+		   I can tell, this is when a new device gets added
+		   to the loop. */
+		break;
+	case MPI_EVENT_LINK_STATUS_CHANGE:
+		if (evData0 == MPI_EVENT_LINK_STATUS_FAILURE)
+			ds = "Link Status(FAILURE) Change";
+		else
+			ds = "Link Status(ACTIVE) Change";
+		break;
+	case MPI_EVENT_LOOP_STATE_CHANGE:
+		if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LIP)
+			ds = "Loop State(LIP) Change";
+		else if (evData0 == MPI_EVENT_LOOP_STATE_CHANGE_LPE)
+			ds = "Loop State(LPE) Change";			/* ??? */
+		else
+			ds = "Loop State(LPB) Change";			/* ??? */
+		break;
+	case MPI_EVENT_LOGOUT:
+		ds = "Logout";
+		break;
+	case MPI_EVENT_EVENT_CHANGE:
+		if (evData0)
+			ds = "Events(ON) Change";
+		else
+			ds = "Events(OFF) Change";
+		break;
+	case MPI_EVENT_INTEGRATED_RAID:
+		ds = "Integrated Raid";
+		break;
+	/*
+	 *  MPT base "custom" events may be added here...
+	 */
+	default:
+		ds = "Unknown";
+		break;
+	}
+	return ds;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	ProcessEventNotification - Route a received EventNotificationReply to
+ *	all currently registered event handlers.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@pEventReply: Pointer to EventNotification reply frame
+ *	@evHandlers: Pointer to integer, number of event handlers
+ *
+ *	Returns sum of event handlers return values.
+ */
+static int
+ProcessEventNotification(MPT_ADAPTER *ioc, EventNotificationReply_t *pEventReply, int *evHandlers)
+{
+	u16 evDataLen;
+	u32 evData0 = 0;
+//	u32 evCtx;
+	int ii;
+	int r = 0;
+	int handlers = 0;
+	char *evStr;
+	u8 event;
+
+	/*
+	 *  Do platform normalization of values
+	 */
+	event = le32_to_cpu(pEventReply->Event) & 0xFF;
+//	evCtx = le32_to_cpu(pEventReply->EventContext);
+	evDataLen = le16_to_cpu(pEventReply->EventDataLength);
+	if (evDataLen) {
+		evData0 = le32_to_cpu(pEventReply->Data[0]);
+	}
+
+	evStr = EventDescriptionStr(event, evData0);
+	devtprintk((MYIOC_s_INFO_FMT "MPT event (%s=%02Xh) detected!\n",
+			ioc->name,
+			evStr,
+			event));
+
+#if defined(MPT_DEBUG) || defined(MPT_DEBUG_EVENTS)
+	printk(KERN_INFO MYNAM ": Event data:\n" KERN_INFO);
+	for (ii = 0; ii < evDataLen; ii++)
+		printk(" %08x", le32_to_cpu(pEventReply->Data[ii]));
+	printk("\n");
+#endif
+
+	/*
+	 *  Do general / base driver event processing
+	 */
+	switch(event) {
+	case MPI_EVENT_NONE:			/* 00 */
+	case MPI_EVENT_LOG_DATA:		/* 01 */
+	case MPI_EVENT_STATE_CHANGE:		/* 02 */
+	case MPI_EVENT_UNIT_ATTENTION:		/* 03 */
+	case MPI_EVENT_IOC_BUS_RESET:		/* 04 */
+	case MPI_EVENT_EXT_BUS_RESET:		/* 05 */
+	case MPI_EVENT_RESCAN:			/* 06 */
+	case MPI_EVENT_LINK_STATUS_CHANGE:	/* 07 */
+	case MPI_EVENT_LOOP_STATE_CHANGE:	/* 08 */
+	case MPI_EVENT_LOGOUT:			/* 09 */
+	case MPI_EVENT_INTEGRATED_RAID:		/* 0B */
+	case MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE:	/* 0C */
+	default:
+		break;
+	case MPI_EVENT_EVENT_CHANGE:		/* 0A */
+		if (evDataLen) {
+			u8 evState = evData0 & 0xFF;
+
+			/* CHECKME! What if evState unexpectedly says OFF (0)? */
+
+			/* Update EventState field in cached IocFacts */
+			if (ioc->facts.Function) {
+				ioc->facts.EventState = evState;
+			}
+		}
+		break;
+	}
+
+	/*
+	 * Should this event be logged? Events are written sequentially.
+	 * When buffer is full, start again at the top.
+	 */
+	if (ioc->events && (ioc->eventTypes & ( 1 << event))) {
+		int idx;
+
+		idx = ioc->eventContext % ioc->eventLogSize;
+
+		ioc->events[idx].event = event;
+		ioc->events[idx].eventContext = ioc->eventContext;
+
+		for (ii = 0; ii < 2; ii++) {
+			if (ii < evDataLen)
+				ioc->events[idx].data[ii] = le32_to_cpu(pEventReply->Data[ii]);
+			else
+				ioc->events[idx].data[ii] =  0;
+		}
+
+		ioc->eventContext++;
+	}
+
+
+	/*
+	 *  Call each currently registered protocol event handler.
+	 */
+	for (ii=MPT_MAX_PROTOCOL_DRIVERS-1; ii; ii--) {
+		if (MptEvHandlers[ii]) {
+			devtprintk((MYIOC_s_INFO_FMT "Routing Event to event handler #%d\n",
+					ioc->name, ii));
+			r += (*(MptEvHandlers[ii]))(ioc, pEventReply);
+			handlers++;
+		}
+	}
+	/* FIXME?  Examine results here? */
+
+	/*
+	 *  If needed, send (a single) EventAck.
+	 */
+	if (pEventReply->AckRequired == MPI_EVENT_NOTIFICATION_ACK_REQUIRED) {
+		if ((ii = SendEventAck(ioc, pEventReply)) != 0) {
+			devtprintk((MYIOC_s_WARN_FMT "SendEventAck returned %d\n",
+					ioc->name, ii));
+		}
+	}
+
+	*evHandlers = handlers;
+	return r;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_fc_log_info - Log information returned from Fibre Channel IOC.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@log_info: U32 LogInfo reply word from the IOC
+ *
+ *	Refer to lsi/fc_log.h.
+ */
+static void
+mpt_fc_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+	static char *subcl_str[8] = {
+		"FCP Initiator", "FCP Target", "LAN", "MPI Message Layer",
+		"FC Link", "Context Manager", "Invalid Field Offset", "State Change Info"
+	};
+	u8 subcl = (log_info >> 24) & 0x7;
+
+	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): SubCl={%s}\n",
+			ioc->name, log_info, subcl_str[subcl]);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_sp_log_info - Log information returned from SCSI Parallel IOC.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mr: Pointer to MPT reply frame
+ *	@log_info: U32 LogInfo word from the IOC
+ *
+ *	Refer to lsi/sp_log.h.
+ */
+static void
+mpt_sp_log_info(MPT_ADAPTER *ioc, u32 log_info)
+{
+	u32 info = log_info & 0x00FF0000;
+	char *desc = "unknown";
+
+	switch (info) {
+	case 0x00010000:
+		desc = "bug! MID not found";
+		if (ioc->reload_fw == 0)
+			ioc->reload_fw++;
+		break;
+
+	case 0x00020000:
+		desc = "Parity Error";
+		break;
+
+	case 0x00030000:
+		desc = "ASYNC Outbound Overrun";
+		break;
+
+	case 0x00040000:
+		desc = "SYNC Offset Error";
+		break;
+
+	case 0x00050000:
+		desc = "BM Change";
+		break;
+
+	case 0x00060000:
+		desc = "Msg In Overflow";
+		break;
+
+	case 0x00070000:
+		desc = "DMA Error";
+		break;
+
+	case 0x00080000:
+		desc = "Outbound DMA Overrun";
+		break;
+
+	case 0x00090000:
+		desc = "Task Management";
+		break;
+
+	case 0x000A0000:
+		desc = "Device Problem";
+		break;
+
+	case 0x000B0000:
+		desc = "Invalid Phase Change";
+		break;
+
+	case 0x000C0000:
+		desc = "Untagged Table Size";
+		break;
+
+	}
+
+	printk(MYIOC_s_INFO_FMT "LogInfo(0x%08x): F/W: %s\n", ioc->name, log_info, desc);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mpt_sp_ioc_info - IOC information returned from SCSI Parallel IOC.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@ioc_status: U32 IOCStatus word from IOC
+ *	@mf: Pointer to MPT request frame
+ *
+ *	Refer to lsi/mpi.h.
+ */
+static void
+mpt_sp_ioc_info(MPT_ADAPTER *ioc, u32 ioc_status, MPT_FRAME_HDR *mf)
+{
+	u32 status = ioc_status & MPI_IOCSTATUS_MASK;
+	char *desc = "";
+
+	switch (status) {
+	case MPI_IOCSTATUS_INVALID_FUNCTION: /* 0x0001 */
+		desc = "Invalid Function";
+		break;
+
+	case MPI_IOCSTATUS_BUSY: /* 0x0002 */
+		desc = "Busy";
+		break;
+
+	case MPI_IOCSTATUS_INVALID_SGL: /* 0x0003 */
+		desc = "Invalid SGL";
+		break;
+
+	case MPI_IOCSTATUS_INTERNAL_ERROR: /* 0x0004 */
+		desc = "Internal Error";
+		break;
+
+	case MPI_IOCSTATUS_RESERVED: /* 0x0005 */
+		desc = "Reserved";
+		break;
+
+	case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES: /* 0x0006 */
+		desc = "Insufficient Resources";
+		break;
+
+	case MPI_IOCSTATUS_INVALID_FIELD: /* 0x0007 */
+		desc = "Invalid Field";
+		break;
+
+	case MPI_IOCSTATUS_INVALID_STATE: /* 0x0008 */
+		desc = "Invalid State";
+		break;
+
+	case MPI_IOCSTATUS_CONFIG_INVALID_ACTION: /* 0x0020 */
+	case MPI_IOCSTATUS_CONFIG_INVALID_TYPE:   /* 0x0021 */
+	case MPI_IOCSTATUS_CONFIG_INVALID_PAGE:   /* 0x0022 */
+	case MPI_IOCSTATUS_CONFIG_INVALID_DATA:   /* 0x0023 */
+	case MPI_IOCSTATUS_CONFIG_NO_DEFAULTS:    /* 0x0024 */
+	case MPI_IOCSTATUS_CONFIG_CANT_COMMIT:    /* 0x0025 */
+		/* No message for Config IOCStatus values */
+		break;
+
+	case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR: /* 0x0040 */
+		/* No message for recovered error
+		desc = "SCSI Recovered Error";
+		*/
+		break;
+
+	case MPI_IOCSTATUS_SCSI_INVALID_BUS: /* 0x0041 */
+		desc = "SCSI Invalid Bus";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_INVALID_TARGETID: /* 0x0042 */
+		desc = "SCSI Invalid TargetID";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE: /* 0x0043 */
+	  {
+		SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
+		U8 cdb = pScsiReq->CDB[0];
+		if (cdb != 0x12) { /* Inquiry is issued for device scanning */
+			desc = "SCSI Device Not There";
+		}
+		break;
+	  }
+
+	case MPI_IOCSTATUS_SCSI_DATA_OVERRUN: /* 0x0044 */
+		desc = "SCSI Data Overrun";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN: /* 0x0045 */
+		/* This error is checked in scsi_io_done(). Skip. 
+		desc = "SCSI Data Underrun";
+		*/
+		break;
+
+	case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR: /* 0x0046 */
+		desc = "SCSI I/O Data Error";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR: /* 0x0047 */
+		desc = "SCSI Protocol Error";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_TASK_TERMINATED: /* 0x0048 */
+		desc = "SCSI Task Terminated";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH: /* 0x0049 */
+		desc = "SCSI Residual Mismatch";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED: /* 0x004A */
+		desc = "SCSI Task Management Failed";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_IOC_TERMINATED: /* 0x004B */
+		desc = "SCSI IOC Terminated";
+		break;
+
+	case MPI_IOCSTATUS_SCSI_EXT_TERMINATED: /* 0x004C */
+		desc = "SCSI Ext Terminated";
+		break;
+
+	default:
+		desc = "Others";
+		break;
+	}
+	if (desc[0] != '\0')
+		printk(MYIOC_s_INFO_FMT "IOCStatus(0x%04x): %s\n", ioc->name, status, desc);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+EXPORT_SYMBOL(ioc_list);
+EXPORT_SYMBOL(mpt_proc_root_dir);
+EXPORT_SYMBOL(mpt_register);
+EXPORT_SYMBOL(mpt_deregister);
+EXPORT_SYMBOL(mpt_event_register);
+EXPORT_SYMBOL(mpt_event_deregister);
+EXPORT_SYMBOL(mpt_reset_register);
+EXPORT_SYMBOL(mpt_reset_deregister);
+EXPORT_SYMBOL(mpt_device_driver_register);
+EXPORT_SYMBOL(mpt_device_driver_deregister);
+EXPORT_SYMBOL(mpt_get_msg_frame);
+EXPORT_SYMBOL(mpt_put_msg_frame);
+EXPORT_SYMBOL(mpt_free_msg_frame);
+EXPORT_SYMBOL(mpt_add_sge);
+EXPORT_SYMBOL(mpt_send_handshake_request);
+EXPORT_SYMBOL(mpt_verify_adapter);
+EXPORT_SYMBOL(mpt_GetIocState);
+EXPORT_SYMBOL(mpt_print_ioc_summary);
+EXPORT_SYMBOL(mpt_lan_index);
+EXPORT_SYMBOL(mpt_stm_index);
+EXPORT_SYMBOL(mpt_HardResetHandler);
+EXPORT_SYMBOL(mpt_config);
+EXPORT_SYMBOL(mpt_toolbox);
+EXPORT_SYMBOL(mpt_findImVolumes);
+EXPORT_SYMBOL(mpt_read_ioc_pg_3);
+EXPORT_SYMBOL(mpt_alloc_fw_memory);
+EXPORT_SYMBOL(mpt_free_fw_memory);
+
+static struct pci_driver mptbase_driver = {
+	.name		= "mptbase",
+	.id_table	= mptbase_pci_table,
+	.probe		= mptbase_probe,
+	.remove		= __devexit_p(mptbase_remove),
+	.driver         = {
+		.shutdown = mptbase_shutdown,
+        },
+#ifdef CONFIG_PM
+	.suspend	= mptbase_suspend,
+	.resume		= mptbase_resume,
+#endif
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	fusion_init - Fusion MPT base driver initialization routine.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int __init
+fusion_init(void)
+{
+	int i;
+	int r;
+
+	show_mptmod_ver(my_NAME, my_VERSION);
+	printk(KERN_INFO COPYRIGHT "\n");
+
+	for (i = 0; i < MPT_MAX_PROTOCOL_DRIVERS; i++) {
+		MptCallbacks[i] = NULL;
+		MptDriverClass[i] = MPTUNKNOWN_DRIVER;
+		MptEvHandlers[i] = NULL;
+		MptResetHandlers[i] = NULL;
+	}
+
+	/* NEW!  20010120 -sralston
+	 *  Register ourselves (mptbase) in order to facilitate
+	 *  EventNotification handling.
+	 */
+	mpt_base_index = mpt_register(mpt_base_reply, MPTBASE_DRIVER);
+
+	/* Register for hard reset handling callbacks.
+	 */
+	if (mpt_reset_register(mpt_base_index, mpt_ioc_reset) == 0) {
+		dprintk((KERN_INFO MYNAM ": Register for IOC reset notification\n"));
+	} else {
+		/* FIXME! */
+	}
+
+#ifdef CONFIG_PROC_FS
+	(void) procmpt_create();
+#endif
+	r = pci_register_driver(&mptbase_driver);
+
+	return r;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	fusion_exit - Perform driver unload cleanup.
+ *
+ *	This routine frees all resources associated with each MPT adapter
+ *	and removes all %MPT_PROCFS_MPTBASEDIR entries.
+ */
+static void __exit
+fusion_exit(void)
+{
+
+	dexitprintk((KERN_INFO MYNAM ": fusion_exit() called!\n"));
+
+	pci_unregister_driver(&mptbase_driver);
+	mpt_reset_deregister(mpt_base_index);
+
+#ifdef CONFIG_PROC_FS
+	procmpt_destroy();
+#endif
+}
+
+
+module_init(fusion_init);
+module_exit(fusion_exit);
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
new file mode 100644
index 0000000..be67307
--- /dev/null
+++ b/drivers/message/fusion/mptbase.h
@@ -0,0 +1,1021 @@
+/*
+ *  linux/drivers/message/fusion/mptbase.h
+ *      High performance SCSI + LAN / Fibre Channel device drivers.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *     (see mptbase.c)
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Originally By: Steven J. Ralston
+ *  (mailto:sjralston1@netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptbase.h,v 1.144 2003/01/28 21:31:56 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#ifndef MPTBASE_H_INCLUDED
+#define MPTBASE_H_INCLUDED
+/*{-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/version.h>
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include "lsi/mpi_type.h"
+#include "lsi/mpi.h"		/* Fusion MPI(nterface) basic defs */
+#include "lsi/mpi_ioc.h"	/* Fusion MPT IOC(ontroller) defs */
+#include "lsi/mpi_cnfg.h"	/* IOC configuration support */
+#include "lsi/mpi_init.h"	/* SCSI Host (initiator) protocol support */
+#include "lsi/mpi_lan.h"	/* LAN over FC protocol support */
+#include "lsi/mpi_raid.h"	/* Integrated Mirroring support */
+
+#include "lsi/mpi_fc.h"		/* Fibre Channel (lowlevel) support */
+#include "lsi/mpi_targ.h"	/* SCSI/FCP Target protcol support */
+#include "lsi/mpi_tool.h"	/* Tools support */
+#include "lsi/fc_log.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#ifndef MODULEAUTHOR
+#define MODULEAUTHOR	"LSI Logic Corporation"
+#endif
+
+#ifndef COPYRIGHT
+#define COPYRIGHT	"Copyright (c) 1999-2004 " MODULEAUTHOR
+#endif
+
+#define MPT_LINUX_VERSION_COMMON	"3.01.20"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.01.20"
+#define WHAT_MAGIC_STRING		"@" "(" "#" ")"
+
+#define show_mptmod_ver(s,ver)  \
+	printk(KERN_INFO "%s %s\n", s, ver);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Fusion MPT(linux) driver configurable stuff...
+ */
+#define MPT_MAX_ADAPTERS		18
+#define MPT_MAX_PROTOCOL_DRIVERS	16
+#define MPT_MAX_BUS			1	/* Do not change */
+#define MPT_MAX_FC_DEVICES		255
+#define MPT_MAX_SCSI_DEVICES		16
+#define MPT_LAST_LUN			255
+#define MPT_SENSE_BUFFER_ALLOC		64
+	/* allow for 256 max sense alloc, but only 255 max request */
+#if MPT_SENSE_BUFFER_ALLOC >= 256
+#	undef MPT_SENSE_BUFFER_ALLOC
+#	define MPT_SENSE_BUFFER_ALLOC	256
+#	define MPT_SENSE_BUFFER_SIZE	255
+#else
+#	define MPT_SENSE_BUFFER_SIZE	MPT_SENSE_BUFFER_ALLOC
+#endif
+
+#define MPT_NAME_LENGTH			32
+
+#define MPT_PROCFS_MPTBASEDIR		"mpt"
+						/* chg it to "driver/fusion" ? */
+#define MPT_PROCFS_SUMMARY_ALL_NODE		MPT_PROCFS_MPTBASEDIR "/summary"
+#define MPT_PROCFS_SUMMARY_ALL_PATHNAME		"/proc/" MPT_PROCFS_SUMMARY_ALL_NODE
+#define MPT_FW_REV_MAGIC_ID_STRING		"FwRev="
+
+#define  MPT_MAX_REQ_DEPTH		1023
+#define  MPT_DEFAULT_REQ_DEPTH		256
+#define  MPT_MIN_REQ_DEPTH		128
+
+#define  MPT_MAX_REPLY_DEPTH		MPT_MAX_REQ_DEPTH
+#define  MPT_DEFAULT_REPLY_DEPTH	128
+#define  MPT_MIN_REPLY_DEPTH		8
+#define  MPT_MAX_REPLIES_PER_ISR	32
+
+#define  MPT_MAX_FRAME_SIZE		128
+#define  MPT_DEFAULT_FRAME_SIZE		128
+
+#define  MPT_REPLY_FRAME_SIZE		0x40  /* Must be a multiple of 8 */
+
+#define  MPT_SG_REQ_128_SCALE		1
+#define  MPT_SG_REQ_96_SCALE		2
+#define  MPT_SG_REQ_64_SCALE		4
+
+#define	 CAN_SLEEP			1
+#define  NO_SLEEP			0
+
+#define MPT_COALESCING_TIMEOUT		0x10
+
+/*
+ * SCSI transfer rate defines.
+ */
+#define MPT_ULTRA320			0x08
+#define MPT_ULTRA160			0x09
+#define MPT_ULTRA2			0x0A
+#define MPT_ULTRA			0x0C
+#define MPT_FAST			0x19
+#define MPT_SCSI			0x32
+#define MPT_ASYNC			0xFF
+
+#define MPT_NARROW			0
+#define MPT_WIDE			1
+
+#define C0_1030				0x08
+#define XL_929				0x01
+
+
+/*
+ *	Try to keep these at 2^N-1
+ */
+#define MPT_FC_CAN_QUEUE	127
+#define MPT_SCSI_CAN_QUEUE	127
+
+/*
+ * Set the MAX_SGE value based on user input.
+ */
+#ifdef  CONFIG_FUSION_MAX_SGE
+#if     CONFIG_FUSION_MAX_SGE  < 16
+#define MPT_SCSI_SG_DEPTH	16
+#elif   CONFIG_FUSION_MAX_SGE  > 128
+#define MPT_SCSI_SG_DEPTH	128
+#else
+#define MPT_SCSI_SG_DEPTH	CONFIG_FUSION_MAX_SGE
+#endif
+#else
+#define MPT_SCSI_SG_DEPTH	40
+#endif
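+
+/*
+ *	For example (values hypothetical): CONFIG_FUSION_MAX_SGE=8 yields
+ *	MPT_SCSI_SG_DEPTH 16, =40 yields 40, and =200 is clamped to 128;
+ *	when the option is absent the depth defaults to 40.
+ */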
+
+#ifdef __KERNEL__	/* { */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/proc_fs.h>
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Attempt semi-consistent error & warning msgs across
+ * MPT drivers.  NOTE: Users of these macro defs must
+ * themselves define their own MYNAM.
+ */
+#define MYIOC_s_INFO_FMT		KERN_INFO MYNAM ": %s: "
+#define MYIOC_s_NOTE_FMT		KERN_NOTICE MYNAM ": %s: "
+#define MYIOC_s_WARN_FMT		KERN_WARNING MYNAM ": %s: WARNING - "
+#define MYIOC_s_ERR_FMT			KERN_ERR MYNAM ": %s: ERROR - "
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  MPT protocol driver defs...
+ */
+typedef enum {
+	MPTBASE_DRIVER,		/* MPT base class */
+	MPTCTL_DRIVER,		/* MPT ioctl class */
+	MPTSCSIH_DRIVER,	/* MPT SCSI host (initiator) class */
+	MPTLAN_DRIVER,		/* MPT LAN class */
+	MPTSTM_DRIVER,		/* MPT SCSI target mode class */
+	MPTUNKNOWN_DRIVER
+} MPT_DRIVER_CLASS;
+
+struct mpt_pci_driver{
+	int  (*probe) (struct pci_dev *dev, const struct pci_device_id *id);
+	void (*remove) (struct pci_dev *dev);
+	void (*shutdown) (struct device * dev);
+#ifdef CONFIG_PM
+	int  (*resume) (struct pci_dev *dev);
+	int  (*suspend) (struct pci_dev *dev, u32 state);
+#endif
+};
+
+/*
+ *  MPT adapter / port / bus / device info structures...
+ */
+
+typedef union _MPT_FRAME_TRACKER {
+	struct {
+		struct list_head	list;
+		u32			 arg1;
+		u32			 pad;
+		void			*argp1;
+	} linkage;
+	/*
+	 * NOTE: When request frames are free, only the linkage structure
+	 * contents are valid.  All other values are invalid.
+	 * In particular, do NOT rely on offset [2]
+	 * (in words) being the message context.
+	 * The message context must be reset (computed via base address
+	 * + an offset) prior to issuing any command.
+	 *
+	 * NOTE2: On non-32-bit systems, where pointers are LARGE,
+	 * using the linkage pointers destroys our sacred MsgContext
+	 * field contents.  But we don't care anymore because these
+	 * are now reset in mpt_put_msg_frame() just prior to sending
+	 * a request off to the IOC.
+	 */
+	struct {
+		u32 __hdr[2];
+		/*
+		 * The following _MUST_ match the location of the
+		 * MsgContext field in the MPT message headers.
+		 */
+		union {
+			u32		 MsgContext;
+			struct {
+				u16	 req_idx;	/* Request index */
+				u8	 cb_idx;	/* callback function index */
+				u8	 rsvd;
+			} fld;
+		} msgctxu;
+	} hwhdr;
+	/*
+	 * Remark: 32 bit identifier:
+	 *  31-24: reserved
+	 *  23-16: call back index
+	 *  15-0 : request index
+	 */
+} MPT_FRAME_TRACKER;
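+
+/*
+ *	Worked example of the identifier layout noted above (values are
+ *	hypothetical): a request frame at request index 0x0005, owned by the
+ *	protocol driver registered at callback index 0x02, carries
+ *	MsgContext = 0x00020005; equivalently fld.req_idx = 0x0005 and
+ *	fld.cb_idx = 0x02.
+ */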
+
+/*
+ *  We might want to view/access a frame as:
+ *    1) generic request header
+ *    2) SCSIIORequest
+ *    3) SCSIIOReply
+ *    4) MPIDefaultReply
+ *    5) frame tracker
+ */
+typedef struct _MPT_FRAME_HDR {
+	union {
+		MPIHeader_t		hdr;
+		SCSIIORequest_t		scsireq;
+		SCSIIOReply_t		sreply;
+		ConfigReply_t		configreply;
+		MPIDefaultReply_t	reply;
+		MPT_FRAME_TRACKER	frame;
+	} u;
+} MPT_FRAME_HDR;
+
+#define MPT_REQ_MSGFLAGS_DROPME		0x80
+
+typedef struct _MPT_SGL_HDR {
+	SGESimple32_t	 sge[1];
+} MPT_SGL_HDR;
+
+typedef struct _MPT_SGL64_HDR {
+	SGESimple64_t	 sge[1];
+} MPT_SGL64_HDR;
+
+/*
+ *  System interface register set
+ */
+
+typedef struct _SYSIF_REGS
+{
+	u32	Doorbell;	/* 00     System<->IOC Doorbell reg  */
+	u32	WriteSequence;	/* 04     Write Sequence register    */
+	u32	Diagnostic;	/* 08     Diagnostic register        */
+	u32	TestBase;	/* 0C     Test Base Address          */
+	u32	DiagRwData;	/* 10     Read Write Data (fw download)   */
+	u32	DiagRwAddress;	/* 14     Read Write Address (fw download)*/
+	u32	Reserved1[6];	/* 18-2F  reserved for future use    */
+	u32	IntStatus;	/* 30     Interrupt Status           */
+	u32	IntMask;	/* 34     Interrupt Mask             */
+	u32	Reserved2[2];	/* 38-3F  reserved for future use    */
+	u32	RequestFifo;	/* 40     Request Post/Free FIFO     */
+	u32	ReplyFifo;	/* 44     Reply   Post/Free FIFO     */
+	u32	Reserved3[2];	/* 48-4F  reserved for future use    */
+	u32	HostIndex;	/* 50     Host Index register        */
+	u32	Reserved4[15];	/* 54-8F                             */
+	u32	Fubar;		/* 90     For Fubar usage            */
+	u32	Reserved5[27];	/* 94-FF                             */
+} SYSIF_REGS;
+
+/*
+ * NOTE: Use MPI_{DOORBELL,WRITESEQ,DIAG}_xxx defs in lsi/mpi.h
+ * in conjunction with SYSIF_REGS accesses!
+ */
+
+
+/*
+ *	Dynamic Multi-Pathing specific stuff...
+ */
+
+/* VirtDevice negoFlags field */
+#define MPT_TARGET_NO_NEGO_WIDE		0x01
+#define MPT_TARGET_NO_NEGO_SYNC		0x02
+#define MPT_TARGET_NO_NEGO_QAS		0x04
+#define MPT_TAPE_NEGO_IDP     		0x08
+
+/*
+ *	VirtDevice - FC LUN device or SCSI target device
+ */
+typedef struct _VirtDevice {
+	struct scsi_device	*device;
+	u8			 tflags;
+	u8			 ioc_id;
+	u8			 target_id;
+	u8			 bus_id;
+	u8			 minSyncFactor;	/* 0xFF is async */
+	u8			 maxOffset;	/* 0 if async */
+	u8			 maxWidth;	/* 0 if narrow, 1 if wide */
+	u8			 negoFlags;	/* bit field, see above */
+	u8			 raidVolume;	/* set, if RAID Volume */
+	u8			 type;		/* byte 0 of Inquiry data */
+	u8			 cflags;	/* controller flags */
+	u8			 rsvd1raid;
+	u16			 fc_phys_lun;
+	u16			 fc_xlat_lun;
+	u32			 num_luns;
+	u32			 luns[8];		/* Max LUNs is 256 */
+	u8			 pad[4];
+	u8			 inq_data[8];
+		/* IEEE Registered Extended Identifier
+		   obtained via INQUIRY VPD page 0x83 */
+		/* NOTE: Do not separate uniq_prepad and uniq_data
+		   as they are treated as a single entity in the code */
+	u8			 uniq_prepad[8];
+	u8			 uniq_data[20];
+	u8			 pad2[4];
+} VirtDevice;
+
+/*
+ *  Fibre Channel (SCSI) target device and associated defines...
+ */
+#define MPT_TARGET_DEFAULT_DV_STATUS	0x00
+#define MPT_TARGET_FLAGS_VALID_NEGO	0x01
+#define MPT_TARGET_FLAGS_VALID_INQUIRY	0x02
+#define MPT_TARGET_FLAGS_Q_YES		0x08
+#define MPT_TARGET_FLAGS_VALID_56	0x10
+#define MPT_TARGET_FLAGS_SAF_TE_ISSUED	0x20
+
+/*
+ *	/proc/mpt interface
+ */
+typedef struct {
+	const char	*name;
+	mode_t		 mode;
+	int		 pad;
+	read_proc_t	*read_proc;
+	write_proc_t	*write_proc;
+} mpt_proc_entry_t;
+
+#define MPT_PROC_READ_RETURN(buf,start,offset,request,eof,len) \
+do { \
+	len -= offset;			\
+	if (len < request) {		\
+		*eof = 1;		\
+		if (len <= 0)		\
+			return 0;	\
+	} else				\
+		len = request;		\
+	*start = buf + offset;		\
+	return len;			\
+} while (0)
+
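+/*
+ * Illustrative sketch (not part of the original source): a classic
+ * read_proc handler would typically format its output into "buf" and
+ * then let MPT_PROC_READ_RETURN handle the offset/eof bookkeeping:
+ *
+ *	static int mpt_foo_read(char *buf, char **start, off_t offset,
+ *				int request, int *eof, void *data)
+ *	{
+ *		int len = sprintf(buf, "example\n");
+ *		MPT_PROC_READ_RETURN(buf, start, offset, request, eof, len);
+ *	}
+ *
+ * ("mpt_foo_read" is a hypothetical name used only for illustration.)
+ */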
+
+/*
+ *	IOCTL structure and associated defines
+ */
+
+#define MPT_IOCTL_STATUS_DID_IOCRESET	0x01	/* IOC Reset occurred on the current cmd */
+#define MPT_IOCTL_STATUS_RF_VALID	0x02	/* The Reply Frame is VALID */
+#define MPT_IOCTL_STATUS_TIMER_ACTIVE	0x04	/* The timer is running */
+#define MPT_IOCTL_STATUS_SENSE_VALID	0x08	/* Sense data is valid */
+#define MPT_IOCTL_STATUS_COMMAND_GOOD	0x10	/* Command Status GOOD */
+#define MPT_IOCTL_STATUS_TMTIMER_ACTIVE	0x20	/* The TM timer is running */
+#define MPT_IOCTL_STATUS_TM_FAILED	0x40	/* User TM request failed */
+
+#define MPTCTL_RESET_OK			0x01	/* Issue Bus Reset */
+
+typedef struct _MPT_IOCTL {
+	struct _MPT_ADAPTER	*ioc;
+	u8			 ReplyFrame[MPT_DEFAULT_FRAME_SIZE];	/* reply frame data */
+	u8			 sense[MPT_SENSE_BUFFER_ALLOC];
+	int			 wait_done;	/* wake-up value for this ioc */
+	u8			 rsvd;
+	u8			 status;	/* current command status */
+	u8			 reset;		/* 1 if bus reset allowed */
+	u8			 target;	/* target for reset */
+	struct semaphore	 sem_ioc;
+} MPT_IOCTL;
+
+/*
+ *  Event Structure and define
+ */
+#define MPTCTL_EVENT_LOG_SIZE		(0x0000000A)
+typedef struct _mpt_ioctl_events {
+	u32	event;		/* Specified by define above */
+	u32	eventContext;	/* Index or counter */
+	int	data[2];	/* First 8 bytes of Event Data */
+} MPT_IOCTL_EVENTS;
+
+/*
+ * CONFIGPARM status  defines
+ */
+#define MPT_CONFIG_GOOD		MPI_IOCSTATUS_SUCCESS
+#define MPT_CONFIG_ERROR	0x002F
+
+/*
+ *	Substructure to store SCSI specific configuration page data
+ */
+						/* dvStatus defines: */
+#define MPT_SCSICFG_NEGOTIATE		0x01	/* Negotiate on next IO */
+#define MPT_SCSICFG_NEED_DV		0x02	/* Schedule DV */
+#define MPT_SCSICFG_DV_PENDING		0x04	/* DV on this physical id pending */
+#define MPT_SCSICFG_DV_NOT_DONE		0x08	/* DV has not been performed */
+#define MPT_SCSICFG_BLK_NEGO		0x10	/* WriteSDP1 with WDTR and SDTR disabled */
+#define MPT_SCSICFG_RELOAD_IOC_PG3	0x20	/* IOC Pg 3 data is obsolete */
+						/* Args passed to writeSDP1: */
+#define MPT_SCSICFG_USE_NVRAM		0x01	/* WriteSDP1 using NVRAM */
+#define MPT_SCSICFG_ALL_IDS		0x02	/* WriteSDP1 to all IDS */
+/* #define MPT_SCSICFG_BLK_NEGO		0x10	   WriteSDP1 with WDTR and SDTR disabled */
+
+typedef	struct _ScsiCfgData {
+	u32		 PortFlags;
+	int		*nvram;			/* table of device NVRAM values */
+	IOCPage2_t	*pIocPg2;		/* table of Raid Volumes */
+	IOCPage3_t	*pIocPg3;		/* table of physical disks */
+	IOCPage4_t	*pIocPg4;		/* SEP devices addressing */
+	dma_addr_t	 IocPg4_dma;		/* Phys Addr of IOCPage4 data */
+	int		 IocPg4Sz;		/* IOCPage4 size */
+	u8		 dvStatus[MPT_MAX_SCSI_DEVICES];
+	int		 isRaid;		/* bit field, 1 if RAID */
+	u8		 minSyncFactor;		/* 0xFF if async */
+	u8		 maxSyncOffset;		/* 0 if async */
+	u8		 maxBusWidth;		/* 0 if narrow, 1 if wide */
+	u8		 busType;		/* SE, LVD, HD */
+	u8		 sdp1version;		/* SDP1 version */
+	u8		 sdp1length;		/* SDP1 length  */
+	u8		 sdp0version;		/* SDP0 version */
+	u8		 sdp0length;		/* SDP0 length  */
+	u8		 dvScheduled;		/* 1 if scheduled */
+	u8		 forceDv;		/* 1 to force DV scheduling */
+	u8		 noQas;			/* Disable QAS for this adapter */
+	u8		 Saf_Te;		/* 1 to force all Processors as SAF-TE if Inquiry data length is too short to check for SAF-TE */
+	u8		 rsvd[1];
+} ScsiCfgData;
+
+/*
+ *  Adapter Structure - pci_dev specific. Maximum: MPT_MAX_ADAPTERS
+ */
+typedef struct _MPT_ADAPTER
+{
+	int			 id;		/* Unique adapter id N {0,1,2,...} */
+	int			 pci_irq;	/* This irq           */
+	char			 name[MPT_NAME_LENGTH];	/* "iocN"             */
+	char			*prod_name;	/* "LSIFC9x9"         */
+	SYSIF_REGS __iomem	*chip;		/* == c8817000 (mmap) */
+	SYSIF_REGS __iomem	*pio_chip;	/* Programmed IO (downloadboot) */
+	u8			 bus_type;
+	u32			 mem_phys;	/* == f4020000 (mmap) */
+	u32			 pio_mem_phys;	/* Programmed IO (downloadboot) */
+	int			 mem_size;	/* mmap memory size */
+	int			 alloc_total;
+	u32			 last_state;
+	int			 active;
+	u8			*alloc;		/* frames alloc ptr */
+	dma_addr_t		 alloc_dma;
+	u32			 alloc_sz;
+	MPT_FRAME_HDR		*reply_frames;	/* Reply msg frames - rounded up! */
+	u32			 reply_frames_low_dma;
+	int			 reply_depth;	/* Num Allocated reply frames */
+	int			 reply_sz;	/* Reply frame size */
+	int			 num_chain;	/* Number of chain buffers */
+		/* Pool of buffers for chaining. ReqToChain
+		 * and ChainToChain track index of chain buffers.
+		 * ChainBuffer (DMA) virt/phys addresses.
+		 * FreeChainQ (lock) locking mechanisms.
+		 */
+	int			*ReqToChain;
+	int			*RequestNB;
+	int			*ChainToChain;
+	u8			*ChainBuffer;
+	dma_addr_t		 ChainBufferDMA;
+	struct list_head	 FreeChainQ;
+	spinlock_t		 FreeChainQlock;
+		/* We (host driver) get to manage our own RequestQueue! */
+	dma_addr_t		 req_frames_dma;
+	MPT_FRAME_HDR		*req_frames;	/* Request msg frames - rounded up! */
+	u32			 req_frames_low_dma;
+	int			 req_depth;	/* Number of request frames */
+	int			 req_sz;	/* Request frame size (bytes) */
+	spinlock_t		 FreeQlock;
+	struct list_head	 FreeQ;
+		/* Pool of SCSI sense buffers for commands coming from
+		 * the SCSI mid-layer.  We have one 256 byte sense buffer
+		 * for each REQ entry.
+		 */
+	u8			*sense_buf_pool;
+	dma_addr_t		 sense_buf_pool_dma;
+	u32			 sense_buf_low_dma;
+	int			 mtrr_reg;
+	struct pci_dev		*pcidev;	/* struct pci_dev pointer */
+	u8			__iomem *memmap;	/* mmap address */
+	struct Scsi_Host	*sh;		/* Scsi Host pointer */
+	ScsiCfgData		spi_data;	/* Scsi config. data */
+	MPT_IOCTL		*ioctl;		/* ioctl data pointer */
+	struct proc_dir_entry	*ioc_dentry;
+	struct _MPT_ADAPTER	*alt_ioc;	/* ptr to 929 bound adapter port */
+	spinlock_t		 diagLock;	/* diagnostic reset lock */
+	int			 diagPending;
+	u32			 biosVersion;	/* BIOS version from IO Unit Page 2 */
+	int			 eventTypes;	/* Event logging parameters */
+	int			 eventContext;	/* Next event context */
+	int			 eventLogSize;	/* Max number of cached events */
+	struct _mpt_ioctl_events *events;	/* pointer to event log */
+	u8			*cached_fw;	/* Pointer to FW */
+	dma_addr_t	 	cached_fw_dma;
+	struct list_head	 configQ;	/* linked list of config. requests */
+	int			 hs_reply_idx;
+#ifndef MFCNT
+	u32			 pad0;
+#else
+	u32			 mfcnt;
+#endif
+	u32			 NB_for_64_byte_frame;       
+	u32			 hs_req[MPT_MAX_FRAME_SIZE/sizeof(u32)];
+	u16			 hs_reply[MPT_MAX_FRAME_SIZE/sizeof(u16)];
+	IOCFactsReply_t		 facts;
+	PortFactsReply_t	 pfacts[2];
+	FCPortPage0_t		 fc_port_page0[2];
+	LANPage0_t		 lan_cnfg_page0;
+	LANPage1_t		 lan_cnfg_page1;
+	u8			 FirstWhoInit;
+	u8			 upload_fw;	/* If set, do a fw upload */
+	u8			 reload_fw;	/* Force a FW Reload on next reset */
+	u8			 NBShiftFactor;  /* NB Shift Factor based on Block Size (Facts)  */     
+	u8			 pad1[4];
+	struct list_head	 list; 
+	struct net_device	*netdev;
+} MPT_ADAPTER;
+
+/*
+ *  New return value convention:
+ *    1 = Ok to free associated request frame
+ *    0 = not Ok ...
+ */
+typedef int (*MPT_CALLBACK)(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply);
+typedef int (*MPT_EVHANDLER)(MPT_ADAPTER *ioc, EventNotificationReply_t *evReply);
+typedef int (*MPT_RESETHANDLER)(MPT_ADAPTER *ioc, int reset_phase);
+/* reset_phase defs */
+#define MPT_IOC_PRE_RESET		0
+#define MPT_IOC_POST_RESET		1
+#define MPT_IOC_SETUP_RESET		2
+
+/*
+ * Invent MPT host event (super-set of MPI Events)
+ * Fitted to 1030's 64-byte [max] request frame size
+ */
+typedef struct _MPT_HOST_EVENT {
+	EventNotificationReply_t	 MpiEvent;	/* 8 32-bit words! */
+	u32				 pad[6];
+	void				*next;
+} MPT_HOST_EVENT;
+
+#define MPT_HOSTEVENT_IOC_BRINGUP	0x91
+#define MPT_HOSTEVENT_IOC_RECOVER	0x92
+
+/* Define the generic types based on the size
+ * of the dma_addr_t type.
+ */
+typedef struct _mpt_sge {
+	u32		FlagsLength;
+	dma_addr_t	Address;
+} MptSge_t;
+
+#define mpt_addr_size() \
+	((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SGE_FLAGS_64_BIT_ADDRESSING : \
+		MPI_SGE_FLAGS_32_BIT_ADDRESSING)
+
+#define mpt_msg_flags() \
+	((sizeof(dma_addr_t) == sizeof(u64)) ? MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_64 : \
+		MPI_SCSIIO_MSGFLGS_SENSE_WIDTH_32)
+
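+/*
+ * Illustrative sketch (assumption, not part of the original source):
+ * a caller building a SCSI IO request would typically pick up the
+ * matching sense-buffer width at compile time with:
+ *
+ *	pScsiReq->MsgFlags = mpt_msg_flags();
+ *
+ * ("pScsiReq" is a hypothetical SCSIIORequest_t pointer.)
+ */
+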
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Funky (private) macros...
+ */
+#ifdef MPT_DEBUG
+#define dprintk(x)  printk x
+#else
+#define dprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_INIT
+#define dinitprintk(x)  printk x
+#define DBG_DUMP_FW_REQUEST_FRAME(mfp) \
+	{	int  i, n = 10;						\
+		u32 *m = (u32 *)(mfp);					\
+		printk(KERN_INFO " ");					\
+		for (i=0; i<n; i++)					\
+			printk(" %08x", le32_to_cpu(m[i]));		\
+		printk("\n");						\
+	}
+#else
+#define dinitprintk(x)
+#define DBG_DUMP_FW_REQUEST_FRAME(mfp)
+#endif
+
+#ifdef MPT_DEBUG_EXIT
+#define dexitprintk(x)  printk x
+#else
+#define dexitprintk(x)
+#endif
+
+#if defined MPT_DEBUG_FAIL || defined (MPT_DEBUG_SG)
+#define dfailprintk(x) printk x
+#else
+#define dfailprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_HANDSHAKE
+#define dhsprintk(x)  printk x
+#else
+#define dhsprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_EVENTS
+#define devtprintk(x)  printk x
+#else
+#define devtprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_RESET
+#define drsprintk(x)  printk x
+#else
+#define drsprintk(x)
+#endif
+
+//#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
+#if defined(MPT_DEBUG_MSG_FRAME)
+#define dmfprintk(x)  printk x
+#define DBG_DUMP_REQUEST_FRAME(mfp) \
+	{	int  i, n = 24;						\
+		u32 *m = (u32 *)(mfp);					\
+		for (i=0; i<n; i++) {					\
+			if (i && ((i%8)==0))				\
+				printk("\n");				\
+			printk("%08x ", le32_to_cpu(m[i]));		\
+		}							\
+		printk("\n");						\
+	}
+#else
+#define dmfprintk(x)
+#define DBG_DUMP_REQUEST_FRAME(mfp)
+#endif
+
+#ifdef MPT_DEBUG_IRQ
+#define dirqprintk(x)  printk x
+#else
+#define dirqprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_SG
+#define dsgprintk(x)  printk x
+#else
+#define dsgprintk(x)
+#endif
+
+#if defined(MPT_DEBUG_DL) || defined(MPT_DEBUG)
+#define ddlprintk(x)  printk x
+#else
+#define ddlprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_DV
+#define ddvprintk(x)  printk x
+#else
+#define ddvprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_NEGO
+#define dnegoprintk(x)  printk x
+#else
+#define dnegoprintk(x)
+#endif
+
+#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
+#define ddvtprintk(x)  printk x
+#else
+#define ddvtprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_IOCTL
+#define dctlprintk(x) printk x
+#else
+#define dctlprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_REPLY
+#define dreplyprintk(x) printk x
+#else
+#define dreplyprintk(x)
+#endif
+
+#ifdef MPT_DEBUG_TM
+#define dtmprintk(x) printk x
+#define DBG_DUMP_TM_REQUEST_FRAME(mfp) \
+	{	u32 *m = (u32 *)(mfp);					\
+		int  i, n = 13;						\
+		printk("TM_REQUEST:\n");				\
+		for (i=0; i<n; i++) {					\
+			if (i && ((i%8)==0))				\
+				printk("\n");				\
+			printk("%08x ", le32_to_cpu(m[i]));		\
+		}							\
+		printk("\n");						\
+	}
+#define DBG_DUMP_TM_REPLY_FRAME(mfp) \
+	{	u32 *m = (u32 *)(mfp);					\
+		int  i, n = (le32_to_cpu(m[0]) & 0x00FF0000) >> 16;	\
+		printk("TM_REPLY MessageLength=%d:\n", n);		\
+		for (i=0; i<n; i++) {					\
+			if (i && ((i%8)==0))				\
+				printk("\n");				\
+			printk(" %08x", le32_to_cpu(m[i]));		\
+		}							\
+		printk("\n");						\
+	}
+#else
+#define dtmprintk(x)
+#define DBG_DUMP_TM_REQUEST_FRAME(mfp)
+#define DBG_DUMP_TM_REPLY_FRAME(mfp)
+#endif
+
+#ifdef MPT_DEBUG_NEH
+#define nehprintk(x) printk x
+#else
+#define nehprintk(x)
+#endif
+
+#if defined(MPT_DEBUG_CONFIG) || defined(MPT_DEBUG)
+#define dcprintk(x) printk x
+#else
+#define dcprintk(x)
+#endif
+
+#if defined(MPT_DEBUG_SCSI) || defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
+#define dsprintk(x) printk x
+#else
+#define dsprintk(x)
+#endif
+
+
+#define MPT_INDEX_2_MFPTR(ioc,idx) \
+	(MPT_FRAME_HDR*)( (u8*)(ioc)->req_frames + (ioc)->req_sz * (idx) )
+
+#define MFPTR_2_MPT_INDEX(ioc,mf) \
+	(int)( ((u8*)mf - (u8*)(ioc)->req_frames) / (ioc)->req_sz )
+
+#define MPT_INDEX_2_RFPTR(ioc,idx) \
+	(MPT_FRAME_HDR*)( (u8*)(ioc)->reply_frames + (ioc)->req_sz * (idx) )
+
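+/*
+ * Illustrative sketch (not part of the original source): a completion
+ * path holding only the request index (echoed back in the reply's
+ * MsgContext) can recover the original request frame with:
+ *
+ *	u16 req_idx = le16_to_cpu(mr->u.frame.hwhdr.msgctxu.fld.req_idx);
+ *	MPT_FRAME_HDR *mf = MPT_INDEX_2_MFPTR(ioc, req_idx);
+ *
+ * ("mr" and "ioc" are hypothetical locals used only for illustration.)
+ */
+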
+#if defined(MPT_DEBUG) || defined(MPT_DEBUG_MSG_FRAME)
+#define DBG_DUMP_REPLY_FRAME(mfp) \
+	{	u32 *m = (u32 *)(mfp);					\
+		int  i, n = (le32_to_cpu(m[0]) & 0x00FF0000) >> 16;	\
+		printk(KERN_INFO " ");					\
+		for (i=0; i<n; i++)					\
+			printk(" %08x", le32_to_cpu(m[i]));		\
+		printk("\n");						\
+	}
+#define DBG_DUMP_REQUEST_FRAME_HDR(mfp) \
+	{	int  i, n = 3;						\
+		u32 *m = (u32 *)(mfp);					\
+		printk(KERN_INFO " ");					\
+		for (i=0; i<n; i++)					\
+			printk(" %08x", le32_to_cpu(m[i]));		\
+		printk("\n");						\
+	}
+#else
+#define DBG_DUMP_REPLY_FRAME(mfp)
+#define DBG_DUMP_REQUEST_FRAME_HDR(mfp)
+#endif
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#define SCSI_STD_SENSE_BYTES    18
+#define SCSI_STD_INQUIRY_BYTES  36
+#define SCSI_MAX_INQUIRY_BYTES  96
+
+/*
+ * MPT_SCSI_HOST defines - Used by the IOCTL and the SCSI drivers
+ * Private to the driver.
+ */
+/* LOCAL structure and fields used when processing
+ * internally generated commands. These include:
+ * bus scan, dv and config requests.
+ */
+typedef struct _MPT_LOCAL_REPLY {
+	ConfigPageHeader_t header;
+	int	completion;
+	u8	sense[SCSI_STD_SENSE_BYTES];
+	u8	scsiStatus;
+	u8	skip;
+	u32	pad;
+} MPT_LOCAL_REPLY;
+
+#define MPT_HOST_BUS_UNKNOWN		(0xFF)
+#define MPT_HOST_TOO_MANY_TM		(0x05)
+#define MPT_HOST_NVRAM_INVALID		(0xFFFFFFFF)
+#define MPT_HOST_NO_CHAIN		(0xFFFFFFFF)
+#define MPT_NVRAM_MASK_TIMEOUT		(0x000000FF)
+#define MPT_NVRAM_SYNC_MASK		(0x0000FF00)
+#define MPT_NVRAM_SYNC_SHIFT		(8)
+#define MPT_NVRAM_DISCONNECT_ENABLE	(0x00010000)
+#define MPT_NVRAM_ID_SCAN_ENABLE	(0x00020000)
+#define MPT_NVRAM_LUN_SCAN_ENABLE	(0x00040000)
+#define MPT_NVRAM_TAG_QUEUE_ENABLE	(0x00080000)
+#define MPT_NVRAM_WIDE_DISABLE		(0x00100000)
+#define MPT_NVRAM_BOOT_CHOICE		(0x00200000)
+
+/* The TM_STATE variable is used to provide strict single threading of TM
+ * requests as well as communicate TM error conditions.
+ */
+#define TM_STATE_NONE          (0)
+#define	TM_STATE_IN_PROGRESS   (1)
+#define	TM_STATE_ERROR	       (2)
+
+typedef enum {
+	FC,
+	SCSI,
+	SAS
+} BUS_TYPE;
+
+typedef struct _MPT_SCSI_HOST {
+	MPT_ADAPTER		 *ioc;
+	int			  port;
+	u32			  pad0;
+	struct scsi_cmnd	**ScsiLookup;
+	VirtDevice		**Targets;
+	MPT_LOCAL_REPLY		 *pLocal;		/* used for internal commands */
+	struct timer_list	  timer;
+		/* Pool of memory for holding SCpnts before doing
+		 * OS callbacks. freeQ is the free pool.
+		 */
+	u8			  tmPending;
+	u8			  resetPending;
+	u8			  negoNvram;		/* DV disabled, nego NVRAM */
+	u8			  pad1;
+	u8                        tmState;
+	u8			  rsvd[2];
+	MPT_FRAME_HDR		 *cmdPtr;		/* Ptr to nonOS request */
+	struct scsi_cmnd	 *abortSCpnt;
+	MPT_LOCAL_REPLY		  localReply;		/* internal cmd reply struct */
+	unsigned long		  hard_resets;		/* driver forced bus resets count */
+	unsigned long		  soft_resets;		/* fw/external bus resets count */
+	unsigned long		  timeouts;		/* cmd timeouts */
+	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
+} MPT_SCSI_HOST;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	More Dynamic Multi-Pathing stuff...
+ */
+
+/* Forward decl, a strange C thing, to prevent gcc compiler warnings */
+struct scsi_cmnd;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Generic structure passed to the base mpt_config function.
+ */
+typedef struct _x_config_parms {
+	struct list_head	 linkage;	/* linked list */
+	struct timer_list	 timer;		/* timer function for this request  */
+	ConfigPageHeader_t	*hdr;
+	dma_addr_t		 physAddr;
+	int			 wait_done;	/* wait for this request */
+	u32			 pageAddr;	/* properly formatted */
+	u8			 action;
+	u8			 dir;
+	u8			 timeout;	/* seconds */
+	u8			 pad1;
+	u16			 status;
+	u16			 pad2;
+} CONFIGPARMS;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Public entry points...
+ */
+extern int	 mpt_register(MPT_CALLBACK cbfunc, MPT_DRIVER_CLASS dclass);
+extern void	 mpt_deregister(int cb_idx);
+extern int	 mpt_event_register(int cb_idx, MPT_EVHANDLER ev_cbfunc);
+extern void	 mpt_event_deregister(int cb_idx);
+extern int	 mpt_reset_register(int cb_idx, MPT_RESETHANDLER reset_func);
+extern void	 mpt_reset_deregister(int cb_idx);
+extern int	 mpt_device_driver_register(struct mpt_pci_driver * dd_cbfunc, int cb_idx);
+extern void	 mpt_device_driver_deregister(int cb_idx);
+extern MPT_FRAME_HDR	*mpt_get_msg_frame(int handle, MPT_ADAPTER *ioc);
+extern void	 mpt_free_msg_frame(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+extern void	 mpt_put_msg_frame(int handle, MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf);
+extern void	 mpt_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr);
+
+extern int	 mpt_send_handshake_request(int handle, MPT_ADAPTER *ioc, int reqBytes, u32 *req, int sleepFlag);
+extern int	 mpt_verify_adapter(int iocid, MPT_ADAPTER **iocpp);
+extern u32	 mpt_GetIocState(MPT_ADAPTER *ioc, int cooked);
+extern void	 mpt_print_ioc_summary(MPT_ADAPTER *ioc, char *buf, int *size, int len, int showlan);
+extern int	 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag);
+extern int	 mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
+extern int	 mpt_toolbox(MPT_ADAPTER *ioc, CONFIGPARMS *cfg);
+extern void	 mpt_alloc_fw_memory(MPT_ADAPTER *ioc, int size);
+extern void	 mpt_free_fw_memory(MPT_ADAPTER *ioc);
+extern int	 mpt_findImVolumes(MPT_ADAPTER *ioc);
+extern int	 mpt_read_ioc_pg_3(MPT_ADAPTER *ioc);
+
+/*
+ *  Public data decl's...
+ */
+extern struct list_head	  ioc_list;
+extern struct proc_dir_entry	*mpt_proc_root_dir;
+
+extern int		  mpt_lan_index;	/* needed by mptlan.c */
+extern int		  mpt_stm_index;	/* needed by mptstm.c */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif		/* } __KERNEL__ */
+
+#if defined(__alpha__) || defined(__sparc_v9__) || defined(__ia64__) || defined(__x86_64__)
+#define CAST_U32_TO_PTR(x)	((void *)(u64)x)
+#define CAST_PTR_TO_U32(x)	((u32)(u64)x)
+#else
+#define CAST_U32_TO_PTR(x)	((void *)x)
+#define CAST_PTR_TO_U32(x)	((u32)x)
+#endif
+
+#define MPT_PROTOCOL_FLAGS_c_c_c_c(pflags) \
+	((pflags) & MPI_PORTFACTS_PROTOCOL_INITIATOR)	? 'I' : 'i',	\
+	((pflags) & MPI_PORTFACTS_PROTOCOL_TARGET)	? 'T' : 't',	\
+	((pflags) & MPI_PORTFACTS_PROTOCOL_LAN)		? 'L' : 'l',	\
+	((pflags) & MPI_PORTFACTS_PROTOCOL_LOGBUSADDR)	? 'B' : 'b'
+
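+/*
+ * Illustrative sketch (assumption, not part of the original source):
+ * the macro above expands to four comma-separated characters, so it
+ * is intended to feed a "%c%c%c%c" conversion, e.g.:
+ *
+ *	printk("Protocols = {%c%c%c%c}\n",
+ *		MPT_PROTOCOL_FLAGS_c_c_c_c(pfacts->ProtocolFlags));
+ *
+ * ("pfacts" is a hypothetical PortFactsReply_t pointer.)
+ */
+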
+/*
+ *  Shifted SGE Defines - Use in SGE with FlagsLength member.
+ *  Otherwise, use MPI_xxx defines (refer to "lsi/mpi.h" header).
+ *  Defaults: 32 bit SGE, SYSTEM_ADDRESS if direction bit is 0, read
+ */
+#define MPT_TRANSFER_IOC_TO_HOST		(0x00000000)
+#define MPT_TRANSFER_HOST_TO_IOC		(0x04000000)
+#define MPT_SGE_FLAGS_LAST_ELEMENT		(0x80000000)
+#define MPT_SGE_FLAGS_END_OF_BUFFER		(0x40000000)
+#define MPT_SGE_FLAGS_LOCAL_ADDRESS		(0x08000000)
+#define MPT_SGE_FLAGS_DIRECTION			(0x04000000)
+#define MPT_SGE_FLAGS_ADDRESSING		(mpt_addr_size() << MPI_SGE_FLAGS_SHIFT)
+#define MPT_SGE_FLAGS_END_OF_LIST		(0x01000000)
+
+#define MPT_SGE_FLAGS_TRANSACTION_ELEMENT	(0x00000000)
+#define MPT_SGE_FLAGS_SIMPLE_ELEMENT		(0x10000000)
+#define MPT_SGE_FLAGS_CHAIN_ELEMENT		(0x30000000)
+#define MPT_SGE_FLAGS_ELEMENT_MASK		(0x30000000)
+
+#define MPT_SGE_FLAGS_SSIMPLE_READ \
+	(MPT_SGE_FLAGS_LAST_ELEMENT |	\
+	 MPT_SGE_FLAGS_END_OF_BUFFER |	\
+	 MPT_SGE_FLAGS_END_OF_LIST |	\
+	 MPT_SGE_FLAGS_SIMPLE_ELEMENT |	\
+	 MPT_SGE_FLAGS_ADDRESSING | \
+	 MPT_TRANSFER_IOC_TO_HOST)
+#define MPT_SGE_FLAGS_SSIMPLE_WRITE \
+	(MPT_SGE_FLAGS_LAST_ELEMENT |	\
+	 MPT_SGE_FLAGS_END_OF_BUFFER |	\
+	 MPT_SGE_FLAGS_END_OF_LIST |	\
+	 MPT_SGE_FLAGS_SIMPLE_ELEMENT |	\
+	 MPT_SGE_FLAGS_ADDRESSING | \
+	 MPT_TRANSFER_HOST_TO_IOC)
+
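+/*
+ * Illustrative sketch (assumption, not part of the original source):
+ * a single inbound data buffer of "len" bytes at bus address "dma"
+ * could be described to the IOC with one simple SGE:
+ *
+ *	mpt_add_sge((char *)&pReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | len, dma);
+ *
+ * ("pReq", "len" and "dma" are hypothetical; mpt_add_sge() is declared
+ * earlier in this header.)
+ */
+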
+/*}-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#endif
+
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
new file mode 100644
index 0000000..70b0cfb
--- /dev/null
+++ b/drivers/message/fusion/mptctl.c
@@ -0,0 +1,2878 @@
+/*
+ *  linux/drivers/message/fusion/mptctl.c
+ *      Fusion MPT misc device (ioctl) driver.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      A special thanks to Pamela Delaney (LSI Logic) for tons of work
+ *      and countless enhancements while adding support for the 1030
+ *      chip family.  Pam has been instrumental in the development
+ *      of the 2.xx.xx series fusion drivers, and her contributions are
+ *      far too numerous to hope to list in one place.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      A big THANKS to Eddie C. Dost for fixing the ioctl path
+ *      and most importantly f/w download on sparc64 platform!
+ *      (plus Eddie's other helpful hints and insights)
+ *
+ *      Thanks to Arnaldo Carvalho de Melo for finding and patching
+ *      a potential memory leak in mptctl_do_fw_download(),
+ *      and for some kmalloc insight:-)
+ *
+ *      (see also mptbase.c)
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Originally By: Steven J. Ralston, Noah Romer
+ *  (mailto:sjralston1@netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptctl.c,v 1.63 2002/12/03 21:26:33 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>	/* for mdelay */
+#include <linux/miscdevice.h>
+#include <linux/smp_lock.h>
+#include <linux/compat.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#define COPYRIGHT	"Copyright (c) 1999-2004 LSI Logic Corporation"
+#define MODULEAUTHOR	"Steven J. Ralston, Noah Romer, Pamela Delaney"
+#include "mptbase.h"
+#include "mptctl.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME		"Fusion MPT misc device (ioctl) driver"
+#define my_VERSION	MPT_LINUX_VERSION_COMMON
+#define MYNAM		"mptctl"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static int mptctl_id = -1;
+
+static DECLARE_WAIT_QUEUE_HEAD ( mptctl_wait );
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+struct buflist {
+	u8	*kptr;
+	int	 len;
+};
+
+/*
+ * Function prototypes. Called from OS entry point mptctl_ioctl.
+ * arg contents specific to function.
+ */
+static int mptctl_fw_download(unsigned long arg);
+static int mptctl_getiocinfo (unsigned long arg, unsigned int cmd);
+static int mptctl_gettargetinfo (unsigned long arg);
+static int mptctl_readtest (unsigned long arg);
+static int mptctl_mpt_command (unsigned long arg);
+static int mptctl_eventquery (unsigned long arg);
+static int mptctl_eventenable (unsigned long arg);
+static int mptctl_eventreport (unsigned long arg);
+static int mptctl_replace_fw (unsigned long arg);
+
+static int mptctl_do_reset(unsigned long arg);
+static int mptctl_hp_hostinfo(unsigned long arg, unsigned int cmd);
+static int mptctl_hp_targetinfo(unsigned long arg);
+
+static int  mptctl_probe(struct pci_dev *, const struct pci_device_id *);
+static void mptctl_remove(struct pci_dev *);
+
+#ifdef CONFIG_COMPAT
+static long compat_mpctl_ioctl(struct file *f, unsigned cmd, unsigned long arg);
+#endif
+/*
+ * Private function calls.
+ */
+static int mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr);
+static int mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen);
+static MptSge_t *kbuf_alloc_2_sgl( int bytes, u32 dir, int sge_offset, int *frags,
+		struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc);
+static void kfree_sgl( MptSge_t *sgl, dma_addr_t sgl_dma,
+		struct buflist *buflist, MPT_ADAPTER *ioc);
+static void mptctl_timeout_expired (MPT_IOCTL *ioctl);
+static int  mptctl_bus_reset(MPT_IOCTL *ioctl);
+static int mptctl_set_tm_flags(MPT_SCSI_HOST *hd);
+static void mptctl_free_tm_flags(MPT_ADAPTER *ioc);
+
+/*
+ * Reset Handler cleanup function
+ */
+static int  mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Scatter gather list (SGL) sizes and limits...
+ */
+//#define MAX_SCSI_FRAGS	9
+#define MAX_FRAGS_SPILL1	9
+#define MAX_FRAGS_SPILL2	15
+#define FRAGS_PER_BUCKET	(MAX_FRAGS_SPILL2 + 1)
+
+//#define MAX_CHAIN_FRAGS	64
+//#define MAX_CHAIN_FRAGS	(15+15+15+16)
+#define MAX_CHAIN_FRAGS		(4 * MAX_FRAGS_SPILL2 + 1)
+
+//  Define max sg LIST bytes ( == (#frags + #chains) * 8 bytes each)
+//  Works out to: 592d bytes!     (9+1)*8 + 4*(15+1)*8
+//                  ^----------------- 80 + 512
+#define MAX_SGL_BYTES		((MAX_FRAGS_SPILL1 + 1 + (4 * FRAGS_PER_BUCKET)) * 8)
+
+/* linux only seems to ever give 128kB MAX contiguous (GFP_USER) mem bytes */
+#define MAX_KMALLOC_SZ		(128*1024)
+
+#define MPT_IOCTL_DEFAULT_TIMEOUT 10	/* Default timeout value (seconds) */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptctl_syscall_down - Down the MPT adapter syscall semaphore.
+ *	@ioc: Pointer to MPT adapter
+ *	@nonblock: boolean, non-zero if O_NONBLOCK is set
+ *
+ *	All of the ioctl commands can potentially sleep, which is illegal
+ *	with a spinlock held, thus we perform mutual exclusion here.
+ *
+ *	Returns negative errno on error, or zero for success.
+ */
+static inline int
+mptctl_syscall_down(MPT_ADAPTER *ioc, int nonblock)
+{
+	int rc = 0;
+	dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down(%p,%d) called\n", ioc, nonblock));
+
+	if (nonblock) {
+		if (down_trylock(&ioc->ioctl->sem_ioc))
+			rc = -EAGAIN;
+	} else {
+		if (down_interruptible(&ioc->ioctl->sem_ioc))
+			rc = -ERESTARTSYS;
+	}
+	dctlprintk((KERN_INFO MYNAM "::mptctl_syscall_down return %d\n", rc));
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  This is the callback for any message we have posted. The message itself
+ *  will be returned to the message pool when we return from the IRQ
+ *
+ *  This runs in irq context so be short and sweet.
+ */
+static int
+mptctl_reply(MPT_ADAPTER *ioc, MPT_FRAME_HDR *req, MPT_FRAME_HDR *reply)
+{
+	char *sense_data;
+	int sz, req_index;
+	u16 iocStatus;
+	u8 cmd;
+
+	dctlprintk(("mptctl_reply()!\n"));
+	if (req)
+		 cmd = req->u.hdr.Function;
+	else
+		return 1;
+
+	if (ioc->ioctl) {
+
+		if (reply==NULL) {
+
+			dctlprintk(("mptctl_reply() NULL Reply "
+				"Function=%x!\n", cmd));
+
+			ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
+			ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
+
+			/* We are done, issue wake up
+	 		*/
+			ioc->ioctl->wait_done = 1;
+			wake_up (&mptctl_wait);
+			return 1;
+
+		}
+
+		dctlprintk(("mptctl_reply() with req=%p "
+			"reply=%p Function=%x!\n", req, reply, cmd));
+
+		/* Copy the reply frame (which must exist
+		 * for non-SCSI I/O) to the IOC structure.
+		 */
+		dctlprintk(("Copying Reply Frame @%p to ioc%d!\n",
+			reply, ioc->id));
+		memcpy(ioc->ioctl->ReplyFrame, reply,
+			min(ioc->reply_sz, 4*reply->u.reply.MsgLength));
+		ioc->ioctl->status |= MPT_IOCTL_STATUS_RF_VALID;
+
+		/* Set the command status to GOOD if IOC Status is GOOD
+		 * OR if SCSI I/O cmd and data underrun or recovered error.
+		 */
+		iocStatus = reply->u.reply.IOCStatus & MPI_IOCSTATUS_MASK;
+		if (iocStatus  == MPI_IOCSTATUS_SUCCESS)
+			ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
+
+		if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) ||
+			(cmd == MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH)) {
+			ioc->ioctl->reset &= ~MPTCTL_RESET_OK;
+
+			if ((iocStatus == MPI_IOCSTATUS_SCSI_DATA_UNDERRUN) ||
+			(iocStatus == MPI_IOCSTATUS_SCSI_RECOVERED_ERROR)) {
+			ioc->ioctl->status |= MPT_IOCTL_STATUS_COMMAND_GOOD;
+			}
+		}
+
+		/* Copy the sense data - if present
+		 */
+		if ((cmd == MPI_FUNCTION_SCSI_IO_REQUEST) &&
+			(reply->u.sreply.SCSIState &
+			 MPI_SCSI_STATE_AUTOSENSE_VALID)){
+			sz = req->u.scsireq.SenseBufferLength;
+			req_index =
+			    le16_to_cpu(req->u.frame.hwhdr.msgctxu.fld.req_idx);
+			sense_data =
+			    ((u8 *)ioc->sense_buf_pool +
+			     (req_index * MPT_SENSE_BUFFER_ALLOC));
+			memcpy(ioc->ioctl->sense, sense_data, sz);
+			ioc->ioctl->status |= MPT_IOCTL_STATUS_SENSE_VALID;
+		}
+
+		if (cmd == MPI_FUNCTION_SCSI_TASK_MGMT)
+			mptctl_free_tm_flags(ioc);
+
+		/* We are done, issue wake up
+		 */
+		ioc->ioctl->wait_done = 1;
+		wake_up (&mptctl_wait);
+	}
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_timeout_expired
+ *
+ * An interrupt was expected, but the request timed out.
+ *
+ */
+static void mptctl_timeout_expired (MPT_IOCTL *ioctl)
+{
+	int rc = 1;
+
+	if (ioctl == NULL)
+		return;
+
+	dctlprintk((KERN_NOTICE MYNAM ": Timeout Expired! Host %d\n",
+				ioctl->ioc->id));
+
+	ioctl->wait_done = 0;
+	if (ioctl->reset & MPTCTL_RESET_OK)
+		rc = mptctl_bus_reset(ioctl);
+
+	if (rc) {
+		/* Issue a reset for this device.
+		 * The IOC is not responding.
+		 */
+		dctlprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
+			 ioctl->ioc->name));
+		mpt_HardResetHandler(ioctl->ioc, NO_SLEEP);
+	}
+	return;
+
+}
+
+/* mptctl_bus_reset
+ *
+ * Bus reset code.
+ *
+ */
+static int mptctl_bus_reset(MPT_IOCTL *ioctl)
+{
+	MPT_FRAME_HDR	*mf;
+	SCSITaskMgmt_t	*pScsiTm;
+	MPT_SCSI_HOST	*hd;
+	int		 ii;
+	int		 retval;
+
+
+	ioctl->reset &= ~MPTCTL_RESET_OK;
+
+	if (ioctl->ioc->sh == NULL)
+		return -EPERM;
+
+	hd = (MPT_SCSI_HOST *) ioctl->ioc->sh->hostdata;
+	if (hd == NULL)
+		return -EPERM;
+
+	/* Single threading ....
+	 */
+	if (mptctl_set_tm_flags(hd) != 0)
+		return -EPERM;
+
+	/* Send request
+	 */
+	if ((mf = mpt_get_msg_frame(mptctl_id, ioctl->ioc)) == NULL) {
+		dctlprintk((MYIOC_s_WARN_FMT "IssueTaskMgmt, no msg frames!!\n",
+				ioctl->ioc->name));
+
+		mptctl_free_tm_flags(ioctl->ioc);
+		return -ENOMEM;
+	}
+
+	dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
+			ioctl->ioc->name, mf));
+
+	pScsiTm = (SCSITaskMgmt_t *) mf;
+	pScsiTm->TargetID = ioctl->target;
+	pScsiTm->Bus = hd->port;	/* 0 */
+	pScsiTm->ChainOffset = 0;
+	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+	pScsiTm->Reserved = 0;
+	pScsiTm->TaskType = MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS;
+	pScsiTm->Reserved1 = 0;
+	pScsiTm->MsgFlags = MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION;
+
+	for (ii= 0; ii < 8; ii++)
+		pScsiTm->LUN[ii] = 0;
+
+	for (ii=0; ii < 7; ii++)
+		pScsiTm->Reserved2[ii] = 0;
+
+	pScsiTm->TaskMsgContext = 0;
+	dtmprintk((MYIOC_s_INFO_FMT
+		"mptctl_bus_reset: issued.\n", ioctl->ioc->name));
+
+	DBG_DUMP_TM_REQUEST_FRAME((u32 *)mf);
+
+	ioctl->wait_done=0;
+	if ((retval = mpt_send_handshake_request(mptctl_id, ioctl->ioc,
+	     sizeof(SCSITaskMgmt_t), (u32*)pScsiTm, CAN_SLEEP)) != 0) {
+		dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
+			" (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
+			hd->ioc, mf));
+		goto mptctl_bus_reset_done;
+	}
+
+	/* Now wait for the command to complete */
+	ii = wait_event_interruptible_timeout(mptctl_wait,
+	     ioctl->wait_done == 1,
+	     HZ*5 /* 5 second timeout */);
+
+	if(ii <=0 && (ioctl->wait_done != 1 ))  {
+		ioctl->wait_done = 0;
+		retval = -1; /* return failure */
+	}
+
+mptctl_bus_reset_done:
+
+	mpt_free_msg_frame(hd->ioc, mf);
+	mptctl_free_tm_flags(ioctl->ioc);
+	return retval;
+}
+
+static int
+mptctl_set_tm_flags(MPT_SCSI_HOST *hd) {
+	unsigned long flags;
+
+	spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
+
+	if (hd->tmState == TM_STATE_NONE) {
+		hd->tmState = TM_STATE_IN_PROGRESS;
+		hd->tmPending = 1;
+		spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+	} else {
+		spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+		return -EBUSY;
+	}
+
+	return 0;
+}
+
+static void
+mptctl_free_tm_flags(MPT_ADAPTER *ioc)
+{
+	MPT_SCSI_HOST * hd;
+	unsigned long flags;
+
+	hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+	if (hd == NULL)
+		return;
+
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+	hd->tmState = TM_STATE_NONE;
+	hd->tmPending = 0;
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* mptctl_ioc_reset
+ *
+ * Clean-up functionality. Used only when the IOC has been reset
+ * (and hence the FW reloaded).
+ *
+ */
+static int
+mptctl_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+	MPT_IOCTL *ioctl = ioc->ioctl;
+	dctlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to IOCTL driver!\n",
+		reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+		reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+	if(ioctl == NULL)
+		return 1;
+
+	switch(reset_phase) {
+	case MPT_IOC_SETUP_RESET:
+		ioctl->status |= MPT_IOCTL_STATUS_DID_IOCRESET;
+		break;
+	case MPT_IOC_POST_RESET:
+		ioctl->status &= ~MPT_IOCTL_STATUS_DID_IOCRESET;
+		break;
+	case MPT_IOC_PRE_RESET:
+	default:
+		break;
+	}
+
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  MPT ioctl handler
+ *  cmd - specify the particular IOCTL command to be issued
+ *  arg - data specific to the command. Must not be null.
+ */
+static long
+__mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	mpt_ioctl_header __user *uhdr = (void __user *) arg;
+	mpt_ioctl_header	 khdr;
+	int iocnum;
+	unsigned iocnumX;
+	int nonblock = (file->f_flags & O_NONBLOCK);
+	int ret;
+	MPT_ADAPTER *iocp = NULL;
+
+	dctlprintk(("mptctl_ioctl() called\n"));
+
+	if (copy_from_user(&khdr, uhdr, sizeof(khdr))) {
+		printk(KERN_ERR "%s::mptctl_ioctl() @%d - "
+				"Unable to copy mpt_ioctl_header data @ %p\n",
+				__FILE__, __LINE__, uhdr);
+		return -EFAULT;
+	}
+	ret = -ENXIO;				/* (-6) No such device or address */
+
+	/* Verify intended MPT adapter - set iocnum and the adapter
+	 * pointer (iocp)
+	 */
+	iocnumX = khdr.iocnum & 0xFF;
+	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+	    (iocp == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_ioctl() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnumX));
+		return -ENODEV;
+	}
+
+	if (!iocp->active) {
+		printk(KERN_ERR "%s::mptctl_ioctl() @%d - Controller disabled.\n",
+				__FILE__, __LINE__);
+		return -EFAULT;
+	}
+
+	/* Handle those commands that are just returning
+	 * information stored in the driver.
+	 * These commands should never time out and are unaffected
+	 * by TM and FW reloads.
+	 */
+	if ((cmd & ~IOCSIZE_MASK) == (MPTIOCINFO & ~IOCSIZE_MASK)) {
+		return mptctl_getiocinfo(arg, _IOC_SIZE(cmd));
+	} else if (cmd == MPTTARGETINFO) {
+		return mptctl_gettargetinfo(arg);
+	} else if (cmd == MPTTEST) {
+		return mptctl_readtest(arg);
+	} else if (cmd == MPTEVENTQUERY) {
+		return mptctl_eventquery(arg);
+	} else if (cmd == MPTEVENTENABLE) {
+		return mptctl_eventenable(arg);
+	} else if (cmd == MPTEVENTREPORT) {
+		return mptctl_eventreport(arg);
+	} else if (cmd == MPTFWREPLACE) {
+		return mptctl_replace_fw(arg);
+	}
+
+	/* All of these commands require an interrupt or
+	 * are unknown/illegal.
+	 */
+	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+		return ret;
+
+	dctlprintk((MYIOC_s_INFO_FMT ": mptctl_ioctl()\n", iocp->name));
+
+	if (cmd == MPTFWDOWNLOAD)
+		ret = mptctl_fw_download(arg);
+	else if (cmd == MPTCOMMAND)
+		ret = mptctl_mpt_command(arg);
+	else if (cmd == MPTHARDRESET)
+		ret = mptctl_do_reset(arg);
+	else if ((cmd & ~IOCSIZE_MASK) == (HP_GETHOSTINFO & ~IOCSIZE_MASK))
+		ret = mptctl_hp_hostinfo(arg, _IOC_SIZE(cmd));
+	else if (cmd == HP_GETTARGETINFO)
+		ret = mptctl_hp_targetinfo(arg);
+	else
+		ret = -EINVAL;
+
+	up(&iocp->ioctl->sem_ioc);
+
+	return ret;
+}
+
+static long
+mptctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+	lock_kernel();
+	ret = __mptctl_ioctl(file, cmd, arg);
+	unlock_kernel();
+	return ret;
+}
+
+static int mptctl_do_reset(unsigned long arg)
+{
+	struct mpt_ioctl_diag_reset __user *urinfo = (void __user *) arg;
+	struct mpt_ioctl_diag_reset krinfo;
+	MPT_ADAPTER		*iocp;
+
+	dctlprintk((KERN_INFO "mptctl_do_reset called.\n"));
+
+	if (copy_from_user(&krinfo, urinfo, sizeof(struct mpt_ioctl_diag_reset))) {
+		printk(KERN_ERR "%s@%d::mptctl_do_reset - "
+				"Unable to copy mpt_ioctl_diag_reset struct @ %p\n",
+				__FILE__, __LINE__, urinfo);
+		return -EFAULT;
+	}
+
+	if (mpt_verify_adapter(krinfo.hdr.iocnum, &iocp) < 0) {
+		dctlprintk((KERN_ERR "%s@%d::mptctl_do_reset - ioc%d not found!\n",
+				__FILE__, __LINE__, krinfo.hdr.iocnum));
+		return -ENODEV; /* (-6) No such device or address */
+	}
+
+	if (mpt_HardResetHandler(iocp, CAN_SLEEP) != 0) {
+		printk (KERN_ERR "%s@%d::mptctl_do_reset - reset failed.\n",
+			__FILE__, __LINE__);
+		return -1;
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT FW download function.  Cast the arg into the mpt_fw_xfer structure.
+ * This structure contains: iocnum, firmware length (bytes),
+ *      pointer to user space memory where the fw image is stored.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENXIO  if no such device
+ *		-EAGAIN if resource problem
+ *		-ENOMEM if no memory for SGE
+ *		-EMLINK if too many chain buffers required
+ *		-EBADRQC if adapter does not support FW download
+ *		-EBUSY if adapter is busy
+ *		-ENOMSG if FW upload returned bad status
+ */
+static int
+mptctl_fw_download(unsigned long arg)
+{
+	struct mpt_fw_xfer __user *ufwdl = (void __user *) arg;
+	struct mpt_fw_xfer	 kfwdl;
+
+	dctlprintk((KERN_INFO "mptctl_fwdl called. mptctl_id = %xh\n", mptctl_id)); //tc
+	if (copy_from_user(&kfwdl, ufwdl, sizeof(struct mpt_fw_xfer))) {
+		printk(KERN_ERR "%s@%d::_ioctl_fwdl - "
+				"Unable to copy mpt_fw_xfer struct @ %p\n",
+				__FILE__, __LINE__, ufwdl);
+		return -EFAULT;
+	}
+
+	return mptctl_do_fw_download(kfwdl.iocnum, kfwdl.bufp, kfwdl.fwlen);
+}
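+
+/*
+ * Rough userspace sketch (assumption, not part of the original source):
+ * an application would fill a struct mpt_fw_xfer and issue the
+ * MPTFWDOWNLOAD ioctl against the /dev/mptctl misc device, e.g.:
+ *
+ *	struct mpt_fw_xfer fw;
+ *	fw.iocnum = 0;
+ *	fw.fwlen  = img_len;
+ *	fw.bufp   = img;
+ *	ioctl(fd, MPTFWDOWNLOAD, &fw);
+ *
+ * ("fd", "img" and "img_len" are hypothetical userspace variables.)
+ */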
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * FW Download engine.
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENXIO  if no such device
+ *		-EAGAIN if resource problem
+ *		-ENOMEM if no memory for SGE
+ *		-EMLINK if too many chain buffers required
+ *		-EBADRQC if adapter does not support FW download
+ *		-EBUSY if adapter is busy
+ *		-ENOMSG if FW upload returned bad status
+ */
+static int
+mptctl_do_fw_download(int ioc, char __user *ufwbuf, size_t fwlen)
+{
+	FWDownload_t		*dlmsg;
+	MPT_FRAME_HDR		*mf;
+	MPT_ADAPTER		*iocp;
+	FWDownloadTCSGE_t	*ptsge;
+	MptSge_t		*sgl, *sgIn;
+	char			*sgOut;
+	struct buflist		*buflist;
+	struct buflist		*bl;
+	dma_addr_t		 sgl_dma;
+	int			 ret;
+	int			 numfrags = 0;
+	int			 maxfrags;
+	int			 n = 0;
+	u32			 sgdir;
+	u32			 nib;
+	int			 fw_bytes_copied = 0;
+	int			 i;
+	int			 sge_offset = 0;
+	u16			 iocstat;
+	pFWDownloadReply_t	 ReplyMsg = NULL;
+
+	dctlprintk((KERN_INFO "mptctl_do_fwdl called. mptctl_id = %xh.\n", mptctl_id));
+
+	dctlprintk((KERN_INFO "DbG: kfwdl.bufp  = %p\n", ufwbuf));
+	dctlprintk((KERN_INFO "DbG: kfwdl.fwlen = %d\n", (int)fwlen));
+	dctlprintk((KERN_INFO "DbG: kfwdl.ioc   = %04xh\n", ioc));
+
+	if ((ioc = mpt_verify_adapter(ioc, &iocp)) < 0) {
+		dctlprintk(("%s@%d::_ioctl_fwdl - ioc%d not found!\n",
+				__FILE__, __LINE__, ioc));
+		return -ENODEV; /* (-6) No such device or address */
+	}
+
+	/*  Valid device. Get a message frame and construct the FW download message.
+	 */
+	if ((mf = mpt_get_msg_frame(mptctl_id, iocp)) == NULL)
+		return -EAGAIN;
+	dlmsg = (FWDownload_t*) mf;
+	ptsge = (FWDownloadTCSGE_t *) &dlmsg->SGL;
+	sgOut = (char *) (ptsge + 1);
+
+	/*
+	 * Construct f/w download request
+	 */
+	dlmsg->ImageType = MPI_FW_DOWNLOAD_ITYPE_FW;
+	dlmsg->Reserved = 0;
+	dlmsg->ChainOffset = 0;
+	dlmsg->Function = MPI_FUNCTION_FW_DOWNLOAD;
+	dlmsg->Reserved1[0] = dlmsg->Reserved1[1] = dlmsg->Reserved1[2] = 0;
+	dlmsg->MsgFlags = 0;
+
+	/* Set up the Transaction SGE.
+	 */
+	ptsge->Reserved = 0;
+	ptsge->ContextSize = 0;
+	ptsge->DetailsLength = 12;
+	ptsge->Flags = MPI_SGE_FLAGS_TRANSACTION_ELEMENT;
+	ptsge->Reserved_0100_Checksum = 0;
+	ptsge->ImageOffset = 0;
+	ptsge->ImageSize = cpu_to_le32(fwlen);
+
+	/* Add the SGL
+	 */
+
+	/*
+	 * Need to kmalloc area(s) for holding firmware image bytes.
+	 * But we need to do it piece meal, using a proper
+	 * scatter gather list (with 128kB MAX hunks).
+	 *
+	 * A practical limit here might be # of sg hunks that fit into
+	 * a single IOC request frame; 12 or 8 (see below), so:
+	 * For FC9xx: 12 x 128kB == 1.5 mB (max)
+	 * For C1030:  8 x 128kB == 1   mB (max)
+	 * We could support chaining, but things get ugly(ier:)
+	 *
+	 * Set the sge_offset to the start of the sgl (bytes).
+	 */
+	sgdir = 0x04000000;		/* IOC will READ from sys mem */
+	sge_offset = sizeof(MPIHeader_t) + sizeof(FWDownloadTCSGE_t);
+	if ((sgl = kbuf_alloc_2_sgl(fwlen, sgdir, sge_offset,
+				    &numfrags, &buflist, &sgl_dma, iocp)) == NULL)
+		return -ENOMEM;
+
+	/*
+	 * We should only need SGL with 2 simple_32bit entries (up to 256 kB)
+	 * for FC9xx f/w image, but calculate max number of sge hunks
+	 * we can fit into a request frame, and limit ourselves to that.
+	 * (currently no chain support)
+	 * maxfrags = (Request Size - FWdownload Size ) / Size of 32 bit SGE
+	 *	Request		maxfrags
+	 *	128		12
+	 *	96		8
+	 *	64		4
+	 */
+	maxfrags = (iocp->req_sz - sizeof(MPIHeader_t) - sizeof(FWDownloadTCSGE_t))
+			/ (sizeof(dma_addr_t) + sizeof(u32));
+	if (numfrags > maxfrags) {
+		ret = -EMLINK;
+		goto fwdl_out;
+	}
+
+	dctlprintk((KERN_INFO "DbG: sgl buffer  = %p, sgfrags = %d\n", sgl, numfrags));
+
+	/*
+	 * Parse SG list, copying sgl itself,
+	 * plus f/w image hunks from user space as we go...
+	 */
+	ret = -EFAULT;
+	sgIn = sgl;
+	bl = buflist;
+	for (i=0; i < numfrags; i++) {
+
+		/* Get the SGE type: 0 - TCSGE, 3 - Chain, 1 - Simple SGE
+		 * Skip everything but Simple. If simple, copy from
+		 *	user space into kernel space.
+		 * Note: we should not have anything but Simple as
+		 *	Chain SGE are illegal.
+		 */
+		nib = (sgIn->FlagsLength & 0x30000000) >> 28;
+		if (nib == 0 || nib == 3) {
+			;
+		} else if (sgIn->Address) {
+			mpt_add_sge(sgOut, sgIn->FlagsLength, sgIn->Address);
+			n++;
+			if (copy_from_user(bl->kptr, ufwbuf+fw_bytes_copied, bl->len)) {
+				printk(KERN_ERR "%s@%d::_ioctl_fwdl - "
+						"Unable to copy f/w buffer hunk#%d @ %p\n",
+						__FILE__, __LINE__, n, ufwbuf);
+				goto fwdl_out;
+			}
+			fw_bytes_copied += bl->len;
+		}
+		sgIn++;
+		bl++;
+		sgOut += (sizeof(dma_addr_t) + sizeof(u32));
+	}
+
+#ifdef MPT_DEBUG
+	{
+		u32 *m = (u32 *)mf;
+		printk(KERN_INFO MYNAM ": F/W download request:\n" KERN_INFO " ");
+		for (i=0; i < 7+numfrags*2; i++)
+			printk(" %08x", le32_to_cpu(m[i]));
+		printk("\n");
+	}
+#endif
+
+	/*
+	 * Finally, perform firmware download.
+	 */
+	iocp->ioctl->wait_done = 0;
+	mpt_put_msg_frame(mptctl_id, iocp, mf);
+
+	/* Now wait for the command to complete */
+	ret = wait_event_interruptible_timeout(mptctl_wait,
+	     iocp->ioctl->wait_done == 1,
+	     HZ*60);
+
+	if(ret <=0 && (iocp->ioctl->wait_done != 1 )) {
+	/* Now we need to reset the board */
+		mptctl_timeout_expired(iocp->ioctl);
+		ret = -ENODATA;
+		goto fwdl_out;
+	}
+
+	if (sgl)
+		kfree_sgl(sgl, sgl_dma, buflist, iocp);
+
+	ReplyMsg = (pFWDownloadReply_t)iocp->ioctl->ReplyFrame;
+	iocstat = le16_to_cpu(ReplyMsg->IOCStatus) & MPI_IOCSTATUS_MASK;
+	if (iocstat == MPI_IOCSTATUS_SUCCESS) {
+		printk(KERN_INFO MYNAM ": F/W update successfully sent to %s!\n", iocp->name);
+		return 0;
+	} else if (iocstat == MPI_IOCSTATUS_INVALID_FUNCTION) {
+		printk(KERN_WARNING MYNAM ": ?Hmmm...  %s says it doesn't support F/W download!?!\n",
+				iocp->name);
+		printk(KERN_WARNING MYNAM ": (time to go bang on somebody's door)\n");
+		return -EBADRQC;
+	} else if (iocstat == MPI_IOCSTATUS_BUSY) {
+		printk(KERN_WARNING MYNAM ": Warning!  %s says: IOC_BUSY!\n", iocp->name);
+		printk(KERN_WARNING MYNAM ": (try again later?)\n");
+		return -EBUSY;
+	} else {
+		printk(KERN_WARNING MYNAM "::ioctl_fwdl() ERROR!  %s returned [bad] status = %04xh\n",
+				    iocp->name, iocstat);
+		printk(KERN_WARNING MYNAM ": (bad VooDoo)\n");
+		return -ENOMSG;
+	}
+	return 0;
+
+fwdl_out:
+	kfree_sgl(sgl, sgl_dma, buflist, iocp);
+	return ret;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * SGE Allocation routine
+ *
+ * Inputs:	bytes - number of bytes to be transferred
+ *		sgdir - data direction
+ *		sge_offset - offset (in bytes) from the start of the request
+ *			frame to the first SGE
+ *		ioc - pointer to the mptadapter
+ * Outputs:	frags - number of scatter gather elements
+ *		blp - pointer to the buflist pointer
+ *		sglbuf_dma - pointer to the (dma) sgl
+ * Returns:	NULL if it fails,
+ *		pointer to the (virtual) sgl if successful.
+ */
+static MptSge_t *
+kbuf_alloc_2_sgl(int bytes, u32 sgdir, int sge_offset, int *frags,
+		 struct buflist **blp, dma_addr_t *sglbuf_dma, MPT_ADAPTER *ioc)
+{
+	MptSge_t	*sglbuf = NULL;		/* pointer to array of SGE */
+						/* and chain buffers */
+	struct buflist	*buflist = NULL;	/* kernel routine */
+	MptSge_t	*sgl;
+	int		 numfrags = 0;
+	int		 fragcnt = 0;
+	int		 alloc_sz = min(bytes,MAX_KMALLOC_SZ);	// avoid kernel warning msg!
+	int		 bytes_allocd = 0;
+	int		 this_alloc;
+	dma_addr_t	 pa;					// phys addr
+	int		 i, buflist_ent;
+	int		 sg_spill = MAX_FRAGS_SPILL1;
+	int		 dir;
+	/* initialization */
+	*frags = 0;
+	*blp = NULL;
+
+	/* Allocate and initialize an array of kernel
+	 * structures for the SG elements.
+	 */
+	i = MAX_SGL_BYTES / 8;
+	buflist = kmalloc(i, GFP_USER);
+	if (buflist == NULL)
+		return NULL;
+	memset(buflist, 0, i);
+	buflist_ent = 0;
+
+	/* Allocate a single block of memory to store the sg elements and
+	 * the chain buffers.  The calling routine is responsible for
+	 * copying the data in this array into the correct place in the
+	 * request and chain buffers.
+	 */
+	sglbuf = pci_alloc_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf_dma);
+	if (sglbuf == NULL)
+		goto free_and_fail;
+
+	if (sgdir & 0x04000000)
+		dir = PCI_DMA_TODEVICE;
+	else
+		dir = PCI_DMA_FROMDEVICE;
+
+	/* At start:
+	 *	sgl = sglbuf = point to beginning of sg buffer
+	 *	buflist_ent = 0 = first kernel structure
+	 *	sg_spill = number of SGE that can be written before the first
+	 *		chain element.
+	 *
+	 */
+	sgl = sglbuf;
+	sg_spill = ((ioc->req_sz - sge_offset)/(sizeof(dma_addr_t) + sizeof(u32))) - 1;
+	while (bytes_allocd < bytes) {
+		this_alloc = min(alloc_sz, bytes-bytes_allocd);
+		buflist[buflist_ent].len = this_alloc;
+		buflist[buflist_ent].kptr = pci_alloc_consistent(ioc->pcidev,
+								 this_alloc,
+								 &pa);
+		if (buflist[buflist_ent].kptr == NULL) {
+			alloc_sz = alloc_sz / 2;
+			if (alloc_sz == 0) {
+				printk(KERN_WARNING MYNAM "-SG: No can do - "
+						    "not enough memory!   :-(\n");
+				printk(KERN_WARNING MYNAM "-SG: (freeing %d frags)\n",
+						    numfrags);
+				goto free_and_fail;
+			}
+			continue;
+		} else {
+			dma_addr_t dma_addr;
+
+			bytes_allocd += this_alloc;
+			sgl->FlagsLength = (0x10000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|this_alloc);
+			dma_addr = pci_map_single(ioc->pcidev, buflist[buflist_ent].kptr, this_alloc, dir);
+			sgl->Address = dma_addr;
+
+			fragcnt++;
+			numfrags++;
+			sgl++;
+			buflist_ent++;
+		}
+
+		if (bytes_allocd >= bytes)
+			break;
+
+		/* Need to chain? */
+		if (fragcnt == sg_spill) {
+			printk(KERN_WARNING MYNAM "-SG: No can do - " "Chain required!   :-(\n");
+			printk(KERN_WARNING MYNAM "(freeing %d frags)\n", numfrags);
+			goto free_and_fail;
+		}
+
+		/* overflow check... */
+		if (numfrags*8 > MAX_SGL_BYTES){
+			/* GRRRRR... */
+			printk(KERN_WARNING MYNAM "-SG: No can do - "
+					    "too many SG frags!   :-(\n");
+			printk(KERN_WARNING MYNAM "-SG: (freeing %d frags)\n",
+					    numfrags);
+			goto free_and_fail;
+		}
+	}
+
+	/* Last sge fixup: set LE+eol+eob bits */
+	sgl[-1].FlagsLength |= 0xC1000000;
+
+	*frags = numfrags;
+	*blp = buflist;
+
+	dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - "
+			   "%d SG frags generated!\n",
+			   numfrags));
+
+	dctlprintk((KERN_INFO MYNAM "-SG: kbuf_alloc_2_sgl() - "
+			   "last (big) alloc_sz=%d\n",
+			   alloc_sz));
+
+	return sglbuf;
+
+free_and_fail:
+	if (sglbuf != NULL) {
+		int i;
+
+		for (i = 0; i < numfrags; i++) {
+			dma_addr_t dma_addr;
+			u8 *kptr;
+			int len;
+
+			if ((sglbuf[i].FlagsLength >> 24) == 0x30)
+				continue;
+
+			dma_addr = sglbuf[i].Address;
+			kptr = buflist[i].kptr;
+			len = buflist[i].len;
+
+			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+		}
+		pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sglbuf, *sglbuf_dma);
+	}
+	kfree(buflist);
+	return NULL;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Routine to free the SGL elements.
+ */
+static void
+kfree_sgl(MptSge_t *sgl, dma_addr_t sgl_dma, struct buflist *buflist, MPT_ADAPTER *ioc)
+{
+	MptSge_t	*sg = sgl;
+	struct buflist	*bl = buflist;
+	u32		 nib;
+	int		 dir;
+	int		 n = 0;
+
+	if (sg->FlagsLength & 0x04000000)
+		dir = PCI_DMA_TODEVICE;
+	else
+		dir = PCI_DMA_FROMDEVICE;
+
+	nib = (sg->FlagsLength & 0xF0000000) >> 28;
+	while (! (nib & 0x4)) { /* eob */
+		/* skip ignore/chain. */
+		if (nib == 0 || nib == 3) {
+			;
+		} else if (sg->Address) {
+			dma_addr_t dma_addr;
+			void *kptr;
+			int len;
+
+			dma_addr = sg->Address;
+			kptr = bl->kptr;
+			len = bl->len;
+			pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
+			pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+			n++;
+		}
+		sg++;
+		bl++;
+		nib = (le32_to_cpu(sg->FlagsLength) & 0xF0000000) >> 28;
+	}
+
+	/* we're at eob! */
+	if (sg->Address) {
+		dma_addr_t dma_addr;
+		void *kptr;
+		int len;
+
+		dma_addr = sg->Address;
+		kptr = bl->kptr;
+		len = bl->len;
+		pci_unmap_single(ioc->pcidev, dma_addr, len, dir);
+		pci_free_consistent(ioc->pcidev, len, kptr, dma_addr);
+		n++;
+	}
+
+	pci_free_consistent(ioc->pcidev, MAX_SGL_BYTES, sgl, sgl_dma);
+	kfree(buflist);
+	dctlprintk((KERN_INFO MYNAM "-SG: Free'd 1 SGL buf + %d kbufs!\n", n));
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_getiocinfo - Query the host adapter for IOC information.
+ *	@arg: User space argument
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENODEV  if no such device/adapter
+ */
+static int
+mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
+{
+	struct mpt_ioctl_iocinfo __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_iocinfo *karg;
+	MPT_ADAPTER		*ioc;
+	struct pci_dev		*pdev;
+	struct Scsi_Host	*sh;
+	MPT_SCSI_HOST		*hd;
+	int			iocnum;
+	int			numDevices = 0;
+	unsigned int		max_id;
+	int			ii;
+	int			port;
+	int			cim_rev;
+	u8			revision;
+
+	dctlprintk((": mptctl_getiocinfo called.\n"));
+	/* Adding the PCI INFO results in unaligned access on
+	 * IA64 and Sparc. Reset long to int. Return no PCI
+	 * data for the obsolete format.
+	 */
+	if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev0))
+		cim_rev = 0;
+	else if (data_size == sizeof(struct mpt_ioctl_iocinfo_rev1))
+		cim_rev = 1;
+	else if (data_size == sizeof(struct mpt_ioctl_iocinfo))
+		cim_rev = 2;
+	else if (data_size == (sizeof(struct mpt_ioctl_iocinfo_rev0)+12))
+		cim_rev = 0;	/* obsolete */
+	else
+		return -EFAULT;
+
+	karg = kmalloc(data_size, GFP_KERNEL);
+	if (karg == NULL) {
+		printk(KERN_ERR "%s::mpt_ioctl_iocinfo() @%d - no memory available!\n",
+				__FILE__, __LINE__);
+		return -ENOMEM;
+	}
+
+	if (copy_from_user(karg, uarg, data_size)) {
+		printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
+			"Unable to read in mpt_ioctl_iocinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg->hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_getiocinfo() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		kfree(karg);
+		return -ENODEV;
+	}
+
+	/* Verify the data transfer size is correct.
+	 * Ignore the port setting.
+	 */
+	if (karg->hdr.maxDataSize != data_size) {
+		printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
+			"Structure size mismatch. Command not completed.\n",
+				__FILE__, __LINE__);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	/* Fill in the data and return the structure to the calling
+	 * program
+	 */
+	if (ioc->bus_type == FC)
+		karg->adapterType = MPT_IOCTL_INTERFACE_FC;
+	else
+		karg->adapterType = MPT_IOCTL_INTERFACE_SCSI;
+
+	port = karg->hdr.port;
+
+	karg->port = port;
+	pdev = (struct pci_dev *) ioc->pcidev;
+
+	karg->pciId = pdev->device;
+	pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
+	karg->hwRev = revision;
+	karg->subSystemDevice = pdev->subsystem_device;
+	karg->subSystemVendor = pdev->subsystem_vendor;
+
+	if (cim_rev == 1) {
+		/* Get the PCI bus, device, and function numbers for the IOC
+		 */
+		karg->pciInfo.u.bits.busNumber = pdev->bus->number;
+		karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
+		karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
+	} else if (cim_rev == 2) {
+		/* Get the PCI bus, device, function and segment ID numbers 
+		   for the IOC */
+		karg->pciInfo.u.bits.busNumber = pdev->bus->number;
+		karg->pciInfo.u.bits.deviceNumber = PCI_SLOT( pdev->devfn );
+		karg->pciInfo.u.bits.functionNumber = PCI_FUNC( pdev->devfn );
+		karg->pciInfo.segmentID = pci_domain_nr(pdev->bus);
+	}
+
+	/* Get number of devices
+         */
+	if ((sh = ioc->sh) != NULL) {
+		/* sh->max_id = maximum target ID + 1
+		 */
+		max_id = sh->max_id - 1;
+		hd = (MPT_SCSI_HOST *) sh->hostdata;
+
+		/* Check all of the target structures and
+		 * keep a counter.
+		 */
+		if (hd && hd->Targets) {
+			for (ii = 0; ii <= max_id; ii++) {
+				if (hd->Targets[ii])
+					numDevices++;
+			}
+		}
+	}
+	karg->numDevices = numDevices;
+
+	/* Set the BIOS and FW Version
+	 */
+	karg->FWVersion = ioc->facts.FWVersion.Word;
+	karg->BIOSVersion = ioc->biosVersion;
+
+	/* Set the Version Strings.
+	 */
+	strncpy (karg->driverVersion, MPT_LINUX_PACKAGE_NAME, MPT_IOCTL_VERSION_LENGTH);
+	karg->driverVersion[MPT_IOCTL_VERSION_LENGTH-1]='\0';
+
+	karg->busChangeEvent = 0;
+	karg->hostId = ioc->pfacts[port].PortSCSIID;
+	karg->rsvd[0] = karg->rsvd[1] = 0;
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, karg, data_size)) {
+		printk(KERN_ERR "%s@%d::mptctl_getiocinfo - "
+			"Unable to write out mpt_ioctl_iocinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		kfree(karg);
+		return -EFAULT;
+	}
+
+	kfree(karg);
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_gettargetinfo - Query the host adapter for target information.
+ *	@arg: User space argument
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENODEV  if no such device/adapter
+ */
+static int
+mptctl_gettargetinfo (unsigned long arg)
+{
+	struct mpt_ioctl_targetinfo __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_targetinfo karg;
+	MPT_ADAPTER		*ioc;
+	struct Scsi_Host	*sh;
+	MPT_SCSI_HOST		*hd;
+	VirtDevice		*vdev;
+	char			*pmem;
+	int			*pdata;
+	IOCPage2_t		*pIoc2;
+	IOCPage3_t		*pIoc3;
+	int			iocnum;
+	int			numDevices = 0;
+	unsigned int		max_id;
+	int			id, jj, indexed_lun, lun_index;
+	u32			lun;
+	int			maxWordsLeft;
+	int			numBytes;
+	u8			port, devType, bus_id;
+
+	dctlprintk(("mptctl_gettargetinfo called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_targetinfo))) {
+		printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
+			"Unable to read in mpt_ioctl_targetinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_gettargetinfo() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	/* Get the port number and set the maximum number of bytes
+	 * in the returned structure.
+	 * Ignore the port setting.
+	 */
+	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
+	maxWordsLeft = numBytes/sizeof(int);
+	port = karg.hdr.port;
+
+	if (maxWordsLeft <= 0) {
+		printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n",
+				__FILE__, __LINE__);
+		return -ENOMEM;
+	}
+
+	/* Fill in the data and return the structure to the calling
+	 * program
+	 */
+
+	/* struct mpt_ioctl_targetinfo does not contain sufficient space
+	 * for the target structures so when the IOCTL is called, there is
+	 * not sufficient stack space for the structure. Allocate memory,
+	 * populate the memory, copy back to the user, then free memory.
+	 * targetInfo format:
+	 * bits 31-24: reserved
+	 *      23-16: LUN
+	 *      15- 8: Bus Number
+	 *       7- 0: Target ID
+	 */
+	pmem = kmalloc(numBytes, GFP_KERNEL);
+	if (pmem == NULL) {
+		printk(KERN_ERR "%s::mptctl_gettargetinfo() @%d - no memory available!\n",
+				__FILE__, __LINE__);
+		return -ENOMEM;
+	}
+	memset(pmem, 0, numBytes);
+	pdata =  (int *) pmem;
+
+	/* Get number of devices
+         */
+	if ((sh = ioc->sh) != NULL) {
+
+		max_id = sh->max_id - 1;
+		hd = (MPT_SCSI_HOST *) sh->hostdata;
+
+		/* Check all of the target structures.
+		 * Save the Id and increment the counter,
+		 * if ptr non-null.
+		 * sh->max_id = maximum target ID + 1
+		 */
+		if (hd && hd->Targets) {
+			mpt_findImVolumes(ioc);
+			pIoc2 = ioc->spi_data.pIocPg2;
+			for ( id = 0; id <= max_id; ) {
+				if ( pIoc2 && pIoc2->NumActiveVolumes ) {
+					if ( id == pIoc2->RaidVolume[0].VolumeID ) {
+						if (maxWordsLeft <= 0) {
+							printk(KERN_ERR "mptctl_gettargetinfo - "
+								"buffer is full but volume is available on ioc %d, numDevices=%d\n",
+								iocnum, numDevices);
+							goto data_space_full;
+						}
+						if ((pIoc2->RaidVolume[0].Flags & MPI_IOCPAGE2_FLAG_VOLUME_INACTIVE) == 0)
+							devType = 0x80;
+						else
+							devType = 0xC0;
+						bus_id = pIoc2->RaidVolume[0].VolumeBus;
+						numDevices++;
+						*pdata = ((devType << 24) | (bus_id << 8) | id);
+						dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
+							"volume ioc=%d target=%x numDevices=%d pdata=%p\n",
+							iocnum, *pdata, numDevices, pdata));
+						pdata++;
+						--maxWordsLeft;
+						goto next_id;
+					} else {
+						pIoc3 = ioc->spi_data.pIocPg3;
+						for (jj = 0; jj < pIoc3->NumPhysDisks; jj++) {
+							if (pIoc3->PhysDisk[jj].PhysDiskID == id)
+								goto next_id;
+						}
+					}
+				}
+				if ( (vdev = hd->Targets[id]) ) {
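+					/* vdev->luns[] is a bitmap of configured
+					 * LUNs for this target: LUN jj lives in
+					 * word jj >> 5, bit jj % 32.
+					 */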
+					for (jj = 0; jj <= MPT_LAST_LUN; jj++) {
+						lun_index = (jj >> 5);
+						indexed_lun = (jj % 32);
+						lun = (1 << indexed_lun);
+						if (vdev->luns[lun_index] & lun) {
+							if (maxWordsLeft <= 0) {
+								printk(KERN_ERR "mptctl_gettargetinfo - "
+			"buffer is full but more targets are available on ioc %d numDevices=%d\n", iocnum, numDevices);
+								goto data_space_full;
+							}
+							bus_id = vdev->bus_id;
+							numDevices++;
+							*pdata = ((jj << 16) | (bus_id << 8) | id);
+							dctlprintk((KERN_ERR "mptctl_gettargetinfo - "
+		"target ioc=%d target=%x numDevices=%d pdata=%p\n", iocnum, *pdata, numDevices, pdata));
+							pdata++;
+							--maxWordsLeft;
+						}
+					}
+				}
+next_id:
+				id++;
+			}
+		}
+	}
+data_space_full:
+	karg.numDevices = numDevices;
+
+	/* Copy part of the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, &karg,
+				sizeof(struct mpt_ioctl_targetinfo))) {
+		printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
+			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		kfree(pmem);
+		return -EFAULT;
+	}
+
+	/* Copy the remaining data from kernel memory to user memory
+	 */
+	if (copy_to_user(uarg->targetInfo, pmem, numBytes)) {
+		printk(KERN_ERR "%s@%d::mptctl_gettargetinfo - "
+			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
+				__FILE__, __LINE__, pdata);
+		kfree(pmem);
+		return -EFAULT;
+	}
+
+	kfree(pmem);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* MPT IOCTL Test function.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENODEV  if no such device/adapter
+ */
+static int
+mptctl_readtest (unsigned long arg)
+{
+	struct mpt_ioctl_test __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_test	 karg;
+	MPT_ADAPTER *ioc;
+	int iocnum;
+
+	dctlprintk(("mptctl_readtest called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_test))) {
+		printk(KERN_ERR "%s@%d::mptctl_readtest - "
+			"Unable to read in mpt_ioctl_test struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_readtest() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	/* Fill in the data and return the structure to the calling
+	 * program
+	 */
+
+#ifdef MFCNT
+	karg.chip_type = ioc->mfcnt;
+#else
+	karg.chip_type = ioc->pcidev->device;
+#endif
+	strncpy (karg.name, ioc->name, MPT_MAX_NAME);
+	karg.name[MPT_MAX_NAME-1]='\0';
+	strncpy (karg.product, ioc->prod_name, MPT_PRODUCT_LENGTH);
+	karg.product[MPT_PRODUCT_LENGTH-1]='\0';
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_test))) {
+		printk(KERN_ERR "%s@%d::mptctl_readtest - "
+			"Unable to write out mpt_ioctl_test struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_eventquery - Query the host adapter for the event types
+ *	that are being logged.
+ *	@arg: User space argument
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-ENODEV  if no such device/adapter
+ */
+static int
+mptctl_eventquery (unsigned long arg)
+{
+	struct mpt_ioctl_eventquery __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_eventquery	 karg;
+	MPT_ADAPTER *ioc;
+	int iocnum;
+
+	dctlprintk(("mptctl_eventquery called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventquery))) {
+		printk(KERN_ERR "%s@%d::mptctl_eventquery - "
+			"Unable to read in mpt_ioctl_eventquery struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_eventquery() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	karg.eventEntries = ioc->eventLogSize;
+	karg.eventTypes = ioc->eventTypes;
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, &karg, sizeof(struct mpt_ioctl_eventquery))) {
+		printk(KERN_ERR "%s@%d::mptctl_eventquery - "
+			"Unable to write out mpt_ioctl_eventquery struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_eventenable (unsigned long arg)
+{
+	struct mpt_ioctl_eventenable __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_eventenable	 karg;
+	MPT_ADAPTER *ioc;
+	int iocnum;
+
+	dctlprintk(("mptctl_eventenable called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventenable))) {
+		printk(KERN_ERR "%s@%d::mptctl_eventenable - "
+			"Unable to read in mpt_ioctl_eventenable struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_eventenable() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	if (ioc->events == NULL) {
+		/* Have not yet allocated memory - do so now.
+		 */
+		int sz = MPTCTL_EVENT_LOG_SIZE * sizeof(MPT_IOCTL_EVENTS);
+		ioc->events = kmalloc(sz, GFP_KERNEL);
+		if (ioc->events == NULL) {
+			printk(KERN_ERR MYNAM ": ERROR - Insufficient memory to add adapter!\n");
+			return -ENOMEM;
+		}
+		memset(ioc->events, 0, sz);
+		ioc->alloc_total += sz;
+
+		ioc->eventLogSize = MPTCTL_EVENT_LOG_SIZE;
+		ioc->eventContext = 0;
+	}
+
+	/* Update the IOC event logging flag.
+	 */
+	ioc->eventTypes = karg.eventTypes;
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_eventreport (unsigned long arg)
+{
+	struct mpt_ioctl_eventreport __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_eventreport	 karg;
+	MPT_ADAPTER		 *ioc;
+	int			 iocnum;
+	int			 numBytes, maxEvents, max;
+
+	dctlprintk(("mptctl_eventreport called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_eventreport))) {
+		printk(KERN_ERR "%s@%d::mptctl_eventreport - "
+			"Unable to read in mpt_ioctl_eventreport struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_eventreport() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
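+	/* hdr.maxDataSize counts the ioctl header as well; the remainder
+	 * of the buffer holds whole MPT_IOCTL_EVENTS entries.
+	 */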
+	numBytes = karg.hdr.maxDataSize - sizeof(mpt_ioctl_header);
+	maxEvents = numBytes/sizeof(MPT_IOCTL_EVENTS);
+
+
+	max = ioc->eventLogSize < maxEvents ? ioc->eventLogSize : maxEvents;
+
+	/* If fewer than 1 event is requested, there must have
+	 * been some type of error.
+	 */
+	if ((max < 1) || !ioc->events)
+		return -ENODATA;
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	numBytes = max * sizeof(MPT_IOCTL_EVENTS);
+	if (copy_to_user(uarg->eventData, ioc->events, numBytes)) {
+		printk(KERN_ERR "%s@%d::mptctl_eventreport - "
+			"Unable to write out mpt_ioctl_eventreport struct @ %p\n",
+				__FILE__, __LINE__, ioc->events);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptctl_replace_fw (unsigned long arg)
+{
+	struct mpt_ioctl_replace_fw __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_replace_fw	 karg;
+	MPT_ADAPTER		 *ioc;
+	int			 iocnum;
+	int			 newFwSize;
+
+	dctlprintk(("mptctl_replace_fw called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_replace_fw))) {
+		printk(KERN_ERR "%s@%d::mptctl_replace_fw - "
+			"Unable to read in mpt_ioctl_replace_fw struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_replace_fw() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	/* If caching FW, Free the old FW image
+	 */
+	if (ioc->cached_fw == NULL)
+		return 0;
+
+	mpt_free_fw_memory(ioc);
+
+	/* Allocate memory for the new FW image
+	 */
+	newFwSize = karg.newImageSize;
+
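+	/* Round the new image size up to the next multiple of 4 bytes. */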
+	if (newFwSize & 0x01)
+		newFwSize += 1;
+	if (newFwSize & 0x02)
+		newFwSize += 2;
+
+	mpt_alloc_fw_memory(ioc, newFwSize);
+	if (ioc->cached_fw == NULL)
+		return -ENOMEM;
+
+	/* Copy the data from user memory to kernel space
+	 */
+	if (copy_from_user(ioc->cached_fw, uarg->newImage, newFwSize)) {
+		printk(KERN_ERR "%s@%d::mptctl_replace_fw - "
+				"Unable to read in mpt_ioctl_replace_fw image "
+				"@ %p\n", __FILE__, __LINE__, uarg);
+		mpt_free_fw_memory(ioc);
+		return -EFAULT;
+	}
+
+	/* Update IOCFactsReply
+	 */
+	ioc->facts.FWImageSize = newFwSize;
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* MPT IOCTL MPTCOMMAND function.
+ * Cast the arg into the mpt_ioctl_mpt_command structure.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EBUSY  if a previous command timed out and the IOC reset is not complete.
+ *		-EFAULT if data unavailable
+ *		-ENODEV if no such device/adapter
+ *		-ETIME	if timer expires
+ *		-ENOMEM if memory allocation error
+ */
+static int
+mptctl_mpt_command (unsigned long arg)
+{
+	struct mpt_ioctl_command __user *uarg = (void __user *) arg;
+	struct mpt_ioctl_command  karg;
+	MPT_ADAPTER	*ioc;
+	int		iocnum;
+	int		rc;
+
+	dctlprintk(("mptctl_command called.\n"));
+
+	if (copy_from_user(&karg, uarg, sizeof(struct mpt_ioctl_command))) {
+		printk(KERN_ERR "%s@%d::mptctl_mpt_command - "
+			"Unable to read in mpt_ioctl_command struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_mpt_command() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	rc = mptctl_do_mpt_command (karg, &uarg->MF);
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Worker routine for the IOCTL MPTCOMMAND and MPTCOMMAND32 (sparc) commands.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EBUSY  if a previous command timed out and the IOC reset is not complete.
+ *		-EFAULT if data unavailable
+ *		-ENODEV if no such device/adapter
+ *		-ETIME	if timer expires
+ *		-ENOMEM if memory allocation error
+ *		-EPERM if SCSI I/O and target is untagged
+ */
+static int
+mptctl_do_mpt_command (struct mpt_ioctl_command karg, void __user *mfPtr)
+{
+	MPT_ADAPTER	*ioc;
+	MPT_FRAME_HDR	*mf = NULL;
+	MPIHeader_t	*hdr;
+	char		*psge;
+	struct buflist	bufIn;	/* data In buffer */
+	struct buflist	bufOut; /* data Out buffer */
+	dma_addr_t	dma_addr_in;
+	dma_addr_t	dma_addr_out;
+	int		sgSize = 0;	/* Num SG elements */
+	int		iocnum, flagsLength;
+	int		sz, rc = 0;
+	int		msgContext;
+	u16		req_idx;
+	ulong 		timeout;
+
+	dctlprintk(("mptctl_do_mpt_command called.\n"));
+	bufIn.kptr = bufOut.kptr = NULL;
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_do_mpt_command() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+	if (!ioc->ioctl) {
+		printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+			"No memory available during driver init.\n",
+				__FILE__, __LINE__);
+		return -ENOMEM;
+	} else if (ioc->ioctl->status & MPT_IOCTL_STATUS_DID_IOCRESET) {
+		printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+			"Busy with IOC Reset \n", __FILE__, __LINE__);
+		return -EBUSY;
+	}
+
+	/* Verify that the final request frame will not be too large.
+	 */
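+	/* dataSgeOffset is in 32-bit words; each data direction adds one
+	 * simple SGE (a u32 flags/length word plus a DMA address).
+	 */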
+	sz = karg.dataSgeOffset * 4;
+	if (karg.dataInSize > 0)
+		sz += sizeof(dma_addr_t) + sizeof(u32);
+	if (karg.dataOutSize > 0)
+		sz += sizeof(dma_addr_t) + sizeof(u32);
+
+	if (sz > ioc->req_sz) {
+		printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+			"Request frame too large (%d) maximum (%d)\n",
+				__FILE__, __LINE__, sz, ioc->req_sz);
+		return -EFAULT;
+	}
+
+	/* Get a free request frame and save the message context.
+	 */
+	if ((mf = mpt_get_msg_frame(mptctl_id, ioc)) == NULL)
+		return -EAGAIN;
+
+	hdr = (MPIHeader_t *) mf;
+	msgContext = le32_to_cpu(hdr->MsgContext);
+	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+
+	/* Copy the request frame
+	 * Reset the saved message context.
+	 * Request frame in user space
+	 */
+	if (copy_from_user(mf, mfPtr, karg.dataSgeOffset * 4)) {
+		printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+			"Unable to read MF from mpt_ioctl_command struct @ %p\n",
+			__FILE__, __LINE__, mfPtr);
+		rc = -EFAULT;
+		goto done_free_mem;
+	}
+	hdr->MsgContext = cpu_to_le32(msgContext);
+
+
+	/* Verify that this request is allowed.
+	 */
+	switch (hdr->Function) {
+	case MPI_FUNCTION_IOC_FACTS:
+	case MPI_FUNCTION_PORT_FACTS:
+		karg.dataOutSize  = karg.dataInSize = 0;
+		break;
+
+	case MPI_FUNCTION_CONFIG:
+	case MPI_FUNCTION_FC_COMMON_TRANSPORT_SEND:
+	case MPI_FUNCTION_FC_EX_LINK_SRVC_SEND:
+	case MPI_FUNCTION_FW_UPLOAD:
+	case MPI_FUNCTION_SCSI_ENCLOSURE_PROCESSOR:
+	case MPI_FUNCTION_FW_DOWNLOAD:
+	case MPI_FUNCTION_FC_PRIMITIVE_SEND:
+		break;
+
+	case MPI_FUNCTION_SCSI_IO_REQUEST:
+		if (ioc->sh) {
+			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
+			VirtDevice	*pTarget = NULL;
+			MPT_SCSI_HOST	*hd = NULL;
+			int qtag = MPI_SCSIIO_CONTROL_UNTAGGED;
+			int scsidir = 0;
+			int target = (int) pScsiReq->TargetID;
+			int dataSize;
+
+			if ((target < 0) || (target >= ioc->sh->max_id)) {
+				printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+					"Target ID out of bounds. \n",
+					__FILE__, __LINE__);
+				rc = -ENODEV;
+				goto done_free_mem;
+			}
+
+			pScsiReq->MsgFlags = mpt_msg_flags();
+
+			/* verify that app has not requested
+			 *	more sense data than driver
+			 *	can provide, if so, reset this parameter
+			 * set the sense buffer pointer low address
+			 * update the control field to specify Q type
+			 */
+			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
+				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+			else
+				pScsiReq->SenseBufferLength = karg.maxSenseBytes;
+
+			pScsiReq->SenseBufferLowAddr =
+				cpu_to_le32(ioc->sense_buf_low_dma
+				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));
+
+			if ((hd = (MPT_SCSI_HOST *) ioc->sh->hostdata)) {
+				if (hd->Targets)
+					pTarget = hd->Targets[target];
+			}
+
+			if (pTarget &&(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+				qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
+
+			/* Have the IOCTL driver set the direction based
+			 * on the dataOutSize (ordering issue with Sparc).
+			 */
+			if (karg.dataOutSize > 0) {
+				scsidir = MPI_SCSIIO_CONTROL_WRITE;
+				dataSize = karg.dataOutSize;
+			} else {
+				scsidir = MPI_SCSIIO_CONTROL_READ;
+				dataSize = karg.dataInSize;
+			}
+
+			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
+			pScsiReq->DataLength = cpu_to_le32(dataSize);
+
+			ioc->ioctl->reset = MPTCTL_RESET_OK;
+			ioc->ioctl->target = target;
+
+		} else {
+			printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+				"SCSI driver is not loaded. \n",
+					__FILE__, __LINE__);
+			rc = -EFAULT;
+			goto done_free_mem;
+		}
+		break;
+
+	case MPI_FUNCTION_RAID_ACTION:
+		/* Just add a SGE
+		 */
+		break;
+
+	case MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH:
+		if (ioc->sh) {
+			SCSIIORequest_t *pScsiReq = (SCSIIORequest_t *) mf;
+			int qtag = MPI_SCSIIO_CONTROL_SIMPLEQ;
+			int scsidir = MPI_SCSIIO_CONTROL_READ;
+			int dataSize;
+
+			pScsiReq->MsgFlags = mpt_msg_flags();
+
+			/* verify that app has not requested
+			 *	more sense data than driver
+			 *	can provide, if so, reset this parameter
+			 * set the sense buffer pointer low address
+			 * update the control field to specify Q type
+			 */
+			if (karg.maxSenseBytes > MPT_SENSE_BUFFER_SIZE)
+				pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+			else
+				pScsiReq->SenseBufferLength = karg.maxSenseBytes;
+
+			pScsiReq->SenseBufferLowAddr =
+				cpu_to_le32(ioc->sense_buf_low_dma
+				   + (req_idx * MPT_SENSE_BUFFER_ALLOC));
+
+			/* All commands to physical devices are tagged
+			 */
+
+			/* Have the IOCTL driver set the direction based
+			 * on the dataOutSize (ordering issue with Sparc).
+			 */
+			if (karg.dataOutSize > 0) {
+				scsidir = MPI_SCSIIO_CONTROL_WRITE;
+				dataSize = karg.dataOutSize;
+			} else {
+				scsidir = MPI_SCSIIO_CONTROL_READ;
+				dataSize = karg.dataInSize;
+			}
+
+			pScsiReq->Control = cpu_to_le32(scsidir | qtag);
+			pScsiReq->DataLength = cpu_to_le32(dataSize);
+
+			ioc->ioctl->reset = MPTCTL_RESET_OK;
+			ioc->ioctl->target = pScsiReq->TargetID;
+		} else {
+			printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+				"SCSI driver is not loaded. \n",
+					__FILE__, __LINE__);
+			rc = -EFAULT;
+			goto done_free_mem;
+		}
+		break;
+
+	case MPI_FUNCTION_SCSI_TASK_MGMT:
+		{
+			MPT_SCSI_HOST *hd = NULL;
+			if ((ioc->sh == NULL) || ((hd = (MPT_SCSI_HOST *)ioc->sh->hostdata) == NULL)) {
+				printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+					"SCSI driver not loaded or SCSI host not found. \n",
+					__FILE__, __LINE__);
+				rc = -EFAULT;
+				goto done_free_mem;
+			} else if (mptctl_set_tm_flags(hd) != 0) {
+				rc = -EPERM;
+				goto done_free_mem;
+			}
+		}
+		break;
+
+	case MPI_FUNCTION_IOC_INIT:
+		{
+			IOCInit_t	*pInit = (IOCInit_t *) mf;
+			u32		high_addr, sense_high;
+
+			/* Verify that all entries in the IOC INIT match
+			 * existing setup (and in LE format).
+			 */
+			if (sizeof(dma_addr_t) == sizeof(u64)) {
+				high_addr = cpu_to_le32((u32)((u64)ioc->req_frames_dma >> 32));
+				sense_high= cpu_to_le32((u32)((u64)ioc->sense_buf_pool_dma >> 32));
+			} else {
+				high_addr = 0;
+				sense_high= 0;
+			}
+
+			if ((pInit->Flags != 0) || (pInit->MaxDevices != ioc->facts.MaxDevices) ||
+				(pInit->MaxBuses != ioc->facts.MaxBuses) ||
+				(pInit->ReplyFrameSize != cpu_to_le16(ioc->reply_sz)) ||
+				(pInit->HostMfaHighAddr != high_addr) ||
+				(pInit->SenseBufferHighAddr != sense_high)) {
+				printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+					"IOC_INIT issued with 1 or more incorrect parameters. Rejected.\n",
+					__FILE__, __LINE__);
+				rc = -EFAULT;
+				goto done_free_mem;
+			}
+		}
+		break;
+	default:
+		/*
+		 * MPI_FUNCTION_PORT_ENABLE
+		 * MPI_FUNCTION_TARGET_CMD_BUFFER_POST
+		 * MPI_FUNCTION_TARGET_ASSIST
+		 * MPI_FUNCTION_TARGET_STATUS_SEND
+		 * MPI_FUNCTION_TARGET_MODE_ABORT
+		 * MPI_FUNCTION_IOC_MESSAGE_UNIT_RESET
+		 * MPI_FUNCTION_IO_UNIT_RESET
+		 * MPI_FUNCTION_HANDSHAKE
+		 * MPI_FUNCTION_REPLY_FRAME_REMOVAL
+		 * MPI_FUNCTION_EVENT_NOTIFICATION
+		 *  (driver handles event notification)
+		 * MPI_FUNCTION_EVENT_ACK
+		 */
+
+		/*  What to do with these???  CHECK ME!!!
+			MPI_FUNCTION_FC_LINK_SRVC_BUF_POST
+			MPI_FUNCTION_FC_LINK_SRVC_RSP
+			MPI_FUNCTION_FC_ABORT
+			MPI_FUNCTION_LAN_SEND
+			MPI_FUNCTION_LAN_RECEIVE
+		 	MPI_FUNCTION_LAN_RESET
+		*/
+
+		printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+			"Illegal request (function 0x%x) \n",
+			__FILE__, __LINE__, hdr->Function);
+		rc = -EFAULT;
+		goto done_free_mem;
+	}
+
+	/* Add the SGL (at most one data-in SGE and one data-out SGE).
+	 * When both are present, the data-out (write) SGE always
+	 * precedes the data-in (read) SGE. bufOut/bufIn track the
+	 * allocated buffers so they can be freed later.
+	 */
+	psge = (char *) (((int *) mf) + karg.dataSgeOffset);
+	flagsLength = 0;
+
+	/* bufIn and bufOut are used for user to kernel space transfers
+	 */
+	bufIn.kptr = bufOut.kptr = NULL;
+	bufIn.len = bufOut.len = 0;
+
+	if (karg.dataOutSize > 0)
+		sgSize ++;
+
+	if (karg.dataInSize > 0)
+		sgSize ++;
+
+	if (sgSize > 0) {
+
+		/* Set up the dataOut memory allocation */
+		if (karg.dataOutSize > 0) {
+			if (karg.dataInSize > 0) {
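+				/* Data in both directions: build the data-out
+				 * SGE flags by hand since a data-in SGE will
+				 * follow it.
+				 */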
+				flagsLength = ( MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+						MPI_SGE_FLAGS_END_OF_BUFFER |
+						MPI_SGE_FLAGS_DIRECTION |
+						mpt_addr_size() )
+						<< MPI_SGE_FLAGS_SHIFT;
+			} else {
+				flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE;
+			}
+			flagsLength |= karg.dataOutSize;
+			bufOut.len = karg.dataOutSize;
+			bufOut.kptr = pci_alloc_consistent(
+					ioc->pcidev, bufOut.len, &dma_addr_out);
+
+			if (bufOut.kptr == NULL) {
+				rc = -ENOMEM;
+				goto done_free_mem;
+			} else {
+				/* Set up this SGE.
+				 * Copy to MF and to sglbuf
+				 */
+				mpt_add_sge(psge, flagsLength, dma_addr_out);
+				psge += (sizeof(u32) + sizeof(dma_addr_t));
+
+				/* Copy user data to kernel space.
+				 */
+				if (copy_from_user(bufOut.kptr,
+						karg.dataOutBufPtr,
+						bufOut.len)) {
+					printk(KERN_ERR
+						"%s@%d::mptctl_do_mpt_command - Unable "
+						"to read user data "
+						"struct @ %p\n",
+						__FILE__, __LINE__,karg.dataOutBufPtr);
+					rc =  -EFAULT;
+					goto done_free_mem;
+				}
+			}
+		}
+
+		if (karg.dataInSize > 0) {
+			flagsLength = MPT_SGE_FLAGS_SSIMPLE_READ;
+			flagsLength |= karg.dataInSize;
+
+			bufIn.len = karg.dataInSize;
+			bufIn.kptr = pci_alloc_consistent(ioc->pcidev,
+					bufIn.len, &dma_addr_in);
+
+			if (bufIn.kptr == NULL) {
+				rc = -ENOMEM;
+				goto done_free_mem;
+			} else {
+				/* Set up this SGE
+				 * Copy to MF and to sglbuf
+				 */
+				mpt_add_sge(psge, flagsLength, dma_addr_in);
+			}
+		}
+	} else  {
+		/* Add a NULL SGE
+		 */
+		mpt_add_sge(psge, flagsLength, (dma_addr_t) -1);
+	}
+
+	ioc->ioctl->wait_done = 0;
+	if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT) {
+
+		DBG_DUMP_TM_REQUEST_FRAME((u32 *)mf);
+
+		if (mpt_send_handshake_request(mptctl_id, ioc,
+			sizeof(SCSITaskMgmt_t), (u32*)mf,
+			CAN_SLEEP) != 0) {
+			dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
+				" (ioc %p, mf %p) \n", ioc->name,
+				ioc, mf));
+			mptctl_free_tm_flags(ioc);
+			rc = -ENODATA;
+			goto done_free_mem;
+		}
+
+	} else
+		mpt_put_msg_frame(mptctl_id, ioc, mf);
+
+	/* Now wait for the command to complete */
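+	/* karg.timeout is in seconds; wait_event_interruptible_timeout()
+	 * takes jiffies, hence the HZ multiply below.
+	 */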
+	timeout = (karg.timeout > 0) ? karg.timeout : MPT_IOCTL_DEFAULT_TIMEOUT;
+	timeout = wait_event_interruptible_timeout(mptctl_wait,
+	     ioc->ioctl->wait_done == 1,
+	     HZ*timeout);
+
+	if ((timeout <= 0) && (ioc->ioctl->wait_done != 1)) {
+	/* Now we need to reset the board */
+
+		if (hdr->Function == MPI_FUNCTION_SCSI_TASK_MGMT)
+			mptctl_free_tm_flags(ioc);
+
+		mptctl_timeout_expired(ioc->ioctl);
+		rc = -ENODATA;
+		goto done_free_mem;
+	}
+
+	mf = NULL;
+
+	/* If a valid reply frame, copy to the user.
+	 * Offset 2: reply length in U32's
+	 */
+	if (ioc->ioctl->status & MPT_IOCTL_STATUS_RF_VALID) {
+		if (karg.maxReplyBytes < ioc->reply_sz) {
+			 sz = min(karg.maxReplyBytes, 4*ioc->ioctl->ReplyFrame[2]);
+		} else {
+			 sz = min(ioc->reply_sz, 4*ioc->ioctl->ReplyFrame[2]);
+		}
+
+		if (sz > 0) {
+			if (copy_to_user(karg.replyFrameBufPtr,
+				 &ioc->ioctl->ReplyFrame, sz)){
+				 printk(KERN_ERR
+				     "%s@%d::mptctl_do_mpt_command - "
+				 "Unable to write out reply frame %p\n",
+				 __FILE__, __LINE__, karg.replyFrameBufPtr);
+				 rc =  -ENODATA;
+				 goto done_free_mem;
+			}
+		}
+	}
+
+	/* If valid sense data, copy to user.
+	 */
+	if (ioc->ioctl->status & MPT_IOCTL_STATUS_SENSE_VALID) {
+		sz = min(karg.maxSenseBytes, MPT_SENSE_BUFFER_SIZE);
+		if (sz > 0) {
+			if (copy_to_user(karg.senseDataPtr, ioc->ioctl->sense, sz)) {
+				printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+				"Unable to write sense data to user %p\n",
+				__FILE__, __LINE__,
+				karg.senseDataPtr);
+				rc =  -ENODATA;
+				goto done_free_mem;
+			}
+		}
+	}
+
+	/* If the overall status is _GOOD and data in, copy data
+	 * to user.
+	 */
+	if ((ioc->ioctl->status & MPT_IOCTL_STATUS_COMMAND_GOOD) &&
+				(karg.dataInSize > 0) && (bufIn.kptr)) {
+
+		if (copy_to_user(karg.dataInBufPtr,
+				 bufIn.kptr, karg.dataInSize)) {
+			printk(KERN_ERR "%s@%d::mptctl_do_mpt_command - "
+				"Unable to write data to user %p\n",
+				__FILE__, __LINE__,
+				karg.dataInBufPtr);
+			rc =  -ENODATA;
+		}
+	}
+
+done_free_mem:
+
+	ioc->ioctl->status &= ~(MPT_IOCTL_STATUS_COMMAND_GOOD |
+		MPT_IOCTL_STATUS_SENSE_VALID |
+		MPT_IOCTL_STATUS_RF_VALID );
+
+	/* Free the allocated memory.
+	 */
+	if (bufOut.kptr != NULL) {
+		pci_free_consistent(ioc->pcidev,
+			bufOut.len, (void *) bufOut.kptr, dma_addr_out);
+	}
+
+	if (bufIn.kptr != NULL) {
+		pci_free_consistent(ioc->pcidev,
+			bufIn.len, (void *) bufIn.kptr, dma_addr_in);
+	}
+
+	/* mf is NULL if the command was issued successfully;
+	 * otherwise, a failure occurred after the mf was acquired.
+	 */
+	if (mf)
+		mpt_free_msg_frame(ioc, mf);
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Prototype Routine for the HP HOST INFO command.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-EBUSY  if a previous command timed out and the IOC reset is not complete.
+ *		-ENODEV if no such device/adapter
+ *		-ETIME	if timer expires
+ *		-ENOMEM if memory allocation error
+ */
+static int
+mptctl_hp_hostinfo(unsigned long arg, unsigned int data_size)
+{
+	hp_host_info_t	__user *uarg = (void __user *) arg;
+	MPT_ADAPTER		*ioc;
+	struct pci_dev		*pdev;
+	char			*pbuf;
+	dma_addr_t		buf_dma;
+	hp_host_info_t		karg;
+	CONFIGPARMS		cfg;
+	ConfigPageHeader_t	hdr;
+	int			iocnum;
+	int			rc, cim_rev;
+
+	dctlprintk((": mptctl_hp_hostinfo called.\n"));
+	/* Reset long to int. Should affect IA64 and SPARC only
+	 */
+	if (data_size == sizeof(hp_host_info_t))
+		cim_rev = 1;
+	else if (data_size == sizeof(hp_host_info_rev0_t))
+		cim_rev = 0;	/* obsolete */
+	else
+		return -EFAULT;
+
+	if (copy_from_user(&karg, uarg, sizeof(hp_host_info_t))) {
+		printk(KERN_ERR "%s@%d::mptctl_hp_host_info - "
+			"Unable to read in hp_host_info struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+	    (ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_hp_hostinfo() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	/* Fill in the data and return the structure to the calling
+	 * program
+	 */
+	pdev = (struct pci_dev *) ioc->pcidev;
+
+	karg.vendor = pdev->vendor;
+	karg.device = pdev->device;
+	karg.subsystem_id = pdev->subsystem_device;
+	karg.subsystem_vendor = pdev->subsystem_vendor;
+	karg.devfn = pdev->devfn;
+	karg.bus = pdev->bus->number;
+
+	/* Save the SCSI host no. if
+	 * SCSI driver loaded
+	 */
+	if (ioc->sh != NULL)
+		karg.host_no = ioc->sh->host_no;
+	else
+		karg.host_no =  -1;
+
+	/* Reformat the fw_version into a string
+	 */
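+	/* The result is a fixed "MM.mm.uu.dd" string with two decimal
+	 * digits per field, e.g. "01.03.27.00" (hypothetical value).
+	 */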
+	karg.fw_version[0] = ioc->facts.FWVersion.Struct.Major >= 10 ?
+		((ioc->facts.FWVersion.Struct.Major / 10) + '0') : '0';
+	karg.fw_version[1] = (ioc->facts.FWVersion.Struct.Major % 10 ) + '0';
+	karg.fw_version[2] = '.';
+	karg.fw_version[3] = ioc->facts.FWVersion.Struct.Minor >= 10 ?
+		((ioc->facts.FWVersion.Struct.Minor / 10) + '0') : '0';
+	karg.fw_version[4] = (ioc->facts.FWVersion.Struct.Minor % 10 ) + '0';
+	karg.fw_version[5] = '.';
+	karg.fw_version[6] = ioc->facts.FWVersion.Struct.Unit >= 10 ?
+		((ioc->facts.FWVersion.Struct.Unit / 10) + '0') : '0';
+	karg.fw_version[7] = (ioc->facts.FWVersion.Struct.Unit % 10 ) + '0';
+	karg.fw_version[8] = '.';
+	karg.fw_version[9] = ioc->facts.FWVersion.Struct.Dev >= 10 ?
+		((ioc->facts.FWVersion.Struct.Dev / 10) + '0') : '0';
+	karg.fw_version[10] = (ioc->facts.FWVersion.Struct.Dev % 10 ) + '0';
+	karg.fw_version[11] = '\0';
+
+	/* Issue a config request to get the device serial number
+	 */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 0;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_MANUFACTURING;
+	cfg.hdr = &hdr;
+	cfg.physAddr = -1;
+	cfg.pageAddr = 0;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;	/* read */
+	cfg.timeout = 10;
+
+	strncpy(karg.serial_number, " ", 24);
+	if (mpt_config(ioc, &cfg) == 0) {
+		if (cfg.hdr->PageLength > 0) {
+			/* Issue the second config page request */
+			cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+
+			pbuf = pci_alloc_consistent(ioc->pcidev, hdr.PageLength * 4, &buf_dma);
+			if (pbuf) {
+				cfg.physAddr = buf_dma;
+				if (mpt_config(ioc, &cfg) == 0) {
+					ManufacturingPage0_t *pdata = (ManufacturingPage0_t *) pbuf;
+					if (strlen(pdata->BoardTracerNumber) > 1) {
+						strncpy(karg.serial_number,
+							pdata->BoardTracerNumber, 24);
+						karg.serial_number[24-1]='\0';
+					}
+				}
+				pci_free_consistent(ioc->pcidev, hdr.PageLength * 4, pbuf, buf_dma);
+				pbuf = NULL;
+			}
+		}
+	}
+	rc = mpt_GetIocState(ioc, 1);
+	switch (rc) {
+	case MPI_IOC_STATE_OPERATIONAL:
+		karg.ioc_status =  HP_STATUS_OK;
+		break;
+
+	case MPI_IOC_STATE_FAULT:
+		karg.ioc_status =  HP_STATUS_FAILED;
+		break;
+
+	case MPI_IOC_STATE_RESET:
+	case MPI_IOC_STATE_READY:
+	default:
+		karg.ioc_status =  HP_STATUS_OTHER;
+		break;
+	}
+
+	karg.base_io_addr = pci_resource_start(pdev, 0);
+
+	if (ioc->bus_type == FC)
+		karg.bus_phys_width = HP_BUS_WIDTH_UNK;
+	else
+		karg.bus_phys_width = HP_BUS_WIDTH_16;
+
+	karg.hard_resets = 0;
+	karg.soft_resets = 0;
+	karg.timeouts = 0;
+	if (ioc->sh != NULL) {
+		MPT_SCSI_HOST *hd =  (MPT_SCSI_HOST *)ioc->sh->hostdata;
+
+		if (hd && (cim_rev == 1)) {
+			karg.hard_resets = hd->hard_resets;
+			karg.soft_resets = hd->soft_resets;
+			karg.timeouts = hd->timeouts;
+		}
+	}
+
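+	/* Fetch 4 bytes via the ISTWI read/write toolbox request and
+	 * hand them back to the application in the rsvd field.
+	 */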
+	cfg.pageAddr = 0;
+	cfg.action = MPI_TOOLBOX_ISTWI_READ_WRITE_TOOL;
+	cfg.dir = MPI_TB_ISTWI_FLAGS_READ;
+	cfg.timeout = 10;
+	pbuf = pci_alloc_consistent(ioc->pcidev, 4, &buf_dma);
+	if (pbuf) {
+		cfg.physAddr = buf_dma;
+		if ((mpt_toolbox(ioc, &cfg)) == 0) {
+			karg.rsvd = *(u32 *)pbuf;
+		}
+		pci_free_consistent(ioc->pcidev, 4, pbuf, buf_dma);
+		pbuf = NULL;
+	}
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, &karg, sizeof(hp_host_info_t))) {
+		printk(KERN_ERR "%s@%d::mptctl_hpgethostinfo - "
+			"Unable to write out hp_host_info @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	return 0;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Prototype Routine for the HP TARGET INFO command.
+ *
+ * Outputs:	None.
+ * Return:	0 if successful
+ *		-EFAULT if data unavailable
+ *		-EBUSY  if a previous command timed out and the IOC reset is not complete.
+ *		-ENODEV if no such device/adapter
+ *		-ETIME	if timer expires
+ *		-ENOMEM if memory allocation error
+ */
+static int
+mptctl_hp_targetinfo(unsigned long arg)
+{
+	hp_target_info_t __user *uarg = (void __user *) arg;
+	SCSIDevicePage0_t	*pg0_alloc;
+	SCSIDevicePage3_t	*pg3_alloc;
+	MPT_ADAPTER		*ioc;
+	MPT_SCSI_HOST 		*hd = NULL;
+	hp_target_info_t	karg;
+	int			iocnum;
+	int			data_sz;
+	dma_addr_t		page_dma;
+	CONFIGPARMS	 	cfg;
+	ConfigPageHeader_t	hdr;
+	int			tmp, np, rc = 0;
+
+	dctlprintk((": mptctl_hp_targetinfo called.\n"));
+	if (copy_from_user(&karg, uarg, sizeof(hp_target_info_t))) {
+		printk(KERN_ERR "%s@%d::mptctl_hp_targetinfo - "
+			"Unable to read in hp_host_targetinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	if (((iocnum = mpt_verify_adapter(karg.hdr.iocnum, &ioc)) < 0) ||
+		(ioc == NULL)) {
+		dctlprintk((KERN_ERR "%s::mptctl_hp_targetinfo() @%d - ioc%d not found!\n",
+				__FILE__, __LINE__, iocnum));
+		return -ENODEV;
+	}
+
+	/*  There is nothing to do for FCP parts.
+	 */
+	if (ioc->bus_type == FC)
+		return 0;
+
+	if ((ioc->spi_data.sdp0length == 0) || (ioc->sh == NULL))
+		return 0;
+
+	if (ioc->sh->host_no != karg.hdr.host)
+		return -ENODEV;
+
+	/* Get the data transfer speeds
+	 */
+	data_sz = ioc->spi_data.sdp0length * 4;
+	pg0_alloc = (SCSIDevicePage0_t *) pci_alloc_consistent(ioc->pcidev, data_sz, &page_dma);
+	if (pg0_alloc) {
+		hdr.PageVersion = ioc->spi_data.sdp0version;
+		hdr.PageLength = data_sz;
+		hdr.PageNumber = 0;
+		hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+		cfg.hdr = &hdr;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+		cfg.dir = 0;
+		cfg.timeout = 0;
+		cfg.physAddr = page_dma;
+
+		cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
+
+		if ((rc = mpt_config(ioc, &cfg)) == 0) {
+			np = le32_to_cpu(pg0_alloc->NegotiatedParameters);
+			karg.negotiated_width = np & MPI_SCSIDEVPAGE0_NP_WIDE ?
+					HP_BUS_WIDTH_16 : HP_BUS_WIDTH_8;
+
+			if (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) {
+				tmp = (np & MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
+				if (tmp < 0x09)
+					karg.negotiated_speed = HP_DEV_SPEED_ULTRA320;
+				else if (tmp <= 0x09)
+					karg.negotiated_speed = HP_DEV_SPEED_ULTRA160;
+				else if (tmp <= 0x0A)
+					karg.negotiated_speed = HP_DEV_SPEED_ULTRA2;
+				else if (tmp <= 0x0C)
+					karg.negotiated_speed = HP_DEV_SPEED_ULTRA;
+				else if (tmp <= 0x25)
+					karg.negotiated_speed = HP_DEV_SPEED_FAST;
+				else
+					karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
+			} else
+				karg.negotiated_speed = HP_DEV_SPEED_ASYNC;
+		}
+
+		pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg0_alloc, page_dma);
+	}
+
+	/* Set defaults
+	 */
+	karg.message_rejects = -1;
+	karg.phase_errors = -1;
+	karg.parity_errors = -1;
+	karg.select_timeouts = -1;
+
+	/* Get the target error parameters
+	 */
+	hdr.PageVersion = 0;
+	hdr.PageLength = 0;
+	hdr.PageNumber = 3;
+	hdr.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+	cfg.hdr = &hdr;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
+	cfg.dir = 0;
+	cfg.timeout = 0;
+	cfg.physAddr = -1;
+	if ((mpt_config(ioc, &cfg) == 0) && (cfg.hdr->PageLength > 0)) {
+		/* Issue the second config page request */
+		cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+		data_sz = (int) cfg.hdr->PageLength * 4;
+		pg3_alloc = (SCSIDevicePage3_t *) pci_alloc_consistent(
+							ioc->pcidev, data_sz, &page_dma);
+		if (pg3_alloc) {
+			cfg.physAddr = page_dma;
+			cfg.pageAddr = (karg.hdr.channel << 8) | karg.hdr.id;
+			if ((rc = mpt_config(ioc, &cfg)) == 0) {
+				karg.message_rejects = (u32) le16_to_cpu(pg3_alloc->MsgRejectCount);
+				karg.phase_errors = (u32) le16_to_cpu(pg3_alloc->PhaseErrorCount);
+				karg.parity_errors = (u32) le16_to_cpu(pg3_alloc->ParityErrorCount);
+			}
+			pci_free_consistent(ioc->pcidev, data_sz, (u8 *) pg3_alloc, page_dma);
+		}
+	}
+	hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+	if (hd != NULL)
+		karg.select_timeouts = hd->sel_timeout[karg.hdr.id];
+
+	/* Copy the data from kernel memory to user memory
+	 */
+	if (copy_to_user((char __user *)arg, &karg, sizeof(hp_target_info_t))) {
+		printk(KERN_ERR "%s@%d::mptctl_hp_target_info - "
+			"Unable to write out mpt_ioctl_targetinfo struct @ %p\n",
+				__FILE__, __LINE__, uarg);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+static struct file_operations mptctl_fops = {
+	.owner =	THIS_MODULE,
+	.llseek =	no_llseek,
+	.unlocked_ioctl = mptctl_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = compat_mpctl_ioctl,
+#endif
+};
+
+static struct miscdevice mptctl_miscdev = {
+	MPT_MINOR,
+	MYNAM,
+	&mptctl_fops
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#ifdef CONFIG_COMPAT
+
+#include <linux/ioctl32.h>
+
+static int
+compat_mptfwxfer_ioctl(struct file *filp, unsigned int cmd,
+			unsigned long arg)
+{
+	struct mpt_fw_xfer32 kfw32;
+	struct mpt_fw_xfer kfw;
+	MPT_ADAPTER *iocp = NULL;
+	int iocnum, iocnumX;
+	int nonblock = (filp->f_flags & O_NONBLOCK);
+	int ret;
+
+	dctlprintk((KERN_INFO MYNAM "::compat_mptfwxfer_ioctl() called\n"));
+
+	if (copy_from_user(&kfw32, (char __user *)arg, sizeof(kfw32)))
+		return -EFAULT;
+
+	/* Verify intended MPT adapter */
+	iocnumX = kfw32.iocnum & 0xFF;
+	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+	    (iocp == NULL)) {
+		dctlprintk((KERN_ERR MYNAM "::compat_mptfwxfer_ioctl @%d - ioc%d not found!\n",
+				__LINE__, iocnumX));
+		return -ENODEV;
+	}
+
+	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+		return ret;
+
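+	/* Convert the 32-bit mpt_fw_xfer32 into the native mpt_fw_xfer
+	 * before calling the common download path.
+	 */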
+	kfw.iocnum = iocnum;
+	kfw.fwlen = kfw32.fwlen;
+	kfw.bufp = compat_ptr(kfw32.bufp);
+
+	ret = mptctl_do_fw_download(kfw.iocnum, kfw.bufp, kfw.fwlen);
+
+	up(&iocp->ioctl->sem_ioc);
+
+	return ret;
+}
+
+static int
+compat_mpt_command(struct file *filp, unsigned int cmd,
+			unsigned long arg)
+{
+	struct mpt_ioctl_command32 karg32;
+	struct mpt_ioctl_command32 __user *uarg = (struct mpt_ioctl_command32 __user *) arg;
+	struct mpt_ioctl_command karg;
+	MPT_ADAPTER *iocp = NULL;
+	int iocnum, iocnumX;
+	int nonblock = (filp->f_flags & O_NONBLOCK);
+	int ret;
+
+	dctlprintk((KERN_INFO MYNAM "::compat_mpt_command() called\n"));
+
+	if (copy_from_user(&karg32, (char __user *)arg, sizeof(karg32)))
+		return -EFAULT;
+
+	/* Verify intended MPT adapter */
+	iocnumX = karg32.hdr.iocnum & 0xFF;
+	if (((iocnum = mpt_verify_adapter(iocnumX, &iocp)) < 0) ||
+	    (iocp == NULL)) {
+		dctlprintk((KERN_ERR MYNAM "::compat_mpt_command @%d - ioc%d not found!\n",
+				__LINE__, iocnumX));
+		return -ENODEV;
+	}
+
+	if ((ret = mptctl_syscall_down(iocp, nonblock)) != 0)
+		return ret;
+
+	/* Copy data to karg */
+	karg.hdr.iocnum = karg32.hdr.iocnum;
+	karg.hdr.port = karg32.hdr.port;
+	karg.timeout = karg32.timeout;
+	karg.maxReplyBytes = karg32.maxReplyBytes;
+
+	karg.dataInSize = karg32.dataInSize;
+	karg.dataOutSize = karg32.dataOutSize;
+	karg.maxSenseBytes = karg32.maxSenseBytes;
+	karg.dataSgeOffset = karg32.dataSgeOffset;
+
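+	/* Widen the 32-bit user pointers to native __user pointers. */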
+	karg.replyFrameBufPtr = (char __user *)(unsigned long)karg32.replyFrameBufPtr;
+	karg.dataInBufPtr = (char __user *)(unsigned long)karg32.dataInBufPtr;
+	karg.dataOutBufPtr = (char __user *)(unsigned long)karg32.dataOutBufPtr;
+	karg.senseDataPtr = (char __user *)(unsigned long)karg32.senseDataPtr;
+
+	/* Pass new structure to do_mpt_command
+	 */
+	ret = mptctl_do_mpt_command (karg, &uarg->MF);
+
+	up(&iocp->ioctl->sem_ioc);
+
+	return ret;
+}
+
+static long compat_mpctl_ioctl(struct file *f, unsigned int cmd, unsigned long arg)
+{
+	long ret;
+	lock_kernel();
+	switch (cmd) {
+	case MPTIOCINFO:
+	case MPTIOCINFO1:
+	case MPTIOCINFO2:
+	case MPTTARGETINFO:
+	case MPTEVENTQUERY:
+	case MPTEVENTENABLE:
+	case MPTEVENTREPORT:
+	case MPTHARDRESET:
+	case HP_GETHOSTINFO:
+	case HP_GETTARGETINFO:
+	case MPTTEST:
+		ret = __mptctl_ioctl(f, cmd, arg);
+		break;
+	case MPTCOMMAND32:
+		ret = compat_mpt_command(f, cmd, arg);
+		break;
+	case MPTFWDOWNLOAD32:
+		ret = compat_mptfwxfer_ioctl(f, cmd, arg);
+		break;
+	default:
+		ret = -ENOIOCTLCMD;
+		break;
+	}
+	unlock_kernel();
+	return ret;
+}
+
+#endif
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_probe - Installs ioctl devices per bus.
+ *	@pdev: Pointer to pci_dev structure
+ *
+ *	Returns 0 for success, non-zero for failure.
+ *
+ */
+
+static int
+mptctl_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	int err;
+	int sz;
+	u8 *mem;
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+	/*
+	 * Allocate and initialize an MPT_IOCTL structure
+	 */
+	sz = sizeof (MPT_IOCTL);
+	mem = kmalloc(sz, GFP_KERNEL);
+	if (mem == NULL) {
+		err = -ENOMEM;
+		goto out_fail;
+	}
+
+	memset(mem, 0, sz);
+	ioc->ioctl = (MPT_IOCTL *) mem;
+	ioc->ioctl->ioc = ioc;
+	sema_init(&ioc->ioctl->sem_ioc, 1);
+	return 0;
+
+out_fail:
+
+	mptctl_remove(pdev);
+	return err;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptctl_remove - Remove ioctl devices
+ *	@pdev: Pointer to pci_dev structure
+ *
+ *
+ */
+static void
+mptctl_remove(struct pci_dev *pdev)
+{
+	MPT_ADAPTER *ioc = pci_get_drvdata(pdev);
+
+	kfree ( ioc->ioctl );
+}
+
+static struct mpt_pci_driver mptctl_driver = {
+  .probe		= mptctl_probe,
+  .remove		= mptctl_remove,
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int __init mptctl_init(void)
+{
+	int err;
+	int where = 1;
+
+	show_mptmod_ver(my_NAME, my_VERSION);
+
+	if(mpt_device_driver_register(&mptctl_driver,
+	  MPTCTL_DRIVER) != 0 ) {
+		dprintk((KERN_INFO MYNAM
+		": failed to register dd callbacks\n"));
+	}
+
+	/* Register this device */
+	err = misc_register(&mptctl_miscdev);
+	if (err < 0) {
+		printk(KERN_ERR MYNAM ": Can't register misc device [minor=%d].\n", MPT_MINOR);
+		goto out_fail;
+	}
+	printk(KERN_INFO MYNAM ": Registered with Fusion MPT base driver\n");
+	printk(KERN_INFO MYNAM ": /dev/%s @ (major,minor=%d,%d)\n",
+			 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+
+	/*
+	 *  Install our handler
+	 */
+	++where;
+	if ((mptctl_id = mpt_register(mptctl_reply, MPTCTL_DRIVER)) < 0) {
+		printk(KERN_ERR MYNAM ": ERROR: Failed to register with Fusion MPT base driver\n");
+		misc_deregister(&mptctl_miscdev);
+		err = -EBUSY;
+		goto out_fail;
+	}
+
+	if (mpt_reset_register(mptctl_id, mptctl_ioc_reset) == 0) {
+		dprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
+	} else {
+		/* FIXME! */
+	}
+
+	return 0;
+
+out_fail:
+
+	mpt_device_driver_deregister(MPTCTL_DRIVER);
+
+	return err;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static void mptctl_exit(void)
+{
+	misc_deregister(&mptctl_miscdev);
+	printk(KERN_INFO MYNAM ": Deregistered /dev/%s @ (major,minor=%d,%d)\n",
+			 mptctl_miscdev.name, MISC_MAJOR, mptctl_miscdev.minor);
+
+	/* De-register reset handler from base module */
+	mpt_reset_deregister(mptctl_id);
+	dprintk((KERN_INFO MYNAM ": Deregistered for IOC reset notifications\n"));
+
+	/* De-register callback handler from base module */
+	mpt_deregister(mptctl_id);
+	printk(KERN_INFO MYNAM ": Deregistered from Fusion MPT base driver\n");
+
+	mpt_device_driver_deregister(MPTCTL_DRIVER);
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+module_init(mptctl_init);
+module_exit(mptctl_exit);
diff --git a/drivers/message/fusion/mptctl.h b/drivers/message/fusion/mptctl.h
new file mode 100644
index 0000000..cc4ecf0
--- /dev/null
+++ b/drivers/message/fusion/mptctl.h
@@ -0,0 +1,484 @@
+/*
+ *  linux/drivers/message/fusion/mptctl.h
+ *      Fusion MPT misc device (ioctl) driver.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      (see also mptbase.c)
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Originally By: Steven J. Ralston
+ *  (mailto:sjralston1@netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptctl.h,v 1.13 2002/12/03 21:26:33 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#ifndef MPTCTL_H_INCLUDED
+#define MPTCTL_H_INCLUDED
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "linux/version.h"
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *
+ */
+#define MPT_MISCDEV_BASENAME            "mptctl"
+#define MPT_MISCDEV_PATHNAME            "/dev/" MPT_MISCDEV_BASENAME
+
+#define MPT_PRODUCT_LENGTH              12
+
+/*
+ *  Generic MPT Control IOCTLs and structures
+ */
+#define MPT_MAGIC_NUMBER	'm'
+
+#define MPTRWPERF		_IOWR(MPT_MAGIC_NUMBER,0,struct mpt_raw_r_w)
+
+#define MPTFWDOWNLOAD		_IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer)
+#define MPTCOMMAND		_IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command)
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+#define MPTFWDOWNLOAD32		_IOWR(MPT_MAGIC_NUMBER,15,struct mpt_fw_xfer32)
+#define MPTCOMMAND32		_IOWR(MPT_MAGIC_NUMBER,20,struct mpt_ioctl_command32)
+#endif
+
+#define MPTIOCINFO		_IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo)
+#define MPTIOCINFO1		_IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev0)
+#define MPTIOCINFO2		_IOWR(MPT_MAGIC_NUMBER,17,struct mpt_ioctl_iocinfo_rev1)
+#define MPTTARGETINFO		_IOWR(MPT_MAGIC_NUMBER,18,struct mpt_ioctl_targetinfo)
+#define MPTTEST			_IOWR(MPT_MAGIC_NUMBER,19,struct mpt_ioctl_test)
+#define MPTEVENTQUERY		_IOWR(MPT_MAGIC_NUMBER,21,struct mpt_ioctl_eventquery)
+#define MPTEVENTENABLE		_IOWR(MPT_MAGIC_NUMBER,22,struct mpt_ioctl_eventenable)
+#define MPTEVENTREPORT		_IOWR(MPT_MAGIC_NUMBER,23,struct mpt_ioctl_eventreport)
+#define MPTHARDRESET		_IOWR(MPT_MAGIC_NUMBER,24,struct mpt_ioctl_diag_reset)
+#define MPTFWREPLACE		_IOWR(MPT_MAGIC_NUMBER,25,struct mpt_ioctl_replace_fw)
+
+/*
+ * SPARC PLATFORM REMARKS:
+ * IOCTL data structures that contain pointers
+ * will have different sizes in the driver and applications
+ * (as the app. will not use 8-byte pointers).
+ * Apps should use MPTFWDOWNLOAD and MPTCOMMAND.
+ * The driver will convert data from
+ * mpt_fw_xfer32 (mpt_ioctl_command32) to mpt_fw_xfer (mpt_ioctl_command)
+ * internally.
+ *
+ * If data structures change size, must handle as in IOCGETINFO.
+ */
+struct mpt_fw_xfer {
+	unsigned int	 iocnum;	/* IOC unit number */
+	unsigned int	 fwlen;
+	void		__user *bufp;	/* Pointer to firmware buffer */
+};
+
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+struct mpt_fw_xfer32 {
+	unsigned int iocnum;
+	unsigned int fwlen;
+	u32 bufp;
+};
+#endif	/*}*/
+
+/*
+ *  IOCTL header structure.
+ *  iocnum - must be defined.
+ *  port - must be defined for all IOCTL commands other than MPTIOCINFO
+ *  maxDataSize - ignored on MPTCOMMAND commands
+ *		- ignored on MPTFWREPLACE commands
+ *		- on query commands, reports the maximum number of bytes to be returned
+ *		  to the host driver (count includes the header).
+ *		  That is, set to sizeof(struct mpt_ioctl_iocinfo) for fixed sized commands.
+ *		  Set to sizeof(struct mpt_ioctl_targetinfo) + datasize for variable
+ *			sized commands. (MPTTARGETINFO, MPTEVENTREPORT)
+ */
+typedef struct _mpt_ioctl_header {
+	unsigned int	 iocnum;	/* IOC unit number */
+	unsigned int	 port;		/* IOC port number */
+	int		 maxDataSize;	/* Maximum Num. bytes to transfer on read */
+} mpt_ioctl_header;
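+
+/*
+ * Example (hypothetical sketch): for a variable sized query such as
+ * MPTTARGETINFO an application would set something like
+ *	hdr.iocnum      = 0;
+ *	hdr.port        = 0;
+ *	hdr.maxDataSize = sizeof(struct mpt_ioctl_targetinfo) + datasize;
+ * where datasize is the extra room allocated for the returned data.
+ */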
+
+/*
+ * Issue a diagnostic reset
+ */
+struct mpt_ioctl_diag_reset {
+	mpt_ioctl_header hdr;
+};
+
+
+/*
+ *  PCI bus/device/function information structure.
+ */
+struct mpt_ioctl_pci_info {
+	union {
+		struct {
+			unsigned int  deviceNumber   :  5;
+			unsigned int  functionNumber :  3;
+			unsigned int  busNumber      : 24;
+		} bits;
+		unsigned int  asUlong;
+	} u;
+};
+
+struct mpt_ioctl_pci_info2 {
+	union {
+		struct {
+			unsigned int  deviceNumber   :  5;
+			unsigned int  functionNumber :  3;
+			unsigned int  busNumber      : 24;
+		} bits;
+		unsigned int  asUlong;
+	} u;
+  int segmentID;
+};
+
+/*
+ *  Adapter Information Page
+ *  Read only.
+ *  Data starts at offset 0xC
+ */
+#define MPT_IOCTL_INTERFACE_FC		(0x01)
+#define MPT_IOCTL_INTERFACE_SCSI	(0x00)
+#define MPT_IOCTL_VERSION_LENGTH	(32)
+
+struct mpt_ioctl_iocinfo {
+	mpt_ioctl_header hdr;
+	int		 adapterType;	/* SCSI or FCP */
+	int		 port;		/* port number */
+	int		 pciId;		/* PCI Id. */
+	int		 hwRev;		/* hardware revision */
+	int		 subSystemDevice;	/* PCI subsystem Device ID */
+	int		 subSystemVendor;	/* PCI subsystem Vendor ID */
+	int		 numDevices;		/* number of devices */
+	int		 FWVersion;		/* FW Version (integer) */
+	int		 BIOSVersion;		/* BIOS Version (integer) */
+	char		 driverVersion[MPT_IOCTL_VERSION_LENGTH];	/* Driver Version (string) */
+	char		 busChangeEvent;
+	char		 hostId;
+	char		 rsvd[2];
+	struct mpt_ioctl_pci_info2  pciInfo; /* Added Rev 2 */
+};
+
+struct mpt_ioctl_iocinfo_rev1 {
+	mpt_ioctl_header hdr;
+	int		 adapterType;	/* SCSI or FCP */
+	int		 port;		/* port number */
+	int		 pciId;		/* PCI Id. */
+	int		 hwRev;		/* hardware revision */
+	int		 subSystemDevice;	/* PCI subsystem Device ID */
+	int		 subSystemVendor;	/* PCI subsystem Vendor ID */
+	int		 numDevices;		/* number of devices */
+	int		 FWVersion;		/* FW Version (integer) */
+	int		 BIOSVersion;		/* BIOS Version (integer) */
+	char		 driverVersion[MPT_IOCTL_VERSION_LENGTH];	/* Driver Version (string) */
+	char		 busChangeEvent;
+	char		 hostId;
+	char		 rsvd[2];
+	struct mpt_ioctl_pci_info  pciInfo; /* Added Rev 1 */
+};
+
+/* Original structure, must always accept these
+ * IOCTLs. 4 byte pads can occur based on arch with
+ * above structure. Wish to re-align, but cannot.
+ */
+struct mpt_ioctl_iocinfo_rev0 {
+	mpt_ioctl_header hdr;
+	int		 adapterType;	/* SCSI or FCP */
+	int		 port;		/* port number */
+	int		 pciId;		/* PCI Id. */
+	int		 hwRev;		/* hardware revision */
+	int		 subSystemDevice;	/* PCI subsystem Device ID */
+	int		 subSystemVendor;	/* PCI subsystem Vendor ID */
+	int		 numDevices;		/* number of devices */
+	int		 FWVersion;		/* FW Version (integer) */
+	int		 BIOSVersion;		/* BIOS Version (integer) */
+	char		 driverVersion[MPT_IOCTL_VERSION_LENGTH];	/* Driver Version (string) */
+	char		 busChangeEvent;
+	char		 hostId;
+	char		 rsvd[2];
+};
+
+/*
+ * Device Information Page
+ * Report the number of, and ids of, all targets
+ * on this IOC.  The ids array is a packed structure
+ * of the known targetInfo.
+ * bits 31-24: reserved
+ *      23-16: LUN
+ *      15- 8: Bus Number
+ *       7- 0: Target ID
+ */
+struct mpt_ioctl_targetinfo {
+	mpt_ioctl_header hdr;
+	int		 numDevices;	/* Num targets on this ioc */
+	int		 targetInfo[1];
+};
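+
+/* Unpacking sketch (illustration only): splitting one packed entry of the
+ * ids array according to the bit layout documented above; "tinfo" is a
+ * hypothetical pointer to a filled-in struct mpt_ioctl_targetinfo.
+ *
+ *	unsigned int entry  = (unsigned int) tinfo->targetInfo[i];
+ *	unsigned int target =  entry        & 0xFF;	// bits  7- 0
+ *	unsigned int bus    = (entry >>  8) & 0xFF;	// bits 15- 8
+ *	unsigned int lun    = (entry >> 16) & 0xFF;	// bits 23-16
+ */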
+
+
+/*
+ * Event reporting IOCTLs.  These IOCTLs
+ * use the following structures:
+ */
+struct mpt_ioctl_eventquery {
+	mpt_ioctl_header hdr;
+	unsigned short	 eventEntries;
+	unsigned short	 reserved;
+	unsigned int	 eventTypes;
+};
+
+struct mpt_ioctl_eventenable {
+	mpt_ioctl_header hdr;
+	unsigned int	 eventTypes;
+};
+
+#ifndef __KERNEL__
+typedef struct {
+	uint	event;
+	uint	eventContext;
+	uint	data[2];
+} MPT_IOCTL_EVENTS;
+#endif
+
+struct mpt_ioctl_eventreport {
+	mpt_ioctl_header	hdr;
+	MPT_IOCTL_EVENTS	eventData[1];
+};
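+
+/* Flow sketch (illustration only): a hypothetical application would first
+ * query how many event entries the driver keeps, then size the report
+ * buffer per the variable-size rule at the top of this section.  The
+ * MPTEVENTQUERY/MPTEVENTREPORT ioctl names are assumptions.
+ *
+ *	struct mpt_ioctl_eventquery q;
+ *
+ *	memset(&q, 0, sizeof(q));
+ *	q.hdr.maxDataSize = sizeof(q);
+ *	ioctl(fd, MPTEVENTQUERY, &q);
+ *
+ *	size_t sz = sizeof(struct mpt_ioctl_eventreport) +
+ *		    q.eventEntries * sizeof(MPT_IOCTL_EVENTS);
+ *	struct mpt_ioctl_eventreport *rep = calloc(1, sz);
+ *	rep->hdr.maxDataSize = sz;
+ *	ioctl(fd, MPTEVENTREPORT, rep);
+ */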
+
+#define MPT_MAX_NAME	32
+struct mpt_ioctl_test {
+	mpt_ioctl_header hdr;
+	u8		 name[MPT_MAX_NAME];
+	int		 chip_type;
+	u8		 product [MPT_PRODUCT_LENGTH];
+};
+
+/* Replace the FW image cached in host driver memory
+ * newImageSize - image size in bytes
+ * newImage - first byte of the new image
+ */
+typedef struct mpt_ioctl_replace_fw {
+	mpt_ioctl_header hdr;
+	int		 newImageSize;
+	u8		 newImage[1];
+} mpt_ioctl_replace_fw_t;
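+
+/* Sizing sketch (illustration only): newImage[1] is a variable-length tail,
+ * so a hypothetical caller allocates the fixed part plus the image itself.
+ * "image_buf" and "image_len" are assumed to hold the new firmware image.
+ *
+ *	size_t sz = sizeof(mpt_ioctl_replace_fw_t) + image_len - 1;
+ *	mpt_ioctl_replace_fw_t *rfw = calloc(1, sz);
+ *
+ *	rfw->hdr.iocnum   = 0;
+ *	rfw->newImageSize = image_len;
+ *	memcpy(rfw->newImage, image_buf, image_len);
+ */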
+
+/* General MPT pass-through data structure
+ *
+ * iocnum
+ * timeout - in seconds, command timeout. If 0, set by driver to
+ *		default value.
+ * replyFrameBufPtr - reply location
+ * dataInBufPtr - destination for read
+ * dataOutBufPtr - data source for write
+ * senseDataPtr - sense data location
+ * maxReplyBytes - maximum number of reply bytes to be sent to app.
+ * dataInSize - num bytes for data transfer in (read)
+ * dataOutSize - num bytes for data transfer out (write)
+ * dataSgeOffset - offset in words from the start of the request message
+ *		to the first SGL
+ * MF - first byte of the MPI request message frame (variable length)
+ *
+ * Remark:  Some config pages have bi-directional transfer,
+ * both a read and a write. The basic structure allows for
+ * a bidirectional set up. Normal messages will have one or
+ * both of these buffers NULL.
+ */
+struct mpt_ioctl_command {
+	mpt_ioctl_header hdr;
+	int		timeout;	/* optional (seconds) */
+	char		__user *replyFrameBufPtr;
+	char		__user *dataInBufPtr;
+	char		__user *dataOutBufPtr;
+	char		__user *senseDataPtr;
+	int		maxReplyBytes;
+	int		dataInSize;
+	int		dataOutSize;
+	int		maxSenseBytes;
+	int		dataSgeOffset;
+	char		MF[1];
+};
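+
+/* Sizing sketch (illustration only): MF[1] above is the first byte of the
+ * embedded MPI request frame, so the ioctl payload is variable length and
+ * dataSgeOffset is given in 32-bit words from the start of that frame.
+ * The names below are hypothetical.
+ *
+ *	size_t total = sizeof(struct mpt_ioctl_command) - 1 + req_frame_len;
+ *	struct mpt_ioctl_command *cmd = calloc(1, total);
+ *
+ *	memcpy(cmd->MF, req_frame, req_frame_len);
+ *	cmd->dataSgeOffset = sge_offset_bytes / 4;	// words, not bytes
+ */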
+
+/*
+ * SPARC PLATFORM: See earlier remark.
+ */
+#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
+struct mpt_ioctl_command32 {
+	mpt_ioctl_header hdr;
+	int	timeout;
+	u32	replyFrameBufPtr;
+	u32	dataInBufPtr;
+	u32	dataOutBufPtr;
+	u32	senseDataPtr;
+	int	maxReplyBytes;
+	int	dataInSize;
+	int	dataOutSize;
+	int	maxSenseBytes;
+	int	dataSgeOffset;
+	char	MF[1];
+};
+#endif	/*}*/
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	HP Specific IOCTL Defines and Structures
+ */
+
+#define CPQFCTS_IOC_MAGIC 'Z'
+#define HP_IOC_MAGIC 'Z'
+#define HP_GETHOSTINFO		_IOR(HP_IOC_MAGIC, 20, hp_host_info_t)
+#define HP_GETHOSTINFO1		_IOR(HP_IOC_MAGIC, 20, hp_host_info_rev0_t)
+#define HP_GETTARGETINFO	_IOR(HP_IOC_MAGIC, 21, hp_target_info_t)
+
+/* All HP IOCTLs must include this header
+ */
+typedef struct _hp_header {
+	unsigned int iocnum;
+	unsigned int host;
+	unsigned int channel;
+	unsigned int id;
+	unsigned int lun;
+} hp_header_t;
+
+/*
+ *  Header:
+ *  iocnum 	required (input)
+ *  host 	ignored
+ *  channel	ignored
+ *  id		ignored
+ *  lun		ignored
+ */
+typedef struct _hp_host_info {
+	hp_header_t	 hdr;
+	u16		 vendor;
+	u16		 device;
+	u16		 subsystem_vendor;
+	u16		 subsystem_id;
+	u8		 devfn;
+	u8		 bus;
+	ushort		 host_no;		/* SCSI Host number, if scsi driver not loaded*/
+	u8		 fw_version[16];	/* string */
+	u8		 serial_number[24];	/* string */
+	u32		 ioc_status;
+	u32		 bus_phys_width;
+	u32		 base_io_addr;
+	u32		 rsvd;
+	unsigned int	 hard_resets;		/* driver initiated resets */
+	unsigned int	 soft_resets;		/* ioc, external resets */
+	unsigned int	 timeouts;		/* num timeouts */
+} hp_host_info_t;
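+
+/* Usage sketch (illustration only): issuing HP_GETHOSTINFO from user space.
+ * Only hdr.iocnum is consulted for this call; the remaining header fields
+ * are ignored, per the comment above.  "fd" is assumed to be open on the
+ * MPT control device.
+ *
+ *	hp_host_info_t hi;
+ *
+ *	memset(&hi, 0, sizeof(hi));
+ *	hi.hdr.iocnum = 0;
+ *	if (ioctl(fd, HP_GETHOSTINFO, &hi) == 0)
+ *		printf("FW %.16s  serial %.24s\n",
+ *			hi.fw_version, hi.serial_number);
+ */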
+
+/* Rev 0 variant: uses unsigned longs where the current structure above
+ * uses unsigned ints; kept to preserve backwards compatibility.
+ */
+typedef struct _hp_host_info_rev0 {
+	hp_header_t	 hdr;
+	u16		 vendor;
+	u16		 device;
+	u16		 subsystem_vendor;
+	u16		 subsystem_id;
+	u8		 devfn;
+	u8		 bus;
+	ushort		 host_no;		/* SCSI Host number, if scsi driver not loaded*/
+	u8		 fw_version[16];	/* string */
+	u8		 serial_number[24];	/* string */
+	u32		 ioc_status;
+	u32		 bus_phys_width;
+	u32		 base_io_addr;
+	u32		 rsvd;
+	unsigned long	 hard_resets;		/* driver initiated resets */
+	unsigned long	 soft_resets;		/* ioc, external resets */
+	unsigned long	 timeouts;		/* num timeouts */
+} hp_host_info_rev0_t;
+
+/*
+ *  Header:
+ *  iocnum 	required (input)
+ *  host 	required
+ *  channel	required	(bus number)
+ *  id		required
+ *  lun		ignored
+ *
+ *  All error values are between 0 and 0xFFFF.
+ */
+typedef struct _hp_target_info {
+	hp_header_t	 hdr;
+	u32 parity_errors;
+	u32 phase_errors;
+	u32 select_timeouts;
+	u32 message_rejects;
+	u32 negotiated_speed;
+	u8  negotiated_width;
+	u8  rsvd[7];				/* 8 byte alignment */
+} hp_target_info_t;
+
+#define HP_STATUS_OTHER		1
+#define HP_STATUS_OK		2
+#define HP_STATUS_FAILED	3
+
+#define HP_BUS_WIDTH_UNK	1
+#define HP_BUS_WIDTH_8		2
+#define HP_BUS_WIDTH_16		3
+#define HP_BUS_WIDTH_32		4
+
+#define HP_DEV_SPEED_ASYNC	2
+#define HP_DEV_SPEED_FAST	3
+#define HP_DEV_SPEED_ULTRA	4
+#define HP_DEV_SPEED_ULTRA2	5
+#define HP_DEV_SPEED_ULTRA160	6
+#define HP_DEV_SPEED_SCSI1	7
+#define HP_DEV_SPEED_ULTRA320	8
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#endif
+
diff --git a/drivers/message/fusion/mptlan.c b/drivers/message/fusion/mptlan.c
new file mode 100644
index 0000000..ef2713b
--- /dev/null
+++ b/drivers/message/fusion/mptlan.c
@@ -0,0 +1,1688 @@
+/*
+ *  linux/drivers/message/fusion/mptlan.c
+ *      IP Over Fibre Channel device driver.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      Special thanks goes to the I2O LAN driver people at the
+ *      University of Helsinki, who, unbeknownst to them, provided
+ *      the inspiration and initial structure for this driver.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      A really huge debt of gratitude is owed to Eddie C. Dost
+ *      for gobs of hard work fixing and optimizing LAN code.
+ *      THANK YOU!
+ *
+ *      (see also mptbase.c)
+ *
+ *  Copyright (c) 2000-2004 LSI Logic Corporation
+ *  Originally By: Noah Romer
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptlan.c,v 1.53 2002/10/17 20:15:58 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Define statements used for debugging
+ */
+//#define MPT_LAN_IO_DEBUG
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "mptlan.h"
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+
+#define MYNAM		"mptlan"
+
+MODULE_LICENSE("GPL");
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * MPT LAN message sizes without variable part.
+ */
+#define MPT_LAN_RECEIVE_POST_REQUEST_SIZE \
+	(sizeof(LANReceivePostRequest_t) - sizeof(SGE_MPI_UNION))
+
+#define MPT_LAN_TRANSACTION32_SIZE \
+	(sizeof(SGETransaction32_t) - sizeof(u32))
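+
+/* Worked example (illustration only): these fixed sizes bound how many
+ * receive buckets fit into one request frame, exactly as computed in
+ * mpt_lan_post_receive_buckets() below:
+ *
+ *	max = (req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
+ *	      (MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+ *
+ * i.e. whatever remains after the fixed request header is divided by the
+ * per-bucket cost of one transaction context plus one simple 64-bit SGE.
+ */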
+
+/*
+ *  Fusion MPT LAN private structures
+ */
+
+struct NAA_Hosed {
+	u16 NAA;
+	u8 ieee[FC_ALEN];
+	struct NAA_Hosed *next;
+};
+
+struct BufferControl {
+	struct sk_buff	*skb;
+	dma_addr_t	dma;
+	unsigned int	len;
+};
+
+struct mpt_lan_priv {
+	MPT_ADAPTER *mpt_dev;
+	u8 pnum; /* Port number in the IOC. This is not a Unix network port! */
+
+	atomic_t buckets_out;		/* number of unused buckets on IOC */
+	int bucketthresh;		/* Send more when this many left */
+
+	int *mpt_txfidx; /* Free Tx Context list */
+	int mpt_txfidx_tail;
+	spinlock_t txfidx_lock;
+
+	int *mpt_rxfidx; /* Free Rx Context list */
+	int mpt_rxfidx_tail;
+	spinlock_t rxfidx_lock;
+
+	struct BufferControl *RcvCtl;	/* Receive BufferControl structs */
+	struct BufferControl *SendCtl;	/* Send BufferControl structs */
+
+	int max_buckets_out;		/* Max buckets to send to IOC */
+	int tx_max_out;			/* IOC's Tx queue len */
+
+	u32 total_posted;
+	u32 total_received;
+	struct net_device_stats stats;	/* Per device statistics */
+
+	struct work_struct post_buckets_task;
+	unsigned long post_buckets_active;
+};
+
+struct mpt_lan_ohdr {
+	u16	dtype;
+	u8	daddr[FC_ALEN];
+	u16	stype;
+	u8	saddr[FC_ALEN];
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+/*
+ *  Forward protos...
+ */
+static int  lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf,
+		       MPT_FRAME_HDR *reply);
+static int  mpt_lan_open(struct net_device *dev);
+static int  mpt_lan_reset(struct net_device *dev);
+static int  mpt_lan_close(struct net_device *dev);
+static void mpt_lan_post_receive_buckets(void *dev_id);
+static void mpt_lan_wake_post_buckets_task(struct net_device *dev,
+					   int priority);
+static int  mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg);
+static int  mpt_lan_receive_post_reply(struct net_device *dev,
+				       LANReceivePostReply_t *pRecvRep);
+static int  mpt_lan_send_turbo(struct net_device *dev, u32 tmsg);
+static int  mpt_lan_send_reply(struct net_device *dev,
+			       LANSendReply_t *pSendRep);
+static int  mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase);
+static int  mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+static unsigned short mpt_lan_type_trans(struct sk_buff *skb,
+					 struct net_device *dev);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Fusion MPT LAN private data
+ */
+static int LanCtx = -1;
+
+static u32 max_buckets_out = 127;
+static u32 tx_max_out_p = 127 - 16;
+
+#ifdef QLOGIC_NAA_WORKAROUND
+static struct NAA_Hosed *mpt_bad_naa = NULL;
+DEFINE_RWLOCK(bad_naa_lock);
+#endif
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * Fusion MPT LAN external data
+ */
+extern int mpt_lan_index;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	lan_reply - Handle all data sent from the hardware.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: Pointer to original MPT request frame (NULL if TurboReply)
+ *	@reply: Pointer to MPT reply frame
+ *
+ *	Returns 1 indicating original alloc'd request frame ptr
+ *	should be freed, or 0 if it shouldn't.
+ */
+static int
+lan_reply (MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *reply)
+{
+	struct net_device *dev = ioc->netdev;
+	int FreeReqFrame = 0;
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: Got reply.\n",
+		  IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+//	dioprintk((KERN_INFO MYNAM "@lan_reply: mf = %p, reply = %p\n",
+//			mf, reply));
+
+	if (mf == NULL) {
+		u32 tmsg = CAST_PTR_TO_U32(reply);
+
+		dioprintk((KERN_INFO MYNAM ": %s/%s: @lan_reply, tmsg %08x\n",
+				IOC_AND_NETDEV_NAMES_s_s(dev),
+				tmsg));
+
+		switch (GET_LAN_FORM(tmsg)) {
+
+		// NOTE!  (Optimization) First case here is now caught in
+		//  mptbase.c::mpt_interrupt() routine and callback here
+		//  is now skipped for this case!  20001218 -sralston
+#if 0
+		case LAN_REPLY_FORM_MESSAGE_CONTEXT:
+//			dioprintk((KERN_INFO MYNAM "/lan_reply: "
+//				  "MessageContext turbo reply received\n"));
+			FreeReqFrame = 1;
+			break;
+#endif
+
+		case LAN_REPLY_FORM_SEND_SINGLE:
+//			dioprintk((MYNAM "/lan_reply: "
+//				  "calling mpt_lan_send_reply (turbo)\n"));
+
+			// Potential BUG here?  -sralston
+			//	FreeReqFrame = mpt_lan_send_turbo(dev, tmsg);
+			//  If/when mpt_lan_send_turbo would return 1 here,
+			//  calling routine (mptbase.c|mpt_interrupt)
+			//  would Oops because mf has already been set
+			//  to NULL.  So after return from this func,
+			//  mpt_interrupt() will attempt to put (NULL) mf ptr
+			//  item back onto its adapter FreeQ - Oops!:-(
+			//  It's Ok, since mpt_lan_send_turbo() *currently*
+			//  always returns 0, but..., just in case:
+
+			(void) mpt_lan_send_turbo(dev, tmsg);
+			FreeReqFrame = 0;
+
+			break;
+
+		case LAN_REPLY_FORM_RECEIVE_SINGLE:
+//			dioprintk((KERN_INFO MYNAM "@lan_reply: "
+//				  "rcv-Turbo = %08x\n", tmsg));
+			mpt_lan_receive_post_turbo(dev, tmsg);
+			break;
+
+		default:
+			printk (KERN_ERR MYNAM "/lan_reply: Got a turbo reply "
+				"that I don't know what to do with\n");
+
+			/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
+
+			break;
+		}
+
+		return FreeReqFrame;
+	}
+
+//	msg = (u32 *) reply;
+//	dioprintk((KERN_INFO MYNAM "@lan_reply: msg = %08x %08x %08x %08x\n",
+//		  le32_to_cpu(msg[0]), le32_to_cpu(msg[1]),
+//		  le32_to_cpu(msg[2]), le32_to_cpu(msg[3])));
+//	dioprintk((KERN_INFO MYNAM "@lan_reply: Function = %02xh\n",
+//		  reply->u.hdr.Function));
+
+	switch (reply->u.hdr.Function) {
+
+	case MPI_FUNCTION_LAN_SEND:
+	{
+		LANSendReply_t *pSendRep;
+
+		pSendRep = (LANSendReply_t *) reply;
+		FreeReqFrame = mpt_lan_send_reply(dev, pSendRep);
+		break;
+	}
+
+	case MPI_FUNCTION_LAN_RECEIVE:
+	{
+		LANReceivePostReply_t *pRecvRep;
+
+		pRecvRep = (LANReceivePostReply_t *) reply;
+		if (pRecvRep->NumberOfContexts) {
+			mpt_lan_receive_post_reply(dev, pRecvRep);
+			if (!(pRecvRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
+				FreeReqFrame = 1;
+		} else
+			dioprintk((KERN_INFO MYNAM "@lan_reply: zero context "
+				  "ReceivePostReply received.\n"));
+		break;
+	}
+
+	case MPI_FUNCTION_LAN_RESET:
+		/* Just a default reply. Might want to check it to
+		 * make sure that everything went ok.
+		 */
+		FreeReqFrame = 1;
+		break;
+
+	case MPI_FUNCTION_EVENT_NOTIFICATION:
+	case MPI_FUNCTION_EVENT_ACK:
+		/* UPDATE!  20010120 -sralston
+		 *  _EVENT_NOTIFICATION should NOT come down this path any more.
+		 *  Should be routed to mpt_lan_event_process(), but just in case...
+		 */
+		FreeReqFrame = 1;
+		break;
+
+	default:
+		printk (KERN_ERR MYNAM "/lan_reply: Got a non-turbo "
+			"reply that I don't know what to do with\n");
+
+		/* CHECKME!  Hmmm...  FreeReqFrame is 0 here; is that right? */
+		FreeReqFrame = 1;
+
+		break;
+	}
+
+	return FreeReqFrame;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+	struct net_device *dev = ioc->netdev;
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+
+	dlprintk((KERN_INFO MYNAM ": IOC %s_reset routed to LAN driver!\n",
+			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+	if (priv->mpt_rxfidx == NULL)
+		return (1);
+
+	if (reset_phase == MPT_IOC_SETUP_RESET) {
+		;
+	} else if (reset_phase == MPT_IOC_PRE_RESET) {
+		int i;
+		unsigned long flags;
+
+		netif_stop_queue(dev);
+
+		dlprintk ((KERN_INFO "mptlan/ioc_reset: called netif_stop_queue for %s.\n", dev->name));
+
+		atomic_set(&priv->buckets_out, 0);
+
+		/* Reset Rx Free Tail index and re-populate the queue. */
+		spin_lock_irqsave(&priv->rxfidx_lock, flags);
+		priv->mpt_rxfidx_tail = -1;
+		for (i = 0; i < priv->max_buckets_out; i++)
+			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
+		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+	} else {
+		mpt_lan_post_receive_buckets(dev);
+		netif_wake_queue(dev);
+	}
+
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+	dlprintk((KERN_INFO MYNAM ": MPT event routed to LAN driver!\n"));
+
+	switch (le32_to_cpu(pEvReply->Event)) {
+	case MPI_EVENT_NONE:				/* 00 */
+	case MPI_EVENT_LOG_DATA:			/* 01 */
+	case MPI_EVENT_STATE_CHANGE:			/* 02 */
+	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
+	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
+	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
+	case MPI_EVENT_RESCAN:				/* 06 */
+		/* Ok, do we need to do anything here? As far as
+		   I can tell, this is when a new device gets added
+		   to the loop. */
+	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
+	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
+	case MPI_EVENT_LOGOUT:				/* 09 */
+	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
+	default:
+		break;
+	}
+
+	/*
+	 *  NOTE: pEvent->AckRequired handling now done in mptbase.c;
+	 *  Do NOT do it here now!
+	 */
+
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_open(struct net_device *dev)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	int i;
+
+	if (mpt_lan_reset(dev) != 0) {
+		MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+
+		printk (KERN_WARNING MYNAM "/lan_open: lan_reset failed.");
+
+		if (mpt_dev->active)
+			printk ("The ioc is active. Perhaps it needs to be"
+				" reset?\n");
+		else
+			printk ("The ioc is inactive, most likely in the "
+				"process of being reset. Please try again in "
+				"a moment.\n");
+	}
+
+	priv->mpt_txfidx = kmalloc(priv->tx_max_out * sizeof(int), GFP_KERNEL);
+	if (priv->mpt_txfidx == NULL)
+		goto out;
+	priv->mpt_txfidx_tail = -1;
+
+	priv->SendCtl = kmalloc(priv->tx_max_out * sizeof(struct BufferControl),
+				GFP_KERNEL);
+	if (priv->SendCtl == NULL)
+		goto out_mpt_txfidx;
+	for (i = 0; i < priv->tx_max_out; i++) {
+		memset(&priv->SendCtl[i], 0, sizeof(struct BufferControl));
+		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = i;
+	}
+
+	dlprintk((KERN_INFO MYNAM "@lo: Finished initializing SendCtl\n"));
+
+	priv->mpt_rxfidx = kmalloc(priv->max_buckets_out * sizeof(int),
+				   GFP_KERNEL);
+	if (priv->mpt_rxfidx == NULL)
+		goto out_SendCtl;
+	priv->mpt_rxfidx_tail = -1;
+
+	priv->RcvCtl = kmalloc(priv->max_buckets_out *
+						sizeof(struct BufferControl),
+			       GFP_KERNEL);
+	if (priv->RcvCtl == NULL)
+		goto out_mpt_rxfidx;
+	for (i = 0; i < priv->max_buckets_out; i++) {
+		memset(&priv->RcvCtl[i], 0, sizeof(struct BufferControl));
+		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = i;
+	}
+
+/**/	dlprintk((KERN_INFO MYNAM "/lo: txfidx contains - "));
+/**/	for (i = 0; i < priv->tx_max_out; i++)
+/**/		dlprintk((" %xh", priv->mpt_txfidx[i]));
+/**/	dlprintk(("\n"));
+
+	dlprintk((KERN_INFO MYNAM "/lo: Finished initializing RcvCtl\n"));
+
+	mpt_lan_post_receive_buckets(dev);
+	printk(KERN_INFO MYNAM ": %s/%s: interface up & active\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev));
+
+	if (mpt_event_register(LanCtx, mpt_lan_event_process) != 0) {
+		printk (KERN_WARNING MYNAM "/lo: Unable to register for Event"
+			" Notifications. This is a bad thing! We're not going "
+			"to go ahead, but I'd be leery of system stability at "
+			"this point.\n");
+	}
+
+	netif_start_queue(dev);
+	dlprintk((KERN_INFO MYNAM "/lo: Done.\n"));
+
+	return 0;
+out_mpt_rxfidx:
+	kfree(priv->mpt_rxfidx);
+	priv->mpt_rxfidx = NULL;
+out_SendCtl:
+	kfree(priv->SendCtl);
+	priv->SendCtl = NULL;
+out_mpt_txfidx:
+	kfree(priv->mpt_txfidx);
+	priv->mpt_txfidx = NULL;
+out:	return -ENOMEM;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Send a LanReset message to the FW. This should result in the FW returning
+   any buckets it still has. */
+static int
+mpt_lan_reset(struct net_device *dev)
+{
+	MPT_FRAME_HDR *mf;
+	LANResetRequest_t *pResetReq;
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+
+	mf = mpt_get_msg_frame(LanCtx, priv->mpt_dev);
+
+	if (mf == NULL) {
+/*		dlprintk((KERN_ERR MYNAM "/reset: Evil funkiness abounds! "
+		"Unable to allocate a request frame.\n"));
+*/
+		return -1;
+	}
+
+	pResetReq = (LANResetRequest_t *) mf;
+
+	pResetReq->Function	= MPI_FUNCTION_LAN_RESET;
+	pResetReq->ChainOffset	= 0;
+	pResetReq->Reserved	= 0;
+	pResetReq->PortNumber	= priv->pnum;
+	pResetReq->MsgFlags	= 0;
+	pResetReq->Reserved2	= 0;
+
+	mpt_put_msg_frame(LanCtx, priv->mpt_dev, mf);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_close(struct net_device *dev)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	unsigned int timeout;
+	int i;
+
+	dlprintk((KERN_INFO MYNAM ": mpt_lan_close called\n"));
+
+	mpt_event_deregister(LanCtx);
+
+	dlprintk((KERN_INFO MYNAM ":lan_close: Posted %d buckets "
+		  "since driver was loaded, %d still out\n",
+		  priv->total_posted,atomic_read(&priv->buckets_out)));
+
+	netif_stop_queue(dev);
+
+	mpt_lan_reset(dev);
+
+	timeout = 2 * HZ;
+	while (atomic_read(&priv->buckets_out) && --timeout) {
+		set_current_state(TASK_INTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+	for (i = 0; i < priv->max_buckets_out; i++) {
+		if (priv->RcvCtl[i].skb != NULL) {
+/**/			dlprintk((KERN_INFO MYNAM "/lan_close: bucket %05x "
+/**/				  "is still out\n", i));
+			pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[i].dma,
+					 priv->RcvCtl[i].len,
+					 PCI_DMA_FROMDEVICE);
+			dev_kfree_skb(priv->RcvCtl[i].skb);
+		}
+	}
+
+	kfree (priv->RcvCtl);
+	kfree (priv->mpt_rxfidx);
+
+	for (i = 0; i < priv->tx_max_out; i++) {
+		if (priv->SendCtl[i].skb != NULL) {
+			pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[i].dma,
+					 priv->SendCtl[i].len,
+					 PCI_DMA_TODEVICE);
+			dev_kfree_skb(priv->SendCtl[i].skb);
+		}
+	}
+
+	kfree(priv->SendCtl);
+	kfree(priv->mpt_txfidx);
+
+	atomic_set(&priv->buckets_out, 0);
+
+	printk(KERN_INFO MYNAM ": %s/%s: interface down & inactive\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev));
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static struct net_device_stats *
+mpt_lan_get_stats(struct net_device *dev)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+
+	return (struct net_device_stats *) &priv->stats;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < MPT_LAN_MIN_MTU) || (new_mtu > MPT_LAN_MAX_MTU))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Tx timeout handler. */
+static void
+mpt_lan_tx_timeout(struct net_device *dev)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+
+	if (mpt_dev->active) {
+		dlprintk (("mptlan/tx_timeout: calling netif_wake_queue for %s.\n", dev->name));
+		netif_wake_queue(dev);
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+//static inline int
+static int
+mpt_lan_send_turbo(struct net_device *dev, u32 tmsg)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	struct sk_buff *sent;
+	unsigned long flags;
+	u32 ctx;
+
+	ctx = GET_LAN_BUFFER_CONTEXT(tmsg);
+	sent = priv->SendCtl[ctx].skb;
+
+	priv->stats.tx_packets++;
+	priv->stats.tx_bytes += sent->len;
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			__FUNCTION__, sent));
+
+	priv->SendCtl[ctx].skb = NULL;
+	pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
+			 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+	dev_kfree_skb_irq(sent);
+
+	spin_lock_irqsave(&priv->txfidx_lock, flags);
+	priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
+	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+	netif_wake_queue(dev);
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_send_reply(struct net_device *dev, LANSendReply_t *pSendRep)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	struct sk_buff *sent;
+	unsigned long flags;
+	int FreeReqFrame = 0;
+	u32 *pContext;
+	u32 ctx;
+	u8 count;
+
+	count = pSendRep->NumberOfContexts;
+
+	dioprintk((KERN_INFO MYNAM ": send_reply: IOCStatus: %04x\n",
+		 le16_to_cpu(pSendRep->IOCStatus)));
+
+	/* Add check for Loginfo Flag in IOCStatus */
+
+	switch (le16_to_cpu(pSendRep->IOCStatus) & MPI_IOCSTATUS_MASK) {
+	case MPI_IOCSTATUS_SUCCESS:
+		priv->stats.tx_packets += count;
+		break;
+
+	case MPI_IOCSTATUS_LAN_CANCELED:
+	case MPI_IOCSTATUS_LAN_TRANSMIT_ABORTED:
+		break;
+
+	case MPI_IOCSTATUS_INVALID_SGL:
+		priv->stats.tx_errors += count;
+		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Invalid SGL sent to IOC!\n",
+				IOC_AND_NETDEV_NAMES_s_s(dev));
+		goto out;
+
+	default:
+		priv->stats.tx_errors += count;
+		break;
+	}
+
+	pContext = &pSendRep->BufferContext;
+
+	spin_lock_irqsave(&priv->txfidx_lock, flags);
+	while (count > 0) {
+		ctx = GET_LAN_BUFFER_CONTEXT(le32_to_cpu(*pContext));
+
+		sent = priv->SendCtl[ctx].skb;
+		priv->stats.tx_bytes += sent->len;
+
+		dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, skb %p sent.\n",
+				IOC_AND_NETDEV_NAMES_s_s(dev),
+				__FUNCTION__, sent));
+
+		priv->SendCtl[ctx].skb = NULL;
+		pci_unmap_single(mpt_dev->pcidev, priv->SendCtl[ctx].dma,
+				 priv->SendCtl[ctx].len, PCI_DMA_TODEVICE);
+		dev_kfree_skb_irq(sent);
+
+		priv->mpt_txfidx[++priv->mpt_txfidx_tail] = ctx;
+
+		pContext++;
+		count--;
+	}
+	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+out:
+	if (!(pSendRep->MsgFlags & MPI_MSGFLAGS_CONTINUATION_REPLY))
+		FreeReqFrame = 1;
+
+	netif_wake_queue(dev);
+	return FreeReqFrame;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_sdu_send (struct sk_buff *skb, struct net_device *dev)
+{
+	struct mpt_lan_priv *priv = netdev_priv(dev);
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	MPT_FRAME_HDR *mf;
+	LANSendRequest_t *pSendReq;
+	SGETransaction32_t *pTrans;
+	SGESimple64_t *pSimple;
+	dma_addr_t dma;
+	unsigned long flags;
+	int ctx;
+	u16 cur_naa = 0x1000;
+
+	dioprintk((KERN_INFO MYNAM ": %s called, skb_addr = %p\n",
+			__FUNCTION__, skb));
+
+	spin_lock_irqsave(&priv->txfidx_lock, flags);
+	if (priv->mpt_txfidx_tail < 0) {
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+		printk (KERN_ERR "%s: no tx context available: %u\n",
+			__FUNCTION__, priv->mpt_txfidx_tail);
+		return 1;
+	}
+
+	mf = mpt_get_msg_frame(LanCtx, mpt_dev);
+	if (mf == NULL) {
+		netif_stop_queue(dev);
+		spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+		printk (KERN_ERR "%s: Unable to alloc request frame\n",
+			__FUNCTION__);
+		return 1;
+	}
+
+	ctx = priv->mpt_txfidx[priv->mpt_txfidx_tail--];
+	spin_unlock_irqrestore(&priv->txfidx_lock, flags);
+
+//	dioprintk((KERN_INFO MYNAM ": %s/%s: Creating new msg frame (send).\n",
+//			IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+	pSendReq = (LANSendRequest_t *) mf;
+
+	/* Set the mac.raw pointer, since this apparently isn't getting
+	 * done before we get the skb. Pull the data pointer past the mac data.
+	 */
+	skb->mac.raw = skb->data;
+	skb_pull(skb, 12);
+
+	dma = pci_map_single(mpt_dev->pcidev, skb->data, skb->len,
+			     PCI_DMA_TODEVICE);
+
+	priv->SendCtl[ctx].skb = skb;
+	priv->SendCtl[ctx].dma = dma;
+	priv->SendCtl[ctx].len = skb->len;
+
+	/* Message Header */
+	pSendReq->Reserved    = 0;
+	pSendReq->Function    = MPI_FUNCTION_LAN_SEND;
+	pSendReq->ChainOffset = 0;
+	pSendReq->Reserved2   = 0;
+	pSendReq->MsgFlags    = 0;
+	pSendReq->PortNumber  = priv->pnum;
+
+	/* Transaction Context Element */
+	pTrans = (SGETransaction32_t *) pSendReq->SG_List;
+
+	/* No Flags, 8 bytes of Details, 32bit Context (bloody turbo replies) */
+	pTrans->ContextSize   = sizeof(u32);
+	pTrans->DetailsLength = 2 * sizeof(u32);
+	pTrans->Flags         = 0;
+	pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+
+//	dioprintk((KERN_INFO MYNAM ": %s/%s: BC = %08x, skb = %p, buff = %p\n",
+//			IOC_AND_NETDEV_NAMES_s_s(dev),
+//			ctx, skb, skb->data));
+
+#ifdef QLOGIC_NAA_WORKAROUND
+{
+	struct NAA_Hosed *nh;
+
+	/* Munge the NAA for Tx packets to QLogic boards, which don't follow
+	   RFC 2625. The longer I look at this, the more my opinion of Qlogic
+	   drops. */
+	read_lock_irq(&bad_naa_lock);
+	for (nh = mpt_bad_naa; nh != NULL; nh=nh->next) {
+		if ((nh->ieee[0] == skb->mac.raw[0]) &&
+		    (nh->ieee[1] == skb->mac.raw[1]) &&
+		    (nh->ieee[2] == skb->mac.raw[2]) &&
+		    (nh->ieee[3] == skb->mac.raw[3]) &&
+		    (nh->ieee[4] == skb->mac.raw[4]) &&
+		    (nh->ieee[5] == skb->mac.raw[5])) {
+			cur_naa = nh->NAA;
+			dlprintk ((KERN_INFO "mptlan/sdu_send: using NAA value "
+				  "= %04x.\n", cur_naa));
+			break;
+		}
+	}
+	read_unlock_irq(&bad_naa_lock);
+}
+#endif
+
+	pTrans->TransactionDetails[0] = cpu_to_le32((cur_naa         << 16) |
+						    (skb->mac.raw[0] <<  8) |
+						    (skb->mac.raw[1] <<  0));
+	pTrans->TransactionDetails[1] = cpu_to_le32((skb->mac.raw[2] << 24) |
+						    (skb->mac.raw[3] << 16) |
+						    (skb->mac.raw[4] <<  8) |
+						    (skb->mac.raw[5] <<  0));
+
+	pSimple = (SGESimple64_t *) &pTrans->TransactionDetails[2];
+
+	/* If we ever decide to send more than one Simple SGE per LANSend, then
+	   we will need to make sure that LAST_ELEMENT only gets set on the
+	   last one. Otherwise, bad voodoo and evil funkiness will commence. */
+	pSimple->FlagsLength = cpu_to_le32(
+			((MPI_SGE_FLAGS_LAST_ELEMENT |
+			  MPI_SGE_FLAGS_END_OF_BUFFER |
+			  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+			  MPI_SGE_FLAGS_SYSTEM_ADDRESS |
+			  MPI_SGE_FLAGS_HOST_TO_IOC |
+			  MPI_SGE_FLAGS_64_BIT_ADDRESSING |
+			  MPI_SGE_FLAGS_END_OF_LIST) << MPI_SGE_FLAGS_SHIFT) |
+			skb->len);
+	pSimple->Address.Low = cpu_to_le32((u32) dma);
+	if (sizeof(dma_addr_t) > sizeof(u32))
+		pSimple->Address.High = cpu_to_le32((u32) ((u64) dma >> 32));
+	else
+		pSimple->Address.High = 0;
+
+	mpt_put_msg_frame (LanCtx, mpt_dev, mf);
+	dev->trans_start = jiffies;
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: Sending packet. FlagsLength = %08x.\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			le32_to_cpu(pSimple->FlagsLength)));
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * @priority: 0 = put it on the timer queue, 1 = put it on the immediate queue
+ */
+static inline void
+mpt_lan_wake_post_buckets_task(struct net_device *dev, int priority)
+{
+	struct mpt_lan_priv *priv = dev->priv;
+	
+	if (test_and_set_bit(0, &priv->post_buckets_active) == 0) {
+		if (priority) {
+			schedule_work(&priv->post_buckets_task);
+		} else {
+			schedule_delayed_work(&priv->post_buckets_task, 1);
+			dioprintk((KERN_INFO MYNAM ": post_buckets queued on "
+				   "timer.\n"));
+		}
+	        dioprintk((KERN_INFO MYNAM ": %s/%s: Queued post_buckets task.\n",
+			   IOC_AND_NETDEV_NAMES_s_s(dev) ));
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static inline int
+mpt_lan_receive_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	struct mpt_lan_priv *priv = dev->priv;
+
+	skb->protocol = mpt_lan_type_trans(skb, dev);
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: Incoming packet (%d bytes) "
+		 "delivered to upper level.\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev), skb->len));
+
+	priv->stats.rx_bytes += skb->len;
+	priv->stats.rx_packets++;
+
+	skb->dev = dev;
+	netif_rx(skb);
+
+	dioprintk((MYNAM "/receive_skb: %d buckets remaining\n",
+		 atomic_read(&priv->buckets_out)));
+
+	if (atomic_read(&priv->buckets_out) < priv->bucketthresh)
+		mpt_lan_wake_post_buckets_task(dev, 1);
+
+	dioprintk((KERN_INFO MYNAM "/receive_post_reply: %d buckets "
+		  "remaining, %d received back since sod\n",
+		  atomic_read(&priv->buckets_out), priv->total_received));
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+//static inline int
+static int
+mpt_lan_receive_post_turbo(struct net_device *dev, u32 tmsg)
+{
+	struct mpt_lan_priv *priv = dev->priv;
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	struct sk_buff *skb, *old_skb;
+	unsigned long flags;
+	u32 ctx, len;
+
+	ctx = GET_LAN_BUCKET_CONTEXT(tmsg);
+	skb = priv->RcvCtl[ctx].skb;
+
+	len = GET_LAN_PACKET_LENGTH(tmsg);
+
+	if (len < MPT_LAN_RX_COPYBREAK) {
+		old_skb = skb;
+
+		skb = (struct sk_buff *)dev_alloc_skb(len);
+		if (!skb) {
+			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+					IOC_AND_NETDEV_NAMES_s_s(dev),
+					__FILE__, __LINE__);
+			return -ENOMEM;
+		}
+
+		pci_dma_sync_single_for_cpu(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+					    priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+
+		memcpy(skb_put(skb, len), old_skb->data, len);
+
+		pci_dma_sync_single_for_device(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+					       priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+		goto out;
+	}
+
+	skb_put(skb, len);
+
+	priv->RcvCtl[ctx].skb = NULL;
+
+	pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+			 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+
+out:
+	spin_lock_irqsave(&priv->rxfidx_lock, flags);
+	priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+	atomic_dec(&priv->buckets_out);
+	priv->total_received++;
+
+	return mpt_lan_receive_skb(dev, skb);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_receive_post_free(struct net_device *dev,
+			  LANReceivePostReply_t *pRecvRep)
+{
+	struct mpt_lan_priv *priv = dev->priv;
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	unsigned long flags;
+	struct sk_buff *skb;
+	u32 ctx;
+	int count;
+	int i;
+
+	count = pRecvRep->NumberOfContexts;
+
+/**/	dlprintk((KERN_INFO MYNAM "/receive_post_reply: "
+		  "IOC returned %d buckets, freeing them...\n", count));
+
+	spin_lock_irqsave(&priv->rxfidx_lock, flags);
+	for (i = 0; i < count; i++) {
+		ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
+
+		skb = priv->RcvCtl[ctx].skb;
+
+//		dlprintk((KERN_INFO MYNAM ": %s: dev_name = %s\n",
+//				IOC_AND_NETDEV_NAMES_s_s(dev)));
+//		dlprintk((KERN_INFO MYNAM "@rpr[2], priv = %p, buckets_out addr = %p",
+//				priv, &(priv->buckets_out)));
+//		dlprintk((KERN_INFO MYNAM "@rpr[2] TC + 3\n"));
+
+		priv->RcvCtl[ctx].skb = NULL;
+		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+		dev_kfree_skb_any(skb);
+
+		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+	}
+	spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+	atomic_sub(count, &priv->buckets_out);
+
+//	for (i = 0; i < priv->max_buckets_out; i++)
+//		if (priv->RcvCtl[i].skb != NULL)
+//			dlprintk((KERN_INFO MYNAM "@rpr: bucket %03x "
+//				  "is still out\n", i));
+
+/*	dlprintk((KERN_INFO MYNAM "/receive_post_reply: freed %d buckets\n",
+		  count));
+*/
+/**/	dlprintk((KERN_INFO MYNAM "@receive_post_reply: %d buckets "
+/**/		  "remaining, %d received back since sod.\n",
+/**/		  atomic_read(&priv->buckets_out), priv->total_received));
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mpt_lan_receive_post_reply(struct net_device *dev,
+			   LANReceivePostReply_t *pRecvRep)
+{
+	struct mpt_lan_priv *priv = dev->priv;
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	struct sk_buff *skb, *old_skb;
+	unsigned long flags;
+	u32 len, ctx, offset;
+	u32 remaining = le32_to_cpu(pRecvRep->BucketsRemaining);
+	int count;
+	int i, l;
+
+	dioprintk((KERN_INFO MYNAM ": mpt_lan_receive_post_reply called\n"));
+	dioprintk((KERN_INFO MYNAM ": receive_post_reply: IOCStatus: %04x\n",
+		 le16_to_cpu(pRecvRep->IOCStatus)));
+
+	if ((le16_to_cpu(pRecvRep->IOCStatus) & MPI_IOCSTATUS_MASK) ==
+						MPI_IOCSTATUS_LAN_CANCELED)
+		return mpt_lan_receive_post_free(dev, pRecvRep);
+
+	len = le32_to_cpu(pRecvRep->PacketLength);
+	if (len == 0) {
+		printk (KERN_ERR MYNAM ": %s/%s: ERROR - Got a non-TURBO "
+			"ReceivePostReply w/ PacketLength zero!\n",
+				IOC_AND_NETDEV_NAMES_s_s(dev));
+		printk (KERN_ERR MYNAM ": MsgFlags = %02x, IOCStatus = %04x\n",
+				pRecvRep->MsgFlags, le16_to_cpu(pRecvRep->IOCStatus));
+		return -1;
+	}
+
+	ctx    = le32_to_cpu(pRecvRep->BucketContext[0]);
+	count  = pRecvRep->NumberOfContexts;
+	skb    = priv->RcvCtl[ctx].skb;
+
+	offset = le32_to_cpu(pRecvRep->PacketOffset);
+//	if (offset != 0) {
+//		printk (KERN_INFO MYNAM ": %s/%s: Got a ReceivePostReply "
+//			"w/ PacketOffset %u\n",
+//				IOC_AND_NETDEV_NAMES_s_s(dev),
+//				offset);
+//	}
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: @rpr, offset = %d, len = %d\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			offset, len));
+
+	if (count > 1) {
+		int szrem = len;
+
+//		dioprintk((KERN_INFO MYNAM ": %s/%s: Multiple buckets returned "
+//			"for single packet, concatenating...\n",
+//				IOC_AND_NETDEV_NAMES_s_s(dev)));
+
+		skb = (struct sk_buff *)dev_alloc_skb(len);
+		if (!skb) {
+			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+					IOC_AND_NETDEV_NAMES_s_s(dev),
+					__FILE__, __LINE__);
+			return -ENOMEM;
+		}
+
+		spin_lock_irqsave(&priv->rxfidx_lock, flags);
+		for (i = 0; i < count; i++) {
+
+			ctx = le32_to_cpu(pRecvRep->BucketContext[i]);
+			old_skb = priv->RcvCtl[ctx].skb;
+
+			l = priv->RcvCtl[ctx].len;
+			if (szrem < l)
+				l = szrem;
+
+//			dioprintk((KERN_INFO MYNAM ": %s/%s: Buckets = %d, len = %u\n",
+//					IOC_AND_NETDEV_NAMES_s_s(dev),
+//					i, l));
+
+			pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
+						    priv->RcvCtl[ctx].dma,
+						    priv->RcvCtl[ctx].len,
+						    PCI_DMA_FROMDEVICE);
+			memcpy(skb_put(skb, l), old_skb->data, l);
+
+			pci_dma_sync_single_for_device(mpt_dev->pcidev,
+						       priv->RcvCtl[ctx].dma,
+						       priv->RcvCtl[ctx].len,
+						       PCI_DMA_FROMDEVICE);
+
+			priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+			szrem -= l;
+		}
+		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+	} else if (len < MPT_LAN_RX_COPYBREAK) {
+
+		old_skb = skb;
+
+		skb = (struct sk_buff *)dev_alloc_skb(len);
+		if (!skb) {
+			printk (KERN_ERR MYNAM ": %s/%s: ERROR - Can't allocate skb! (%s@%d)\n",
+					IOC_AND_NETDEV_NAMES_s_s(dev),
+					__FILE__, __LINE__);
+			return -ENOMEM;
+		}
+
+		pci_dma_sync_single_for_cpu(mpt_dev->pcidev,
+					    priv->RcvCtl[ctx].dma,
+					    priv->RcvCtl[ctx].len,
+					    PCI_DMA_FROMDEVICE);
+
+		memcpy(skb_put(skb, len), old_skb->data, len);
+
+		pci_dma_sync_single_for_device(mpt_dev->pcidev,
+					       priv->RcvCtl[ctx].dma,
+					       priv->RcvCtl[ctx].len,
+					       PCI_DMA_FROMDEVICE);
+
+		spin_lock_irqsave(&priv->rxfidx_lock, flags);
+		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+	} else {
+		spin_lock_irqsave(&priv->rxfidx_lock, flags);
+
+		priv->RcvCtl[ctx].skb = NULL;
+
+		pci_unmap_single(mpt_dev->pcidev, priv->RcvCtl[ctx].dma,
+				 priv->RcvCtl[ctx].len, PCI_DMA_FROMDEVICE);
+		priv->RcvCtl[ctx].dma = 0;
+
+		priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+		spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+		skb_put(skb,len);
+	}
+
+	atomic_sub(count, &priv->buckets_out);
+	priv->total_received += count;
+
+	if (priv->mpt_rxfidx_tail >= MPT_LAN_MAX_BUCKETS_OUT) {
+		printk (KERN_ERR MYNAM ": %s/%s: Yoohoo! mpt_rxfidx_tail = %d, "
+			"MPT_LAN_MAX_BUCKETS_OUT = %d\n",
+				IOC_AND_NETDEV_NAMES_s_s(dev),
+				priv->mpt_rxfidx_tail,
+				MPT_LAN_MAX_BUCKETS_OUT);
+
+		panic("Damn it Jim! I'm a doctor, not a programmer! "
+				"Oh, wait a sec, I am a programmer. "
+				"And, who's Jim?!?!\n"
+				"Arrgghh! We've done it again!\n");
+	}
+
+	if (remaining == 0)
+		printk (KERN_WARNING MYNAM ": %s/%s: WARNING - IOC out of buckets! "
+			"(priv->buckets_out = %d)\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			atomic_read(&priv->buckets_out));
+	else if (remaining < 10)
+		printk (KERN_INFO MYNAM ": %s/%s: IOC says %d buckets left. "
+			"(priv->buckets_out = %d)\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			remaining, atomic_read(&priv->buckets_out));
+	
+	if ((remaining < priv->bucketthresh) &&
+	    ((atomic_read(&priv->buckets_out) - remaining) >
+	     MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH)) {
+		
+		printk (KERN_WARNING MYNAM " Mismatch between driver's "
+			"buckets_out count and fw's BucketsRemaining "
+			"count has crossed the threshold, issuing a "
+			"LanReset to clear the fw's hashtable. You may "
+			"want to check your /var/log/messages for \"CRC "
+			"error\" event notifications.\n");
+		
+		mpt_lan_reset(dev);
+		mpt_lan_wake_post_buckets_task(dev, 0);
+	}
+	
+	return mpt_lan_receive_skb(dev, skb);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Simple SGE's only at the moment */
+
+static void
+mpt_lan_post_receive_buckets(void *dev_id)
+{
+	struct net_device *dev = dev_id;
+	struct mpt_lan_priv *priv = dev->priv;
+	MPT_ADAPTER *mpt_dev = priv->mpt_dev;
+	MPT_FRAME_HDR *mf;
+	LANReceivePostRequest_t *pRecvReq;
+	SGETransaction32_t *pTrans;
+	SGESimple64_t *pSimple;
+	struct sk_buff *skb;
+	dma_addr_t dma;
+	u32 curr, buckets, count, max;
+	u32 len = (dev->mtu + dev->hard_header_len + 4);
+	unsigned long flags;
+	int i;
+
+	curr = atomic_read(&priv->buckets_out);
+	buckets = (priv->max_buckets_out - curr);
+
+	dioprintk((KERN_INFO MYNAM ": %s/%s: @%s, Start_buckets = %u, buckets_out = %u\n",
+			IOC_AND_NETDEV_NAMES_s_s(dev),
+			__FUNCTION__, buckets, curr));
+
+	max = (mpt_dev->req_sz - MPT_LAN_RECEIVE_POST_REQUEST_SIZE) /
+			(MPT_LAN_TRANSACTION32_SIZE + sizeof(SGESimple64_t));
+
+	while (buckets) {
+		mf = mpt_get_msg_frame(LanCtx, mpt_dev);
+		if (mf == NULL) {
+			printk (KERN_ERR "%s: Unable to alloc request frame\n",
+				__FUNCTION__);
+			dioprintk((KERN_ERR "%s: %u buckets remaining\n",
+				 __FUNCTION__, buckets));
+			goto out;
+		}
+		pRecvReq = (LANReceivePostRequest_t *) mf;
+
+		count = buckets;
+		if (count > max)
+			count = max;
+
+		pRecvReq->Function    = MPI_FUNCTION_LAN_RECEIVE;
+		pRecvReq->ChainOffset = 0;
+		pRecvReq->MsgFlags    = 0;
+		pRecvReq->PortNumber  = priv->pnum;
+
+		pTrans = (SGETransaction32_t *) pRecvReq->SG_List;
+		pSimple = NULL;
+
+		for (i = 0; i < count; i++) {
+			int ctx;
+
+			spin_lock_irqsave(&priv->rxfidx_lock, flags);
+			if (priv->mpt_rxfidx_tail < 0) {
+				printk (KERN_ERR "%s: Can't alloc context\n",
+					__FUNCTION__);
+				spin_unlock_irqrestore(&priv->rxfidx_lock,
+						       flags);
+				break;
+			}
+
+			ctx = priv->mpt_rxfidx[priv->mpt_rxfidx_tail--];
+
+			skb = priv->RcvCtl[ctx].skb;
+			if (skb && (priv->RcvCtl[ctx].len != len)) {
+				pci_unmap_single(mpt_dev->pcidev,
+						 priv->RcvCtl[ctx].dma,
+						 priv->RcvCtl[ctx].len,
+						 PCI_DMA_FROMDEVICE);
+				dev_kfree_skb(priv->RcvCtl[ctx].skb);
+				skb = priv->RcvCtl[ctx].skb = NULL;
+			}
+
+			if (skb == NULL) {
+				skb = dev_alloc_skb(len);
+				if (skb == NULL) {
+					printk (KERN_WARNING
+						MYNAM "/%s: Can't alloc skb\n",
+						__FUNCTION__);
+					priv->mpt_rxfidx[++priv->mpt_rxfidx_tail] = ctx;
+					spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+					break;
+				}
+
+				dma = pci_map_single(mpt_dev->pcidev, skb->data,
+						     len, PCI_DMA_FROMDEVICE);
+
+				priv->RcvCtl[ctx].skb = skb;
+				priv->RcvCtl[ctx].dma = dma;
+				priv->RcvCtl[ctx].len = len;
+			}
+
+			spin_unlock_irqrestore(&priv->rxfidx_lock, flags);
+
+			pTrans->ContextSize   = sizeof(u32);
+			pTrans->DetailsLength = 0;
+			pTrans->Flags         = 0;
+			pTrans->TransactionContext[0] = cpu_to_le32(ctx);
+
+			pSimple = (SGESimple64_t *) pTrans->TransactionDetails;
+
+			pSimple->FlagsLength = cpu_to_le32(
+				((MPI_SGE_FLAGS_END_OF_BUFFER |
+				  MPI_SGE_FLAGS_SIMPLE_ELEMENT |
+				  MPI_SGE_FLAGS_64_BIT_ADDRESSING) << MPI_SGE_FLAGS_SHIFT) | len);
+			pSimple->Address.Low = cpu_to_le32((u32) priv->RcvCtl[ctx].dma);
+			if (sizeof(dma_addr_t) > sizeof(u32))
+				pSimple->Address.High = cpu_to_le32((u32) ((u64) priv->RcvCtl[ctx].dma >> 32));
+			else
+				pSimple->Address.High = 0;
+
+			pTrans = (SGETransaction32_t *) (pSimple + 1);
+		}
+
+		if (pSimple == NULL) {
+/**/			printk (KERN_WARNING MYNAM "/%s: No buckets posted\n",
+/**/				__FUNCTION__);
+			mpt_free_msg_frame(mpt_dev, mf);
+			goto out;
+		}
+
+		pSimple->FlagsLength |= cpu_to_le32(MPI_SGE_FLAGS_END_OF_LIST << MPI_SGE_FLAGS_SHIFT);
+
+		pRecvReq->BucketCount = cpu_to_le32(i);
+
+/*	printk(KERN_INFO MYNAM ": posting buckets\n   ");
+ *	for (i = 0; i < j + 2; i ++)
+ *	    printk (" %08x", le32_to_cpu(msg[i]));
+ *	printk ("\n");
+ */
+
+		mpt_put_msg_frame(LanCtx, mpt_dev, mf);
+
+		priv->total_posted += i;
+		buckets -= i;
+		atomic_add(i, &priv->buckets_out);
+	}
+
+out:
+	dioprintk((KERN_INFO MYNAM "/%s: End_buckets = %u, priv->buckets_out = %u\n",
+		  __FUNCTION__, buckets, atomic_read(&priv->buckets_out)));
+	dioprintk((KERN_INFO MYNAM "/%s: Posted %u buckets and received %u back\n",
+	__FUNCTION__, priv->total_posted, priv->total_received));
+
+	clear_bit(0, &priv->post_buckets_active);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static struct net_device *
+mpt_register_lan_device (MPT_ADAPTER *mpt_dev, int pnum)
+{
+	struct net_device *dev = alloc_fcdev(sizeof(struct mpt_lan_priv));
+	struct mpt_lan_priv *priv = NULL;
+	u8 HWaddr[FC_ALEN], *a;
+
+	if (!dev)
+		return NULL;
+
+	dev->mtu = MPT_LAN_MTU;
+
+	priv = netdev_priv(dev);
+
+	priv->mpt_dev = mpt_dev;
+	priv->pnum = pnum;
+
+	memset(&priv->post_buckets_task, 0, sizeof(struct work_struct));
+	INIT_WORK(&priv->post_buckets_task, mpt_lan_post_receive_buckets, dev);
+	priv->post_buckets_active = 0;
+
+	dlprintk((KERN_INFO MYNAM "@%d: bucketlen = %d\n",
+			__LINE__, dev->mtu + dev->hard_header_len + 4));
+
+	atomic_set(&priv->buckets_out, 0);
+	priv->total_posted = 0;
+	priv->total_received = 0;
+	priv->max_buckets_out = max_buckets_out;
+	if (mpt_dev->pfacts[0].MaxLanBuckets < max_buckets_out)
+		priv->max_buckets_out = mpt_dev->pfacts[0].MaxLanBuckets;
+
+	dlprintk((KERN_INFO MYNAM "@%d: MaxLanBuckets=%d, max_buckets_out/priv=%d/%d\n",
+			__LINE__,
+			mpt_dev->pfacts[0].MaxLanBuckets,
+			max_buckets_out,
+			priv->max_buckets_out));
+
+	priv->bucketthresh = priv->max_buckets_out * 2 / 3;
+	spin_lock_init(&priv->txfidx_lock);
+	spin_lock_init(&priv->rxfidx_lock);
+
+	memset(&priv->stats, 0, sizeof(priv->stats));
+
+	/*  Grab pre-fetched LANPage1 stuff. :-) */
+	a = (u8 *) &mpt_dev->lan_cnfg_page1.HardwareAddressLow;
+
+	HWaddr[0] = a[5];
+	HWaddr[1] = a[4];
+	HWaddr[2] = a[3];
+	HWaddr[3] = a[2];
+	HWaddr[4] = a[1];
+	HWaddr[5] = a[0];
+
+	dev->addr_len = FC_ALEN;
+	memcpy(dev->dev_addr, HWaddr, FC_ALEN);
+	memset(dev->broadcast, 0xff, FC_ALEN);
+
+	/* The Tx queue is 127 deep on the 909.
+	 * Give ourselves some breathing room.
+	 */
+	priv->tx_max_out = (tx_max_out_p <= MPT_TX_MAX_OUT_LIM) ?
+			    tx_max_out_p : MPT_TX_MAX_OUT_LIM;
+
+	dev->open = mpt_lan_open;
+	dev->stop = mpt_lan_close;
+	dev->get_stats = mpt_lan_get_stats;
+	dev->set_multicast_list = NULL;
+	dev->change_mtu = mpt_lan_change_mtu;
+	dev->hard_start_xmit = mpt_lan_sdu_send;
+
+/* Not in 2.3.42. Need 2.3.45+ */
+	dev->tx_timeout = mpt_lan_tx_timeout;
+	dev->watchdog_timeo = MPT_LAN_TX_TIMEOUT;
+
+	dlprintk((KERN_INFO MYNAM ": Finished registering dev "
+		"and setting initial values\n"));
+
+	SET_MODULE_OWNER(dev);
+
+	if (register_netdev(dev) != 0) {
+		free_netdev(dev);
+		dev = NULL;
+	}
+	return dev;
+}
+
+static int
+mptlan_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+	struct net_device	*dev;
+	int			i;
+
+	for (i = 0; i < ioc->facts.NumberOfPorts; i++) {
+		printk(KERN_INFO MYNAM ": %s: PortNum=%x, "
+		       "ProtocolFlags=%02Xh (%c%c%c%c)\n",
+		       ioc->name, ioc->pfacts[i].PortNumber,
+		       ioc->pfacts[i].ProtocolFlags,
+		       MPT_PROTOCOL_FLAGS_c_c_c_c(
+			       ioc->pfacts[i].ProtocolFlags));
+
+		if (!(ioc->pfacts[i].ProtocolFlags &
+					MPI_PORTFACTS_PROTOCOL_LAN)) {
+			printk(KERN_INFO MYNAM ": %s: Hmmm... LAN protocol "
+			       "seems to be disabled on this adapter port!\n",
+			       ioc->name);
+			continue;
+		}
+
+		dev = mpt_register_lan_device(ioc, i);
+		if (!dev) {
+			printk(KERN_ERR MYNAM ": %s: Unable to register "
+			       "port%d as a LAN device\n", ioc->name,
+			       ioc->pfacts[i].PortNumber);
+			continue;
+		}
+		
+		printk(KERN_INFO MYNAM ": %s: Fusion MPT LAN device "
+		       "registered as '%s'\n", ioc->name, dev->name);
+		printk(KERN_INFO MYNAM ": %s/%s: "
+		       "LanAddr = %02X:%02X:%02X:%02X:%02X:%02X\n",
+		       IOC_AND_NETDEV_NAMES_s_s(dev),
+		       dev->dev_addr[0], dev->dev_addr[1],
+		       dev->dev_addr[2], dev->dev_addr[3],
+		       dev->dev_addr[4], dev->dev_addr[5]);
+	
+		ioc->netdev = dev;
+
+		return 0;
+	}
+
+	return -ENODEV;
+}
+
+static void
+mptlan_remove(struct pci_dev *pdev)
+{
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+	struct net_device	*dev = ioc->netdev;
+
+	if(dev != NULL) {
+		unregister_netdev(dev);
+		free_netdev(dev);
+	}
+}
+
+static struct mpt_pci_driver mptlan_driver = {
+	.probe		= mptlan_probe,
+	.remove		= mptlan_remove,
+};
+
+static int __init mpt_lan_init (void)
+{
+	show_mptmod_ver(LANAME, LANVER);
+
+	if ((LanCtx = mpt_register(lan_reply, MPTLAN_DRIVER)) <= 0) {
+		printk (KERN_ERR MYNAM ": Failed to register with MPT base driver\n");
+		return -EBUSY;
+	}
+
+	/* Set the callback index to be used by driver core for turbo replies */
+	mpt_lan_index = LanCtx;
+
+	dlprintk((KERN_INFO MYNAM ": assigned context of %d\n", LanCtx));
+
+	if (mpt_reset_register(LanCtx, mpt_lan_ioc_reset)) {
+		printk(KERN_ERR MYNAM ": Eieee! unable to register a reset "
+		       "handler with mptbase! The world is at an end! "
+		       "Everything is fading to black! Goodbye.\n");
+		return -EBUSY;
+	}
+
+	dlprintk((KERN_INFO MYNAM ": Registered for IOC reset notifications\n"));
+	
+	if (mpt_device_driver_register(&mptlan_driver, MPTLAN_DRIVER))
+		dprintk((KERN_INFO MYNAM ": failed to register dd callbacks\n"));
+	return 0;
+}
+
+static void __exit mpt_lan_exit(void)
+{
+	mpt_device_driver_deregister(MPTLAN_DRIVER);
+	mpt_reset_deregister(LanCtx);
+
+	if (LanCtx >= 0) {
+		mpt_deregister(LanCtx);
+		LanCtx = -1;
+		mpt_lan_index = 0;
+	}
+}
+
+module_init(mpt_lan_init);
+module_exit(mpt_lan_exit);
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static unsigned short
+mpt_lan_type_trans(struct sk_buff *skb, struct net_device *dev)
+{
+	struct mpt_lan_ohdr *fch = (struct mpt_lan_ohdr *)skb->data;
+	struct fcllc *fcllc;
+
+	skb->mac.raw = skb->data;
+	skb_pull(skb, sizeof(struct mpt_lan_ohdr));
+
+	if (fch->dtype == htons(0xffff)) {
+		u32 *p = (u32 *) fch;
+
+		swab32s(p + 0);
+		swab32s(p + 1);
+		swab32s(p + 2);
+		swab32s(p + 3);
+
+		printk (KERN_WARNING MYNAM ": %s: WARNING - Broadcast swap F/W bug detected!\n",
+				NETDEV_PTR_TO_IOC_NAME_s(dev));
+		printk (KERN_WARNING MYNAM ": Please update sender @ MAC_addr = %02x:%02x:%02x:%02x:%02x:%02x\n",
+				fch->saddr[0], fch->saddr[1], fch->saddr[2],
+				fch->saddr[3], fch->saddr[4], fch->saddr[5]);
+	}
+
+	if (*fch->daddr & 1) {
+		if (!memcmp(fch->daddr, dev->broadcast, FC_ALEN)) {
+			skb->pkt_type = PACKET_BROADCAST;
+		} else {
+			skb->pkt_type = PACKET_MULTICAST;
+		}
+	} else {
+		if (memcmp(fch->daddr, dev->dev_addr, FC_ALEN)) {
+			skb->pkt_type = PACKET_OTHERHOST;
+		} else {
+			skb->pkt_type = PACKET_HOST;
+		}
+	}
+
+	fcllc = (struct fcllc *)skb->data;
+
+#ifdef QLOGIC_NAA_WORKAROUND
+{
+	u16 source_naa = fch->stype, found = 0;
+
+	/* Workaround for QLogic not following RFC 2625 with regard to the NAA
+	   value. */
+
+	if ((source_naa & 0xF000) == 0)
+		source_naa = swab16(source_naa);
+
+	if (fcllc->ethertype == htons(ETH_P_ARP))
+	    dlprintk ((KERN_INFO "mptlan/type_trans: got arp req/rep w/ naa of "
+		      "%04x.\n", source_naa));
+
+	if ((fcllc->ethertype == htons(ETH_P_ARP)) &&
+	   ((source_naa >> 12) !=  MPT_LAN_NAA_RFC2625)){
+		struct NAA_Hosed *nh, *prevnh;
+		int i;
+
+		dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep from "
+			  "system with non-RFC 2625 NAA value (%04x).\n",
+			  source_naa));
+
+		write_lock_irq(&bad_naa_lock);
+		for (prevnh = nh = mpt_bad_naa; nh != NULL;
+		     prevnh=nh, nh=nh->next) {
+			if ((nh->ieee[0] == fch->saddr[0]) &&
+			    (nh->ieee[1] == fch->saddr[1]) &&
+			    (nh->ieee[2] == fch->saddr[2]) &&
+			    (nh->ieee[3] == fch->saddr[3]) &&
+			    (nh->ieee[4] == fch->saddr[4]) &&
+			    (nh->ieee[5] == fch->saddr[5])) {
+				found = 1;
+				dlprintk ((KERN_INFO "mptlan/type_trans: ARP Re"
+					 "q/Rep w/ bad NAA from system already"
+					 " in DB.\n"));
+				break;
+			}
+		}
+
+		if ((!found) && (nh == NULL)) {
+
+			nh = kmalloc(sizeof(struct NAA_Hosed), GFP_KERNEL);
+			dlprintk ((KERN_INFO "mptlan/type_trans: ARP Req/Rep w/"
+				 " bad NAA from system not yet in DB.\n"));
+
+			if (nh != NULL) {
+				nh->next = NULL;
+				if (!mpt_bad_naa)
+					mpt_bad_naa = nh;
+				if (prevnh)
+					prevnh->next = nh;
+
+				nh->NAA = source_naa; /* Set the S_NAA value. */
+				for (i = 0; i < FC_ALEN; i++)
+					nh->ieee[i] = fch->saddr[i];
+				dlprintk ((KERN_INFO "Got ARP from %02x:%02x:%02x:%02x:"
+					  "%02x:%02x with non-compliant S_NAA value.\n",
+					  fch->saddr[0], fch->saddr[1], fch->saddr[2],
+					  fch->saddr[3], fch->saddr[4],fch->saddr[5]));
+			} else {
+				printk (KERN_ERR "mptlan/type_trans: Unable to"
+					" kmalloc a NAA_Hosed struct.\n");
+			}
+		} else if (!found) {
+			printk (KERN_ERR "mptlan/type_trans: found not"
+				" set, but nh isn't null. Evil "
+				"funkiness abounds.\n");
+		}
+		write_unlock_irq(&bad_naa_lock);
+	}
+}
+#endif
+
+	/* Strip the SNAP header from ARP packets since we don't
+	 * pass them through to the 802.2/SNAP layers.
+	 */
+	if (fcllc->dsap == EXTENDED_SAP &&
+		(fcllc->ethertype == htons(ETH_P_IP) ||
+		 fcllc->ethertype == htons(ETH_P_ARP))) {
+		skb_pull(skb, sizeof(struct fcllc));
+		return fcllc->ethertype;
+	}
+
+	return htons(ETH_P_802_2);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptlan.h b/drivers/message/fusion/mptlan.h
new file mode 100644
index 0000000..0579042
--- /dev/null
+++ b/drivers/message/fusion/mptlan.h
@@ -0,0 +1,85 @@
+/* mptlan.h */
+
+#ifndef LINUX_MPTLAN_H_INCLUDED
+#define LINUX_MPTLAN_H_INCLUDED
+/*****************************************************************************/
+
+#if !defined(__GENKSYMS__)
+#include <linux/module.h>
+#endif
+
+#include <linux/netdevice.h>
+#include <linux/errno.h>
+// #include <linux/etherdevice.h>
+#include <linux/fcdevice.h>
+// #include <linux/fddidevice.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/spinlock.h>
+#include <linux/version.h>
+#include <linux/workqueue.h>
+#include <linux/delay.h>
+// #include <linux/trdevice.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+    /* Override mptbase.h by pre-defining these! */
+    #define MODULEAUTHOR "Noah Romer, Eddie C. Dost"
+
+#include "mptbase.h"
+
+/*****************************************************************************/
+#define LANAME		"Fusion MPT LAN driver"
+#define LANVER		MPT_LINUX_VERSION_COMMON
+
+#ifdef MODULE
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(LANAME);
+#endif
+/*****************************************************************************/
+
+#define MPT_LAN_MAX_BUCKETS_OUT 256
+#define MPT_LAN_BUCKET_THRESH	18 /* 9 buckets in one message */
+#define MPT_LAN_BUCKETS_REMAIN_MISMATCH_THRESH 10
+#define MPT_LAN_RX_COPYBREAK	200
+#define MPT_LAN_TX_TIMEOUT	(1*HZ)
+#define MPT_TX_MAX_OUT_LIM      127
+
+#define MPT_LAN_MIN_MTU		96		/* RFC2625 */
+#define MPT_LAN_MAX_MTU		65280		/* RFC2625 */
+#define MPT_LAN_MTU             13312		/* Max perf range + lower mem
+						   usage than 16128 */
+
+#define MPT_LAN_NAA_RFC2625     0x1
+#define MPT_LAN_NAA_QLOGIC      0x2
+
+/* MPT LAN Reset and Suspend Resource Flags Defines */
+
+#define MPT_LAN_RESOURCE_FLAG_RETURN_POSTED_BUCKETS    0x01
+#define MPT_LAN_RESOURCE_FLAG_RETURN_PEND_TRANSMITS    0x02
+
+/*****************************************************************************/
+#ifdef MPT_LAN_IO_DEBUG
+#define dioprintk(x)  printk x
+#else
+#define dioprintk(x)
+#endif
+
+#ifdef MPT_LAN_DEBUG
+#define dlprintk(x)  printk x
+#else
+#define dlprintk(x)
+#endif
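+
+/* Note: dioprintk()/dlprintk() take a doubly-parenthesized argument list,
+ * e.g. (illustrative) dlprintk((KERN_INFO "mptlan: %d buckets\n", count));
+ * when the corresponding debug macro is not defined, the entire argument
+ * list compiles away to nothing.
+ */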
+
+#define NETDEV_TO_LANPRIV_PTR(d)	((struct mpt_lan_priv *)(d)->priv)
+#define NETDEV_PTR_TO_IOC_NAME_s(d)	(NETDEV_TO_LANPRIV_PTR(d)->mpt_dev->name)
+#define IOC_AND_NETDEV_NAMES_s_s(d)	NETDEV_PTR_TO_IOC_NAME_s(d), (d)->name
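+/* IOC_AND_NETDEV_NAMES_s_s() expands to the comma-separated pair
+ * (IOC name, netdev name) and is intended to feed a "%s/%s"-style printk
+ * format; the other two macros above supply the lan-private pointer and the
+ * IOC name respectively.
+ */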
+
+/*****************************************************************************/
+#endif
+
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
new file mode 100644
index 0000000..c98d625
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.c
@@ -0,0 +1,6021 @@
+/*
+ *  linux/drivers/message/fusion/mptscsih.c
+ *      High performance SCSI / Fibre Channel SCSI Host device driver.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      A special thanks to Pamela Delaney (LSI Logic) for tons of work
+ *      and countless enhancements while adding support for the 1030
+ *      chip family.  Pam has been instrumental in the development of
+ *      the 2.xx.xx series fusion drivers, and her contributions are
+ *      far too numerous to hope to list in one place.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      (see mptbase.c)
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Original author: Steven J. Ralston
+ *  (mailto:sjralston1@netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptscsih.c,v 1.104 2002/12/03 21:26:34 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+#include "linux_compat.h"	/* linux-2.6 tweaks */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/errno.h>
+#include <linux/kdev_t.h>
+#include <linux/blkdev.h>
+#include <linux/delay.h>	/* for mdelay */
+#include <linux/interrupt.h>	/* needed for in_interrupt() proto */
+#include <linux/reboot.h>	/* notifier code */
+#include <linux/sched.h>
+#include <linux/workqueue.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+
+#include "mptbase.h"
+#include "mptscsih.h"
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define my_NAME		"Fusion MPT SCSI Host driver"
+#define my_VERSION	MPT_LINUX_VERSION_COMMON
+#define MYNAM		"mptscsih"
+
+MODULE_AUTHOR(MODULEAUTHOR);
+MODULE_DESCRIPTION(my_NAME);
+MODULE_LICENSE("GPL");
+
+/* Command line args */
+static int mpt_dv = MPTSCSIH_DOMAIN_VALIDATION;
+MODULE_PARM(mpt_dv, "i");
+MODULE_PARM_DESC(mpt_dv, " DV Algorithm: enhanced=1, basic=0 (default=MPTSCSIH_DOMAIN_VALIDATION=1)");
+
+static int mpt_width = MPTSCSIH_MAX_WIDTH;
+MODULE_PARM(mpt_width, "i");
+MODULE_PARM_DESC(mpt_width, " Max Bus Width: wide=1, narrow=0 (default=MPTSCSIH_MAX_WIDTH=1)");
+
+static int mpt_factor = MPTSCSIH_MIN_SYNC;
+MODULE_PARM(mpt_factor, "h");
+MODULE_PARM_DESC(mpt_factor, " Min Sync Factor (default=MPTSCSIH_MIN_SYNC=0x08)");
+
+static int mpt_saf_te = MPTSCSIH_SAF_TE;
+MODULE_PARM(mpt_saf_te, "i");
+MODULE_PARM_DESC(mpt_saf_te, " Force enabling SEP Processor: enable=1  (default=MPTSCSIH_SAF_TE=0)");
+
+static int mpt_pq_filter = 0;
+MODULE_PARM(mpt_pq_filter, "i");
+MODULE_PARM_DESC(mpt_pq_filter, " Enable peripheral qualifier filter: enable=1  (default=0)");
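+
+/* These are module parameters and may be set at load time, e.g.
+ * (illustrative values only):
+ *     modprobe mptscsih mpt_dv=1 mpt_width=1 mpt_factor=0x08 mpt_saf_te=0
+ */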
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+typedef struct _BIG_SENSE_BUF {
+	u8		data[MPT_SENSE_BUFFER_ALLOC];
+} BIG_SENSE_BUF;
+
+#define MPT_SCANDV_GOOD			(0x00000000) /* must be 0 */
+#define MPT_SCANDV_DID_RESET		(0x00000001)
+#define MPT_SCANDV_SENSE		(0x00000002)
+#define MPT_SCANDV_SOME_ERROR		(0x00000004)
+#define MPT_SCANDV_SELECTION_TIMEOUT	(0x00000008)
+#define MPT_SCANDV_ISSUE_SENSE		(0x00000010)
+#define MPT_SCANDV_FALLBACK		(0x00000020)
+
+#define MPT_SCANDV_MAX_RETRIES		(10)
+
+#define MPT_ICFLAG_BUF_CAP	0x01	/* ReadBuffer Read Capacity format */
+#define MPT_ICFLAG_ECHO		0x02	/* ReadBuffer Echo buffer format */
+#define MPT_ICFLAG_PHYS_DISK	0x04	/* Any SCSI IO but do Phys Disk Format */
+#define MPT_ICFLAG_TAGGED_CMD	0x08	/* Do tagged IO */
+#define MPT_ICFLAG_DID_RESET	0x20	/* Bus Reset occurred with this command */
+#define MPT_ICFLAG_RESERVED	0x40	/* Reserved has been issued */
+
+typedef struct _internal_cmd {
+	char		*data;		/* data pointer */
+	dma_addr_t	data_dma;	/* data dma address */
+	int		size;		/* transfer size */
+	u8		cmd;		/* SCSI Op Code */
+	u8		bus;		/* bus number */
+	u8		id;		/* SCSI ID (virtual) */
+	u8		lun;
+	u8		flags;		/* Bit Field - See above */
+	u8		physDiskNum;	/* Phys disk number, -1 else */
+	u8		rsvd2;
+	u8		rsvd;
+} INTERNAL_CMD;
+
+typedef struct _negoparms {
+	u8 width;
+	u8 offset;
+	u8 factor;
+	u8 flags;
+} NEGOPARMS;
+
+typedef struct _dv_parameters {
+	NEGOPARMS	 max;
+	NEGOPARMS	 now;
+	u8		 cmd;
+	u8		 id;
+	u16		 pad1;
+} DVPARAMETERS;
+
+
+/*
+ *  Other private/forward protos...
+ */
+static int	mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+static void	mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq);
+static int	mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+
+static int	mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
+				 SCSIIORequest_t *pReq, int req_idx);
+static void	mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx);
+static void	copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply);
+static int	mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd);
+static int	mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout );
+static u32	SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc);
+
+static int	mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
+static int	mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout);
+
+static int	mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
+static int	mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
+
+static void	mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen);
+static void	mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56);
+static void	mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq);
+static void	mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags);
+static void	mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id);
+static int	mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target, int flags);
+static int	mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus);
+static int	mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *r);
+static void	mptscsih_timer_expired(unsigned long data);
+static int	mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *iocmd);
+static int	mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum);
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+static int	mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io);
+static void	mptscsih_domainValidation(void *hd);
+static int	mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id);
+static void	mptscsih_qas_check(MPT_SCSI_HOST *hd, int id);
+static int	mptscsih_doDv(MPT_SCSI_HOST *hd, int channel, int target);
+static void	mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage);
+static void	mptscsih_fillbuf(char *buffer, int size, int index, int width);
+#endif
+/* module entry point */
+static int  __init   mptscsih_init  (void);
+static void __exit   mptscsih_exit  (void);
+
+static int  mptscsih_probe (struct pci_dev *, const struct pci_device_id *);
+static void mptscsih_remove(struct pci_dev *);
+static void mptscsih_shutdown(struct device *);
+#ifdef CONFIG_PM
+static int mptscsih_suspend(struct pci_dev *pdev, u32 state);
+static int mptscsih_resume(struct pci_dev *pdev);
+#endif
+
+
+/*
+ *	Private data...
+ */
+
+static int	mpt_scsi_hosts = 0;
+
+static int	ScsiDoneCtx = -1;
+static int	ScsiTaskCtx = -1;
+static int	ScsiScanDvCtx = -1; /* Used only for bus scan and dv */
+
+#define SNS_LEN(scp)	sizeof((scp)->sense_buffer)
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+/*
+ * Domain Validation task structure
+ */
+static DEFINE_SPINLOCK(dvtaskQ_lock);
+static int dvtaskQ_active = 0;
+static int dvtaskQ_release = 0;
+static struct work_struct	mptscsih_dvTask;
+#endif
+
+/*
+ * Wait Queue setup
+ */
+static DECLARE_WAIT_QUEUE_HEAD (scandv_waitq);
+static int scandv_wait_done = 1;
+
+
+/* Driver command line structure
+ */
+static struct scsi_host_template driver_template;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_add_sge - Place a simple SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@flagslength: SGE flags and data transfer length
+ *	@dma_addr: Physical address
+ *
+ *	This routine writes a single, simple 32-bit or 64-bit SGE at the
+ *	virtual address pAddr.
+ */
+static inline void
+mptscsih_add_sge(char *pAddr, u32 flagslength, dma_addr_t dma_addr)
+{
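+	/* sizeof(dma_addr_t) is a compile-time constant, so the compiler can
+	 * drop whichever branch below does not apply: 64-bit SGEs on
+	 * platforms with 64-bit DMA addresses, 32-bit SGEs otherwise.
+	 */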
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
+		u32 tmp = dma_addr & 0xFFFFFFFF;
+
+		pSge->FlagsLength = cpu_to_le32(flagslength);
+		pSge->Address.Low = cpu_to_le32(tmp);
+		tmp = (u32) ((u64)dma_addr >> 32);
+		pSge->Address.High = cpu_to_le32(tmp);
+
+	} else {
+		SGESimple32_t *pSge = (SGESimple32_t *) pAddr;
+		pSge->FlagsLength = cpu_to_le32(flagslength);
+		pSge->Address = cpu_to_le32(dma_addr);
+	}
+} /* mptscsih_add_sge() */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_add_chain - Place a chain SGE at address pAddr.
+ *	@pAddr: virtual address for SGE
+ *	@next: nextChainOffset value (u32's)
+ *	@length: length of next SGL segment
+ *	@dma_addr: Physical address
+ *
+ *	This routine writes a 32-bit or 64-bit chain SGE at the virtual
+ *	address pAddr, pointing to the next SGL segment.
+ */
+static inline void
+mptscsih_add_chain(char *pAddr, u8 next, u16 length, dma_addr_t dma_addr)
+{
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		SGEChain64_t *pChain = (SGEChain64_t *) pAddr;
+		u32 tmp = dma_addr & 0xFFFFFFFF;
+
+		pChain->Length = cpu_to_le16(length);
+		pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
+
+		pChain->NextChainOffset = next;
+
+		pChain->Address.Low = cpu_to_le32(tmp);
+		tmp = (u32) ((u64)dma_addr >> 32);
+		pChain->Address.High = cpu_to_le32(tmp);
+	} else {
+		SGEChain32_t *pChain = (SGEChain32_t *) pAddr;
+		pChain->Length = cpu_to_le16(length);
+		pChain->Flags = MPI_SGE_FLAGS_CHAIN_ELEMENT | mpt_addr_size();
+		pChain->NextChainOffset = next;
+		pChain->Address = cpu_to_le32(dma_addr);
+	}
+} /* mptscsih_add_chain() */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_getFreeChainBuffer - Function to get a free chain
+ *	buffer from the MPT adapter's FreeChainQ.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@retIndex: Index of the free chain buffer, or MPT_HOST_NO_CHAIN on failure (output)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static inline int
+mptscsih_getFreeChainBuffer(MPT_ADAPTER *ioc, int *retIndex)
+{
+	MPT_FRAME_HDR *chainBuf;
+	unsigned long flags;
+	int rc;
+	int chain_idx;
+
+	dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer called\n",
+			ioc->name));
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	if (!list_empty(&ioc->FreeChainQ)) {
+		int offset;
+
+		chainBuf = list_entry(ioc->FreeChainQ.next, MPT_FRAME_HDR,
+				u.frame.linkage.list);
+		list_del(&chainBuf->u.frame.linkage.list);
+		offset = (u8 *)chainBuf - (u8 *)ioc->ChainBuffer;
+		chain_idx = offset / ioc->req_sz;
+		rc = SUCCESS;
+		dsgprintk((MYIOC_s_INFO_FMT "getFreeChainBuffer (index %d), got buf=%p\n",
+			ioc->name, chain_idx, chainBuf));
+	} else {
+		rc = FAILED;
+		chain_idx = MPT_HOST_NO_CHAIN;
+		dfailprintk((MYIOC_s_ERR_FMT "getFreeChainBuffer failed\n",
+			ioc->name));
+	}
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	*retIndex = chain_idx;
+	return rc;
+} /* mptscsih_getFreeChainBuffer() */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_AddSGE - Add a SGE (plus chain buffers) to the
+ *	SCSIIORequest_t Message Frame.
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@SCpnt: Pointer to scsi_cmnd structure
+ *	@pReq: Pointer to SCSIIORequest_t structure
+ *	@req_idx: Index of the request frame
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_AddSGE(MPT_ADAPTER *ioc, struct scsi_cmnd *SCpnt,
+		SCSIIORequest_t *pReq, int req_idx)
+{
+	char 	*psge;
+	char	*chainSge;
+	struct scatterlist *sg;
+	int	 frm_sz;
+	int	 sges_left, sg_done;
+	int	 chain_idx = MPT_HOST_NO_CHAIN;
+	int	 sgeOffset;
+	int	 numSgeSlots, numSgeThisFrame;
+	u32	 sgflags, sgdir, thisxfer = 0;
+	int	 chain_dma_off = 0;
+	int	 newIndex;
+	int	 ii;
+	dma_addr_t v2;
+	u32	RequestNB;
+
+	sgdir = le32_to_cpu(pReq->Control) & MPI_SCSIIO_CONTROL_DATADIRECTION_MASK;
+	if (sgdir == MPI_SCSIIO_CONTROL_WRITE)  {
+		sgdir = MPT_TRANSFER_HOST_TO_IOC;
+	} else {
+		sgdir = MPT_TRANSFER_IOC_TO_HOST;
+	}
+
+	psge = (char *) &pReq->SGL;
+	frm_sz = ioc->req_sz;
+
+	/* Map the data portion, if any.
+	 * sges_left  = 0 if no data transfer.
+	 */
+	if ( (sges_left = SCpnt->use_sg) ) {
+		sges_left = pci_map_sg(ioc->pcidev,
+			       (struct scatterlist *) SCpnt->request_buffer,
+ 			       SCpnt->use_sg,
+			       SCpnt->sc_data_direction);
+		if (sges_left == 0)
+			return FAILED;
+	} else if (SCpnt->request_bufflen) {
+		SCpnt->SCp.dma_handle = pci_map_single(ioc->pcidev,
+				      SCpnt->request_buffer,
+				      SCpnt->request_bufflen,
+				      SCpnt->sc_data_direction);
+		dsgprintk((MYIOC_s_INFO_FMT "SG: non-SG for %p, len=%d\n",
+				ioc->name, SCpnt, SCpnt->request_bufflen));
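+		/* 0xD1000000 carries the SGE flag byte (simple element, last
+		 * element, end of buffer, end of list) in the upper bits of
+		 * FlagsLength; the low bits hold the transfer length. */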
+		mptscsih_add_sge((char *) &pReq->SGL,
+			0xD1000000|MPT_SGE_FLAGS_ADDRESSING|sgdir|SCpnt->request_bufflen,
+			SCpnt->SCp.dma_handle);
+
+		return SUCCESS;
+	}
+
+	/* Handle the SG case.
+	 */
+	sg = (struct scatterlist *) SCpnt->request_buffer;
+	sg_done  = 0;
+	sgeOffset = sizeof(SCSIIORequest_t) - sizeof(SGE_IO_UNION);
+	chainSge = NULL;
+
+	/* Prior to entering this loop - the following must be set
+	 * current MF:  sgeOffset (bytes)
+	 *              chainSge (Null if original MF is not a chain buffer)
+	 *              sg_done (num SGE done for this MF)
+	 */
+
+nextSGEset:
+	numSgeSlots = ((frm_sz - sgeOffset) / (sizeof(u32) + sizeof(dma_addr_t)) );
+	numSgeThisFrame = (sges_left < numSgeSlots) ? sges_left : numSgeSlots;
+
+	sgflags = MPT_SGE_FLAGS_SIMPLE_ELEMENT | MPT_SGE_FLAGS_ADDRESSING | sgdir;
+
+	/* Get first (num - 1) SG elements
+	 * Skip any SG entries with a length of 0
+	 * NOTE: at finish, sg and psge pointed to NEXT data/location positions
+	 */
+	for (ii=0; ii < (numSgeThisFrame-1); ii++) {
+		thisxfer = sg_dma_len(sg);
+		if (thisxfer == 0) {
+			sg ++; /* Get next SG element from the OS */
+			sg_done++;
+			continue;
+		}
+
+		v2 = sg_dma_address(sg);
+		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
+
+		sg++;		/* Get next SG element from the OS */
+		psge += (sizeof(u32) + sizeof(dma_addr_t));
+		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+		sg_done++;
+	}
+
+	if (numSgeThisFrame == sges_left) {
+		/* Add last element, end of buffer and end of list flags.
+		 */
+		sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT |
+				MPT_SGE_FLAGS_END_OF_BUFFER |
+				MPT_SGE_FLAGS_END_OF_LIST;
+
+		/* Add last SGE and set termination flags.
+		 * Note: Last SGE may have a length of 0 - which should be ok.
+		 */
+		thisxfer = sg_dma_len(sg);
+
+		v2 = sg_dma_address(sg);
+		mptscsih_add_sge(psge, sgflags | thisxfer, v2);
+		/*
+		sg++;
+		psge += (sizeof(u32) + sizeof(dma_addr_t));
+		*/
+		sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+		sg_done++;
+
+		if (chainSge) {
+			/* The current buffer is a chain buffer,
+			 * but there is not another one.
+			 * Update the chain element
+			 * Offset and Length fields.
+			 */
+			mptscsih_add_chain((char *)chainSge, 0, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+		} else {
+			/* The current buffer is the original MF
+			 * and there is no Chain buffer.
+			 */
+			pReq->ChainOffset = 0;
+			RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor)  + 1) & 0x03;
+			dsgprintk((MYIOC_s_ERR_FMT 
+			    "Single Buffer RequestNB=%x, sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
+			ioc->RequestNB[req_idx] = RequestNB;
+		}
+	} else {
+		/* At least one chain buffer is needed.
+		 * Complete the first MF
+		 *  - last SGE element, set the LastElement bit
+		 *  - set ChainOffset (words) for orig MF
+		 *             (OR finish previous MF chain buffer)
+		 *  - update MFStructPtr ChainIndex
+		 *  - Populate chain element
+		 * Also
+		 * Loop until done.
+		 */
+
+		dsgprintk((MYIOC_s_INFO_FMT "SG: Chain Required! sg done %d\n",
+				ioc->name, sg_done));
+
+		/* Set LAST_ELEMENT flag for last non-chain element
+		 * in the buffer. Since psge points at the NEXT
+		 * SGE element, go back one SGE element, update the flags
+		 * and reset the pointer. (Note: sgflags & thisxfer are already
+		 * set properly).
+		 */
+		if (sg_done) {
+			u32 *ptmp = (u32 *) (psge - (sizeof(u32) + sizeof(dma_addr_t)));
+			sgflags = le32_to_cpu(*ptmp);
+			sgflags |= MPT_SGE_FLAGS_LAST_ELEMENT;
+			*ptmp = cpu_to_le32(sgflags);
+		}
+
+		if (chainSge) {
+			/* The current buffer is a chain buffer.
+			 * chainSge points to the previous Chain Element.
+			 * Update its chain element Offset and Length (must
+			 * include chain element size) fields.
+			 * Old chain element is now complete.
+			 */
+			u8 nextChain = (u8) (sgeOffset >> 2);
+			sgeOffset += (sizeof(u32) + sizeof(dma_addr_t));
+			mptscsih_add_chain((char *)chainSge, nextChain, sgeOffset, ioc->ChainBufferDMA + chain_dma_off);
+		} else {
+			/* The original MF buffer requires a chain buffer -
+			 * set the offset.
+			 * Last element in this MF is a chain element.
+			 */
+			pReq->ChainOffset = (u8) (sgeOffset >> 2);
+			RequestNB = (((sgeOffset - 1) >> ioc->NBShiftFactor)  + 1) & 0x03;
+			dsgprintk((MYIOC_s_ERR_FMT "Chain Buffer Needed, RequestNB=%x sgeOffset=%d\n", ioc->name, RequestNB, sgeOffset));
+			ioc->RequestNB[req_idx] = RequestNB;
+		}
+
+		sges_left -= sg_done;
+
+
+		/* NOTE: psge points to the beginning of the chain element
+		 * in current buffer. Get a chain buffer.
+		 */
+		dsgprintk((MYIOC_s_INFO_FMT 
+		    "calling getFreeChainBuffer SCSI cmd=%02x (%p)\n",
+		    ioc->name, pReq->CDB[0], SCpnt));
+		if ((mptscsih_getFreeChainBuffer(ioc, &newIndex)) == FAILED)
+			return FAILED;
+
+		/* Update the tracking arrays.
+		 * If chainSge == NULL, update ReqToChain, else ChainToChain
+		 */
+		if (chainSge) {
+			ioc->ChainToChain[chain_idx] = newIndex;
+		} else {
+			ioc->ReqToChain[req_idx] = newIndex;
+		}
+		chain_idx = newIndex;
+		chain_dma_off = ioc->req_sz * chain_idx;
+
+		/* Populate the chainSGE for the current buffer.
+		 * - Set chain buffer pointer to psge and fill
+		 *   out the Address and Flags fields.
+		 */
+		chainSge = (char *) psge;
+		dsgprintk((KERN_INFO "  Current buff @ %p (index 0x%x)",
+				psge, req_idx));
+
+		/* Start the SGE for the next buffer
+		 */
+		psge = (char *) (ioc->ChainBuffer + chain_dma_off);
+		sgeOffset = 0;
+		sg_done = 0;
+
+		dsgprintk((KERN_INFO "  Chain buff @ %p (index 0x%x)\n",
+				psge, chain_idx));
+
+		/* Start the SGE for the next buffer
+		 */
+
+		goto nextSGEset;
+	}
+
+	return SUCCESS;
+} /* mptscsih_AddSGE() */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_io_done - Main SCSI IO callback routine registered to
+ *	Fusion MPT (base) driver
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: Pointer to original MPT request frame
+ *	@mr: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ *	This routine is called from mpt.c::mpt_interrupt() at the completion
+ *	of any SCSI IO request.
+ *	This routine is registered with the Fusion MPT (base) driver at driver
+ *	load/init time via the mpt_register() API call.
+ *
+ *	Returns 1 indicating alloc'd request frame ptr should be freed.
+ */
+static int
+mptscsih_io_done(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+	struct scsi_cmnd	*sc;
+	MPT_SCSI_HOST	*hd;
+	SCSIIORequest_t	*pScsiReq;
+	SCSIIOReply_t	*pScsiReply;
+	u16		 req_idx;
+
+	hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+
+	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+	sc = hd->ScsiLookup[req_idx];
+	if (sc == NULL) {
+		MPIHeader_t *hdr = (MPIHeader_t *)mf;
+
+		/* Remark: writeSDP1 will use the ScsiDoneCtx
+		 * If a SCSI I/O cmd, device disabled by OS and
+		 * completion done. Cannot touch sc struct. Just free mem.
+		 */
+		if (hdr->Function == MPI_FUNCTION_SCSI_IO_REQUEST)
+			printk(MYIOC_s_ERR_FMT "NULL ScsiCmd ptr!\n",
+			ioc->name);
+
+		mptscsih_freeChainBuffers(ioc, req_idx);
+		return 1;
+	}
+
+	dmfprintk((MYIOC_s_INFO_FMT
+		"ScsiDone (mf=%p,mr=%p,sc=%p,idx=%d)\n",
+		ioc->name, mf, mr, sc, req_idx));
+
+	sc->result = DID_OK << 16;		/* Set default reply as OK */
+	pScsiReq = (SCSIIORequest_t *) mf;
+	pScsiReply = (SCSIIOReply_t *) mr;
+
+	if (pScsiReply == NULL) {
+		/* special context reply handling */
+		;
+	} else {
+		u32	 xfer_cnt;
+		u16	 status;
+		u8	 scsi_state, scsi_status;
+
+		status = le16_to_cpu(pScsiReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+		scsi_state = pScsiReply->SCSIState;
+		scsi_status = pScsiReply->SCSIStatus;
+		xfer_cnt = le32_to_cpu(pScsiReply->TransferCount);
+		sc->resid = sc->request_bufflen - xfer_cnt;
+
+		dreplyprintk((KERN_NOTICE "Reply ha=%d id=%d lun=%d:\n"
+			"IOCStatus=%04xh SCSIState=%02xh SCSIStatus=%02xh\n"
+			"resid=%d bufflen=%d xfer_cnt=%d\n",
+			ioc->id, pScsiReq->TargetID, pScsiReq->LUN[1],
+			status, scsi_state, scsi_status, sc->resid, 
+			sc->request_bufflen, xfer_cnt));
+
+		if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID)
+			copy_sense_data(sc, hd, mf, pScsiReply);
+                
+		/*
+		 *  Look for + dump FCP ResponseInfo[]!
+		 */
+		if (scsi_state & MPI_SCSI_STATE_RESPONSE_INFO_VALID) {
+			printk(KERN_NOTICE "  FCP_ResponseInfo=%08xh\n",
+			le32_to_cpu(pScsiReply->ResponseInfo));
+		}
+
+		switch(status) {
+		case MPI_IOCSTATUS_BUSY:			/* 0x0002 */
+			/* CHECKME!
+			 * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
+			 * But not: DID_BUS_BUSY lest one risk
+			 * killing interrupt handler:-(
+			 */
+			sc->result = SAM_STAT_BUSY;
+			break;
+
+		case MPI_IOCSTATUS_SCSI_INVALID_BUS:		/* 0x0041 */
+		case MPI_IOCSTATUS_SCSI_INVALID_TARGETID:	/* 0x0042 */
+			sc->result = DID_BAD_TARGET << 16;
+			break;
+
+		case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
+			/* Spoof to SCSI Selection Timeout! */
+			sc->result = DID_NO_CONNECT << 16;
+
+			if (hd->sel_timeout[pScsiReq->TargetID] < 0xFFFF)
+				hd->sel_timeout[pScsiReq->TargetID]++;
+			break;
+
+		case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
+		case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
+		case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
+			/* Linux handles an unsolicited DID_RESET better
+			 * than an unsolicited DID_ABORT.
+			 */
+			sc->result = DID_RESET << 16;
+
+			/* GEM Workaround. */
+			if (ioc->bus_type == SCSI)
+				mptscsih_no_negotiate(hd, sc->device->id);
+			break;
+
+		case MPI_IOCSTATUS_SCSI_RESIDUAL_MISMATCH:	/* 0x0049 */
+			if ( xfer_cnt >= sc->underflow ) {
+				/* Sufficient data transfer occurred */
+				sc->result = (DID_OK << 16) | scsi_status;
+			} else if ( xfer_cnt == 0 ) {
+				/* A CRC Error causes this condition; retry */ 
+				sc->result = (DRIVER_SENSE << 24) | (DID_OK << 16) | 
+					(CHECK_CONDITION << 1);
+				sc->sense_buffer[0] = 0x70;
+				sc->sense_buffer[2] = NO_SENSE;
+				sc->sense_buffer[12] = 0;
+				sc->sense_buffer[13] = 0;
+			} else {
+				sc->result = DID_SOFT_ERROR << 16;
+			}
+			dreplyprintk((KERN_NOTICE "RESIDUAL_MISMATCH: result=%x on id=%d\n", sc->result, sc->target));
+			break;
+		
+		case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
+			/*
+			 *  Do upfront check for valid SenseData and give it
+			 *  precedence!
+			 */
+			sc->result = (DID_OK << 16) | scsi_status;
+			if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+				/* Have already saved the status and sense data
+				 */
+				;
+			} else {
+				if (xfer_cnt < sc->underflow) {
+					sc->result = DID_SOFT_ERROR << 16;
+				}
+				if (scsi_state & (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)) {
+					/* What to do?
+				 	*/
+					sc->result = DID_SOFT_ERROR << 16;
+				}
+				else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
+					/*  Not real sure here either...  */
+					sc->result = DID_RESET << 16;
+				}
+			}
+
+			dreplyprintk((KERN_NOTICE "  sc->underflow={report ERR if < %02xh bytes xfer'd}\n",
+					sc->underflow));
+			dreplyprintk((KERN_NOTICE "  ActBytesXferd=%02xh\n", xfer_cnt));
+			/* Report Queue Full
+			 */
+			if (scsi_status == MPI_SCSI_STATUS_TASK_SET_FULL)
+				mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+			
+			break;
+
+		case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
+		case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
+			scsi_status = pScsiReply->SCSIStatus;
+			sc->result = (DID_OK << 16) | scsi_status;
+			if (scsi_state == 0) {
+				;
+			} else if (scsi_state & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+				/*
+				 * If running against circa 200003dd 909 MPT f/w,
+				 * may get this (AUTOSENSE_VALID) for actual TASK_SET_FULL
+				 * (QUEUE_FULL) returned from device! --> get 0x0000?128
+				 * and with SenseBytes set to 0.
+				 */
+				if (pScsiReply->SCSIStatus == MPI_SCSI_STATUS_TASK_SET_FULL)
+					mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+
+			}
+			else if (scsi_state &
+			         (MPI_SCSI_STATE_AUTOSENSE_FAILED | MPI_SCSI_STATE_NO_SCSI_STATUS)
+			   ) {
+				/*
+				 * What to do?
+				 */
+				sc->result = DID_SOFT_ERROR << 16;
+			}
+			else if (scsi_state & MPI_SCSI_STATE_TERMINATED) {
+				/*  Not real sure here either...  */
+				sc->result = DID_RESET << 16;
+			}
+			else if (scsi_state & MPI_SCSI_STATE_QUEUE_TAG_REJECTED) {
+				/* Device Inq. data indicates that it supports
+				 * QTags, but rejects QTag messages.
+				 * This command completed OK.
+				 *
+				 * Not real sure here either so do nothing...  */
+			}
+
+			if (sc->result == MPI_SCSI_STATUS_TASK_SET_FULL)
+				mptscsih_report_queue_full(sc, pScsiReply, pScsiReq);
+
+			/* Add handling of:
+			 * Reservation Conflict, Busy,
+			 * Command Terminated, CHECK
+			 */
+			break;
+
+		case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
+			sc->result = DID_SOFT_ERROR << 16;
+			break;
+
+		case MPI_IOCSTATUS_INVALID_FUNCTION:		/* 0x0001 */
+		case MPI_IOCSTATUS_INVALID_SGL:			/* 0x0003 */
+		case MPI_IOCSTATUS_INTERNAL_ERROR:		/* 0x0004 */
+		case MPI_IOCSTATUS_RESERVED:			/* 0x0005 */
+		case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
+		case MPI_IOCSTATUS_INVALID_FIELD:		/* 0x0007 */
+		case MPI_IOCSTATUS_INVALID_STATE:		/* 0x0008 */
+		case MPI_IOCSTATUS_SCSI_DATA_OVERRUN:		/* 0x0044 */
+		case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
+		case MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED:	/* 0x004A */
+		default:
+			/*
+			 * What to do?
+			 */
+			sc->result = DID_SOFT_ERROR << 16;
+			break;
+
+		}	/* switch(status) */
+
+		dreplyprintk((KERN_NOTICE "  sc->result is %08xh\n", sc->result));
+	} /* end of address reply case */
+
+	/* Unmap the DMA buffers, if any. */
+	if (sc->use_sg) {
+		pci_unmap_sg(ioc->pcidev, (struct scatterlist *) sc->request_buffer,
+			    sc->use_sg, sc->sc_data_direction);
+	} else if (sc->request_bufflen) {
+		pci_unmap_single(ioc->pcidev, sc->SCp.dma_handle,
+				sc->request_bufflen, sc->sc_data_direction);
+	}
+
+	hd->ScsiLookup[req_idx] = NULL;
+
+	sc->scsi_done(sc);		/* Issue the command callback */
+
+	/* Free Chain buffers */
+	mptscsih_freeChainBuffers(ioc, req_idx);
+	return 1;
+}
+
+
+/*
+ *	mptscsih_flush_running_cmds - Walk the ScsiLookup table and complete
+ *		each outstanding command back to the OS.
+ *		Called only when recovering from a FW reload.
+ *	@hd: Pointer to a SCSI HOST structure
+ *
+ *	Returns: None.
+ *
+ *	Must be called while new I/Os are being queued.
+ */
+static void
+mptscsih_flush_running_cmds(MPT_SCSI_HOST *hd)
+{
+	MPT_ADAPTER *ioc = hd->ioc;
+	struct scsi_cmnd	*SCpnt;
+	MPT_FRAME_HDR	*mf;
+	int		 ii;
+	int		 max = ioc->req_depth;
+
+	dprintk((KERN_INFO MYNAM ": flush_ScsiLookup called\n"));
+	for (ii= 0; ii < max; ii++) {
+		if ((SCpnt = hd->ScsiLookup[ii]) != NULL) {
+
+			/* Command found.
+			 */
+
+			/* Null ScsiLookup index
+			 */
+			hd->ScsiLookup[ii] = NULL;
+
+			mf = MPT_INDEX_2_MFPTR(ioc, ii);
+			dmfprintk(( "flush: ScsiDone (mf=%p,sc=%p)\n",
+					mf, SCpnt));
+
+			/* Set status, free OS resources (SG DMA buffers)
+			 * Do OS callback
+			 * Free driver resources (chain, msg buffers)
+			 */
+			if (scsi_device_online(SCpnt->device)) {
+				if (SCpnt->use_sg) {
+					pci_unmap_sg(ioc->pcidev,
+						(struct scatterlist *) SCpnt->request_buffer,
+						SCpnt->use_sg,
+						SCpnt->sc_data_direction);
+				} else if (SCpnt->request_bufflen) {
+					pci_unmap_single(ioc->pcidev,
+						SCpnt->SCp.dma_handle,
+						SCpnt->request_bufflen,
+						SCpnt->sc_data_direction);
+				}
+			}
+			SCpnt->result = DID_RESET << 16;
+			SCpnt->host_scribble = NULL;
+
+			/* Free Chain buffers */
+			mptscsih_freeChainBuffers(ioc, ii);
+
+			/* Free Message frames */
+			mpt_free_msg_frame(ioc, mf);
+
+			SCpnt->scsi_done(SCpnt);	/* Issue the command callback */
+		}
+	}
+
+	return;
+}
+
+/*
+ *	mptscsih_search_running_cmds - Delete any commands associated
+ *		with the specified target and lun. Function called only
+ *		when a lun is disabled by the mid-layer.
+ *		Do NOT access the referenced scsi_cmnd structure or its
+ *		members; doing so will cause either a paging or NULL ptr error.
+ *	@hd: Pointer to a SCSI HOST structure
+ *	@target: target id
+ *	@lun: lun
+ *
+ *	Returns: None.
+ *
+ *	Called from slave_destroy.
+ */
+static void
+mptscsih_search_running_cmds(MPT_SCSI_HOST *hd, uint target, uint lun)
+{
+	SCSIIORequest_t	*mf = NULL;
+	int		 ii;
+	int		 max = hd->ioc->req_depth;
+
+	dsprintk((KERN_INFO MYNAM ": search_running target %d lun %d max %d\n",
+			target, lun, max));
+
+	for (ii=0; ii < max; ii++) {
+		if (hd->ScsiLookup[ii] != NULL) {
+
+			mf = (SCSIIORequest_t *)MPT_INDEX_2_MFPTR(hd->ioc, ii);
+
+			dsprintk(( "search_running: found (sc=%p, mf = %p) target %d, lun %d \n",
+					hd->ScsiLookup[ii], mf, mf->TargetID, mf->LUN[1]));
+
+			if ((mf->TargetID != ((u8)target)) || (mf->LUN[1] != ((u8) lun)))
+				continue;
+
+			/* Cleanup
+			 */
+			hd->ScsiLookup[ii] = NULL;
+			mptscsih_freeChainBuffers(hd->ioc, ii);
+			mpt_free_msg_frame(hd->ioc, (MPT_FRAME_HDR *)mf);
+		}
+	}
+
+	return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Hack! It might be nice to report if a device is returning QUEUE_FULL
+ *  but maybe not each and every time...
+ */
+static long last_queue_full = 0;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_report_queue_full - Report QUEUE_FULL status returned
+ *	from a SCSI target device.
+ *	@sc: Pointer to scsi_cmnd structure
+ *	@pScsiReply: Pointer to SCSIIOReply_t
+ *	@pScsiReq: Pointer to original SCSI request
+ *
+ *	This routine periodically reports QUEUE_FULL status returned from a
+ *	SCSI target device.  It reports this to the console via kernel
+ *	printk() API call, not more than once every 10 seconds.
+ */
+static void
+mptscsih_report_queue_full(struct scsi_cmnd *sc, SCSIIOReply_t *pScsiReply, SCSIIORequest_t *pScsiReq)
+{
+	long time = jiffies;
+
+	if (time - last_queue_full > 10 * HZ) {
+		char *ioc_str = "ioc?";
+
+		if (sc->device && sc->device->host != NULL && sc->device->host->hostdata != NULL)
+			ioc_str = ((MPT_SCSI_HOST *)sc->device->host->hostdata)->ioc->name;
+		dprintk((MYIOC_s_WARN_FMT "Device (%d:%d:%d) reported QUEUE_FULL!\n",
+				ioc_str, 0, sc->device->id, sc->device->lun));
+		last_queue_full = time;
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static char *info_kbuf = NULL;
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_probe - Installs scsi devices per bus.
+ *	@pdev: Pointer to pci_dev structure
+ *
+ *	Returns 0 for success, non-zero for failure.
+ *
+ */
+
+static int
+mptscsih_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+	struct Scsi_Host	*sh;
+	MPT_SCSI_HOST		*hd;
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+	unsigned long		 flags;
+	int			 sz, ii;
+	int			 numSGE = 0;
+	int			 scale;
+	int			 ioc_cap;
+	u8			*mem;
+	int			error=0;
+
+
+	/* 20010202 -sralston
+	 *  Added sanity check on readiness of the MPT adapter.
+	 */
+	if (ioc->last_state != MPI_IOC_STATE_OPERATIONAL) {
+		printk(MYIOC_s_WARN_FMT
+		  "Skipping because it's not operational!\n",
+		  ioc->name);
+		return -ENODEV;
+	}
+
+	if (!ioc->active) {
+		printk(MYIOC_s_WARN_FMT "Skipping because it's disabled!\n",
+		  ioc->name);
+		return -ENODEV;
+	}
+
+	/*  Sanity check - ensure at least 1 port is INITIATOR capable
+	 */
+	ioc_cap = 0;
+	for (ii=0; ii < ioc->facts.NumberOfPorts; ii++) {
+		if (ioc->pfacts[ii].ProtocolFlags &
+		    MPI_PORTFACTS_PROTOCOL_INITIATOR)
+			ioc_cap ++;
+	}
+
+	if (!ioc_cap) {
+		printk(MYIOC_s_WARN_FMT
+			"Skipping ioc=%p because SCSI Initiator mode is NOT enabled!\n",
+			ioc->name, ioc);
+		return -ENODEV;
+	}
+
+	sh = scsi_host_alloc(&driver_template, sizeof(MPT_SCSI_HOST));
+
+	if (!sh) {
+		printk(MYIOC_s_WARN_FMT
+			"Unable to register controller with SCSI subsystem\n",
+			ioc->name);
+		return -1;
+	}
+
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+
+	/* Attach the SCSI Host to the IOC structure
+	 */
+	ioc->sh = sh;
+
+	sh->io_port = 0;
+	sh->n_io_port = 0;
+	sh->irq = 0;
+
+	/* set 16 byte cdb's */
+	sh->max_cmd_len = 16;
+
+	/* Yikes!  This is important!
+	 * Otherwise, by default, linux
+	 * only scans target IDs 0-7!
+	 * pfactsN->MaxDevices unreliable
+	 * (not supported in early
+	 *	versions of the FW).
+	 * max_id = 1 + actual max id,
+	 * max_lun = 1 + actual last lun,
+	 *	see hosts.h :o(
+	 */
+	if (ioc->bus_type == SCSI) {
+		sh->max_id = MPT_MAX_SCSI_DEVICES;
+	} else {
+	/* For FC, increase the queue depth
+	 * from MPT_SCSI_CAN_QUEUE (31)
+	 * to MPT_FC_CAN_QUEUE (63).
+	 */
+		sh->can_queue = MPT_FC_CAN_QUEUE;
+		sh->max_id =
+		  MPT_MAX_FC_DEVICES<256 ? MPT_MAX_FC_DEVICES : 255;
+	}
+		
+	sh->max_lun = MPT_LAST_LUN + 1;
+	sh->max_channel = 0;
+	sh->this_id = ioc->pfacts[0].PortSCSIID;
+		
+	/* Required entry.
+	 */
+	sh->unique_id = ioc->id;
+
+	/* Verify that we won't exceed the maximum
+	 * number of chain buffers
+	 * We can optimize:  ZZ = req_sz/sizeof(SGE)
+	 * For 32bit SGE's:
+	 *  numSGE = 1 + (ZZ-1)*(maxChain -1) + ZZ
+	 *               + (req_sz - 64)/sizeof(SGE)
+	 * A slightly different algorithm is required for
+	 * 64bit SGEs.
+	 */
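+	/* Worked example (illustrative numbers only, not from any particular
+	 * IOC): with 64-bit SGEs (12 bytes each), req_sz = 128 and
+	 * MaxChainDepth = 64, scale = 128/12 = 10 and
+	 * numSGE = 9 * 63 + 10 + (128 - 60)/12 = 582.
+	 */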
+	scale = ioc->req_sz/(sizeof(dma_addr_t) + sizeof(u32));
+	if (sizeof(dma_addr_t) == sizeof(u64)) {
+		numSGE = (scale - 1) *
+		  (ioc->facts.MaxChainDepth-1) + scale +
+		  (ioc->req_sz - 60) / (sizeof(dma_addr_t) +
+		  sizeof(u32));
+	} else {
+		numSGE = 1 + (scale - 1) *
+		  (ioc->facts.MaxChainDepth-1) + scale +
+		  (ioc->req_sz - 64) / (sizeof(dma_addr_t) +
+		  sizeof(u32));
+	}
+		
+	if (numSGE < sh->sg_tablesize) {
+		/* Reset this value */
+		dprintk((MYIOC_s_INFO_FMT
+		  "Resetting sg_tablesize to %d from %d\n",
+		  ioc->name, numSGE, sh->sg_tablesize));
+		sh->sg_tablesize = numSGE;
+	}
+
+	/* Set the pci device pointer in Scsi_Host structure.
+	 */
+	scsi_set_device(sh, &ioc->pcidev->dev);
+
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+	hd = (MPT_SCSI_HOST *) sh->hostdata;
+	hd->ioc = ioc;
+
+	/* SCSI needs scsi_cmnd lookup table!
+	 * (with size equal to req_depth*PtrSz!)
+	 */
+	sz = ioc->req_depth * sizeof(void *);
+	mem = kmalloc(sz, GFP_ATOMIC);
+	if (mem == NULL) {
+		error = -ENOMEM;
+		goto mptscsih_probe_failed;
+	}
+
+	memset(mem, 0, sz);
+	hd->ScsiLookup = (struct scsi_cmnd **) mem;
+
+	dprintk((MYIOC_s_INFO_FMT "ScsiLookup @ %p, sz=%d\n",
+		 ioc->name, hd->ScsiLookup, sz));
+		
+	/* Allocate memory for the device structures.
+	 * A non-Null pointer at an offset
+	 * indicates a device exists.
+	 * max_id = 1 + maximum id (hosts.h)
+	 */
+	sz = sh->max_id * sizeof(void *);
+	mem = kmalloc(sz, GFP_ATOMIC);
+	if (mem == NULL) {
+		error = -ENOMEM;
+		goto mptscsih_probe_failed;
+	}
+
+	memset(mem, 0, sz);
+	hd->Targets = (VirtDevice **) mem;
+
+	dprintk((KERN_INFO
+	  "  Targets @ %p, sz=%d\n", hd->Targets, sz));
+
+	/* Clear the TM flags
+	 */
+	hd->tmPending = 0;
+	hd->tmState = TM_STATE_NONE;
+	hd->resetPending = 0;
+	hd->abortSCpnt = NULL;
+
+	/* Clear the pointer used to store
+	 * single-threaded commands, i.e., those
+	 * issued during a bus scan, dv and
+	 * configuration pages.
+	 */
+	hd->cmdPtr = NULL;
+
+	/* Initialize this SCSI Hosts' timers
+	 * To use, set the timer expires field
+	 * and add_timer
+	 */
+	init_timer(&hd->timer);
+	hd->timer.data = (unsigned long) hd;
+	hd->timer.function = mptscsih_timer_expired;
+
+	if (ioc->bus_type == SCSI) {
+		/* Update with the driver setup
+		 * values.
+		 */
+		if (ioc->spi_data.maxBusWidth > mpt_width)
+			ioc->spi_data.maxBusWidth = mpt_width;
+		if (ioc->spi_data.minSyncFactor < mpt_factor)
+			ioc->spi_data.minSyncFactor = mpt_factor;
+
+		if (ioc->spi_data.minSyncFactor == MPT_ASYNC) {
+			ioc->spi_data.maxSyncOffset = 0;
+		}
+
+		ioc->spi_data.Saf_Te = mpt_saf_te;
+
+		hd->negoNvram = 0;
+#ifndef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+		hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
+#endif
+		ioc->spi_data.forceDv = 0;
+		ioc->spi_data.noQas = 0;
+		for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+			ioc->spi_data.dvStatus[ii] =
+			  MPT_SCSICFG_NEGOTIATE;
+		}
+
+		for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++)
+			ioc->spi_data.dvStatus[ii] |=
+			  MPT_SCSICFG_DV_NOT_DONE;
+
+		dinitprintk((MYIOC_s_INFO_FMT
+			"dv %x width %x factor %x saf_te %x\n",
+			ioc->name, mpt_dv,
+			mpt_width,
+			mpt_factor,
+			mpt_saf_te));
+	}
+
+	mpt_scsi_hosts++;
+
+	error = scsi_add_host (sh, &ioc->pcidev->dev);
+	if(error) {
+		dprintk((KERN_ERR MYNAM
+		  "scsi_add_host failed\n"));
+		goto mptscsih_probe_failed;
+	}
+
+	scsi_scan_host(sh);
+	return 0;
+
+mptscsih_probe_failed:
+
+	mptscsih_remove(pdev);
+	return error;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_remove - Remove scsi devices
+ *	@pdev: Pointer to pci_dev structure
+ *
+ *
+ */
+static void
+mptscsih_remove(struct pci_dev *pdev)
+{
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+	struct Scsi_Host 	*host = ioc->sh;
+	MPT_SCSI_HOST		*hd;
+	int 		 	count;
+	unsigned long	 	flags;
+
+	if(!host)
+		return;
+
+	scsi_remove_host(host);
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+	/* Check DV thread active */
+	count = 10 * HZ;
+	spin_lock_irqsave(&dvtaskQ_lock, flags);
+	if (dvtaskQ_active) {
+		spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+		while(dvtaskQ_active && --count) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(1);
+		}
+	} else {
+		spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+	}
+	if (!count)
+		printk(KERN_ERR MYNAM ": ERROR - DV thread still active!\n");
+#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
+	else
+		printk(KERN_ERR MYNAM ": DV thread orig %d, count %d\n", 10 * HZ, count);
+#endif
+#endif
+
+	hd = (MPT_SCSI_HOST *)host->hostdata;
+	if (hd != NULL) {
+		int sz1;
+
+		mptscsih_shutdown(&pdev->dev);
+
+		sz1=0;
+
+		if (hd->ScsiLookup != NULL) {
+			sz1 = hd->ioc->req_depth * sizeof(void *);
+			kfree(hd->ScsiLookup);
+			hd->ScsiLookup = NULL;
+		}
+
+		if (hd->Targets != NULL) {
+			/*
+			 * Free pointer array.
+			 */
+			kfree(hd->Targets);
+			hd->Targets = NULL;
+		}
+
+		dprintk((MYIOC_s_INFO_FMT 
+		    "Free'd ScsiLookup (%d) memory\n",
+		    hd->ioc->name, sz1));
+
+		/* NULL the Scsi_Host pointer
+		 */
+		hd->ioc->sh = NULL;
+	}
+
+	scsi_host_put(host);
+	mpt_scsi_hosts--;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_shutdown - driver shutdown/reboot handler; flushes the adapter cache
+ *
+ */
+static void
+mptscsih_shutdown(struct device * dev)
+{
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(to_pci_dev(dev));
+	struct Scsi_Host 	*host = ioc->sh;
+	MPT_SCSI_HOST		*hd;
+
+	if(!host)
+		return;
+
+	hd = (MPT_SCSI_HOST *)host->hostdata;
+
+	/* Flush the cache of this adapter
+	 */
+	if(hd != NULL)
+		mptscsih_synchronize_cache(hd, 0);
+
+}
+
+#ifdef CONFIG_PM
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_suspend - Fusion MPT scsi driver suspend routine.
+ *
+ *
+ */
+static int
+mptscsih_suspend(struct pci_dev *pdev, u32 state)
+{
+	mptscsih_shutdown(&pdev->dev);
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_resume - Fusion MPT scsi driver resume routine.
+ *
+ *
+ */
+static int
+mptscsih_resume(struct pci_dev *pdev)
+{
+	MPT_ADAPTER 		*ioc = pci_get_drvdata(pdev);
+	struct Scsi_Host 	*host = ioc->sh;
+	MPT_SCSI_HOST		*hd;
+
+	if(!host)
+		return 0;
+
+	hd = (MPT_SCSI_HOST *)host->hostdata;
+	if(!hd)
+		return 0;
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+	{
+	unsigned long lflags;
+	spin_lock_irqsave(&dvtaskQ_lock, lflags);
+	if (!dvtaskQ_active) {
+		dvtaskQ_active = 1;
+		spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
+		INIT_WORK(&mptscsih_dvTask,
+		  mptscsih_domainValidation, (void *) hd);
+		schedule_work(&mptscsih_dvTask);
+	} else {
+		spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
+	}
+	}
+#endif
+	return 0;
+}
+
+#endif
+
+static struct mpt_pci_driver mptscsih_driver = {
+	.probe		= mptscsih_probe,
+	.remove		= mptscsih_remove,
+	.shutdown	= mptscsih_shutdown,
+#ifdef CONFIG_PM
+	.suspend	= mptscsih_suspend,
+	.resume		= mptscsih_resume,
+#endif
+};
+
+/*  SCSI host fops start here...  */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_init - Register MPT adapter(s) as SCSI host(s) with
+ *	linux scsi mid-layer.
+ *
+ *	Returns 0 for success, non-zero for failure.
+ */
+static int __init
+mptscsih_init(void)
+{
+
+	show_mptmod_ver(my_NAME, my_VERSION);
+
+	ScsiDoneCtx = mpt_register(mptscsih_io_done, MPTSCSIH_DRIVER);
+	ScsiTaskCtx = mpt_register(mptscsih_taskmgmt_complete, MPTSCSIH_DRIVER);
+	ScsiScanDvCtx = mpt_register(mptscsih_scandv_complete, MPTSCSIH_DRIVER);
+
+	if (mpt_event_register(ScsiDoneCtx, mptscsih_event_process) == 0) {
+		devtprintk((KERN_INFO MYNAM
+		  ": Registered for IOC event notifications\n"));
+	}
+
+	if (mpt_reset_register(ScsiDoneCtx, mptscsih_ioc_reset) == 0) {
+		dprintk((KERN_INFO MYNAM
+		  ": Registered for IOC reset notifications\n"));
+	}
+
+	if(mpt_device_driver_register(&mptscsih_driver,
+	  MPTSCSIH_DRIVER) != 0 ) {
+		dprintk((KERN_INFO MYNAM
+		": failed to register dd callbacks\n"));
+	}
+
+	return 0;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_exit - Unregisters MPT adapter(s)
+ *
+ */
+static void __exit
+mptscsih_exit(void)
+{
+	mpt_device_driver_deregister(MPTSCSIH_DRIVER);
+
+	mpt_reset_deregister(ScsiDoneCtx);
+	dprintk((KERN_INFO MYNAM
+	  ": Deregistered for IOC reset notifications\n"));
+
+	mpt_event_deregister(ScsiDoneCtx);
+	dprintk((KERN_INFO MYNAM
+	  ": Deregistered for IOC event notifications\n"));
+
+	mpt_deregister(ScsiScanDvCtx);
+	mpt_deregister(ScsiTaskCtx);
+	mpt_deregister(ScsiDoneCtx);
+
+	if (info_kbuf != NULL)
+		kfree(info_kbuf);
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_info - Return information about MPT adapter
+ *	@SChost: Pointer to Scsi_Host structure
+ *
+ *	(linux scsi_host_template.info routine)
+ *
+ *	Returns pointer to buffer where information was written.
+ */
+static const char *
+mptscsih_info(struct Scsi_Host *SChost)
+{
+	MPT_SCSI_HOST *h;
+	int size = 0;
+
+	if (info_kbuf == NULL)
+		if ((info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
+			return info_kbuf;
+
+	h = (MPT_SCSI_HOST *)SChost->hostdata;
+	info_kbuf[0] = '\0';
+	if (h) {
+		mpt_print_ioc_summary(h->ioc, info_kbuf, &size, 0, 0);
+		info_kbuf[size-1] = '\0';
+	}
+
+	return info_kbuf;
+}
+
+struct info_str {
+	char *buffer;
+	int   length;
+	int   offset;
+	int   pos;
+};
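+
+/* copy_mem_info() below appends @len bytes of @data into info->buffer while
+ * honouring the caller's read window: data that falls before info->offset is
+ * skipped and anything beyond info->length is dropped.
+ */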
+
+static void copy_mem_info(struct info_str *info, char *data, int len)
+{
+	if (info->pos + len > info->length)
+		len = info->length - info->pos;
+
+	if (info->pos + len < info->offset) {
+		info->pos += len;
+		return;
+	}
+
+	if (info->pos < info->offset) {
+		data += (info->offset - info->pos);
+		len  -= (info->offset - info->pos);
+	}
+
+	if (len > 0) {
+		memcpy(info->buffer + info->pos, data, len);
+		info->pos += len;
+	}
+}
+
+static int copy_info(struct info_str *info, char *fmt, ...)
+{
+	va_list args;
+	char buf[81];
+	int len;
+
+	va_start(args, fmt);
+	len = vsprintf(buf, fmt, args);
+	va_end(args);
+
+	copy_mem_info(info, buf, len);
+	return len;
+}
+
+static int mptscsih_host_info(MPT_ADAPTER *ioc, char *pbuf, off_t offset, int len)
+{
+	struct info_str info;
+
+	info.buffer	= pbuf;
+	info.length	= len;
+	info.offset	= offset;
+	info.pos	= 0;
+
+	copy_info(&info, "%s: %s, ", ioc->name, ioc->prod_name);
+	copy_info(&info, "%s%08xh, ", MPT_FW_REV_MAGIC_ID_STRING, ioc->facts.FWVersion.Word);
+	copy_info(&info, "Ports=%d, ", ioc->facts.NumberOfPorts);
+	copy_info(&info, "MaxQ=%d\n", ioc->req_depth);
+
+	return ((info.pos > info.offset) ? info.pos - info.offset : 0);
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_proc_info - Return information about MPT adapter
+ *
+ *	(linux scsi_host_template.proc_info routine)
+ *
+ * 	buffer: if write, user data; if read, buffer for user
+ * 	length: if write, return length;
+ * 	offset: if write, 0; if read, the current offset into the buffer from
+ * 		the previous read.
+ * 	hostno: scsi host number
+ *	func:   if write = 1; if read = 0
+ */
+static int
+mptscsih_proc_info(struct Scsi_Host *host, char *buffer, char **start, off_t offset,
+			int length, int func)
+{
+	MPT_SCSI_HOST	*hd = (MPT_SCSI_HOST *)host->hostdata;
+	MPT_ADAPTER	*ioc = hd->ioc;
+	int size = 0;
+
+	if (func) {
+		/* 
+		 * write is not supported 
+		 */
+	} else {
+		if (start)
+			*start = buffer;
+
+		size = mptscsih_host_info(ioc, buffer, offset, length);
+	}
+
+	return size;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+#define ADD_INDEX_LOG(req_ent)	do { } while(0)
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_qcmd - Primary Fusion MPT SCSI initiator IO start routine.
+ *	@SCpnt: Pointer to scsi_cmnd structure
+ *	@done: Pointer SCSI mid-layer IO completion function
+ *
+ *	(linux scsi_host_template.queuecommand routine)
+ *	This is the primary SCSI IO start routine.  Create a MPI SCSIIORequest
+ *	from a linux scsi_cmnd request and send it to the IOC.
+ *
+ *	Returns 0. (rtn value discarded by linux scsi mid-layer)
+ */
+static int
+mptscsih_qcmd(struct scsi_cmnd *SCpnt, void (*done)(struct scsi_cmnd *))
+{
+	MPT_SCSI_HOST		*hd;
+	MPT_FRAME_HDR		*mf;
+	SCSIIORequest_t		*pScsiReq;
+	VirtDevice		*pTarget;
+	int	 target;
+	int	 lun;
+	u32	 datalen;
+	u32	 scsictl;
+	u32	 scsidir;
+	u32	 cmd_len;
+	int	 my_idx;
+	int	 ii;
+
+	hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata;
+	target = SCpnt->device->id;
+	lun = SCpnt->device->lun;
+	SCpnt->scsi_done = done;
+
+	pTarget = hd->Targets[target];
+
+	dmfprintk((MYIOC_s_INFO_FMT "qcmd: SCpnt=%p, done()=%p\n",
+			(hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt, done));
+
+	if (hd->resetPending) {
+		dtmprintk((MYIOC_s_WARN_FMT "qcmd: SCpnt=%p timeout + 60HZ\n",
+			(hd && hd->ioc) ? hd->ioc->name : "ioc?", SCpnt));
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	/*
+	 *  Put together a MPT SCSI request...
+	 */
+	if ((mf = mpt_get_msg_frame(ScsiDoneCtx, hd->ioc)) == NULL) {
+		dprintk((MYIOC_s_WARN_FMT "QueueCmd, no msg frames!!\n",
+				hd->ioc->name));
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	pScsiReq = (SCSIIORequest_t *) mf;
+
+	my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+
+	ADD_INDEX_LOG(my_idx);
+
+	/*  BUG FIX!  19991030 -sralston
+	 *    TUR's being issued with scsictl=0x02000000 (DATA_IN)!
+	 *    Seems we may receive a buffer (datalen>0) even when there
+	 *    will be no data transfer!  GRRRRR...
+	 */
+	if (SCpnt->sc_data_direction == DMA_FROM_DEVICE) {
+		datalen = SCpnt->request_bufflen;
+		scsidir = MPI_SCSIIO_CONTROL_READ;	/* DATA IN  (host<--ioc<--dev) */
+	} else if (SCpnt->sc_data_direction == DMA_TO_DEVICE) {
+		datalen = SCpnt->request_bufflen;
+		scsidir = MPI_SCSIIO_CONTROL_WRITE;	/* DATA OUT (host-->ioc-->dev) */
+	} else {
+		datalen = 0;
+		scsidir = MPI_SCSIIO_CONTROL_NODATATRANSFER;
+	}
+
+	/* Default to untagged. Once a target structure has been allocated,
+	 * use the Inquiry data to determine if device supports tagged.
+	 */
+	if (   pTarget
+	    && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+	    && (SCpnt->device->tagged_supported)) {
+		scsictl = scsidir | MPI_SCSIIO_CONTROL_SIMPLEQ;
+	} else {
+		scsictl = scsidir | MPI_SCSIIO_CONTROL_UNTAGGED;
+	}
+
+	/* Use the above information to set up the message frame
+	 */
+	pScsiReq->TargetID = (u8) target;
+	pScsiReq->Bus = (u8) SCpnt->device->channel;
+	pScsiReq->ChainOffset = 0;
+	pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
+	pScsiReq->CDBLength = SCpnt->cmd_len;
+	pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+	pScsiReq->Reserved = 0;
+	pScsiReq->MsgFlags = mpt_msg_flags();
+	pScsiReq->LUN[0] = 0;
+	pScsiReq->LUN[1] = lun;
+	pScsiReq->LUN[2] = 0;
+	pScsiReq->LUN[3] = 0;
+	pScsiReq->LUN[4] = 0;
+	pScsiReq->LUN[5] = 0;
+	pScsiReq->LUN[6] = 0;
+	pScsiReq->LUN[7] = 0;
+	pScsiReq->Control = cpu_to_le32(scsictl);
+
+	/*
+	 *  Write SCSI CDB into the message
+	 */
+	cmd_len = SCpnt->cmd_len;
+	for (ii=0; ii < cmd_len; ii++)
+		pScsiReq->CDB[ii] = SCpnt->cmnd[ii];
+
+	for (ii=cmd_len; ii < 16; ii++)
+		pScsiReq->CDB[ii] = 0;
+
+	/* DataLength */
+	pScsiReq->DataLength = cpu_to_le32(datalen);
+
+	/* SenseBuffer low address */
+	pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma
+					   + (my_idx * MPT_SENSE_BUFFER_ALLOC));
+
+	/* Now add the SG list
+	 * Always have a SGE even if null length.
+	 */
+	if (datalen == 0) {
+		/* Add a NULL SGE */
+		mptscsih_add_sge((char *)&pScsiReq->SGL, MPT_SGE_FLAGS_SSIMPLE_READ | 0,
+			(dma_addr_t) -1);
+	} else {
+		/* Add a 32 or 64 bit SGE */
+		if (mptscsih_AddSGE(hd->ioc, SCpnt, pScsiReq, my_idx) != SUCCESS)
+			goto fail;
+	}
+
+	hd->ScsiLookup[my_idx] = SCpnt;
+	SCpnt->host_scribble = NULL;
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+	if (hd->ioc->bus_type == SCSI) {
+		int dvStatus = hd->ioc->spi_data.dvStatus[target];
+		int issueCmd = 1;
+
+		if (dvStatus || hd->ioc->spi_data.forceDv) {
+
+			if ((dvStatus & MPT_SCSICFG_NEED_DV) ||
+				(hd->ioc->spi_data.forceDv & MPT_SCSICFG_NEED_DV)) {
+				unsigned long lflags;
+				/* Schedule DV if necessary */
+				spin_lock_irqsave(&dvtaskQ_lock, lflags);
+				if (!dvtaskQ_active) {
+					dvtaskQ_active = 1;
+					spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
+					INIT_WORK(&mptscsih_dvTask, mptscsih_domainValidation, (void *) hd);
+
+					schedule_work(&mptscsih_dvTask);
+				} else {
+					spin_unlock_irqrestore(&dvtaskQ_lock, lflags);
+				}
+				hd->ioc->spi_data.forceDv &= ~MPT_SCSICFG_NEED_DV;
+			}
+
+			/* Trying to do DV to this target, extend timeout.
+			 * Wait to issue until flag is clear
+			 */
+			if (dvStatus & MPT_SCSICFG_DV_PENDING) {
+				mod_timer(&SCpnt->eh_timeout, jiffies + 40 * HZ);
+				issueCmd = 0;
+			}
+
+			/* Set the DV flags.
+			 */
+			if (dvStatus & MPT_SCSICFG_DV_NOT_DONE)
+				mptscsih_set_dvflags(hd, pScsiReq);
+
+			if (!issueCmd)
+				goto fail;
+		}
+	}
+#endif
+
+	mpt_put_msg_frame(ScsiDoneCtx, hd->ioc, mf);
+	dmfprintk((MYIOC_s_INFO_FMT "Issued SCSI cmd (%p) mf=%p idx=%d\n",
+			hd->ioc->name, SCpnt, mf, my_idx));
+	DBG_DUMP_REQUEST_FRAME(mf)
+	return 0;
+
+ fail:
+	mptscsih_freeChainBuffers(hd->ioc, my_idx);
+	mpt_free_msg_frame(hd->ioc, mf);
+	return SCSI_MLQUEUE_HOST_BUSY;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_freeChainBuffers - Function to free chain buffers associated
+ *	with a SCSI IO request
+ *	@ioc: Pointer to the MPT_ADAPTER instance
+ *	@req_idx: Index of the SCSI IO request frame.
+ *
+ *	Called when SG chain buffer allocation fails and from the mptscsih
+ *	callbacks.
+ *	No return.
+ */
+static void
+mptscsih_freeChainBuffers(MPT_ADAPTER *ioc, int req_idx)
+{
+	MPT_FRAME_HDR *chain;
+	unsigned long flags;
+	int chain_idx;
+	int next;
+
+	/* Get the first chain index and reset
+	 * tracker state.
+	 */
+	chain_idx = ioc->ReqToChain[req_idx];
+	ioc->ReqToChain[req_idx] = MPT_HOST_NO_CHAIN;
+
+	while (chain_idx != MPT_HOST_NO_CHAIN) {
+
+		/* Save the next chain buffer index */
+		next = ioc->ChainToChain[chain_idx];
+
+		/* Free this chain buffer and reset
+		 * tracker
+		 */
+		ioc->ChainToChain[chain_idx] = MPT_HOST_NO_CHAIN;
+
+		chain = (MPT_FRAME_HDR *) (ioc->ChainBuffer
+					+ (chain_idx * ioc->req_sz));
+
+		spin_lock_irqsave(&ioc->FreeQlock, flags);
+		list_add_tail(&chain->u.frame.linkage.list, &ioc->FreeChainQ);
+		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+
+		dmfprintk((MYIOC_s_INFO_FMT "FreeChainBuffers (index %d)\n",
+				ioc->name, chain_idx));
+
+		/* handle next */
+		chain_idx = next;
+	}
+	return;
+}
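+/*
+ * Chain buffers are tracked as a singly linked list of indices:
+ * ReqToChain[req_idx] holds the first chain index for a request and
+ * ChainToChain[idx] holds the next one, terminated by MPT_HOST_NO_CHAIN,
+ * so the walk above returns every chain frame to FreeChainQ.
+ */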
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	Reset Handling
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_TMHandler - Generic handler for SCSI Task Management.
+ *	Fall through to mpt_HardResetHandler if: not operational, too many
+ *	failed TM requests or handshake failure.
+ *
+ *	@hd: Pointer to MPT_SCSI_HOST structure
+ *	@type: Task Management type
+ *	@channel: Channel (bus) number for the reset (if appropriate)
+ *	@target: Logical Target ID for reset (if appropriate)
+ *	@lun: Logical Unit for reset (if appropriate)
+ *	@ctx2abort: Context for the task to be aborted (if appropriate)
+ *	@timeout: Timeout, in seconds, to wait for TM completion
+ *
+ *	Remark: Currently invoked from a non-interrupt thread (_bh).
+ *
+ *	Remark: With old EH code, at most 1 SCSI TaskMgmt function per IOC
+ *	will be active.
+ *
+ *	Returns 0 for SUCCESS, or FAILED (or another non-zero error code)
+ *	on failure.
+ */
+static int
+mptscsih_TMHandler(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout)
+{
+	MPT_ADAPTER	*ioc;
+	int		 rc = -1;
+	int		 doTask = 1;
+	u32		 ioc_raw_state;
+	unsigned long	 flags;
+
+	/* If FW is being reloaded currently, return success to
+	 * the calling function.
+	 */
+	if (hd == NULL)
+		return 0;
+
+	ioc = hd->ioc;
+	if (ioc == NULL) {
+		printk(KERN_ERR MYNAM " TMHandler" " NULL ioc!\n");
+		return FAILED;
+	}
+	dtmprintk((MYIOC_s_INFO_FMT "TMHandler Entered!\n", ioc->name));
+
+	// SJR - CHECKME - Can we avoid this here?
+	// (mpt_HardResetHandler has this check...)
+	spin_lock_irqsave(&ioc->diagLock, flags);
+	if ((ioc->diagPending) || (ioc->alt_ioc && ioc->alt_ioc->diagPending)) {
+		spin_unlock_irqrestore(&ioc->diagLock, flags);
+		return FAILED;
+	}
+	spin_unlock_irqrestore(&ioc->diagLock, flags);
+
+	/*  Wait a fixed amount of time for the TM pending flag to be cleared.
+	 *  If we time out and this is not a bus reset, we return a FAILED status to the caller.
+	 *  The call to mptscsih_tm_pending_wait() will set the pending flag if we are
+	 *  successful. Otherwise, reload the FW.
+	 */
+	if (mptscsih_tm_pending_wait(hd) == FAILED) {
+		if (type == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK) {
+			dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler abort: "
+			   "Timed out waiting for last TM (%d) to complete! \n",
+			   hd->ioc->name, hd->tmPending));
+			return FAILED;
+		} else if (type == MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET) {
+			dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler target reset: "
+			   "Timed out waiting for last TM (%d) to complete! \n",
+			   hd->ioc->name, hd->tmPending));
+			return FAILED;
+		} else if (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
+			dtmprintk((KERN_WARNING MYNAM ": %s: TMHandler bus reset: "
+			   "Timed out waiting for last TM (%d) to complete! \n",
+			   hd->ioc->name, hd->tmPending));
+			if (hd->tmPending & (1 << MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS))
+				return FAILED;
+
+			doTask = 0;
+		}
+	} else {
+		spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
+		hd->tmPending |=  (1 << type);
+		spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+	}
+
+	/* Is operational?
+	 */
+	ioc_raw_state = mpt_GetIocState(hd->ioc, 0);
+
+#ifdef MPT_DEBUG_RESET
+	if ((ioc_raw_state & MPI_IOC_STATE_MASK) != MPI_IOC_STATE_OPERATIONAL) {
+		printk(MYIOC_s_WARN_FMT
+			"TM Handler: IOC Not operational(0x%x)!\n",
+			hd->ioc->name, ioc_raw_state);
+	}
+#endif
+
+	if (doTask && ((ioc_raw_state & MPI_IOC_STATE_MASK) == MPI_IOC_STATE_OPERATIONAL)
+				&& !(ioc_raw_state & MPI_DOORBELL_ACTIVE)) {
+
+		/* Issue the Task Mgmt request.
+		 */
+		if (hd->hard_resets < -1)
+			hd->hard_resets++;
+		rc = mptscsih_IssueTaskMgmt(hd, type, channel, target, lun, ctx2abort, timeout);
+		if (rc) {
+			printk(MYIOC_s_INFO_FMT "Issue of TaskMgmt failed!\n", hd->ioc->name);
+		} else {
+			dtmprintk((MYIOC_s_INFO_FMT "Issue of TaskMgmt Successful!\n", hd->ioc->name));
+		}
+	}
+
+	/* Only fall through to the HRH if this is a bus reset
+	 */
+	if ((type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) && (rc ||
+		ioc->reload_fw || (ioc->alt_ioc && ioc->alt_ioc->reload_fw))) {
+		dtmprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
+			 hd->ioc->name));
+		rc = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
+	}
+
+	dtmprintk((MYIOC_s_INFO_FMT "TMHandler rc = %d!\n", hd->ioc->name, rc));
+
+	return rc;
+}
+
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_IssueTaskMgmt - Generic send Task Management function.
+ *	@hd: Pointer to MPT_SCSI_HOST structure
+ *	@type: Task Management type
+ *	@channel: Channel (bus) number for the reset (if appropriate)
+ *	@target: Logical Target ID for reset (if appropriate)
+ *	@lun: Logical Unit for reset (if appropriate)
+ *	@ctx2abort: Context for the task to be aborted (if appropriate)
+ *	@timeout: Timeout, in seconds, to wait for TM completion
+ *
+ *	Remark: _HardResetHandler can be invoked from an interrupt thread (timer)
+ *	or a non-interrupt thread.  In the former, must not call schedule().
+ *
+ *	Not all fields are meaningful for all task types.
+ *
+ *	Returns 0 for SUCCESS, -999 for "no msg frames",
+ *	else other non-zero value returned.
+ */
+static int
+mptscsih_IssueTaskMgmt(MPT_SCSI_HOST *hd, u8 type, u8 channel, u8 target, u8 lun, int ctx2abort, ulong timeout)
+{
+	MPT_FRAME_HDR	*mf;
+	SCSITaskMgmt_t	*pScsiTm;
+	int		 ii;
+	int		 retval;
+
+	/* Return Fail to calling function if no message frames available.
+	 */
+	if ((mf = mpt_get_msg_frame(ScsiTaskCtx, hd->ioc)) == NULL) {
+		dfailprintk((MYIOC_s_ERR_FMT "IssueTaskMgmt, no msg frames!!\n",
+				hd->ioc->name));
+		//return FAILED;
+		return -999;
+	}
+	dtmprintk((MYIOC_s_INFO_FMT "IssueTaskMgmt request @ %p\n",
+			hd->ioc->name, mf));
+
+	/* Format the Request
+	 */
+	pScsiTm = (SCSITaskMgmt_t *) mf;
+	pScsiTm->TargetID = target;
+	pScsiTm->Bus = channel;
+	pScsiTm->ChainOffset = 0;
+	pScsiTm->Function = MPI_FUNCTION_SCSI_TASK_MGMT;
+
+	pScsiTm->Reserved = 0;
+	pScsiTm->TaskType = type;
+	pScsiTm->Reserved1 = 0;
+	pScsiTm->MsgFlags = (type == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS)
+                    ? MPI_SCSITASKMGMT_MSGFLAGS_LIPRESET_RESET_OPTION : 0;
+
+	for (ii= 0; ii < 8; ii++) {
+		pScsiTm->LUN[ii] = 0;
+	}
+	pScsiTm->LUN[1] = lun;
+
+	for (ii=0; ii < 7; ii++)
+		pScsiTm->Reserved2[ii] = 0;
+
+	pScsiTm->TaskMsgContext = ctx2abort;
+
+	dtmprintk((MYIOC_s_INFO_FMT
+		"IssueTaskMgmt: ctx2abort (0x%08x) type=%d\n",
+		hd->ioc->name, ctx2abort, type));
+
+	DBG_DUMP_TM_REQUEST_FRAME((u32 *)pScsiTm);
+
+	if ((retval = mpt_send_handshake_request(ScsiTaskCtx, hd->ioc,
+		sizeof(SCSITaskMgmt_t), (u32*)pScsiTm,
+		CAN_SLEEP)) != 0) {
+		dfailprintk((MYIOC_s_ERR_FMT "_send_handshake FAILED!"
+			" (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
+			hd->ioc, mf));
+		mpt_free_msg_frame(hd->ioc, mf);
+		return retval;
+	}
+
+	if(mptscsih_tm_wait_for_completion(hd, timeout) == FAILED) {
+		dfailprintk((MYIOC_s_ERR_FMT "_wait_for_completion FAILED!"
+			" (hd %p, ioc %p, mf %p) \n", hd->ioc->name, hd,
+			hd->ioc, mf));
+		mpt_free_msg_frame(hd->ioc, mf);
+		dtmprintk((MYIOC_s_INFO_FMT "Calling HardReset! \n",
+			 hd->ioc->name));
+		retval = mpt_HardResetHandler(hd->ioc, CAN_SLEEP);
+	}
+
+	return retval;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_abort - Abort linux scsi_cmnd routine, new_eh variant
+ *	@SCpnt: Pointer to scsi_cmnd structure, IO to be aborted
+ *
+ *	(linux scsi_host_template.eh_abort_handler routine)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_abort(struct scsi_cmnd * SCpnt)
+{
+	MPT_SCSI_HOST	*hd;
+	MPT_ADAPTER	*ioc;
+	MPT_FRAME_HDR	*mf;
+	u32		 ctx2abort;
+	int		 scpnt_idx;
+	spinlock_t	*host_lock = SCpnt->device->host->host_lock;
+
+	/* If we can't locate our host adapter structure, return FAILED status.
+	 */
+	if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL) {
+		SCpnt->result = DID_RESET << 16;
+		SCpnt->scsi_done(SCpnt);
+		dfailprintk((KERN_WARNING MYNAM ": mptscsih_abort: "
+			   "Can't locate host! (sc=%p)\n",
+			   SCpnt));
+		return FAILED;
+	}
+
+	ioc = hd->ioc;
+	if (hd->resetPending)
+		return FAILED;
+
+	printk(KERN_WARNING MYNAM ": %s: >> Attempting task abort! (sc=%p)\n",
+	       hd->ioc->name, SCpnt);
+
+	if (hd->timeouts < -1)
+		hd->timeouts++;
+
+	/* Find this command
+	 */
+	if ((scpnt_idx = SCPNT_TO_LOOKUP_IDX(SCpnt)) < 0) {
+		/* Cmd not found in ScsiLookup. 
+		 * Do OS callback.
+		 */
+		SCpnt->result = DID_RESET << 16;
+		dtmprintk((KERN_WARNING MYNAM ": %s: mptscsih_abort: "
+			   "Command not in the active list! (sc=%p)\n",
+			   hd->ioc->name, SCpnt));
+		return SUCCESS;
+	}
+
+	/* Most important!  Set TaskMsgContext to SCpnt's MsgContext!
+	 * (the IO to be ABORT'd)
+	 *
+	 * NOTE: Since we do not byteswap MsgContext, we do not
+	 *	 swap it here either.  It is an opaque cookie to
+	 *	 the controller, so it does not matter. -DaveM
+	 */
+	mf = MPT_INDEX_2_MFPTR(hd->ioc, scpnt_idx);
+	ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
+
+	hd->abortSCpnt = SCpnt;
+
+	spin_unlock_irq(host_lock);
+	if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
+		SCpnt->device->channel, SCpnt->device->id, SCpnt->device->lun,
+		ctx2abort, 2 /* 2 second timeout */)
+		< 0) {
+
+		/* The TM request failed and the subsequent FW-reload failed!
+		 * Fatal error case.
+		 */
+		printk(MYIOC_s_WARN_FMT "Error issuing abort task! (sc=%p)\n",
+		       hd->ioc->name, SCpnt);
+
+		/* We must clear our pending flag before clearing our state.
+		 */
+		hd->tmPending = 0;
+		hd->tmState = TM_STATE_NONE;
+
+		spin_lock_irq(host_lock);
+
+		/* Unmap the DMA buffers, if any. */
+		if (SCpnt->use_sg) {
+			pci_unmap_sg(ioc->pcidev, (struct scatterlist *) SCpnt->request_buffer,
+				    SCpnt->use_sg, SCpnt->sc_data_direction);
+		} else if (SCpnt->request_bufflen) {
+			pci_unmap_single(ioc->pcidev, SCpnt->SCp.dma_handle,
+				SCpnt->request_bufflen, SCpnt->sc_data_direction);
+		}
+		hd->ScsiLookup[scpnt_idx] = NULL;
+		SCpnt->result = DID_RESET << 16;
+		SCpnt->scsi_done(SCpnt);		/* Issue the command callback */
+		mptscsih_freeChainBuffers(ioc, scpnt_idx);
+		mpt_free_msg_frame(ioc, mf);
+		return FAILED;
+	}
+	spin_lock_irq(host_lock);
+	return SUCCESS;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_dev_reset - Perform a SCSI TARGET_RESET!  new_eh variant
+ *	@SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ *	(linux scsi_host_template.eh_dev_reset_handler routine)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_dev_reset(struct scsi_cmnd * SCpnt)
+{
+	MPT_SCSI_HOST	*hd;
+	spinlock_t	*host_lock = SCpnt->device->host->host_lock;
+
+	/* If we can't locate our host adapter structure, return FAILED status.
+	 */
+	if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
+		dtmprintk((KERN_WARNING MYNAM ": mptscsih_dev_reset: "
+			   "Can't locate host! (sc=%p)\n",
+			   SCpnt));
+		return FAILED;
+	}
+
+	if (hd->resetPending)
+		return FAILED;
+
+	printk(KERN_WARNING MYNAM ": %s: >> Attempting target reset! (sc=%p)\n",
+	       hd->ioc->name, SCpnt);
+
+	spin_unlock_irq(host_lock);
+	if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_TARGET_RESET,
+		SCpnt->device->channel, SCpnt->device->id,
+		0, 0, 5 /* 5 second timeout */)
+		< 0){
+		/* The TM request failed and the subsequent FW-reload failed!
+		 * Fatal error case.
+		 */
+		printk(MYIOC_s_WARN_FMT "Error processing TaskMgmt request (sc=%p)\n",
+		 		hd->ioc->name, SCpnt);
+		hd->tmPending = 0;
+		hd->tmState = TM_STATE_NONE;
+		spin_lock_irq(host_lock);
+		return FAILED;
+	}
+	spin_lock_irq(host_lock);
+	return SUCCESS;
+
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_bus_reset - Perform a SCSI BUS_RESET!	new_eh variant
+ *	@SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ *	(linux scsi_host_template.eh_bus_reset_handler routine)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_bus_reset(struct scsi_cmnd * SCpnt)
+{
+	MPT_SCSI_HOST	*hd;
+	spinlock_t	*host_lock = SCpnt->device->host->host_lock;
+
+	/* If we can't locate our host adapter structure, return FAILED status.
+	 */
+	if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
+		dtmprintk((KERN_WARNING MYNAM ": mptscsih_bus_reset: "
+			   "Can't locate host! (sc=%p)\n",
+			   SCpnt ) );
+		return FAILED;
+	}
+
+	printk(KERN_WARNING MYNAM ": %s: >> Attempting bus reset! (sc=%p)\n",
+	       hd->ioc->name, SCpnt);
+
+	if (hd->timeouts < -1)
+		hd->timeouts++;
+
+	/* We are now ready to execute the task management request. */
+	spin_unlock_irq(host_lock);
+	if (mptscsih_TMHandler(hd, MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS,
+		SCpnt->device->channel, 0, 0, 0, 5 /* 5 second timeout */)
+	    < 0){
+
+		/* The TM request failed and the subsequent FW-reload failed!
+		 * Fatal error case.
+		 */
+		printk(MYIOC_s_WARN_FMT
+		       "Error processing TaskMgmt request (sc=%p)\n",
+		       hd->ioc->name, SCpnt);
+		hd->tmPending = 0;
+		hd->tmState = TM_STATE_NONE;
+		spin_lock_irq(host_lock);
+		return FAILED;
+	}
+	spin_lock_irq(host_lock);
+	return SUCCESS;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_host_reset - Perform a SCSI host adapter RESET!
+ *	new_eh variant
+ *	@SCpnt: Pointer to scsi_cmnd structure, IO which reset is due to
+ *
+ *	(linux scsi_host_template.eh_host_reset_handler routine)
+ *
+ *	Returns SUCCESS or FAILED.
+ */
+static int
+mptscsih_host_reset(struct scsi_cmnd *SCpnt)
+{
+	MPT_SCSI_HOST *  hd;
+	int              status = SUCCESS;
+	spinlock_t	*host_lock = SCpnt->device->host->host_lock;
+
+	/*  If we can't locate the host to reset, then we failed. */
+	if ((hd = (MPT_SCSI_HOST *) SCpnt->device->host->hostdata) == NULL){
+		dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
+			     "Can't locate host! (sc=%p)\n",
+			     SCpnt ) );
+		return FAILED;
+	}
+
+	printk(KERN_WARNING MYNAM ": %s: >> Attempting host reset! (sc=%p)\n",
+	       hd->ioc->name, SCpnt);
+
+	/*  If our attempts to reset the host failed, then return a failed
+	 *  status.  The host will be taken off line by the SCSI mid-layer.
+	 */
+	spin_unlock_irq(host_lock);
+	if (mpt_HardResetHandler(hd->ioc, CAN_SLEEP) < 0){
+		status = FAILED;
+	} else {
+		/*  Make sure TM pending is cleared and TM state is set to
+		 *  NONE.
+		 */
+		hd->tmPending = 0;
+		hd->tmState = TM_STATE_NONE;
+	}
+	spin_lock_irq(host_lock);
+
+
+	dtmprintk( ( KERN_WARNING MYNAM ": mptscsih_host_reset: "
+		     "Status = %s\n",
+		     (status == SUCCESS) ? "SUCCESS" : "FAILED" ) );
+
+	return status;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_tm_pending_wait - wait for pending task management request to
+ *		complete.
+ *	@hd: Pointer to MPT host structure.
+ *
+ *	Returns {SUCCESS,FAILED}.
+ */
+static int
+mptscsih_tm_pending_wait(MPT_SCSI_HOST * hd)
+{
+	unsigned long  flags;
+	int            loop_count = 4 * 10;  /* Wait 10 seconds */
+	int            status = FAILED;
+
+	do {
+		spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
+		if (hd->tmState == TM_STATE_NONE) {
+			hd->tmState = TM_STATE_IN_PROGRESS;
+			hd->tmPending = 1;
+			status = SUCCESS;
+			spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+		msleep(250);
+	} while (--loop_count);
+
+	return status;
+}
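+/*
+ * With 250 ms per pass and loop_count = 4 * 10, the wait above is bounded
+ * at roughly 10 seconds; FreeQlock protects the tmState/tmPending
+ * transition so only one task-management request is marked in progress.
+ */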
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_tm_wait_for_completion - wait for completion of TM task
+ *	@hd: Pointer to MPT host structure.
+ *
+ *	Returns {SUCCESS,FAILED}.
+ */
+static int
+mptscsih_tm_wait_for_completion(MPT_SCSI_HOST * hd, ulong timeout )
+{
+	unsigned long  flags;
+	int            loop_count = 4 * timeout;
+	int            status = FAILED;
+
+	do {
+		spin_lock_irqsave(&hd->ioc->FreeQlock, flags);
+		if(hd->tmPending == 0) {
+			status = SUCCESS;
+			spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+			break;
+		}
+		spin_unlock_irqrestore(&hd->ioc->FreeQlock, flags);
+		msleep_interruptible(250);
+	} while (--loop_count);
+
+	return status;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_taskmgmt_complete - Registered with Fusion MPT base driver
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: Pointer to SCSI task mgmt request frame
+ *	@mr: Pointer to SCSI task mgmt reply frame
+ *
+ *	This routine is called from mptbase.c::mpt_interrupt() at the completion
+ *	of any SCSI task management request.
+ *	This routine is registered with the MPT (base) driver at driver
+ *	load/init time via the mpt_register() API call.
+ *
+ *	Returns 1 indicating alloc'd request frame ptr should be freed.
+ */
+static int
+mptscsih_taskmgmt_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+	SCSITaskMgmtReply_t	*pScsiTmReply;
+	SCSITaskMgmt_t		*pScsiTmReq;
+	MPT_SCSI_HOST		*hd;
+	unsigned long		 flags;
+	u16			 iocstatus;
+	u8			 tmType;
+
+	dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt completed (mf=%p,mr=%p)\n",
+			ioc->name, mf, mr));
+	if (ioc->sh) {
+		/* Depending on the thread, a timer is activated for
+		 * the TM request.  Delete this timer on completion of TM.
+		 * Decrement count of outstanding TM requests.
+		 */
+		hd = (MPT_SCSI_HOST *)ioc->sh->hostdata;
+	} else {
+		dtmprintk((MYIOC_s_WARN_FMT "TaskMgmt Complete: NULL Scsi Host Ptr\n",
+			ioc->name));
+		return 1;
+	}
+
+	if (mr == NULL) {
+		dtmprintk((MYIOC_s_WARN_FMT "ERROR! TaskMgmt Reply: NULL Request %p\n",
+			ioc->name, mf));
+		return 1;
+	} else {
+		pScsiTmReply = (SCSITaskMgmtReply_t*)mr;
+		pScsiTmReq = (SCSITaskMgmt_t*)mf;
+
+		/* Figure out if this was ABORT_TASK, TARGET_RESET, or BUS_RESET! */
+		tmType = pScsiTmReq->TaskType;
+
+		dtmprintk((MYIOC_s_WARN_FMT "  TaskType = %d, TerminationCount=%d\n",
+				ioc->name, tmType, le32_to_cpu(pScsiTmReply->TerminationCount)));
+		DBG_DUMP_TM_REPLY_FRAME((u32 *)pScsiTmReply);
+
+		iocstatus = le16_to_cpu(pScsiTmReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+		dtmprintk((MYIOC_s_WARN_FMT "  SCSI TaskMgmt (%d) IOCStatus=%04x IOCLogInfo=%08x\n",
+			ioc->name, tmType, iocstatus, le32_to_cpu(pScsiTmReply->IOCLogInfo)));
+		/* Error?  (anything non-zero?) */
+		if (iocstatus) {
+
+			/* clear flags and continue.
+			 */
+			if (tmType == MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK)
+				hd->abortSCpnt = NULL;
+
+			/* If an internal command is present
+			 * or the TM failed - reload the FW.
+			 * FC FW may respond FAILED to an ABORT
+			 */
+			if (tmType == MPI_SCSITASKMGMT_TASKTYPE_RESET_BUS) {
+				if ((hd->cmdPtr) ||
+				    (iocstatus == MPI_IOCSTATUS_SCSI_TASK_MGMT_FAILED)) {
+					if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) {
+						printk((KERN_WARNING
+							" Firmware Reload FAILED!!\n"));
+					}
+				}
+			}
+		} else {
+			dtmprintk((MYIOC_s_WARN_FMT " TaskMgmt SUCCESS\n", ioc->name));
+
+			hd->abortSCpnt = NULL;
+
+		}
+	}
+
+	spin_lock_irqsave(&ioc->FreeQlock, flags);
+	hd->tmPending = 0;
+	spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+	hd->tmState = TM_STATE_NONE;
+
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	This is anyone's guess, quite frankly.
+ */
+static int
+mptscsih_bios_param(struct scsi_device * sdev, struct block_device *bdev,
+		sector_t capacity, int geom[])
+{
+	int		heads;
+	int		sectors;
+	sector_t	cylinders;
+	ulong 		dummy;
+
+	heads = 64;
+	sectors = 32;
+
+	dummy = heads * sectors;
+	cylinders = capacity;
+	sector_div(cylinders,dummy);
+
+	/*
+	 * Handle extended translation size for logical drives
+	 * > 1Gb
+	 */
+	if ((ulong)capacity >= 0x200000) {
+		heads = 255;
+		sectors = 63;
+		dummy = heads * sectors;
+		cylinders = capacity;
+		sector_div(cylinders,dummy);
+	}
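+	/* capacity is in 512-byte sectors, so the 0x200000 threshold above is
+	 * 1 GiB.  Worked example (illustrative values): a disk of 71,132,960
+	 * sectors (~36.4 GB) uses 255 heads x 63 sectors = 16065 sectors per
+	 * cylinder, giving 71,132,960 / 16065 = 4427 cylinders.
+	 */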
+
+	/* return result */
+	geom[0] = heads;
+	geom[1] = sectors;
+	geom[2] = cylinders;
+
+	dprintk((KERN_NOTICE
+		": bios_param: Id=%i Lun=%i Channel=%i CHS=%i/%i/%i\n",
+		sdev->id, sdev->lun,sdev->channel,(int)cylinders,heads,sectors));
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	OS entry point to allow host driver to alloc memory
+ *	for each scsi device. Called once per device during the bus scan.
+ *	Return non-zero if allocation fails.
+ *	Init memory once per id (not LUN).
+ */
+static int
+mptscsih_slave_alloc(struct scsi_device *device)
+{
+	struct Scsi_Host	*host = device->host;
+	MPT_SCSI_HOST		*hd = (MPT_SCSI_HOST *)host->hostdata;
+	VirtDevice		*vdev;
+	uint			target = device->id;
+
+	if (hd == NULL)
+		return -ENODEV;
+
+	if ((vdev = hd->Targets[target]) != NULL)
+		goto out;
+
+	vdev = kmalloc(sizeof(VirtDevice), GFP_KERNEL);
+	if (!vdev) {
+		printk(MYIOC_s_ERR_FMT "slave_alloc kmalloc(%zd) FAILED!\n",
+				hd->ioc->name, sizeof(VirtDevice));
+		return -ENOMEM;
+	}
+
+	memset(vdev, 0, sizeof(VirtDevice));
+	vdev->tflags = MPT_TARGET_FLAGS_Q_YES;
+	vdev->ioc_id = hd->ioc->id;
+	vdev->target_id = device->id;
+	vdev->bus_id = device->channel;
+	vdev->raidVolume = 0;
+	hd->Targets[device->id] = vdev;
+	if (hd->ioc->bus_type == SCSI) {
+		if (hd->ioc->spi_data.isRaid & (1 << device->id)) {
+			vdev->raidVolume = 1;
+			ddvtprintk((KERN_INFO
+			    "RAID Volume @ id %d\n", device->id));
+		}
+	} else {
+		vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
+	}
+
+ out:
+	vdev->num_luns++;
+	return 0;
+}
+
+static int mptscsih_is_raid_volume(MPT_SCSI_HOST *hd, uint id)
+{
+	int i;
+
+	if (!hd->ioc->spi_data.isRaid || !hd->ioc->spi_data.pIocPg3)
+		return 0;
+
+	for (i = 0; i < hd->ioc->spi_data.pIocPg3->NumPhysDisks; i++) {
+		if (id == hd->ioc->spi_data.pIocPg3->PhysDisk[i].PhysDiskID)
+			return 1;
+	}
+
+	return 0;
+}
+
+/*
+ *	OS entry point to allow the host driver to free allocated memory.
+ *	Called if no device is present or the device is being unloaded.
+ */
+static void
+mptscsih_slave_destroy(struct scsi_device *device)
+{
+	struct Scsi_Host	*host = device->host;
+	MPT_SCSI_HOST		*hd = (MPT_SCSI_HOST *)host->hostdata;
+	VirtDevice		*vdev;
+	uint			target = device->id;
+	uint			lun = device->lun;
+
+	if (hd == NULL)
+		return;
+
+	mptscsih_search_running_cmds(hd, target, lun);
+
+	vdev = hd->Targets[target];
+	vdev->luns[0] &= ~(1 << lun);
+	if (--vdev->num_luns)
+		return;
+
+	kfree(hd->Targets[target]);
+	hd->Targets[target] = NULL;
+	
+	if (hd->ioc->bus_type == SCSI) {
+		if (mptscsih_is_raid_volume(hd, target)) {
+			hd->ioc->spi_data.forceDv |= MPT_SCSICFG_RELOAD_IOC_PG3;
+		} else {
+			hd->ioc->spi_data.dvStatus[target] =
+				MPT_SCSICFG_NEGOTIATE;
+
+			if (!hd->negoNvram) {
+				hd->ioc->spi_data.dvStatus[target] |=
+					MPT_SCSICFG_DV_NOT_DONE;
+			}
+		}
+	}
+}
+
+static void
+mptscsih_set_queue_depth(struct scsi_device *device, MPT_SCSI_HOST *hd,
+	VirtDevice *pTarget, int qdepth)
+{
+	int	max_depth;
+	int	tagged;
+
+	if (hd->ioc->bus_type == SCSI) {
+		if (pTarget->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY) {
+			if (!(pTarget->tflags & MPT_TARGET_FLAGS_Q_YES))
+				max_depth = 1;
+			else if (((pTarget->inq_data[0] & 0x1f) == 0x00) &&
+			         (pTarget->minSyncFactor <= MPT_ULTRA160 ))
+				max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
+			else
+				max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
+		} else {
+			/* error case - No Inq. Data */
+			max_depth = 1;
+		}
+	} else
+		max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
+
+	if (qdepth > max_depth)
+		qdepth = max_depth;
+	if (qdepth == 1)
+		tagged = 0;
+	else
+		tagged = MSG_SIMPLE_TAG;
+
+	scsi_adjust_queue_depth(device, tagged, qdepth);
+}
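+/*
+ * Resulting policy: parallel SCSI disks with valid inquiry data, tagged
+ * queueing and Ultra160-or-faster sync get MPT_SCSI_CMD_PER_DEV_HIGH,
+ * slower disks get MPT_SCSI_CMD_PER_DEV_LOW, devices without inquiry data
+ * or without tagged queueing get a depth of 1, and non-SPI buses always
+ * get the HIGH depth.  A depth of 1 also disables simple tags.
+ */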
+
+
+/*
+ *	OS entry point to adjust the queue_depths on a per-device basis.
+ *	Called once per device during the bus scan. Use it to force the
+ *	queue_depth member to 1 if a device does not support Q tags.
+ *	Return non-zero on failure.
+ */
+static int
+mptscsih_slave_configure(struct scsi_device *device)
+{
+	struct Scsi_Host	*sh = device->host;
+	VirtDevice		*pTarget;
+	MPT_SCSI_HOST		*hd = (MPT_SCSI_HOST *)sh->hostdata;
+
+	if ((hd == NULL) || (hd->Targets == NULL)) {
+		return 0;
+	}
+
+	dsprintk((MYIOC_s_INFO_FMT
+		"device @ %p, id=%d, LUN=%d, channel=%d\n",
+		hd->ioc->name, device, device->id, device->lun, device->channel));
+	dsprintk((MYIOC_s_INFO_FMT
+		"sdtr %d wdtr %d ppr %d inq length=%d\n",
+		hd->ioc->name, device->sdtr, device->wdtr,
+		device->ppr, device->inquiry_len));
+
+	if (device->id > sh->max_id) {
+		/* error case, should never happen */
+		scsi_adjust_queue_depth(device, 0, 1);
+		goto slave_configure_exit;
+	}
+
+	pTarget = hd->Targets[device->id];
+
+	if (pTarget == NULL) {
+		/* Driver doesn't know about this device.
+		 * Kernel may generate a "Dummy Lun 0" which
+		 * may become a real Lun if a 
+		 * "scsi add-single-device" command is executed
+		 * while the driver is active (hot-plug a 
+		 * device).  LSI Raid controllers need 
+		 * queue_depth set to DEV_HIGH for this reason.
+		 */
+		scsi_adjust_queue_depth(device, MSG_SIMPLE_TAG,
+			MPT_SCSI_CMD_PER_DEV_HIGH);
+		goto slave_configure_exit;
+	}
+
+	mptscsih_initTarget(hd, device->channel, device->id, device->lun,
+		device->inquiry, device->inquiry_len );
+	mptscsih_set_queue_depth(device, hd, pTarget, MPT_SCSI_CMD_PER_DEV_HIGH);
+
+	dsprintk((MYIOC_s_INFO_FMT
+		"Queue depth=%d, tflags=%x\n",
+		hd->ioc->name, device->queue_depth, pTarget->tflags));
+
+	dsprintk((MYIOC_s_INFO_FMT
+		"negoFlags=%x, maxOffset=%x, SyncFactor=%x\n",
+		hd->ioc->name, pTarget->negoFlags, pTarget->maxOffset, pTarget->minSyncFactor));
+
+slave_configure_exit:
+
+	dsprintk((MYIOC_s_INFO_FMT
+		"tagged %d, simple %d, ordered %d\n",
+		hd->ioc->name,device->tagged_supported, device->simple_tags,
+		device->ordered_tags));
+
+	return 0;
+}
+
+static ssize_t
+mptscsih_store_queue_depth(struct device *dev, const char *buf, size_t count)
+{
+	int			 depth;
+	struct scsi_device	*sdev = to_scsi_device(dev);
+	MPT_SCSI_HOST		*hd = (MPT_SCSI_HOST *) sdev->host->hostdata;
+	VirtDevice		*pTarget;
+
+	depth = simple_strtoul(buf, NULL, 0);
+	if (depth == 0)
+		return -EINVAL;
+	pTarget = hd->Targets[sdev->id];
+	if (pTarget == NULL)
+		return -EINVAL;
+	mptscsih_set_queue_depth(sdev, (MPT_SCSI_HOST *) sdev->host->hostdata,
+		pTarget, depth);
+	return count;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Private routines...
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* Utility function to copy sense data from the scsi_cmnd buffer
+ * to the FC and SCSI target structures.
+ *
+ */
+static void
+copy_sense_data(struct scsi_cmnd *sc, MPT_SCSI_HOST *hd, MPT_FRAME_HDR *mf, SCSIIOReply_t *pScsiReply)
+{
+	VirtDevice	*target;
+	SCSIIORequest_t	*pReq;
+	u32		 sense_count = le32_to_cpu(pScsiReply->SenseCount);
+	int		 index;
+
+	/* Get target structure
+	 */
+	pReq = (SCSIIORequest_t *) mf;
+	index = (int) pReq->TargetID;
+	target = hd->Targets[index];
+
+	if (sense_count) {
+		u8 *sense_data;
+		int req_index;
+
+		/* Copy the sense received into the scsi command block. */
+		req_index = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+		sense_data = ((u8 *)hd->ioc->sense_buf_pool + (req_index * MPT_SENSE_BUFFER_ALLOC));
+		memcpy(sc->sense_buffer, sense_data, SNS_LEN(sc));
+
+		/* Log SMART data (asc = 0x5D, non-IM case only) if required.
+		 */
+		if ((hd->ioc->events) && (hd->ioc->eventTypes & (1 << MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE))) {
+			if ((sense_data[12] == 0x5D) && (target->raidVolume == 0)) {
+				int idx;
+				MPT_ADAPTER *ioc = hd->ioc;
+
+				idx = ioc->eventContext % ioc->eventLogSize;
+				ioc->events[idx].event = MPI_EVENT_SCSI_DEVICE_STATUS_CHANGE;
+				ioc->events[idx].eventContext = ioc->eventContext;
+
+				/* data[0] packs LUN, SMART reason code, bus and target id */
+				ioc->events[idx].data[0] = (pReq->LUN[1] << 24) |
+					(MPI_EVENT_SCSI_DEV_STAT_RC_SMART_DATA << 16) |
+					(pReq->Bus << 8) | pReq->TargetID;
+
+				ioc->events[idx].data[1] = (sense_data[13] << 8) | sense_data[12];
+
+				ioc->eventContext++;
+			}
+		}
+	} else {
+		dprintk((MYIOC_s_INFO_FMT "Hmmm... SenseData len=0! (?)\n",
+				hd->ioc->name));
+	}
+}
+
+static u32
+SCPNT_TO_LOOKUP_IDX(struct scsi_cmnd *sc)
+{
+	MPT_SCSI_HOST *hd;
+	int i;
+
+	hd = (MPT_SCSI_HOST *) sc->device->host->hostdata;
+
+	for (i = 0; i < hd->ioc->req_depth; i++) {
+		if (hd->ScsiLookup[i] == sc) {
+			return i;
+		}
+	}
+
+	return -1;
+}
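+/*
+ * Linear O(req_depth) search.  The miss value of -1 comes back as
+ * 0xffffffff in the u32 return and is assigned to a signed int by the
+ * callers, which is why their "< 0" checks still work.
+ */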
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptscsih_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
+{
+	MPT_SCSI_HOST	*hd;
+	unsigned long	 flags;
+
+	dtmprintk((KERN_WARNING MYNAM
+			": IOC %s_reset routed to SCSI host driver!\n",
+			reset_phase==MPT_IOC_SETUP_RESET ? "setup" : (
+			reset_phase==MPT_IOC_PRE_RESET ? "pre" : "post")));
+
+	/* If a FW reload request arrives after base installed but
+	 * before all scsi hosts have been attached, then an alt_ioc
+	 * may have a NULL sh pointer.
+	 */
+	if ((ioc->sh == NULL) || (ioc->sh->hostdata == NULL))
+		return 0;
+	else
+		hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+
+	if (reset_phase == MPT_IOC_SETUP_RESET) {
+		dtmprintk((MYIOC_s_WARN_FMT "Setup-Diag Reset\n", ioc->name));
+
+		/* Clean Up:
+		 * 1. Set Hard Reset Pending Flag
+		 * All new commands go to doneQ
+		 */
+		hd->resetPending = 1;
+
+	} else if (reset_phase == MPT_IOC_PRE_RESET) {
+		dtmprintk((MYIOC_s_WARN_FMT "Pre-Diag Reset\n", ioc->name));
+
+		/* 2. Flush running commands
+		 *	Clean ScsiLookup (and associated memory)
+		 *	AND clean mytaskQ
+		 */
+
+		/* 2b. Reply to OS all known outstanding I/O commands.
+		 */
+		mptscsih_flush_running_cmds(hd);
+
+		/* 2c. If there was an internal command that
+		 * has not completed, configuration or io request,
+		 * free these resources.
+		 */
+		if (hd->cmdPtr) {
+			del_timer(&hd->timer);
+			mpt_free_msg_frame(ioc, hd->cmdPtr);
+		}
+
+		dtmprintk((MYIOC_s_WARN_FMT "Pre-Reset complete.\n", ioc->name));
+
+	} else {
+		dtmprintk((MYIOC_s_WARN_FMT "Post-Diag Reset\n", ioc->name));
+
+		/* Once a FW reload begins, all new OS commands are
+		 * redirected to the doneQ w/ a reset status.
+		 * Init all control structures.
+		 */
+
+		/* ScsiLookup initialization
+		 */
+		{
+			int ii;
+			for (ii=0; ii < hd->ioc->req_depth; ii++)
+				hd->ScsiLookup[ii] = NULL;
+		}
+
+		/* 2. Chain Buffer initialization
+		 */
+
+		/* 4. Renegotiate to all devices, if SCSI
+		 */
+		if (ioc->bus_type == SCSI) {
+			dnegoprintk(("writeSDP1: ALL_IDS USE_NVRAM\n"));
+			mptscsih_writeSDP1(hd, 0, 0, MPT_SCSICFG_ALL_IDS | MPT_SCSICFG_USE_NVRAM);
+		}
+
+		/* 5. Enable new commands to be posted
+		 */
+		spin_lock_irqsave(&ioc->FreeQlock, flags);
+		hd->tmPending = 0;
+		spin_unlock_irqrestore(&ioc->FreeQlock, flags);
+		hd->resetPending = 0;
+		hd->tmState = TM_STATE_NONE;
+
+		/* 6. If there was an internal command,
+		 * wake this process up.
+		 */
+		if (hd->cmdPtr) {
+			/*
+			 * Wake up the original calling thread
+			 */
+			hd->pLocal = &hd->localReply;
+			hd->pLocal->completion = MPT_SCANDV_DID_RESET;
+			scandv_wait_done = 1;
+			wake_up(&scandv_waitq);
+			hd->cmdPtr = NULL;
+		}
+
+		/* 7. Set flag to force DV and re-read IOC Page 3
+		 */
+		if (ioc->bus_type == SCSI) {
+			ioc->spi_data.forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
+			ddvtprintk(("Set reload IOC Pg3 Flag\n"));
+		}
+
+		dtmprintk((MYIOC_s_WARN_FMT "Post-Reset complete.\n", ioc->name));
+
+	}
+
+	return 1;		/* currently means nothing really */
+}
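+/*
+ * The reset phases appear to arrive in order: SETUP (set resetPending so
+ * new commands are deferred), PRE (flush outstanding and internal
+ * requests), then POST (reinitialize lookup state, renegotiate on SPI,
+ * and re-enable command posting).
+ */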
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+static int
+mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply)
+{
+	MPT_SCSI_HOST *hd;
+	u8 event = le32_to_cpu(pEvReply->Event) & 0xFF;
+
+	devtprintk((MYIOC_s_INFO_FMT "MPT event (=%02Xh) routed to SCSI host driver!\n",
+			ioc->name, event));
+
+	switch (event) {
+	case MPI_EVENT_UNIT_ATTENTION:			/* 03 */
+		/* FIXME! */
+		break;
+	case MPI_EVENT_IOC_BUS_RESET:			/* 04 */
+	case MPI_EVENT_EXT_BUS_RESET:			/* 05 */
+		hd = NULL;
+		if (ioc->sh) {
+			hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+			if (hd && (ioc->bus_type == SCSI) && (hd->soft_resets < -1))
+				hd->soft_resets++;
+		}
+		break;
+	case MPI_EVENT_LOGOUT:				/* 09 */
+		/* FIXME! */
+		break;
+
+		/*
+		 *  CHECKME! Don't think we need to do
+		 *  anything for these, but...
+		 */
+	case MPI_EVENT_RESCAN:				/* 06 */
+	case MPI_EVENT_LINK_STATUS_CHANGE:		/* 07 */
+	case MPI_EVENT_LOOP_STATE_CHANGE:		/* 08 */
+		/*
+		 *  CHECKME!  Falling thru...
+		 */
+		break;
+
+	case MPI_EVENT_INTEGRATED_RAID:			/* 0B */
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+		/* negoNvram is set to 0 if DV is enabled and to USE_NVRAM
+		 * if DV is disabled. Need to check for target mode.
+		 */
+		hd = NULL;
+		if (ioc->sh)
+			hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+
+		if (hd && (ioc->bus_type == SCSI) && (hd->negoNvram == 0)) {
+			ScsiCfgData	*pSpi;
+			Ioc3PhysDisk_t	*pPDisk;
+			int		 numPDisk;
+			u8		 reason;
+			u8		 physDiskNum;
+
+			reason = (le32_to_cpu(pEvReply->Data[0]) & 0x00FF0000) >> 16;
+			if (reason == MPI_EVENT_RAID_RC_DOMAIN_VAL_NEEDED) {
+				/* New or replaced disk.
+				 * Set DV flag and schedule DV.
+				 */
+				pSpi = &ioc->spi_data;
+				physDiskNum = (le32_to_cpu(pEvReply->Data[0]) & 0xFF000000) >> 24;
+				ddvtprintk(("DV requested for phys disk id %d\n", physDiskNum));
+				if (pSpi->pIocPg3) {
+					pPDisk =  pSpi->pIocPg3->PhysDisk;
+					numPDisk =pSpi->pIocPg3->NumPhysDisks;
+
+					while (numPDisk) {
+						if (physDiskNum == pPDisk->PhysDiskNum) {
+							pSpi->dvStatus[pPDisk->PhysDiskID] = (MPT_SCSICFG_NEED_DV | MPT_SCSICFG_DV_NOT_DONE);
+							pSpi->forceDv = MPT_SCSICFG_NEED_DV;
+							ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
+							break;
+						}
+						pPDisk++;
+						numPDisk--;
+					}
+
+					if (numPDisk == 0) {
+						/* The physical disk that needs DV was not found
+						 * in the stored IOC Page 3. The driver must reload
+						 * this page. DV routine will set the NEED_DV flag for
+						 * all phys disks that have DV_NOT_DONE set.
+						 */
+						pSpi->forceDv = MPT_SCSICFG_NEED_DV | MPT_SCSICFG_RELOAD_IOC_PG3;
+						ddvtprintk(("phys disk %d not found. Setting reload IOC Pg3 Flag\n", physDiskNum));
+					}
+				}
+			}
+		}
+#endif
+
+#if defined(MPT_DEBUG_DV) || defined(MPT_DEBUG_DV_TINY)
+		printk("Raid Event RF: ");
+		{
+			u32 *m = (u32 *)pEvReply;
+			int ii;
+			int n = (int)pEvReply->MsgLength;
+			for (ii=6; ii < n; ii++)
+				printk(" %08x", le32_to_cpu(m[ii]));
+			printk("\n");
+		}
+#endif
+		break;
+
+	case MPI_EVENT_NONE:				/* 00 */
+	case MPI_EVENT_LOG_DATA:			/* 01 */
+	case MPI_EVENT_STATE_CHANGE:			/* 02 */
+	case MPI_EVENT_EVENT_CHANGE:			/* 0A */
+	default:
+		dprintk((KERN_INFO "  Ignoring event (=%02Xh)\n", event));
+		break;
+	}
+
+	return 1;		/* currently means nothing really */
+}
+
+static struct device_attribute mptscsih_queue_depth_attr = {
+	.attr = {
+		.name = 	"queue_depth",
+		.mode =		S_IWUSR,
+	},
+	.store = mptscsih_store_queue_depth,
+};
+
+static struct device_attribute *mptscsih_dev_attrs[] = {
+	&mptscsih_queue_depth_attr,
+	NULL,
+};
+
+static struct scsi_host_template driver_template = {
+	.proc_name			= "mptscsih",
+	.proc_info			= mptscsih_proc_info,
+	.name				= "MPT SCSI Host",
+	.info				= mptscsih_info,
+	.queuecommand			= mptscsih_qcmd,
+	.slave_alloc			= mptscsih_slave_alloc,
+	.slave_configure		= mptscsih_slave_configure,
+	.slave_destroy			= mptscsih_slave_destroy,
+	.eh_abort_handler		= mptscsih_abort,
+	.eh_device_reset_handler	= mptscsih_dev_reset,
+	.eh_bus_reset_handler		= mptscsih_bus_reset,
+	.eh_host_reset_handler		= mptscsih_host_reset,
+	.bios_param			= mptscsih_bios_param,
+	.can_queue			= MPT_SCSI_CAN_QUEUE,
+	.this_id			= -1,
+	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
+	.max_sectors			= 8192,
+	.cmd_per_lun			= 7,
+	.use_clustering			= ENABLE_CLUSTERING,
+	.sdev_attrs			= mptscsih_dev_attrs,
+};
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_initTarget - Target, LUN alloc/free functionality.
+ *	@hd: Pointer to MPT_SCSI_HOST structure
+ *	@bus_id: Bus number (?)
+ *	@target_id: SCSI target id
+ *	@lun: SCSI LUN id
+ *	@data: Pointer to data
+ *	@dlen: Number of INQUIRY bytes
+ *
+ *	NOTE: It's only SAFE to call this routine if data points to
+ *	sane & valid STANDARD INQUIRY data!
+ *
+ *	Allocate and initialize memory for this target.
+ *	Save inquiry data.
+ *
+ */
+static void
+mptscsih_initTarget(MPT_SCSI_HOST *hd, int bus_id, int target_id, u8 lun, char *data, int dlen)
+{
+	int		indexed_lun, lun_index;
+	VirtDevice	*vdev;
+	ScsiCfgData	*pSpi;
+	char		data_56;
+
+	dinitprintk((MYIOC_s_INFO_FMT "initTarget bus=%d id=%d lun=%d hd=%p\n",
+			hd->ioc->name, bus_id, target_id, lun, hd));
+
+	/*
+	 * If the peripheral qualifier filter is enabled, a target that reports 0x1
+	 * (i.e. the target is capable of supporting the specified peripheral device type
+	 * on this logical unit; however, the physical device is not currently connected
+	 * to this logical unit) is converted to 0x3 (i.e. the target is not
+	 * capable of supporting a physical device on this logical unit). This works
+	 * around a bug in the mid-layer of some distributions, in which the mid-layer
+	 * keeps trying to communicate with the LUN and eventually creates a dummy LUN.
+	 */
+	if (mpt_pq_filter && dlen && (data[0] & 0xE0))
+		data[0] |= 0x40;
+	
+	/* Is LUN supported? If so, upper 2 bits will be 0
+	* in first byte of inquiry data.
+	*/
+	if (data[0] & 0xe0)
+		return;
+
+	if ((vdev = hd->Targets[target_id]) == NULL) {
+		return;
+	}
+
+	lun_index = (lun >> 5);  /* 32 luns per lun_index */
+	indexed_lun = (lun % 32);
+	vdev->luns[lun_index] |= (1 << indexed_lun);
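+	/* luns[] is a bitmap holding 32 LUNs per 32-bit word: for example,
+	 * LUN 37 maps to lun_index 1, bit 5 (37 >> 5 == 1, 37 % 32 == 5).
+	 */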
+
+	if (hd->ioc->bus_type == SCSI) {
+		if ((data[0] == TYPE_PROCESSOR) && (hd->ioc->spi_data.Saf_Te)) {
+			/* Treat all Processors as SAF-TE if
+			 * command line option is set */
+			vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+			mptscsih_writeIOCPage4(hd, target_id, bus_id);
+		}else if ((data[0] == TYPE_PROCESSOR) &&
+			!(vdev->tflags & MPT_TARGET_FLAGS_SAF_TE_ISSUED )) {
+			if ( dlen > 49 ) {
+				vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
+				if ( data[44] == 'S' &&
+				     data[45] == 'A' &&
+				     data[46] == 'F' &&
+				     data[47] == '-' &&
+				     data[48] == 'T' &&
+				     data[49] == 'E' ) {
+					vdev->tflags |= MPT_TARGET_FLAGS_SAF_TE_ISSUED;
+					mptscsih_writeIOCPage4(hd, target_id, bus_id);
+				}
+			}
+		}
+		if (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_INQUIRY)) {
+			if ( dlen > 8 ) {
+				memcpy (vdev->inq_data, data, 8);
+			} else {
+				memcpy (vdev->inq_data, data, dlen);
+			}
+
+			/* If have not done DV, set the DV flag.
+			 */
+			pSpi = &hd->ioc->spi_data;
+			if ((data[0] == TYPE_TAPE) || (data[0] == TYPE_PROCESSOR)) {
+				if (pSpi->dvStatus[target_id] & MPT_SCSICFG_DV_NOT_DONE)
+					pSpi->dvStatus[target_id] |= MPT_SCSICFG_NEED_DV;
+			}
+
+			vdev->tflags |= MPT_TARGET_FLAGS_VALID_INQUIRY;
+
+
+			data_56 = 0x0F;  /* Default to full capabilities if Inq data length is < 57 */
+			if (dlen > 56) {
+				if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
+				/* Update the target capabilities
+				 */
+					data_56 = data[56];
+					vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
+				}
+			}
+			mptscsih_setTargetNegoParms(hd, vdev, data_56);
+		} else {
+			/* Initial Inquiry may not request enough data bytes to
+			 * obtain byte 57.  DV will; if target doesn't return
+			 * at least 57 bytes, data[56] will be zero. */
+			if (dlen > 56) {
+				if ( (!(vdev->tflags & MPT_TARGET_FLAGS_VALID_56))) {
+				/* Update the target capabilities
+				 */
+					data_56 = data[56];
+					vdev->tflags |= MPT_TARGET_FLAGS_VALID_56;
+					mptscsih_setTargetNegoParms(hd, vdev, data_56);
+				}
+			}
+		}
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Update the target negotiation parameters based on
+ *  the Inquiry data, adapter capabilities, and NVRAM settings.
+ *
+ */
+static void
+mptscsih_setTargetNegoParms(MPT_SCSI_HOST *hd, VirtDevice *target, char byte56)
+{
+	ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+	int  id = (int) target->target_id;
+	int  nvram;
+	VirtDevice	*vdev;
+	int ii;
+	u8 width = MPT_NARROW;
+	u8 factor = MPT_ASYNC;
+	u8 offset = 0;
+	u8 version, nfactor;
+	u8 noQas = 1;
+
+	target->negoFlags = pspi_data->noQas;
+
+	/* noQas == 0 => device supports QAS. Need byte 56 of Inq to determine
+	 * support. If available, default QAS to off and allow enabling.
+	 * If not available, default QAS to on, turn off for non-disks.
+	 */
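+	/* For reference (per the SPI INQUIRY definition, as interpreted by the
+	 * checks below): byte 56 bit 0 = IUS, bit 1 = QAS, bits 3:2 = CLOCKING,
+	 * where 00b means ST-only (no DT transfers).
+	 */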
+
+	/* Set flags based on Inquiry data
+	 */
+	version = target->inq_data[2] & 0x07;
+	if (version < 2) {
+		width = 0;
+		factor = MPT_ULTRA2;
+		offset = pspi_data->maxSyncOffset;
+		target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
+	} else {
+		if (target->inq_data[7] & 0x20) {
+			width = 1;
+		}
+
+		if (target->inq_data[7] & 0x10) {
+			factor = pspi_data->minSyncFactor;
+			if (target->tflags & MPT_TARGET_FLAGS_VALID_56) {
+				/* bits 2 & 3 show Clocking support */
+				if ((byte56 & 0x0C) == 0)
+					factor = MPT_ULTRA2;
+				else {
+					if ((byte56 & 0x03) == 0)
+						factor = MPT_ULTRA160;
+					else {
+						factor = MPT_ULTRA320;
+						if (byte56 & 0x02) {
+							ddvtprintk((KERN_INFO "Enabling QAS due to byte56=%02x on id=%d!\n", byte56, id));
+							noQas = 0;
+						}
+						if (target->inq_data[0] == TYPE_TAPE) {
+							if (byte56 & 0x01)
+								target->negoFlags |= MPT_TAPE_NEGO_IDP;
+						}
+					}
+				}
+			} else {
+				ddvtprintk((KERN_INFO "Enabling QAS on id=%d due to ~TARGET_FLAGS_VALID_56!\n", id));
+				noQas = 0;
+			}
+				
+			offset = pspi_data->maxSyncOffset;
+
+			/* If RAID, never disable QAS
+			 * else if non RAID, do not disable
+			 *   QAS if bit 1 is set
+			 * bit 1 QAS support, non-raid only
+			 * bit 0 IU support
+			 */
+			if (target->raidVolume == 1) {
+				noQas = 0;
+			}
+		} else {
+			factor = MPT_ASYNC;
+			offset = 0;
+		}
+	}
+
+	if ( (target->inq_data[7] & 0x02) == 0) {
+		target->tflags &= ~MPT_TARGET_FLAGS_Q_YES;
+	}
+
+	/* Update tflags based on NVRAM settings. (SCSI only)
+	 */
+	if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+		nvram = pspi_data->nvram[id];
+		nfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
+
+		if (width)
+			width = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
+
+		if (offset > 0) {
+			/* Ensure factor is set to the
+			 * maximum of: adapter, nvram, inquiry
+			 */
+			if (nfactor) {
+				if (nfactor < pspi_data->minSyncFactor )
+					nfactor = pspi_data->minSyncFactor;
+
+				factor = max(factor, nfactor);
+				if (factor == MPT_ASYNC)
+					offset = 0;
+			} else {
+				offset = 0;
+				factor = MPT_ASYNC;
+			}
+		} else {
+			factor = MPT_ASYNC;
+		}
+	}
+
+	/* Make sure data is consistent
+	 */
+	if ((!width) && (factor < MPT_ULTRA2)) {
+		factor = MPT_ULTRA2;
+	}
+
+	/* Save the data to the target structure.
+	 */
+	target->minSyncFactor = factor;
+	target->maxOffset = offset;
+	target->maxWidth = width;
+
+	target->tflags |= MPT_TARGET_FLAGS_VALID_NEGO;
+
+	/* Disable unused features.
+	 */
+	if (!width)
+		target->negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
+
+	if (!offset)
+		target->negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
+
+	if ( factor > MPT_ULTRA320 )
+		noQas = 0;
+
+	/* GEM, processor WORKAROUND
+	 */
+	if ((target->inq_data[0] == TYPE_PROCESSOR) || (target->inq_data[0] > 0x08)) {
+		target->negoFlags |= (MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC);
+		pspi_data->dvStatus[id] |= MPT_SCSICFG_BLK_NEGO;
+	} else {
+		if (noQas && (pspi_data->noQas == 0)) {
+			pspi_data->noQas |= MPT_TARGET_NO_NEGO_QAS;
+			target->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+
+			/* Disable QAS in a mixed configuration case
+			 */
+
+			ddvtprintk((KERN_INFO "Disabling QAS due to noQas=%02x on id=%d!\n", noQas, id));
+			for (ii = 0; ii < id; ii++) {
+				if ( (vdev = hd->Targets[ii]) ) {
+					vdev->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+					mptscsih_writeSDP1(hd, 0, ii, vdev->negoFlags);
+				}	
+			}
+		}
+	}
+
+	/* Write SDP1 on this I/O to this target */
+	if (pspi_data->dvStatus[id] & MPT_SCSICFG_NEGOTIATE) {
+		ddvtprintk((KERN_INFO "MPT_SCSICFG_NEGOTIATE on id=%d!\n", id));
+		mptscsih_writeSDP1(hd, 0, id, hd->negoNvram);
+		pspi_data->dvStatus[id] &= ~MPT_SCSICFG_NEGOTIATE;
+	} else if (pspi_data->dvStatus[id] & MPT_SCSICFG_BLK_NEGO) {
+		ddvtprintk((KERN_INFO "MPT_SCSICFG_BLK_NEGO on id=%d!\n", id));
+		mptscsih_writeSDP1(hd, 0, id, MPT_SCSICFG_BLK_NEGO);
+		pspi_data->dvStatus[id] &= ~MPT_SCSICFG_BLK_NEGO;
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/* If DV disabled (negoNvram set to USE_NVRAM) or if not LUN 0, return.
+ * Else set the NEED_DV flag after Read Capacity Issued (disks)
+ * or Mode Sense (cdroms).
+ *
+ * For tapes, initTarget will set this flag on completion of the Inquiry command.
+ * Called only if DV_NOT_DONE flag is set
+ */
+static void mptscsih_set_dvflags(MPT_SCSI_HOST *hd, SCSIIORequest_t *pReq)
+{
+	u8 cmd;
+	ScsiCfgData *pSpi;
+
+	ddvtprintk((" set_dvflags: id=%d lun=%d negoNvram=%x cmd=%x\n", 
+		pReq->TargetID, pReq->LUN[1], hd->negoNvram, pReq->CDB[0]));
+	
+	if ((pReq->LUN[1] != 0) || (hd->negoNvram != 0))
+		return;
+
+	cmd = pReq->CDB[0];
+
+	if ((cmd == READ_CAPACITY) || (cmd == MODE_SENSE)) {
+		pSpi = &hd->ioc->spi_data;
+		if ((pSpi->isRaid & (1 << pReq->TargetID)) && pSpi->pIocPg3) {
+			/* Set NEED_DV for all hidden disks
+			 */
+			Ioc3PhysDisk_t *pPDisk =  pSpi->pIocPg3->PhysDisk;
+			int		numPDisk = pSpi->pIocPg3->NumPhysDisks;
+
+			while (numPDisk) {
+				pSpi->dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
+				ddvtprintk(("NEED_DV set for phys disk id %d\n", pPDisk->PhysDiskID));
+				pPDisk++;
+				numPDisk--;
+			}
+		}
+		pSpi->dvStatus[pReq->TargetID] |= MPT_SCSICFG_NEED_DV;
+		ddvtprintk(("NEED_DV set for visible disk id %d\n", pReq->TargetID));
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ * If no Target, bus reset on 1st I/O. Set the flag to
+ * prevent any future negotiations to this device.
+ */
+static void mptscsih_no_negotiate(MPT_SCSI_HOST *hd, int target_id)
+{
+
+	if ((hd->Targets) && (hd->Targets[target_id] == NULL))
+		hd->ioc->spi_data.dvStatus[target_id] |= MPT_SCSICFG_BLK_NEGO;
+
+	return;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  SCSI Config Page functionality ...
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_setDevicePage1Flags  - set the Requested and Configuration field flags
+ *	based on width, factor and offset parameters.
+ *	@width: bus width
+ *	@factor: sync factor
+ *	@offset: sync offset
+ *	@requestedPtr: pointer to requested values (updated)
+ *	@configurationPtr: pointer to configuration values (updated)
+ *	@flags: flags to block WDTR or SDTR negotiation
+ *
+ *	Return: None.
+ *
+ *	Remark: Called by writeSDP1 and _dv_params
+ */
+static void
+mptscsih_setDevicePage1Flags (u8 width, u8 factor, u8 offset, int *requestedPtr, int *configurationPtr, u8 flags)
+{
+	u8 nowide = flags & MPT_TARGET_NO_NEGO_WIDE;
+	u8 nosync = flags & MPT_TARGET_NO_NEGO_SYNC;
+
+	*configurationPtr = 0;
+	*requestedPtr = width ? MPI_SCSIDEVPAGE1_RP_WIDE : 0;
+	*requestedPtr |= (offset << 16) | (factor << 8);
+
+	if (width && offset && !nowide && !nosync) {
+		if (factor < MPT_ULTRA160) {
+			*requestedPtr |= (MPI_SCSIDEVPAGE1_RP_IU + MPI_SCSIDEVPAGE1_RP_DT);
+			if ((flags & MPT_TARGET_NO_NEGO_QAS) == 0)
+				*requestedPtr |= MPI_SCSIDEVPAGE1_RP_QAS;
+			if (flags & MPT_TAPE_NEGO_IDP)
+				*requestedPtr |= 0x08000000;
+		} else if (factor < MPT_ULTRA2) {
+			*requestedPtr |= MPI_SCSIDEVPAGE1_RP_DT;
+		}
+	}
+
+	if (nowide)
+		*configurationPtr |= MPI_SCSIDEVPAGE1_CONF_WDTR_DISALLOWED;
+
+	if (nosync)
+		*configurationPtr |= MPI_SCSIDEVPAGE1_CONF_SDTR_DISALLOWED;
+
+	return;
+}
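+/*
+ * Example encoding from the routine above: with width set, a non-zero
+ * offset and a sync factor numerically below MPT_ULTRA160 (Ultra320
+ * range), requested becomes RP_WIDE | (offset << 16) | (factor << 8) |
+ * RP_IU | RP_DT, plus RP_QAS when QAS is not blocked; configuration stays
+ * 0 unless WDTR or SDTR negotiation is being disallowed.
+ */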
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_writeSDP1  - write SCSI Device Page 1
+ *	@hd: Pointer to a SCSI Host Structure
+ *	@portnum: IOC port number
+ *	@target_id: writeSDP1 for single ID
+ *	@flags: MPT_SCSICFG_ALL_IDS, MPT_SCSICFG_USE_NVRAM, MPT_SCSICFG_BLK_NEGO
+ *
+ *	Return: -EFAULT if read of config page header fails
+ *		or 0 if success.
+ *
+ *	Remark: If a target has been found, the settings from the
+ *		target structure are used, else the device is set
+ *		to async/narrow.
+ *
+ *	Remark: Called during init and after a FW reload.
+ *	Remark: We do not wait for a return, write pages sequentially.
+ */
+static int
+mptscsih_writeSDP1(MPT_SCSI_HOST *hd, int portnum, int target_id, int flags)
+{
+	MPT_ADAPTER		*ioc = hd->ioc;
+	Config_t		*pReq;
+	SCSIDevicePage1_t	*pData;
+	VirtDevice		*pTarget;
+	MPT_FRAME_HDR		*mf;
+	dma_addr_t		 dataDma;
+	u16			 req_idx;
+	u32			 frameOffset;
+	u32			 requested, configuration, flagsLength;
+	int			 ii, nvram;
+	int			 id = 0, maxid = 0;
+	u8			 width;
+	u8			 factor;
+	u8			 offset;
+	u8			 bus = 0;
+	u8			 negoFlags;
+	u8			 maxwidth, maxoffset, maxfactor;
+
+	if (ioc->spi_data.sdp1length == 0)
+		return 0;
+
+	if (flags & MPT_SCSICFG_ALL_IDS) {
+		id = 0;
+		maxid = ioc->sh->max_id - 1;
+	} else if (ioc->sh) {
+		id = target_id;
+		maxid = min_t(int, id, ioc->sh->max_id - 1);
+	}
+
+	for (; id <= maxid; id++) {
+
+		if (id == ioc->pfacts[portnum].PortSCSIID)
+			continue;
+
+		/* Use NVRAM to get adapter and target maximums
+		 * Data is overridden by target structure information, if present
+		 */
+		maxwidth = ioc->spi_data.maxBusWidth;
+		maxoffset = ioc->spi_data.maxSyncOffset;
+		maxfactor = ioc->spi_data.minSyncFactor;
+		if (ioc->spi_data.nvram && (ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+			nvram = ioc->spi_data.nvram[id];
+
+			if (maxwidth)
+				maxwidth = nvram & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
+
+			if (maxoffset > 0) {
+				maxfactor = (nvram & MPT_NVRAM_SYNC_MASK) >> 8;
+				if (maxfactor == 0) {
+					/* Key for async */
+					maxfactor = MPT_ASYNC;
+					maxoffset = 0;
+				} else if (maxfactor < ioc->spi_data.minSyncFactor) {
+					maxfactor = ioc->spi_data.minSyncFactor;
+				}
+			} else
+				maxfactor = MPT_ASYNC;
+		}
+
+		/* Set the negotiation flags.
+		 */
+		negoFlags = ioc->spi_data.noQas;
+		if (!maxwidth)
+			negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
+
+		if (!maxoffset)
+			negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
+
+		if (flags & MPT_SCSICFG_USE_NVRAM) {
+			width = maxwidth;
+			factor = maxfactor;
+			offset = maxoffset;
+		} else {
+			width = 0;
+			factor = MPT_ASYNC;
+			offset = 0;
+			//negoFlags = 0;
+			//negoFlags = MPT_TARGET_NO_NEGO_SYNC;
+		}
+
+		/* If id is not a raid volume, get the updated
+		 * transmission settings from the target structure.
+		 */
+		if (hd->Targets && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
+			width = pTarget->maxWidth;
+			factor = pTarget->minSyncFactor;
+			offset = pTarget->maxOffset;
+			negoFlags = pTarget->negoFlags;
+		}
+		
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+		/* Force to async and narrow if DV has not been executed
+		 * for this ID
+		 */
+		if ((hd->ioc->spi_data.dvStatus[id] & MPT_SCSICFG_DV_NOT_DONE) != 0) {
+			width = 0;
+			factor = MPT_ASYNC;
+			offset = 0;
+		}
+#endif
+
+		if (flags & MPT_SCSICFG_BLK_NEGO)
+			negoFlags = MPT_TARGET_NO_NEGO_WIDE | MPT_TARGET_NO_NEGO_SYNC;
+
+		mptscsih_setDevicePage1Flags(width, factor, offset,
+					&requested, &configuration, negoFlags);
+		dnegoprintk(("writeSDP1: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
+			id, width, factor, offset, negoFlags, requested, configuration));
+
+		/* Get a MF for this command.
+		 */
+		if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc)) == NULL) {
+			dprintk((MYIOC_s_WARN_FMT "write SDP1: no msg frames!\n",
+						ioc->name));
+			return -EAGAIN;
+		}
+
+		ddvprintk((MYIOC_s_INFO_FMT "WriteSDP1 (mf=%p, id=%d, req=0x%x, cfg=0x%x)\n",
+			hd->ioc->name, mf, id, requested, configuration));
+
+
+		/* Set the request and the data pointers.
+		 * Request takes: 36 bytes (32 bit SGE)
+		 * SCSI Device Page 1 requires 16 bytes
+		 * 40 + 16 <= size of SCSI IO Request = 56 bytes
+		 * and MF size >= 64 bytes.
+		 * Place data at end of MF.
+		 */
+		pReq = (Config_t *)mf;
+
+		req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+		frameOffset = ioc->req_sz - sizeof(SCSIDevicePage1_t);
+
+		pData = (SCSIDevicePage1_t *)((u8 *) mf + frameOffset);
+		dataDma = ioc->req_frames_dma + (req_idx * ioc->req_sz) + frameOffset;
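+		/* The page data is carved out of the tail of the same request
+		 * frame, so no separate DMA buffer is needed: dataDma is the
+		 * frame's DMA address plus the same frameOffset used for pData.
+		 */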
+
+		/* Complete the request frame (same for all requests).
+		 */
+		pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+		pReq->Reserved = 0;
+		pReq->ChainOffset = 0;
+		pReq->Function = MPI_FUNCTION_CONFIG;
+		pReq->ExtPageLength = 0;
+		pReq->ExtPageType = 0;
+		pReq->MsgFlags = 0;
+		for (ii=0; ii < 8; ii++) {
+			pReq->Reserved2[ii] = 0;
+		}
+		pReq->Header.PageVersion = ioc->spi_data.sdp1version;
+		pReq->Header.PageLength = ioc->spi_data.sdp1length;
+		pReq->Header.PageNumber = 1;
+		pReq->Header.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+		pReq->PageAddress = cpu_to_le32(id | (bus << 8 ));
+
+		/* Add a SGE to the config request.
+		 */
+		flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE | ioc->spi_data.sdp1length * 4;
+
+		mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+
+		/* Set up the common data portion
+		 */
+		pData->Header.PageVersion = pReq->Header.PageVersion;
+		pData->Header.PageLength = pReq->Header.PageLength;
+		pData->Header.PageNumber = pReq->Header.PageNumber;
+		pData->Header.PageType = pReq->Header.PageType;
+		pData->RequestedParameters = cpu_to_le32(requested);
+		pData->Reserved = 0;
+		pData->Configuration = cpu_to_le32(configuration);
+
+		dprintk((MYIOC_s_INFO_FMT
+			"write SDP1: id %d pgaddr 0x%x req 0x%x config 0x%x\n",
+				ioc->name, id, (id | (bus<<8)),
+				requested, configuration));
+
+		mpt_put_msg_frame(ScsiDoneCtx, ioc, mf);
+	}
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_writeIOCPage4  - write IOC Page 4
+ *	@hd: Pointer to a SCSI Host Structure
+ *	@target_id: write IOC Page4 for this ID & Bus
+ *
+ *	Return: -EAGAIN if unable to obtain a Message Frame
+ *		or 0 if success.
+ *
+ *	Remark: We do not wait for a return, write pages sequentially.
+ */
+static int
+mptscsih_writeIOCPage4(MPT_SCSI_HOST *hd, int target_id, int bus)
+{
+	MPT_ADAPTER		*ioc = hd->ioc;
+	Config_t		*pReq;
+	IOCPage4_t		*IOCPage4Ptr;
+	MPT_FRAME_HDR		*mf;
+	dma_addr_t		 dataDma;
+	u16			 req_idx;
+	u32			 frameOffset;
+	u32			 flagsLength;
+	int			 ii;
+
+	/* Get a MF for this command.
+	 */
+	if ((mf = mpt_get_msg_frame(ScsiDoneCtx, ioc)) == NULL) {
+		dprintk((MYIOC_s_WARN_FMT "writeIOCPage4 : no msg frames!\n",
+					ioc->name));
+		return -EAGAIN;
+	}
+
+	/* Set the request and the data pointers.
+	 * Place data at end of MF.
+	 */
+	pReq = (Config_t *)mf;
+
+	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+	frameOffset = ioc->req_sz - sizeof(IOCPage4_t);
+
+	/* Complete the request frame (same for all requests).
+	 */
+	pReq->Action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	pReq->Reserved = 0;
+	pReq->ChainOffset = 0;
+	pReq->Function = MPI_FUNCTION_CONFIG;
+	pReq->ExtPageLength = 0;
+	pReq->ExtPageType = 0;
+	pReq->MsgFlags = 0;
+	for (ii=0; ii < 8; ii++) {
+		pReq->Reserved2[ii] = 0;
+	}
+
+	IOCPage4Ptr = ioc->spi_data.pIocPg4;
+	dataDma = ioc->spi_data.IocPg4_dma;
+	ii = IOCPage4Ptr->ActiveSEP++;
+	IOCPage4Ptr->SEP[ii].SEPTargetID = target_id;
+	IOCPage4Ptr->SEP[ii].SEPBus = bus;
+	pReq->Header = IOCPage4Ptr->Header;
+	pReq->PageAddress = cpu_to_le32(target_id | (bus << 8 ));
+
+	/* Add a SGE to the config request.
+	 */
+	flagsLength = MPT_SGE_FLAGS_SSIMPLE_WRITE |
+		(IOCPage4Ptr->Header.PageLength + ii) * 4;
+
+	mpt_add_sge((char *)&pReq->PageBufferSGE, flagsLength, dataDma);
+
+	dinitprintk((MYIOC_s_INFO_FMT
+		"writeIOCPage4: MaxSEP=%d ActiveSEP=%d id=%d bus=%d\n",
+			ioc->name, IOCPage4Ptr->MaxSEP, IOCPage4Ptr->ActiveSEP, target_id, bus));
+
+	mpt_put_msg_frame(ScsiDoneCtx, ioc, mf);
+
+	return 0;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *  Bus Scan and Domain Validation functionality ...
+ */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	mptscsih_scandv_complete - Scan and DV callback routine registered
+ *	to Fusion MPT (base) driver.
+ *
+ *	@ioc: Pointer to MPT_ADAPTER structure
+ *	@mf: Pointer to original MPT request frame
+ *	@mr: Pointer to MPT reply frame (NULL if TurboReply)
+ *
+ *	This routine is called from mpt.c::mpt_interrupt() at the completion
+ *	of any SCSI IO request.
+ *	This routine is registered with the Fusion MPT (base) driver at driver
+ *	load/init time via the mpt_register() API call.
+ *
+ *	Returns 1 indicating alloc'd request frame ptr should be freed.
+ *
+ *	Remark: Sets a completion code and (possibly) saves sense data
+ *	in the IOC member localReply structure.
+ *	Used ONLY for DV and other internal commands.
+ */
+static int
+mptscsih_scandv_complete(MPT_ADAPTER *ioc, MPT_FRAME_HDR *mf, MPT_FRAME_HDR *mr)
+{
+	MPT_SCSI_HOST	*hd;
+	SCSIIORequest_t *pReq;
+	int		 completionCode;
+	u16		 req_idx;
+
+	if ((mf == NULL) ||
+	    (mf >= MPT_INDEX_2_MFPTR(ioc, ioc->req_depth))) {
+		printk(MYIOC_s_ERR_FMT
+			"ScanDvComplete, %s req frame ptr! (=%p)\n",
+				ioc->name, mf?"BAD":"NULL", (void *) mf);
+		goto wakeup;
+	}
+
+	hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+	del_timer(&hd->timer);
+	req_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+	hd->ScsiLookup[req_idx] = NULL;
+	pReq = (SCSIIORequest_t *) mf;
+
+	if (mf != hd->cmdPtr) {
+		printk(MYIOC_s_WARN_FMT "ScanDvComplete (mf=%p, cmdPtr=%p, idx=%d)\n",
+				hd->ioc->name, (void *)mf, (void *) hd->cmdPtr, req_idx);
+	}
+	hd->cmdPtr = NULL;
+
+	ddvprintk((MYIOC_s_INFO_FMT "ScanDvComplete (mf=%p,mr=%p,idx=%d)\n",
+			hd->ioc->name, mf, mr, req_idx));
+
+	hd->pLocal = &hd->localReply;
+	hd->pLocal->scsiStatus = 0;
+
+	/* A NULL reply frame (context/turbo reply) means the command
+	 * completed with good status; otherwise decode the full reply.
+	 */
+	if (mr == NULL) {
+		completionCode = MPT_SCANDV_GOOD;
+	} else {
+		SCSIIOReply_t	*pReply;
+		u16		 status;
+		u8		 scsi_status;
+
+		pReply = (SCSIIOReply_t *) mr;
+
+		status = le16_to_cpu(pReply->IOCStatus) & MPI_IOCSTATUS_MASK;
+		scsi_status = pReply->SCSIStatus;
+
+		ddvtprintk((KERN_NOTICE "  IOCStatus=%04xh, SCSIState=%02xh, SCSIStatus=%02xh, IOCLogInfo=%08xh\n",
+			     status, pReply->SCSIState, scsi_status,
+			     le32_to_cpu(pReply->IOCLogInfo)));
+
+		switch(status) {
+
+		case MPI_IOCSTATUS_SCSI_DEVICE_NOT_THERE:	/* 0x0043 */
+			completionCode = MPT_SCANDV_SELECTION_TIMEOUT;
+			break;
+
+		case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
+		case MPI_IOCSTATUS_SCSI_TASK_TERMINATED:	/* 0x0048 */
+		case MPI_IOCSTATUS_SCSI_IOC_TERMINATED:		/* 0x004B */
+		case MPI_IOCSTATUS_SCSI_EXT_TERMINATED:		/* 0x004C */
+			completionCode = MPT_SCANDV_DID_RESET;
+			break;
+
+		case MPI_IOCSTATUS_SCSI_DATA_UNDERRUN:		/* 0x0045 */
+		case MPI_IOCSTATUS_SCSI_RECOVERED_ERROR:	/* 0x0040 */
+		case MPI_IOCSTATUS_SUCCESS:			/* 0x0000 */
+			if (pReply->Function == MPI_FUNCTION_CONFIG) {
+				ConfigReply_t *pr = (ConfigReply_t *)mr;
+				completionCode = MPT_SCANDV_GOOD;
+				hd->pLocal->header.PageVersion = pr->Header.PageVersion;
+				hd->pLocal->header.PageLength = pr->Header.PageLength;
+				hd->pLocal->header.PageNumber = pr->Header.PageNumber;
+				hd->pLocal->header.PageType = pr->Header.PageType;
+
+			} else if (pReply->Function == MPI_FUNCTION_RAID_ACTION) {
+				/* If the RAID Volume request is successful,
+				 * return GOOD, else indicate that
+				 * some type of error occurred.
+				 */
+				MpiRaidActionReply_t	*pr = (MpiRaidActionReply_t *)mr;
+				if (pr->ActionStatus == MPI_RAID_ACTION_ASTATUS_SUCCESS)
+					completionCode = MPT_SCANDV_GOOD;
+				else
+					completionCode = MPT_SCANDV_SOME_ERROR;
+
+			} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_VALID) {
+				u8		*sense_data;
+				int		 sz;
+
+				/* save sense data in global structure
+				 */
+				completionCode = MPT_SCANDV_SENSE;
+				hd->pLocal->scsiStatus = scsi_status;
+				sense_data = ((u8 *)hd->ioc->sense_buf_pool +
+					(req_idx * MPT_SENSE_BUFFER_ALLOC));
+
+				sz = min_t(int, pReq->SenseBufferLength,
+							SCSI_STD_SENSE_BYTES);
+				memcpy(hd->pLocal->sense, sense_data, sz);
+
+				ddvprintk((KERN_NOTICE "  Check Condition, sense ptr %p\n",
+						sense_data));
+			} else if (pReply->SCSIState & MPI_SCSI_STATE_AUTOSENSE_FAILED) {
+				if (pReq->CDB[0] == INQUIRY)
+					completionCode = MPT_SCANDV_ISSUE_SENSE;
+				else
+					completionCode = MPT_SCANDV_DID_RESET;
+			}
+			else if (pReply->SCSIState & MPI_SCSI_STATE_NO_SCSI_STATUS)
+				completionCode = MPT_SCANDV_DID_RESET;
+			else if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+				completionCode = MPT_SCANDV_DID_RESET;
+			else {
+				completionCode = MPT_SCANDV_GOOD;
+				hd->pLocal->scsiStatus = scsi_status;
+			}
+			break;
+
+		case MPI_IOCSTATUS_SCSI_PROTOCOL_ERROR:		/* 0x0047 */
+			if (pReply->SCSIState & MPI_SCSI_STATE_TERMINATED)
+				completionCode = MPT_SCANDV_DID_RESET;
+			else
+				completionCode = MPT_SCANDV_SOME_ERROR;
+			break;
+
+		default:
+			completionCode = MPT_SCANDV_SOME_ERROR;
+			break;
+
+		}	/* switch(status) */
+
+		ddvtprintk((KERN_NOTICE "  completionCode set to %08xh\n",
+				completionCode));
+	} /* end of address reply case */
+
+	hd->pLocal->completion = completionCode;
+
+	/* MF and RF are freed in mpt_interrupt
+	 */
+wakeup:
+	/* Free Chain buffers (will never chain) in scan or dv */
+	//mptscsih_freeChainBuffers(ioc, req_idx);
+
+	/*
+	 * Wake up the original calling thread
+	 */
+	scandv_wait_done = 1;
+	wake_up(&scandv_waitq);
+
+	return 1;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_timer_expired - Call back for timer process.
+ *	Used only for dv functionality.
+ *	@data: Pointer to MPT_SCSI_HOST recast as an unsigned long
+ *
+ */
+static void mptscsih_timer_expired(unsigned long data)
+{
+	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
+
+	ddvprintk((MYIOC_s_WARN_FMT "Timer Expired! Cmd %p\n", hd->ioc->name, hd->cmdPtr));
+
+	if (hd->cmdPtr) {
+		MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr;
+
+		if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
+			/* Desire to issue a task management request here.
+			 * TM requests MUST be single threaded.
+			 * If old eh code and no TM current, issue request.
+			 * If new eh code, do nothing. Wait for OS cmd timeout
+			 *	for bus reset.
+			 */
+			ddvtprintk((MYIOC_s_NOTE_FMT "DV Cmd Timeout: NoOp\n", hd->ioc->name));
+		} else {
+			/* Perform a FW reload */
+			if (mpt_HardResetHandler(hd->ioc, NO_SLEEP) < 0) {
+				printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", hd->ioc->name);
+			}
+		}
+	} else {
+		/* This should NEVER happen */
+		printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", hd->ioc->name);
+	}
+
+	/* No more processing.
+	 * TM call will generate an interrupt for SCSI TM Management.
+	 * The FW will reply to all outstanding commands, callback will finish cleanup.
+	 * Hard reset clean-up will free all resources.
+	 */
+	ddvprintk((MYIOC_s_WARN_FMT "Timer Expired Complete!\n", hd->ioc->name));
+
+	return;
+}
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_do_raid - Format and Issue a RAID volume request message.
+ *	@hd: Pointer to scsi host structure
+ *	@action: RAID action to be performed.
+ *	@io: Pointer to an INTERNAL_CMD structure carrying the volume id,
+ *		bus and physical disk number.
+ *
+ *	Returns: < 0 on a fatal error
+ *		0 on success
+ *
+ *	Remark: Wait to return until reply processed by the ISR.
+ */
+static int
+mptscsih_do_raid(MPT_SCSI_HOST *hd, u8 action, INTERNAL_CMD *io)
+{
+	MpiRaidActionRequest_t	*pReq;
+	MPT_FRAME_HDR		*mf;
+	int			in_isr;
+
+	in_isr = in_interrupt();
+	if (in_isr) {
+		dprintk((MYIOC_s_WARN_FMT "Internal raid request not allowed in ISR context!\n",
+       				hd->ioc->name));
+		return -EPERM;
+	}
+
+	/* Get and Populate a free Frame
+	 */
+	if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc)) == NULL) {
+		ddvprintk((MYIOC_s_WARN_FMT "_do_raid: no msg frames!\n",
+					hd->ioc->name));
+		return -EAGAIN;
+	}
+	pReq = (MpiRaidActionRequest_t *)mf;
+	pReq->Action = action;
+	pReq->Reserved1 = 0;
+	pReq->ChainOffset = 0;
+	pReq->Function = MPI_FUNCTION_RAID_ACTION;
+	pReq->VolumeID = io->id;
+	pReq->VolumeBus = io->bus;
+	pReq->PhysDiskNum = io->physDiskNum;
+	pReq->MsgFlags = 0;
+	pReq->Reserved2 = 0;
+	pReq->ActionDataWord = 0; /* Reserved for this action */
+	//pReq->ActionDataSGE = 0;
+
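+	/* No action data is transferred for this request; attach a
+	 * single zero-length simple SGE.
+	 */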
+	mpt_add_sge((char *)&pReq->ActionDataSGE,
+		MPT_SGE_FLAGS_SSIMPLE_READ | 0, (dma_addr_t) -1);
+
+	ddvprintk((MYIOC_s_INFO_FMT "RAID Volume action %x id %d\n",
+			hd->ioc->name, action, io->id));
+
+	hd->pLocal = NULL;
+	hd->timer.expires = jiffies + HZ*10; /* 10 second timeout */
+	scandv_wait_done = 0;
+
+	/* Save cmd pointer, for resource free if timeout or
+	 * FW reload occurs
+	 */
+	hd->cmdPtr = mf;
+
+	add_timer(&hd->timer);
+	mpt_put_msg_frame(ScsiScanDvCtx, hd->ioc, mf);
+	wait_event(scandv_waitq, scandv_wait_done);
+
+	if ((hd->pLocal == NULL) || (hd->pLocal->completion != MPT_SCANDV_GOOD))
+		return -1;
+
+	return 0;
+}
+#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_do_cmd - Do internal command.
+ *	@hd: MPT_SCSI_HOST pointer
+ *	@io: INTERNAL_CMD pointer.
+ *
+ *	Issue the specified internally generated command and do command
+ *	specific cleanup. For bus scan / DV only.
+ *	NOTES: If command is Inquiry and status is good,
+ *	initialize a target structure, save the data
+ *
+ *	Remark: Single threaded access only.
+ *
+ *	Return:
+ *		< 0 if an illegal command or no resources
+ *
+ *		   0 if good
+ *
+ *		 > 0 if command complete but some type of completion error.
+ */
+static int
+mptscsih_do_cmd(MPT_SCSI_HOST *hd, INTERNAL_CMD *io)
+{
+	MPT_FRAME_HDR	*mf;
+	SCSIIORequest_t	*pScsiReq;
+	SCSIIORequest_t	 ReqCopy;
+	int		 my_idx, ii, dir;
+	int		 rc, cmdTimeout;
+	int		in_isr;
+	char		 cmdLen;
+	char		 CDB[]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0};
+	char		 cmd = io->cmd;
+
+	in_isr = in_interrupt();
+	if (in_isr) {
+		dprintk((MYIOC_s_WARN_FMT "Internal SCSI IO request not allowed in ISR context!\n",
+       				hd->ioc->name));
+		return -EPERM;
+	}
+
+
+	/* Set command specific information
+	 */
+	switch (cmd) {
+	case INQUIRY:
+		cmdLen = 6;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
+		CDB[4] = io->size;
+		cmdTimeout = 10;
+		break;
+
+	case TEST_UNIT_READY:
+		cmdLen = 6;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		cmdTimeout = 10;
+		break;
+
+	case START_STOP:
+		cmdLen = 6;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
+		CDB[4] = 1;	/*Spin up the disk */
+		cmdTimeout = 15;
+		break;
+
+	case REQUEST_SENSE:
+		cmdLen = 6;
+		CDB[0] = cmd;
+		CDB[4] = io->size;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		cmdTimeout = 10;
+		break;
+
+	case READ_BUFFER:
+		cmdLen = 10;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
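+		/* Mode field: 0x0A selects the echo buffer, 0x02 the data
+		 * buffer; setting bit 0 requests the buffer capacity
+		 * descriptor instead of the buffer data.
+		 */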
+		if (io->flags & MPT_ICFLAG_ECHO) {
+			CDB[1] = 0x0A;
+		} else {
+			CDB[1] = 0x02;
+		}
+
+		if (io->flags & MPT_ICFLAG_BUF_CAP) {
+			CDB[1] |= 0x01;
+		}
+		CDB[6] = (io->size >> 16) & 0xFF;
+		CDB[7] = (io->size >>  8) & 0xFF;
+		CDB[8] = io->size & 0xFF;
+		cmdTimeout = 10;
+		break;
+
+	case WRITE_BUFFER:
+		cmdLen = 10;
+		dir = MPI_SCSIIO_CONTROL_WRITE;
+		CDB[0] = cmd;
+		if (io->flags & MPT_ICFLAG_ECHO) {
+			CDB[1] = 0x0A;
+		} else {
+			CDB[1] = 0x02;
+		}
+		CDB[6] = (io->size >> 16) & 0xFF;
+		CDB[7] = (io->size >>  8) & 0xFF;
+		CDB[8] = io->size & 0xFF;
+		cmdTimeout = 10;
+		break;
+
+	case RESERVE:
+		cmdLen = 6;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
+		cmdTimeout = 10;
+		break;
+
+	case RELEASE:
+		cmdLen = 6;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
+		cmdTimeout = 10;
+		break;
+
+	case SYNCHRONIZE_CACHE:
+		cmdLen = 10;
+		dir = MPI_SCSIIO_CONTROL_READ;
+		CDB[0] = cmd;
+//		CDB[1] = 0x02;	/* set immediate bit */
+		cmdTimeout = 10;
+		break;
+
+	default:
+		/* Error Case */
+		return -EFAULT;
+	}
+
+	/* Get and Populate a free Frame
+	 */
+	if ((mf = mpt_get_msg_frame(ScsiScanDvCtx, hd->ioc)) == NULL) {
+		ddvprintk((MYIOC_s_WARN_FMT "No msg frames!\n",
+					hd->ioc->name));
+		return -EBUSY;
+	}
+
+	pScsiReq = (SCSIIORequest_t *) mf;
+
+	/* Get the request index */
+	my_idx = le16_to_cpu(mf->u.frame.hwhdr.msgctxu.fld.req_idx);
+	ADD_INDEX_LOG(my_idx); /* for debug */
+
+	if (io->flags & MPT_ICFLAG_PHYS_DISK) {
+		pScsiReq->TargetID = io->physDiskNum;
+		pScsiReq->Bus = 0;
+		pScsiReq->ChainOffset = 0;
+		pScsiReq->Function = MPI_FUNCTION_RAID_SCSI_IO_PASSTHROUGH;
+	} else {
+		pScsiReq->TargetID = io->id;
+		pScsiReq->Bus = io->bus;
+		pScsiReq->ChainOffset = 0;
+		pScsiReq->Function = MPI_FUNCTION_SCSI_IO_REQUEST;
+	}
+
+	pScsiReq->CDBLength = cmdLen;
+	pScsiReq->SenseBufferLength = MPT_SENSE_BUFFER_SIZE;
+
+	pScsiReq->Reserved = 0;
+
+	pScsiReq->MsgFlags = mpt_msg_flags();
+	/* MsgContext set in mpt_get_msg_frame call  */
+
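+	/* Zero the 8-byte LUN field, then place the LUN in byte 1
+	 * (single level LUN, peripheral device addressing).
+	 */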
+	for (ii=0; ii < 8; ii++)
+		pScsiReq->LUN[ii] = 0;
+	pScsiReq->LUN[1] = io->lun;
+
+	if (io->flags & MPT_ICFLAG_TAGGED_CMD)
+		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_SIMPLEQ);
+	else
+		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
+
+	if (cmd == REQUEST_SENSE) {
+		pScsiReq->Control = cpu_to_le32(dir | MPI_SCSIIO_CONTROL_UNTAGGED);
+		ddvprintk((MYIOC_s_INFO_FMT "Untagged! 0x%2x\n",
+			hd->ioc->name, cmd));
+	}
+
+	for (ii=0; ii < 16; ii++)
+		pScsiReq->CDB[ii] = CDB[ii];
+
+	pScsiReq->DataLength = cpu_to_le32(io->size);
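+	/* Each request index owns a fixed slice of the shared
+	 * sense buffer pool.
+	 */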
+	pScsiReq->SenseBufferLowAddr = cpu_to_le32(hd->ioc->sense_buf_low_dma
+					   + (my_idx * MPT_SENSE_BUFFER_ALLOC));
+
+	ddvprintk((MYIOC_s_INFO_FMT "Sending Command 0x%x for (%d:%d:%d)\n",
+			hd->ioc->name, cmd, io->bus, io->id, io->lun));
+
+	if (dir == MPI_SCSIIO_CONTROL_READ) {
+		mpt_add_sge((char *) &pScsiReq->SGL,
+			MPT_SGE_FLAGS_SSIMPLE_READ | io->size,
+			io->data_dma);
+	} else {
+		mpt_add_sge((char *) &pScsiReq->SGL,
+			MPT_SGE_FLAGS_SSIMPLE_WRITE | io->size,
+			io->data_dma);
+	}
+
+	/* The ISR will free the request frame, but we need
+	 * the information to initialize the target. Duplicate.
+	 */
+	memcpy(&ReqCopy, pScsiReq, sizeof(SCSIIORequest_t));
+
+	/* Issue this command after:
+	 *	finish init
+	 *	add timer
+	 * Wait until the reply has been received
+	 *  ScsiScanDvCtx callback function will
+	 *	set hd->pLocal;
+	 *	set scandv_wait_done and call wake_up
+	 */
+	hd->pLocal = NULL;
+	hd->timer.expires = jiffies + HZ*cmdTimeout;
+	scandv_wait_done = 0;
+
+	/* Save cmd pointer, for resource free if timeout or
+	 * FW reload occurs
+	 */
+	hd->cmdPtr = mf;
+
+	add_timer(&hd->timer);
+	mpt_put_msg_frame(ScsiScanDvCtx, hd->ioc, mf);
+	wait_event(scandv_waitq, scandv_wait_done);
+
+	if (hd->pLocal) {
+		rc = hd->pLocal->completion;
+		hd->pLocal->skip = 0;
+
+		/* Always set fatal error codes in some cases.
+		 */
+		if (rc == MPT_SCANDV_SELECTION_TIMEOUT)
+			rc = -ENXIO;
+		else if (rc == MPT_SCANDV_SOME_ERROR)
+			rc =  -rc;
+	} else {
+		rc = -EFAULT;
+		/* This should never happen. */
+		ddvprintk((MYIOC_s_INFO_FMT "_do_cmd: Null pLocal!!!\n",
+				hd->ioc->name));
+	}
+
+	return rc;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_synchronize_cache - Send SYNCHRONIZE_CACHE to all disks.
+ *	@hd: Pointer to MPT_SCSI_HOST structure
+ *	@portnum: IOC port number
+ *
+ *	Uses the ISR, but with special processing.
+ *	MUST be single-threaded.
+ *
+ *	Return: 0 on completion
+ */
+static int
+mptscsih_synchronize_cache(MPT_SCSI_HOST *hd, int portnum)
+{
+	MPT_ADAPTER		*ioc= hd->ioc;
+	VirtDevice		*pTarget;
+	SCSIDevicePage1_t	*pcfg1Data = NULL;
+	INTERNAL_CMD		 iocmd;
+	CONFIGPARMS		 cfg;
+	dma_addr_t		 cfg1_dma_addr = -1;
+	ConfigPageHeader_t	 header1;
+	int			 bus = 0;
+	int			 id = 0;
+	int			 lun;
+	int			 indexed_lun, lun_index;
+	int			 hostId = ioc->pfacts[portnum].PortSCSIID;
+	int			 max_id;
+	int			 requested, configuration, data;
+	int			 doConfig = 0;
+	u8			 flags, factor;
+
+	max_id = ioc->sh->max_id - 1;
+
+	/* Following parameters will not change
+	 * in this routine.
+	 */
+	iocmd.cmd = SYNCHRONIZE_CACHE;
+	iocmd.flags = 0;
+	iocmd.physDiskNum = -1;
+	iocmd.data = NULL;
+	iocmd.data_dma = -1;
+	iocmd.size = 0;
+	iocmd.rsvd = iocmd.rsvd2 = 0;
+
+	/* No SCSI hosts
+	 */
+	if (hd->Targets == NULL)
+		return 0;
+
+	/* Skip the host
+	 */
+	if (id == hostId)
+		id++;
+
+	/* Write SDP1 for all SCSI devices
+	 * Alloc memory and set up config buffer
+	 */
+	if (ioc->bus_type == SCSI) {
+		if (ioc->spi_data.sdp1length > 0) {
+			pcfg1Data = (SCSIDevicePage1_t *)pci_alloc_consistent(ioc->pcidev,
+					 ioc->spi_data.sdp1length * 4, &cfg1_dma_addr);
+
+			if (pcfg1Data != NULL) {
+				doConfig = 1;
+				header1.PageVersion = ioc->spi_data.sdp1version;
+				header1.PageLength = ioc->spi_data.sdp1length;
+				header1.PageNumber = 1;
+				header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+				cfg.hdr = &header1;
+				cfg.physAddr = cfg1_dma_addr;
+				cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+				cfg.dir = 1;
+				cfg.timeout = 0;
+			}
+		}
+	}
+
+	/* loop through all devices on this port
+	 */
+	while (bus < MPT_MAX_BUS) {
+		iocmd.bus = bus;
+		iocmd.id = id;
+		pTarget = hd->Targets[(int)id];
+
+		if (doConfig) {
+
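+			/* Force this device back to async/narrow in SDP1
+			 * before its cache is flushed.
+			 */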
+			/* Set the negotiation flags */
+			if (pTarget && (pTarget = hd->Targets[id]) && !pTarget->raidVolume) {
+				flags = pTarget->negoFlags;
+			} else {
+				flags = hd->ioc->spi_data.noQas;
+				if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+					data = hd->ioc->spi_data.nvram[id];
+
+					if (data & MPT_NVRAM_WIDE_DISABLE)
+						flags |= MPT_TARGET_NO_NEGO_WIDE;
+
+					factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
+					if ((factor == 0) || (factor == MPT_ASYNC))
+						flags |= MPT_TARGET_NO_NEGO_SYNC;
+				}
+			}
+
+			/* Force to async, narrow */
+			mptscsih_setDevicePage1Flags(0, MPT_ASYNC, 0, &requested,
+					&configuration, flags);
+			dnegoprintk(("synchronize cache: id=%d width=0 factor=MPT_ASYNC "
+				"offset=0 negoFlags=%x request=%x config=%x\n",
+				id, flags, requested, configuration));
+			pcfg1Data->RequestedParameters = cpu_to_le32(requested);
+			pcfg1Data->Reserved = 0;
+			pcfg1Data->Configuration = cpu_to_le32(configuration);
+			cfg.pageAddr = (bus<<8) | id;
+			mpt_config(hd->ioc, &cfg);
+		}
+
+		/* If target Ptr NULL or if this target is NOT a disk, skip.
+		 */
+		if ((pTarget) && (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)){
+			for (lun=0; lun <= MPT_LAST_LUN; lun++) {
+				/* If LUN present, issue the command
+				 */
+				lun_index = (lun >> 5);  /* 32 luns per lun_index */
+				indexed_lun = (lun % 32);
+				if (pTarget->luns[lun_index] & (1<<indexed_lun)) {
+					iocmd.lun = lun;
+					(void) mptscsih_do_cmd(hd, &iocmd);
+				}
+			}
+		}
+
+		/* get next relevant device */
+		id++;
+
+		if (id == hostId)
+			id++;
+
+		if (id > max_id) {
+			id = 0;
+			bus++;
+		}
+	}
+
+	if (pcfg1Data) {
+		pci_free_consistent(ioc->pcidev, header1.PageLength * 4, pcfg1Data, cfg1_dma_addr);
+	}
+
+	return 0;
+}
+
+#ifdef MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_domainValidation - Top level handler for domain validation.
+ *	@arg: Unused; all SCSI adapters on the ioc list are processed.
+ *
+ *	Uses the ISR, but with special processing.
+ *	Called from schedule, should not be in interrupt mode.
+ *	While thread alive, do dv for all devices needing dv
+ *
+ *	Return: None.
+ */
+static void
+mptscsih_domainValidation(void *arg)
+{
+	MPT_SCSI_HOST		*hd;
+	MPT_ADAPTER		*ioc;
+	unsigned long		 flags;
+	int 			 id, maxid, dvStatus, did;
+	int			 ii, isPhysDisk;
+
+	spin_lock_irqsave(&dvtaskQ_lock, flags);
+	dvtaskQ_active = 1;
+	if (dvtaskQ_release) {
+		dvtaskQ_active = 0;
+		spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+		return;
+	}
+	spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+
+	/* For this ioc, loop through all devices and do dv to each device.
+	 * When complete with this ioc, search through the ioc list, and
+	 * for each scsi ioc found, do dv for all devices. Exit when no
+	 * device needs dv.
+	 */
+	did = 1;
+	while (did) {
+		did = 0;
+		list_for_each_entry(ioc, &ioc_list, list) {
+			spin_lock_irqsave(&dvtaskQ_lock, flags);
+			if (dvtaskQ_release) {
+				dvtaskQ_active = 0;
+				spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+				return;
+			}
+			spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+
+			msleep(250);
+
+			/* DV only to SCSI adapters */
+			if (ioc->bus_type != SCSI)
+				continue;
+
+			/* Make sure everything looks ok */
+			if (ioc->sh == NULL)
+				continue;
+
+			hd = (MPT_SCSI_HOST *) ioc->sh->hostdata;
+			if (hd == NULL)
+				continue;
+
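+			/* If a reload of IOC Page 3 was requested, re-read it
+			 * and re-flag any hidden physical disk that still
+			 * needs DV.
+			 */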
+			if ((ioc->spi_data.forceDv & MPT_SCSICFG_RELOAD_IOC_PG3) != 0) {
+				mpt_read_ioc_pg_3(ioc);
+				if (ioc->spi_data.pIocPg3) {
+					Ioc3PhysDisk_t *pPDisk = ioc->spi_data.pIocPg3->PhysDisk;
+					int		numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+
+					while (numPDisk) {
+						if (ioc->spi_data.dvStatus[pPDisk->PhysDiskID] & MPT_SCSICFG_DV_NOT_DONE)
+							ioc->spi_data.dvStatus[pPDisk->PhysDiskID] |= MPT_SCSICFG_NEED_DV;
+
+						pPDisk++;
+						numPDisk--;
+					}
+				}
+				ioc->spi_data.forceDv &= ~MPT_SCSICFG_RELOAD_IOC_PG3;
+			}
+
+			maxid = min_t(int, ioc->sh->max_id, MPT_MAX_SCSI_DEVICES);
+
+			for (id = 0; id < maxid; id++) {
+				spin_lock_irqsave(&dvtaskQ_lock, flags);
+				if (dvtaskQ_release) {
+					dvtaskQ_active = 0;
+					spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+					return;
+				}
+				spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+				dvStatus = hd->ioc->spi_data.dvStatus[id];
+
+				if (dvStatus & MPT_SCSICFG_NEED_DV) {
+					did++;
+					hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_DV_PENDING;
+					hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_NEED_DV;
+
+					msleep(250);
+
+					/* If hidden phys disk, block IO's to all
+					 *	raid volumes
+					 * else, process normally
+					 */
+					isPhysDisk = mptscsih_is_phys_disk(ioc, id);
+					if (isPhysDisk) {
+						for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+							if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+								hd->ioc->spi_data.dvStatus[ii] |= MPT_SCSICFG_DV_PENDING;
+							}
+						}
+					}
+
+					if (mptscsih_doDv(hd, 0, id) == 1) {
+						/* Untagged device was busy, try again
+						 */
+						hd->ioc->spi_data.dvStatus[id] |= MPT_SCSICFG_NEED_DV;
+						hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_PENDING;
+					} else {
+						/* DV is complete. Clear flags.
+						 */
+						hd->ioc->spi_data.dvStatus[id] &= ~(MPT_SCSICFG_DV_NOT_DONE | MPT_SCSICFG_DV_PENDING);
+					}
+
+					if (isPhysDisk) {
+						for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+							if (hd->ioc->spi_data.isRaid & (1 << ii)) {
+								hd->ioc->spi_data.dvStatus[ii] &= ~MPT_SCSICFG_DV_PENDING;
+							}
+						}
+					}
+
+					if (hd->ioc->spi_data.noQas)
+						mptscsih_qas_check(hd, id);
+				}
+			}
+		}
+	}
+
+	spin_lock_irqsave(&dvtaskQ_lock, flags);
+	dvtaskQ_active = 0;
+	spin_unlock_irqrestore(&dvtaskQ_lock, flags);
+
+	return;
+}
+
+/* Search IOC page 3 to determine if this id is a hidden physical disk
+ */
+static int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, int id)
+{
+	if (ioc->spi_data.pIocPg3) {
+		Ioc3PhysDisk_t *pPDisk =  ioc->spi_data.pIocPg3->PhysDisk;
+		int		numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+
+		while (numPDisk) {
+			if (pPDisk->PhysDiskID == id) {
+				return 1;
+			}
+			pPDisk++;
+			numPDisk--;
+		}
+	}
+	return 0;
+}
+
+/* When QAS has been disabled, rewrite SDP1 for the other targets
+ * so that none of them continues to negotiate QAS.
+ */
+static void mptscsih_qas_check(MPT_SCSI_HOST *hd, int id)
+{
+	VirtDevice *pTarget;
+	int ii;
+
+	if (hd->Targets == NULL)
+		return;
+
+	for (ii=0; ii < MPT_MAX_SCSI_DEVICES; ii++) {
+		if (ii == id)
+			continue;
+
+		if ((hd->ioc->spi_data.dvStatus[ii] & MPT_SCSICFG_DV_NOT_DONE) != 0)
+			continue;
+
+		pTarget = hd->Targets[ii];
+
+		if ((pTarget != NULL) && (!pTarget->raidVolume)) {
+			if ((pTarget->negoFlags & hd->ioc->spi_data.noQas) == 0) {
+				pTarget->negoFlags |= hd->ioc->spi_data.noQas;
+				dnegoprintk(("writeSDP1: id=%d flags=0\n", id));
+				mptscsih_writeSDP1(hd, 0, ii, 0);
+			}
+		} else {
+			if (mptscsih_is_phys_disk(hd->ioc, ii) == 1) {
+				dnegoprintk(("writeSDP1: id=%d SCSICFG_USE_NVRAM\n", id));
+				mptscsih_writeSDP1(hd, 0, ii, MPT_SCSICFG_USE_NVRAM);
+			}
+		}
+	}
+	return;
+}
+
+
+
+#define MPT_GET_NVRAM_VALS	0x01
+#define MPT_UPDATE_MAX		0x02
+#define MPT_SET_MAX		0x04
+#define MPT_SET_MIN		0x08
+#define MPT_FALLBACK		0x10
+#define MPT_SAVE		0x20
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/**
+ *	mptscsih_doDv - Perform domain validation to a target.
+ *	@hd: Pointer to MPT_SCSI_HOST structure.
+ *	@bus_number: Bus of this target.
+ *	@id: Physical ID of this target.
+ *
+ *	Uses the ISR, but with special processing.
+ *	MUST be single-threaded.
+ *	Test will exit if target is at async & narrow.
+ *
+ *	Return: 0 when DV completes or is skipped, 1 if an untagged
+ *	device reported busy and DV should be retried.
+ */
+static int
+mptscsih_doDv(MPT_SCSI_HOST *hd, int bus_number, int id)
+{
+	MPT_ADAPTER		*ioc = hd->ioc;
+	VirtDevice		*pTarget;
+	SCSIDevicePage1_t	*pcfg1Data;
+	SCSIDevicePage0_t	*pcfg0Data;
+	u8			*pbuf1;
+	u8			*pbuf2;
+	u8			*pDvBuf;
+	dma_addr_t		 dvbuf_dma = -1;
+	dma_addr_t		 buf1_dma = -1;
+	dma_addr_t		 buf2_dma = -1;
+	dma_addr_t		 cfg1_dma_addr = -1;
+	dma_addr_t		 cfg0_dma_addr = -1;
+	ConfigPageHeader_t	 header1;
+	ConfigPageHeader_t	 header0;
+	DVPARAMETERS		 dv;
+	INTERNAL_CMD		 iocmd;
+	CONFIGPARMS		 cfg;
+	int			 dv_alloc = 0;
+	int			 rc, sz = 0;
+	int			 bufsize = 0;
+	int			 dataBufSize = 0;
+	int			 echoBufSize = 0;
+	int			 notDone;
+	int			 patt;
+	int			 repeat;
+	int			 retcode = 0;
+	int			 nfactor =  MPT_ULTRA320;
+	char			 firstPass = 1;
+	char			 doFallback = 0;
+	char			 readPage0;
+	char			 bus, lun;
+	char			 inq0 = 0;
+
+	if (ioc->spi_data.sdp1length == 0)
+		return 0;
+
+	if (ioc->spi_data.sdp0length == 0)
+		return 0;
+
+	/* If multiple buses are used, require that the initiator
+	 * id be the same on all buses.
+	 */
+	if (id == ioc->pfacts[0].PortSCSIID)
+		return 0;
+
+	lun = 0;
+	bus = (u8) bus_number;
+	ddvtprintk((MYIOC_s_NOTE_FMT
+			"DV started: bus=%d, id=%d dv @ %p\n",
+			ioc->name, bus, id, &dv));
+
+	/* Prep DV structure
+	 */
+	memset (&dv, 0, sizeof(DVPARAMETERS));
+	dv.id = id;
+
+	/* Populate tmax with the current maximum
+	 * transfer parameters for this target.
+	 * Exit if narrow and async.
+	 */
+	dv.cmd = MPT_GET_NVRAM_VALS;
+	mptscsih_dv_parms(hd, &dv, NULL);
+
+	/* Prep SCSI IO structure
+	 */
+	iocmd.id = id;
+	iocmd.bus = bus;
+	iocmd.lun = lun;
+	iocmd.flags = 0;
+	iocmd.physDiskNum = -1;
+	iocmd.rsvd = iocmd.rsvd2 = 0;
+
+	pTarget = hd->Targets[id];
+
+	/* Use tagged commands if possible.
+	 */
+	if (pTarget) {
+		if (pTarget->tflags & MPT_TARGET_FLAGS_Q_YES)
+			iocmd.flags |= MPT_ICFLAG_TAGGED_CMD;
+		else {
+			if (hd->ioc->facts.FWVersion.Word < 0x01000600)
+				return 0;
+
+			if ((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
+				(hd->ioc->facts.FWVersion.Word < 0x01010B00))
+				return 0;
+		}
+	}
+
+	/* Prep cfg structure
+	 */
+	cfg.pageAddr = (bus<<8) | id;
+	cfg.hdr = NULL;
+
+	/* Prep SDP0 header
+	 */
+	header0.PageVersion = ioc->spi_data.sdp0version;
+	header0.PageLength = ioc->spi_data.sdp0length;
+	header0.PageNumber = 0;
+	header0.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+	/* Prep SDP1 header
+	 */
+	header1.PageVersion = ioc->spi_data.sdp1version;
+	header1.PageLength = ioc->spi_data.sdp1length;
+	header1.PageNumber = 1;
+	header1.PageType = MPI_CONFIG_PAGETYPE_SCSI_DEVICE;
+
+	if (header0.PageLength & 1)
+		dv_alloc = (header0.PageLength * 4) + 4;
+
+	dv_alloc +=  (2048 + (header1.PageLength * 4));
+
+	pDvBuf = pci_alloc_consistent(ioc->pcidev, dv_alloc, &dvbuf_dma);
+	if (pDvBuf == NULL)
+		return 0;
+
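+	/* Carve the two 1K data buffers and the SDP0/SDP1 config page
+	 * buffers out of the single DMA allocation.
+	 */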
+	sz = 0;
+	pbuf1 = (u8 *)pDvBuf;
+	buf1_dma = dvbuf_dma;
+	sz +=1024;
+
+	pbuf2 = (u8 *) (pDvBuf + sz);
+	buf2_dma = dvbuf_dma + sz;
+	sz +=1024;
+
+	pcfg0Data = (SCSIDevicePage0_t *) (pDvBuf + sz);
+	cfg0_dma_addr = dvbuf_dma + sz;
+	sz += header0.PageLength * 4;
+
+	/* 8-byte alignment
+	 */
+	if (header0.PageLength & 1)
+		sz += 4;
+
+	pcfg1Data = (SCSIDevicePage1_t *) (pDvBuf + sz);
+	cfg1_dma_addr = dvbuf_dma + sz;
+
+	/* Skip this ID? Set cfg.hdr to force config page write
+	 */
+	{
+		ScsiCfgData *pspi_data = &hd->ioc->spi_data;
+		if (pspi_data->nvram && (pspi_data->nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+			/* Set the factor from nvram */
+			nfactor = (pspi_data->nvram[id] & MPT_NVRAM_SYNC_MASK) >> 8;
+			if (nfactor < pspi_data->minSyncFactor )
+				nfactor = pspi_data->minSyncFactor;
+
+			if (!(pspi_data->nvram[id] & MPT_NVRAM_ID_SCAN_ENABLE) ||
+				(pspi_data->PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_OFF_DV) ) {
+
+				ddvprintk((MYIOC_s_NOTE_FMT "DV Skipped: bus, id, lun (%d, %d, %d)\n",
+					ioc->name, bus, id, lun));
+
+				dv.cmd = MPT_SET_MAX;
+				mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+				cfg.hdr = &header1;
+
+				/* Save the final negotiated settings to
+				 * SCSI device page 1.
+				 */
+				cfg.physAddr = cfg1_dma_addr;
+				cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+				cfg.dir = 1;
+				mpt_config(hd->ioc, &cfg);
+				goto target_done;
+			}
+		}
+	}
+
+	/* Finish iocmd initialization - hidden or visible disk? */
+	if (ioc->spi_data.pIocPg3) {
+		/* Search IOC page 3 for matching id
+		 */
+		Ioc3PhysDisk_t *pPDisk =  ioc->spi_data.pIocPg3->PhysDisk;
+		int		numPDisk = ioc->spi_data.pIocPg3->NumPhysDisks;
+
+		while (numPDisk) {
+			if (pPDisk->PhysDiskID == id) {
+				/* match */
+				iocmd.flags |= MPT_ICFLAG_PHYS_DISK;
+				iocmd.physDiskNum = pPDisk->PhysDiskNum;
+
+				/* Quiesce the IM
+				 */
+				if (mptscsih_do_raid(hd, MPI_RAID_ACTION_QUIESCE_PHYS_IO, &iocmd) < 0) {
+					ddvprintk((MYIOC_s_ERR_FMT "RAID Quiesce FAILED!\n", ioc->name));
+					goto target_done;
+				}
+				break;
+			}
+			pPDisk++;
+			numPDisk--;
+		}
+	}
+
+	/* RAID Volume ID's may double for a physical device. If RAID but
+	 * not a physical ID as well, skip DV.
+	 */
+	if ((hd->ioc->spi_data.isRaid & (1 << id)) && !(iocmd.flags & MPT_ICFLAG_PHYS_DISK))
+		goto target_done;
+
+
+	/* Basic Test.
+	 * Async & Narrow - Inquiry
+	 * Async & Narrow - Inquiry
+	 * Maximum transfer rate - Inquiry
+	 * Compare buffers:
+	 *	If compare, test complete.
+	 *	If miscompare and first pass, repeat
+	 *	If miscompare and not first pass, fall back and repeat
+	 */
+	hd->pLocal = NULL;
+	readPage0 = 0;
+	sz = SCSI_MAX_INQUIRY_BYTES;
+	rc = MPT_SCANDV_GOOD;
+	while (1) {
+		ddvprintk((MYIOC_s_NOTE_FMT "DV: Start Basic test on id=%d\n", ioc->name, id));
+		retcode = 0;
+		dv.cmd = MPT_SET_MIN;
+		mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+
+		cfg.hdr = &header1;
+		cfg.physAddr = cfg1_dma_addr;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+		cfg.dir = 1;
+		if (mpt_config(hd->ioc, &cfg) != 0)
+			goto target_done;
+
+		/* Wide - narrow - wide workaround case
+		 */
+		if ((rc == MPT_SCANDV_ISSUE_SENSE) && dv.max.width) {
+			/* Send an untagged command to reset disk Qs corrupted
+			 * when a parity error occurs on a Request Sense.
+			 */
+			if ((hd->ioc->facts.FWVersion.Word >= 0x01000600) ||
+				((hd->ioc->facts.FWVersion.Word >= 0x01010000) &&
+				(hd->ioc->facts.FWVersion.Word < 0x01010B00)) ) {
+
+				iocmd.cmd = REQUEST_SENSE;
+				iocmd.data_dma = buf1_dma;
+				iocmd.data = pbuf1;
+				iocmd.size = 0x12;
+				if (mptscsih_do_cmd(hd, &iocmd) < 0)
+					goto target_done;
+				else {
+					if (hd->pLocal == NULL)
+						goto target_done;
+					rc = hd->pLocal->completion;
+					if ((rc == MPT_SCANDV_GOOD) || (rc == MPT_SCANDV_SENSE)) {
+						dv.max.width = 0;
+						doFallback = 0;
+					} else
+						goto target_done;
+				}
+			} else
+				goto target_done;
+		}
+
+		iocmd.cmd = INQUIRY;
+		iocmd.data_dma = buf1_dma;
+		iocmd.data = pbuf1;
+		iocmd.size = sz;
+		memset(pbuf1, 0x00, sz);
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+		else {
+			if (hd->pLocal == NULL)
+				goto target_done;
+			rc = hd->pLocal->completion;
+			if (rc == MPT_SCANDV_GOOD) {
+				if (hd->pLocal->scsiStatus == SAM_STAT_BUSY) {
+					if ((iocmd.flags & MPT_ICFLAG_TAGGED_CMD) == 0)
+						retcode = 1;
+					else
+						retcode = 0;
+
+					goto target_done;
+				}
+			} else if  (rc == MPT_SCANDV_SENSE) {
+				;
+			} else {
+				/* If first command doesn't complete
+				 * with a good status or with a check condition,
+				 * exit.
+				 */
+				goto target_done;
+			}
+		}
+
+		/* Reset the size for disks
+		 */
+		inq0 = (*pbuf1) & 0x1F;
+		if ((inq0 == 0) && pTarget && !pTarget->raidVolume) {
+			sz = 0x40;
+			iocmd.size = sz;
+		}
+
+		/* Another GEM workaround. Check peripheral device type,
+		 * if PROCESSOR, quit DV.
+		 */
+		if (inq0 == TYPE_PROCESSOR) {
+			mptscsih_initTarget(hd,
+				bus,
+				id,
+				lun,
+				pbuf1,
+				sz);
+			goto target_done;
+		}
+
+		if (inq0 > 0x08)
+			goto target_done;
+
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+
+		if (sz == 0x40) {
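+			/* INQUIRY byte 56: bit 0 = IUS, bit 1 = QAS,
+			 * bit 2 = DT clocking support.
+			 */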
+			if ((pTarget->maxWidth == 1) && (pTarget->maxOffset) && (nfactor < 0x0A)
+				&& (pTarget->minSyncFactor > 0x09)) {
+				if ((pbuf1[56] & 0x04) == 0)
+					;
+				else if ((pbuf1[56] & 0x01) == 1) {
+					pTarget->minSyncFactor =
+					    nfactor > MPT_ULTRA320 ? nfactor : MPT_ULTRA320;
+				} else {
+					pTarget->minSyncFactor =
+					    nfactor > MPT_ULTRA160 ? nfactor : MPT_ULTRA160;
+				}
+
+				dv.max.factor = pTarget->minSyncFactor;
+
+				if ((pbuf1[56] & 0x02) == 0) {
+					pTarget->negoFlags |= MPT_TARGET_NO_NEGO_QAS;
+					hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
+					ddvprintk((MYIOC_s_NOTE_FMT 
+					    "DV: Start Basic noQas on id=%d due to pbuf1[56]=%x\n", 
+					    ioc->name, id, pbuf1[56]));
+				}
+			}
+		}
+
+		if (doFallback)
+			dv.cmd = MPT_FALLBACK;
+		else
+			dv.cmd = MPT_SET_MAX;
+
+		mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+		if (mpt_config(hd->ioc, &cfg) != 0)
+			goto target_done;
+
+		if ((!dv.now.width) && (!dv.now.offset))
+			goto target_done;
+
+		iocmd.cmd = INQUIRY;
+		iocmd.data_dma = buf2_dma;
+		iocmd.data = pbuf2;
+		iocmd.size = sz;
+		memset(pbuf2, 0x00, sz);
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+		else if (hd->pLocal == NULL)
+			goto target_done;
+		else {
+			/* Save the return code.
+			 * If this is the first pass,
+			 * read SCSI Device Page 0
+			 * and update the target max parameters.
+			 */
+			rc = hd->pLocal->completion;
+			doFallback = 0;
+			if (rc == MPT_SCANDV_GOOD) {
+				if (!readPage0) {
+					u32 sdp0_info;
+					u32 sdp0_nego;
+
+					cfg.hdr = &header0;
+					cfg.physAddr = cfg0_dma_addr;
+					cfg.action = MPI_CONFIG_ACTION_PAGE_READ_CURRENT;
+					cfg.dir = 0;
+
+					if (mpt_config(hd->ioc, &cfg) != 0)
+						goto target_done;
+
+					sdp0_info = le32_to_cpu(pcfg0Data->Information) & 0x0E;
+					sdp0_nego = (le32_to_cpu(pcfg0Data->NegotiatedParameters) & 0xFF00 ) >> 8;
+
+					/* Quantum and Fujitsu workarounds.
+					 * Quantum: PPR U320 -> PPR reply with Ultra2 and wide
+					 * Fujitsu: PPR U320 -> Msg Reject and Ultra2 and wide
+					 * Restart with a request for U160.
+					 */
+					if ((dv.now.factor == MPT_ULTRA320) && (sdp0_nego == MPT_ULTRA2)) {
+							doFallback = 1;
+					} else {
+						dv.cmd = MPT_UPDATE_MAX;
+						mptscsih_dv_parms(hd, &dv, (void *)pcfg0Data);
+						/* Update the SCSI device page 1 area
+						 */
+						pcfg1Data->RequestedParameters = pcfg0Data->NegotiatedParameters;
+						readPage0 = 1;
+					}
+				}
+
+				/* Quantum workaround. Restart this test with the fallback
+				 * flag set.
+				 */
+				if (doFallback == 0) {
+					if (memcmp(pbuf1, pbuf2, sz) != 0) {
+						if (!firstPass)
+							doFallback = 1;
+					} else {
+						ddvprintk((MYIOC_s_NOTE_FMT 
+						    "DV:Inquiry compared id=%d, calling initTarget\n", ioc->name, id));
+						hd->ioc->spi_data.dvStatus[id] &= ~MPT_SCSICFG_DV_NOT_DONE;
+						mptscsih_initTarget(hd,
+							bus,
+							id,
+							lun,
+							pbuf1,
+							sz);
+						break;	/* test complete */
+					}
+				}
+
+
+			} else if (rc == MPT_SCANDV_ISSUE_SENSE)
+				doFallback = 1;	/* set fallback flag */
+			else if ((rc == MPT_SCANDV_DID_RESET) || 
+				 (rc == MPT_SCANDV_SENSE) || 
+				 (rc == MPT_SCANDV_FALLBACK))
+				doFallback = 1;	/* set fallback flag */
+			else
+				goto target_done;
+
+			firstPass = 0;
+		}
+	}
+	ddvprintk((MYIOC_s_NOTE_FMT "DV: Basic test on id=%d completed OK.\n", ioc->name, id));
+
+	if (mpt_dv == 0)
+		goto target_done;
+
+	inq0 = (*pbuf1) & 0x1F;
+
+	/* Continue only for disks
+	 */
+	if (inq0 != 0)
+		goto target_done;
+
+	if ( ioc->spi_data.PortFlags == MPI_SCSIPORTPAGE2_PORT_FLAGS_BASIC_DV_ONLY )
+		goto target_done;
+
+	/* Start the Enhanced Test.
+	 * 0) issue TUR to clear out check conditions
+	 * 1) read capacity of echo (regular) buffer
+	 * 2) reserve device
+	 * 3) do write-read-compare data pattern test
+	 * 4) release
+	 * 5) update nego parms to target struct
+	 */
+	cfg.hdr = &header1;
+	cfg.physAddr = cfg1_dma_addr;
+	cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+	cfg.dir = 1;
+
+	iocmd.cmd = TEST_UNIT_READY;
+	iocmd.data_dma = -1;
+	iocmd.data = NULL;
+	iocmd.size = 0;
+	notDone = 1;
+	while (notDone) {
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+
+		if (hd->pLocal == NULL)
+			goto target_done;
+
+		rc = hd->pLocal->completion;
+		if (rc == MPT_SCANDV_GOOD)
+			notDone = 0;
+		else if (rc == MPT_SCANDV_SENSE) {
+			u8 skey = hd->pLocal->sense[2] & 0x0F;
+			u8 asc = hd->pLocal->sense[12];
+			u8 ascq = hd->pLocal->sense[13];
+			ddvprintk((MYIOC_s_INFO_FMT
+				"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
+				ioc->name, skey, asc, ascq));
+
+			if (skey == UNIT_ATTENTION)
+				notDone++; /* repeat */
+			else if ((skey == NOT_READY) &&
+					(asc == 0x04)&&(ascq == 0x01)) {
+				/* wait then repeat */
+				mdelay (2000);
+				notDone++;
+			} else if ((skey == NOT_READY) && (asc == 0x3A)) {
+				/* no medium, try read test anyway */
+				notDone = 0;
+			} else {
+				/* All other errors are fatal.
+				 */
+				ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
+						ioc->name));
+				goto target_done;
+			}
+		} else
+			goto target_done;
+	}
+
+	iocmd.cmd = READ_BUFFER;
+	iocmd.data_dma = buf1_dma;
+	iocmd.data = pbuf1;
+	iocmd.size = 4;
+	iocmd.flags |= MPT_ICFLAG_BUF_CAP;
+
+	dataBufSize = 0;
+	echoBufSize = 0;
+	for (patt = 0; patt < 2; patt++) {
+		if (patt == 0)
+			iocmd.flags |= MPT_ICFLAG_ECHO;
+		else
+			iocmd.flags &= ~MPT_ICFLAG_ECHO;
+
+		notDone = 1;
+		while (notDone) {
+			bufsize = 0;
+
+			/* If not ready after 8 trials,
+			 * give up on this device.
+			 */
+			if (notDone > 8)
+				goto target_done;
+
+			if (mptscsih_do_cmd(hd, &iocmd) < 0)
+				goto target_done;
+			else if (hd->pLocal == NULL)
+				goto target_done;
+			else {
+				rc = hd->pLocal->completion;
+				ddvprintk(("ReadBuffer Comp Code %d", rc));
+				ddvprintk(("  buff: %0x %0x %0x %0x\n",
+					pbuf1[0], pbuf1[1], pbuf1[2], pbuf1[3]));
+
+				if (rc == MPT_SCANDV_GOOD) {
+					notDone = 0;
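+					/* Buffer capacity from the returned
+					 * descriptor: echo buffer = low 13 bits
+					 * of bytes 2-3, data buffer = bytes 1-3.
+					 */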
+					if (iocmd.flags & MPT_ICFLAG_ECHO) {
+						bufsize =  ((pbuf1[2] & 0x1F) <<8) | pbuf1[3];
+					} else {
+						bufsize =  pbuf1[1]<<16 | pbuf1[2]<<8 | pbuf1[3];
+					}
+				} else if (rc == MPT_SCANDV_SENSE) {
+					u8 skey = hd->pLocal->sense[2] & 0x0F;
+					u8 asc = hd->pLocal->sense[12];
+					u8 ascq = hd->pLocal->sense[13];
+					ddvprintk((MYIOC_s_INFO_FMT
+						"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
+						ioc->name, skey, asc, ascq));
+					if (skey == ILLEGAL_REQUEST) {
+						notDone = 0;
+					} else if (skey == UNIT_ATTENTION) {
+						notDone++; /* repeat */
+					} else if ((skey == NOT_READY) &&
+						(asc == 0x04)&&(ascq == 0x01)) {
+						/* wait then repeat */
+						mdelay (2000);
+						notDone++;
+					} else {
+						/* All other errors are fatal.
+						 */
+						ddvprintk((MYIOC_s_INFO_FMT "DV: fatal error.",
+							ioc->name));
+						goto target_done;
+					}
+				} else {
+					/* All other errors are fatal
+					 */
+					goto target_done;
+				}
+			}
+		}
+
+		if (iocmd.flags & MPT_ICFLAG_ECHO)
+			echoBufSize = bufsize;
+		else
+			dataBufSize = bufsize;
+	}
+	sz = 0;
+	iocmd.flags &= ~MPT_ICFLAG_BUF_CAP;
+
+	/* Use echo buffers if possible,
+	 * Exit if both buffers are 0.
+	 */
+	if (echoBufSize > 0) {
+		iocmd.flags |= MPT_ICFLAG_ECHO;
+		if (dataBufSize > 0)
+			bufsize = min(echoBufSize, dataBufSize);
+		else
+			bufsize = echoBufSize;
+	} else if (dataBufSize == 0)
+		goto target_done;
+
+	ddvprintk((MYIOC_s_INFO_FMT "%s Buffer Capacity %d\n", ioc->name,
+		(iocmd.flags & MPT_ICFLAG_ECHO) ? "Echo" : " ", bufsize));
+
+	/* Data buffers for write-read-compare test max 1K.
+	 */
+	sz = min(bufsize, 1024);
+
+	/* --- loop ----
+	 * On first pass, always issue a reserve.
+	 * On additional loops, only if a reset has occurred.
+	 * iocmd.flags indicates if echo or regular buffer
+	 */
+	for (patt = 0; patt < 4; patt++) {
+		ddvprintk(("Pattern %d\n", patt));
+		if ((iocmd.flags & MPT_ICFLAG_RESERVED) && (iocmd.flags & MPT_ICFLAG_DID_RESET)) {
+			iocmd.cmd = TEST_UNIT_READY;
+			iocmd.data_dma = -1;
+			iocmd.data = NULL;
+			iocmd.size = 0;
+			if (mptscsih_do_cmd(hd, &iocmd) < 0)
+				goto target_done;
+
+			iocmd.cmd = RELEASE;
+			iocmd.data_dma = -1;
+			iocmd.data = NULL;
+			iocmd.size = 0;
+			if (mptscsih_do_cmd(hd, &iocmd) < 0)
+				goto target_done;
+			else if (hd->pLocal == NULL)
+				goto target_done;
+			else {
+				rc = hd->pLocal->completion;
+				ddvprintk(("Release rc %d\n", rc));
+				if (rc == MPT_SCANDV_GOOD)
+					iocmd.flags &= ~MPT_ICFLAG_RESERVED;
+				else
+					goto target_done;
+			}
+			iocmd.flags &= ~MPT_ICFLAG_RESERVED;
+		}
+		iocmd.flags &= ~MPT_ICFLAG_DID_RESET;
+
+		repeat = 5;
+		while (repeat && (!(iocmd.flags & MPT_ICFLAG_RESERVED))) {
+			iocmd.cmd = RESERVE;
+			iocmd.data_dma = -1;
+			iocmd.data = NULL;
+			iocmd.size = 0;
+			if (mptscsih_do_cmd(hd, &iocmd) < 0)
+				goto target_done;
+			else if (hd->pLocal == NULL)
+				goto target_done;
+			else {
+				rc = hd->pLocal->completion;
+				if (rc == MPT_SCANDV_GOOD) {
+					iocmd.flags |= MPT_ICFLAG_RESERVED;
+				} else if (rc == MPT_SCANDV_SENSE) {
+					/* Wait if the device is becoming ready
+					 */
+					u8 skey = hd->pLocal->sense[2] & 0x0F;
+					u8 asc = hd->pLocal->sense[12];
+					u8 ascq = hd->pLocal->sense[13];
+					ddvprintk((MYIOC_s_INFO_FMT
+						"DV: Reserve Failed: ", ioc->name));
+					ddvprintk(("SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n",
+							skey, asc, ascq));
+
+					if ((skey == NOT_READY) && (asc == 0x04)&&
+									(ascq == 0x01)) {
+						/* wait then repeat */
+						mdelay (2000);
+						notDone++;
+					} else {
+						ddvprintk((MYIOC_s_INFO_FMT
+							"DV: Reserve Failed.", ioc->name));
+						goto target_done;
+					}
+				} else {
+					ddvprintk((MYIOC_s_INFO_FMT "DV: Reserve Failed.",
+							 ioc->name));
+					goto target_done;
+				}
+			}
+		}
+
+		mptscsih_fillbuf(pbuf1, sz, patt, 1);
+		iocmd.cmd = WRITE_BUFFER;
+		iocmd.data_dma = buf1_dma;
+		iocmd.data = pbuf1;
+		iocmd.size = sz;
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+		else if (hd->pLocal == NULL)
+			goto target_done;
+		else {
+			rc = hd->pLocal->completion;
+			if (rc == MPT_SCANDV_GOOD)
+				;		/* Issue read buffer */
+			else if (rc == MPT_SCANDV_DID_RESET) {
+				/* If using echo buffers, reset to data buffers.
+				 * Else do Fallback and restart
+				 * this test (re-issue reserve
+				 * because of bus reset).
+				 */
+				if ((iocmd.flags & MPT_ICFLAG_ECHO) && (dataBufSize >= bufsize)) {
+					iocmd.flags &= ~MPT_ICFLAG_ECHO;
+				} else {
+					dv.cmd = MPT_FALLBACK;
+					mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+
+					if (mpt_config(hd->ioc, &cfg) != 0)
+						goto target_done;
+
+					if ((!dv.now.width) && (!dv.now.offset))
+						goto target_done;
+				}
+
+				iocmd.flags |= MPT_ICFLAG_DID_RESET;
+				patt = -1;
+				continue;
+			} else if (rc == MPT_SCANDV_SENSE) {
+				/* Restart data test if UA, else quit.
+				 */
+				u8 skey = hd->pLocal->sense[2] & 0x0F;
+				ddvprintk((MYIOC_s_INFO_FMT
+					"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
+					hd->pLocal->sense[12], hd->pLocal->sense[13]));
+				if (skey == UNIT_ATTENTION) {
+					patt = -1;
+					continue;
+				} else if (skey == ILLEGAL_REQUEST) {
+					if (iocmd.flags & MPT_ICFLAG_ECHO) {
+						if (dataBufSize >= bufsize) {
+							iocmd.flags &= ~MPT_ICFLAG_ECHO;
+							patt = -1;
+							continue;
+						}
+					}
+					goto target_done;
+				}
+				else
+					goto target_done;
+			} else {
+				/* fatal error */
+				goto target_done;
+			}
+		}
+
+		iocmd.cmd = READ_BUFFER;
+		iocmd.data_dma = buf2_dma;
+		iocmd.data = pbuf2;
+		iocmd.size = sz;
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			goto target_done;
+		else if (hd->pLocal == NULL)
+			goto target_done;
+		else {
+			rc = hd->pLocal->completion;
+			if (rc == MPT_SCANDV_GOOD) {
+				 /* If buffers compare,
+				  * go to next pattern,
+				  * else, do a fallback and restart
+				  * data transfer test.
+				  */
+				if (memcmp (pbuf1, pbuf2, sz) == 0) {
+					; /* goto next pattern */
+				} else {
+					/* Miscompare with Echo buffer, go to data buffer,
+					 * if that buffer exists.
+					 * Miscompare with Data buffer, check first 4 bytes,
+					 * some devices return capacity. Exit in this case.
+					 */
+					if (iocmd.flags & MPT_ICFLAG_ECHO) {
+						if (dataBufSize >= bufsize)
+							iocmd.flags &= ~MPT_ICFLAG_ECHO;
+						else
+							goto target_done;
+					} else {
+						if (dataBufSize == (pbuf2[1]<<16 | pbuf2[2]<<8 | pbuf2[3])) {
+							/* Argh. Device returning wrong data.
+							 * Quit DV for this device.
+							 */
+							goto target_done;
+						}
+
+						/* Had an actual miscompare. Slow down.*/
+						dv.cmd = MPT_FALLBACK;
+						mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+
+						if (mpt_config(hd->ioc, &cfg) != 0)
+							goto target_done;
+
+						if ((!dv.now.width) && (!dv.now.offset))
+							goto target_done;
+					}
+
+					patt = -1;
+					continue;
+				}
+			} else if (rc == MPT_SCANDV_DID_RESET) {
+				/* Do Fallback and restart
+				 * this test (re-issue reserve
+				 * because of bus reset).
+				 */
+				dv.cmd = MPT_FALLBACK;
+				mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+
+				if (mpt_config(hd->ioc, &cfg) != 0)
+					 goto target_done;
+
+				if ((!dv.now.width) && (!dv.now.offset))
+					goto target_done;
+
+				iocmd.flags |= MPT_ICFLAG_DID_RESET;
+				patt = -1;
+				continue;
+			} else if (rc == MPT_SCANDV_SENSE) {
+				/* Restart data test if UA, else quit.
+				 */
+				u8 skey = hd->pLocal->sense[2] & 0x0F;
+				ddvprintk((MYIOC_s_INFO_FMT
+					"SenseKey:ASC:ASCQ = (%x:%02x:%02x)\n", ioc->name, skey,
+					hd->pLocal->sense[12], hd->pLocal->sense[13]));
+				if (skey == UNIT_ATTENTION) {
+					patt = -1;
+					continue;
+				}
+				else
+					goto target_done;
+			} else {
+				/* fatal error */
+				goto target_done;
+			}
+		}
+
+	} /* --- end of patt loop ---- */
+
+target_done:
+	if (iocmd.flags & MPT_ICFLAG_RESERVED) {
+		iocmd.cmd = RELEASE;
+		iocmd.data_dma = -1;
+		iocmd.data = NULL;
+		iocmd.size = 0;
+		if (mptscsih_do_cmd(hd, &iocmd) < 0)
+			printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
+					ioc->name, id);
+		else if (hd->pLocal) {
+			if (hd->pLocal->completion == MPT_SCANDV_GOOD)
+				iocmd.flags &= ~MPT_ICFLAG_RESERVED;
+		} else {
+			printk(MYIOC_s_INFO_FMT "DV: Release failed. id %d",
+						ioc->name, id);
+		}
+	}
+
+
+	/* Set if cfg1_dma_addr contents is valid
+	 */
+	if ((cfg.hdr != NULL) && (retcode == 0)){
+		/* If disk, not U320, disable QAS
+		 */
+		if ((inq0 == 0) && (dv.now.factor > MPT_ULTRA320)) {
+			hd->ioc->spi_data.noQas = MPT_TARGET_NO_NEGO_QAS;
+			ddvprintk((MYIOC_s_NOTE_FMT 
+			    "noQas set due to id=%d has factor=%x\n", ioc->name, id, dv.now.factor));
+		}
+
+		dv.cmd = MPT_SAVE;
+		mptscsih_dv_parms(hd, &dv, (void *)pcfg1Data);
+
+		/* Double writes to SDP1 can cause problems,
+		 * skip save of the final negotiated settings to
+		 * SCSI device page 1.
+		 *
+		cfg.hdr = &header1;
+		cfg.physAddr = cfg1_dma_addr;
+		cfg.action = MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT;
+		cfg.dir = 1;
+		mpt_config(hd->ioc, &cfg);
+		 */
+	}
+
+	/* If this is a RAID Passthrough, enable internal IOs
+	 */
+	if (iocmd.flags & MPT_ICFLAG_PHYS_DISK) {
+		if (mptscsih_do_raid(hd, MPI_RAID_ACTION_ENABLE_PHYS_IO, &iocmd) < 0)
+			ddvprintk((MYIOC_s_ERR_FMT "RAID Enable FAILED!\n", ioc->name));
+	}
+
+	/* Done with the DV scan of the current target
+	 */
+	if (pDvBuf)
+		pci_free_consistent(ioc->pcidev, dv_alloc, pDvBuf, dvbuf_dma);
+
+	ddvtprintk((MYIOC_s_INFO_FMT "DV Done id=%d\n",
+			ioc->name, id));
+
+	return retcode;
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_dv_parms - perform a variety of operations on the
+ *	parameters used for negotiation.
+ *	@hd: Pointer to a SCSI host.
+ *	@dv: Pointer to a structure that contains the maximum and current
+ *		negotiated parameters.
+ *	@pPage: Pointer to the SCSI Device Page 0 or Page 1 buffer used
+ *		by the requested operation (may be NULL).
+ */
+static void
+mptscsih_dv_parms(MPT_SCSI_HOST *hd, DVPARAMETERS *dv,void *pPage)
+{
+	VirtDevice		*pTarget;
+	SCSIDevicePage0_t	*pPage0;
+	SCSIDevicePage1_t	*pPage1;
+	int			val = 0, data, configuration;
+	u8			width = 0;
+	u8			offset = 0;
+	u8			factor = 0;
+	u8			negoFlags = 0;
+	u8			cmd = dv->cmd;
+	u8			id = dv->id;
+
+	switch (cmd) {
+	case MPT_GET_NVRAM_VALS:
+		ddvprintk((MYIOC_s_NOTE_FMT "Getting NVRAM: ",
+							 hd->ioc->name));
+		/* Get the NVRAM values and save in tmax
+		 * If not an LVD bus, the adapter minSyncFactor has been
+		 * already throttled back.
+		 */
+		if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume) {
+			width = pTarget->maxWidth;
+			offset = pTarget->maxOffset;
+			factor = pTarget->minSyncFactor;
+			negoFlags = pTarget->negoFlags;
+		} else {
+			if (hd->ioc->spi_data.nvram && (hd->ioc->spi_data.nvram[id] != MPT_HOST_NVRAM_INVALID)) {
+				data = hd->ioc->spi_data.nvram[id];
+				width = data & MPT_NVRAM_WIDE_DISABLE ? 0 : 1;
+				if ((offset = hd->ioc->spi_data.maxSyncOffset) == 0)
+					factor = MPT_ASYNC;
+				else {
+					factor = (data & MPT_NVRAM_SYNC_MASK) >> MPT_NVRAM_SYNC_SHIFT;
+					if ((factor == 0) || (factor == MPT_ASYNC)){
+						factor = MPT_ASYNC;
+						offset = 0;
+					}
+				}
+			} else {
+				width = MPT_NARROW;
+				offset = 0;
+				factor = MPT_ASYNC;
+			}
+
+			/* Set the negotiation flags */
+			negoFlags = hd->ioc->spi_data.noQas;
+			if (!width)
+				negoFlags |= MPT_TARGET_NO_NEGO_WIDE;
+
+			if (!offset)
+				negoFlags |= MPT_TARGET_NO_NEGO_SYNC;
+		}
+
+		/* limit by adapter capabilities */
+		width = min(width, hd->ioc->spi_data.maxBusWidth);
+		offset = min(offset, hd->ioc->spi_data.maxSyncOffset);
+		factor = max(factor, hd->ioc->spi_data.minSyncFactor);
+
+		/* Check Consistency */
+		if (offset && (factor < MPT_ULTRA2) && !width)
+			factor = MPT_ULTRA2;
+
+		dv->max.width = width;
+		dv->max.offset = offset;
+		dv->max.factor = factor;
+		dv->max.flags = negoFlags;
+		ddvprintk((" id=%d width=%d factor=%x offset=%x flags=%x\n",
+				id, width, factor, offset, negoFlags));
+		break;
+
+	case MPT_UPDATE_MAX:
+		ddvprintk((MYIOC_s_NOTE_FMT
+			"Updating with SDP0 Data: ", hd->ioc->name));
+		/* Update tmax values with those from Device Page 0.*/
+		pPage0 = (SCSIDevicePage0_t *) pPage;
+		if (pPage0) {
+			val = le32_to_cpu(pPage0->NegotiatedParameters);
+			dv->max.width = val & MPI_SCSIDEVPAGE0_NP_WIDE ? 1 : 0;
+			dv->max.offset = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_OFFSET_MASK) >> 16;
+			dv->max.factor = (val&MPI_SCSIDEVPAGE0_NP_NEG_SYNC_PERIOD_MASK) >> 8;
+		}
+
+		dv->now.width = dv->max.width;
+		dv->now.offset = dv->max.offset;
+		dv->now.factor = dv->max.factor;
+		ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x\n",
+				id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
+		break;
+
+	case MPT_SET_MAX:
+		ddvprintk((MYIOC_s_NOTE_FMT "Setting Max: ",
+								hd->ioc->name));
+		/* Set current to the max values. Update the config page.*/
+		dv->now.width = dv->max.width;
+		dv->now.offset = dv->max.offset;
+		dv->now.factor = dv->max.factor;
+		dv->now.flags = dv->max.flags;
+
+		pPage1 = (SCSIDevicePage1_t *)pPage;
+		if (pPage1) {
+			mptscsih_setDevicePage1Flags (dv->now.width, dv->now.factor,
+				dv->now.offset, &val, &configuration, dv->now.flags);
+			dnegoprintk(("Setting Max: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
+				id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
+			pPage1->RequestedParameters = cpu_to_le32(val);
+			pPage1->Reserved = 0;
+			pPage1->Configuration = cpu_to_le32(configuration);
+		}
+
+		ddvprintk(("id=%d width=%d factor=%x offset=%x flags=%x request=%x configuration=%x\n",
+				id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags, val, configuration));
+		break;
+
+	case MPT_SET_MIN:
+		ddvprintk((MYIOC_s_NOTE_FMT "Setting Min: ",
+								hd->ioc->name));
+		/* Set page to asynchronous and narrow
+		 * Do not update now, breaks fallback routine. */
+		width = MPT_NARROW;
+		offset = 0;
+		factor = MPT_ASYNC;
+		negoFlags = dv->max.flags;
+
+		pPage1 = (SCSIDevicePage1_t *)pPage;
+		if (pPage1) {
+			mptscsih_setDevicePage1Flags (width, factor,
+				offset, &val, &configuration, negoFlags);
+			dnegoprintk(("Setting Min: id=%d width=%d factor=%x offset=%x negoFlags=%x request=%x config=%x\n",
+				id, width, factor, offset, negoFlags, val, configuration));
+			pPage1->RequestedParameters = cpu_to_le32(val);
+			pPage1->Reserved = 0;
+			pPage1->Configuration = cpu_to_le32(configuration);
+		}
+		ddvprintk(("id=%d width=%d factor=%x offset=%x request=%x config=%x negoFlags=%x\n",
+				id, width, factor, offset, val, configuration, negoFlags));
+		break;
+
+	case MPT_FALLBACK:
+		ddvprintk((MYIOC_s_NOTE_FMT
+			"Fallback: Start: offset %d, factor %x, width %d \n",
+				hd->ioc->name, dv->now.offset,
+				dv->now.factor, dv->now.width));
+		width = dv->now.width;
+		offset = dv->now.offset;
+		factor = dv->now.factor;
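+		/* Fall back one step per call: faster factors drop toward
+		 * Ultra160/Ultra2, then the rate steps down through Ultra,
+		 * Fast and SCSI, alternating wide and narrow, ending at
+		 * async/narrow.
+		 */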
+		if ((offset) && (dv->max.width)) {
+			if (factor < MPT_ULTRA160)
+				factor = MPT_ULTRA160;
+			else if (factor < MPT_ULTRA2) {
+				factor = MPT_ULTRA2;
+				width = MPT_WIDE;
+			} else if ((factor == MPT_ULTRA2) && width) {
+				factor = MPT_ULTRA2;
+				width = MPT_NARROW;
+			} else if (factor < MPT_ULTRA) {
+				factor = MPT_ULTRA;
+				width = MPT_WIDE;
+			} else if ((factor == MPT_ULTRA) && width) {
+				width = MPT_NARROW;
+			} else if (factor < MPT_FAST) {
+				factor = MPT_FAST;
+				width = MPT_WIDE;
+			} else if ((factor == MPT_FAST) && width) {
+				factor = MPT_FAST;
+				width = MPT_NARROW;
+			} else if (factor < MPT_SCSI) {
+				factor = MPT_SCSI;
+				width = MPT_WIDE;
+			} else if ((factor == MPT_SCSI) && width) {
+				factor = MPT_SCSI;
+				width = MPT_NARROW;
+			} else {
+				factor = MPT_ASYNC;
+				offset = 0;
+			}
+
+		} else if (offset) {
+			width = MPT_NARROW;
+			if (factor < MPT_ULTRA)
+				factor = MPT_ULTRA;
+			else if (factor < MPT_FAST)
+				factor = MPT_FAST;
+			else if (factor < MPT_SCSI)
+				factor = MPT_SCSI;
+			else {
+				factor = MPT_ASYNC;
+				offset = 0;
+			}
+
+		} else {
+			width = MPT_NARROW;
+			factor = MPT_ASYNC;
+		}
+		dv->max.flags |= MPT_TARGET_NO_NEGO_QAS;
+		dv->max.flags &= ~MPT_TAPE_NEGO_IDP;
+
+		dv->now.width = width;
+		dv->now.offset = offset;
+		dv->now.factor = factor;
+		dv->now.flags = dv->max.flags;
+
+		pPage1 = (SCSIDevicePage1_t *)pPage;
+		if (pPage1) {
+			mptscsih_setDevicePage1Flags (width, factor, offset, &val,
+						&configuration, dv->now.flags);
+			dnegoprintk(("Finish: id=%d width=%d offset=%d factor=%x flags=%x request=%x config=%x\n",
+			     id, width, offset, factor, dv->now.flags, val, configuration));
+
+			pPage1->RequestedParameters = cpu_to_le32(val);
+			pPage1->Reserved = 0;
+			pPage1->Configuration = cpu_to_le32(configuration);
+		}
+
+		ddvprintk(("Finish: id=%d offset=%d factor=%x width=%d request=%x config=%x\n",
+			     id, dv->now.offset, dv->now.factor, dv->now.width, val, configuration));
+		break;
+
+	case MPT_SAVE:
+		ddvprintk((MYIOC_s_NOTE_FMT
+			"Saving to Target structure: ", hd->ioc->name));
+		ddvprintk(("id=%d width=%x factor=%x offset=%d flags=%x\n",
+			     id, dv->now.width, dv->now.factor, dv->now.offset, dv->now.flags));
+
+		/* Save these values to target structures
+		 * or overwrite nvram (phys disks only).
+		 */
+
+		if ((hd->Targets)&&((pTarget = hd->Targets[(int)id]) != NULL) && !pTarget->raidVolume ) {
+			pTarget->maxWidth = dv->now.width;
+			pTarget->maxOffset = dv->now.offset;
+			pTarget->minSyncFactor = dv->now.factor;
+			pTarget->negoFlags = dv->now.flags;
+		} else {
+			/* Preserve all flags, use
+			 * read-modify-write algorithm
+			 */
+			if (hd->ioc->spi_data.nvram) {
+				data = hd->ioc->spi_data.nvram[id];
+
+				if (dv->now.width)
+					data &= ~MPT_NVRAM_WIDE_DISABLE;
+				else
+					data |= MPT_NVRAM_WIDE_DISABLE;
+
+				if (!dv->now.offset)
+					factor = MPT_ASYNC;
+
+				data &= ~MPT_NVRAM_SYNC_MASK;
+				data |= (dv->now.factor << MPT_NVRAM_SYNC_SHIFT) & MPT_NVRAM_SYNC_MASK;
+
+				hd->ioc->spi_data.nvram[id] = data;
+			}
+		}
+		break;
+	}
+}
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*	mptscsih_fillbuf - fill a buffer with a special data pattern.
+ *		For bus scan only.
+ *
+ *	@buffer: Pointer to data buffer to be filled.
+ *	@size: Number of bytes to fill
+ *	@index: Pattern index
+ *	@width: bus width, 0 (8 bits) or 1 (16 bits)
+ */
+static void
+mptscsih_fillbuf(char *buffer, int size, int index, int width)
+{
+	char *ptr = buffer;
+	int ii;
+	char byte;
+	short val;
+
+	switch (index) {
+	case 0:
+
+		if (width) {
+			/* Pattern:  0000 FFFF 0000 FFFF
+			 */
+			for (ii=0; ii < size; ii++, ptr++) {
+				if (ii & 0x02)
+					*ptr = 0xFF;
+				else
+					*ptr = 0x00;
+			}
+		} else {
+			/* Pattern:  00 FF 00 FF
+			 */
+			for (ii=0; ii < size; ii++, ptr++) {
+				if (ii & 0x01)
+					*ptr = 0xFF;
+				else
+					*ptr = 0x00;
+			}
+		}
+		break;
+
+	case 1:
+		if (width) {
+			/* Pattern:  5555 AAAA 5555 AAAA 5555
+			 */
+			for (ii=0; ii < size; ii++, ptr++) {
+				if (ii & 0x02)
+					*ptr = 0xAA;
+				else
+					*ptr = 0x55;
+			}
+		} else {
+			/* Pattern:  55 AA 55 AA 55
+			 */
+			for (ii=0; ii < size; ii++, ptr++) {
+				if (ii & 0x01)
+					*ptr = 0xAA;
+				else
+					*ptr = 0x55;
+			}
+		}
+		break;
+
+	case 2:
+		/* Pattern:  00 01 02 03 04 05
+		 * ... FE FF 00 01..
+		 */
+		for (ii=0; ii < size; ii++, ptr++)
+			*ptr = (char) ii;
+		break;
+
+	case 3:
+		if (width) {
+			/* Wide Pattern:  FFFE 0001 FFFD 0002
+			 * ...  4000 DFFF 8000 EFFF
+			 */
+			byte = 0;
+			for (ii=0; ii < size/2; ii++) {
+				/* Create the base pattern
+				 */
+				val = (1 << byte);
+				/* every 64 (0x40) bytes flip the pattern
+				 * since we fill 2 bytes / iteration,
+				 * test for ii = 0x20
+				 */
+				if (ii & 0x20)
+					val = ~(val);
+
+				if (ii & 0x01) {
+					*ptr = (char)( (val & 0xFF00) >> 8);
+					ptr++;
+					*ptr = (char)(val & 0xFF);
+					byte++;
+					byte &= 0x0F;
+				} else {
+					val = ~val;
+					*ptr = (char)( (val & 0xFF00) >> 8);
+					ptr++;
+					*ptr = (char)(val & 0xFF);
+				}
+
+				ptr++;
+			}
+		} else {
+			/* Narrow Pattern:  FE 01 FD 02 FB 04
+			 * .. 7F 80 01 FE 02 FD ...  80 7F
+			 */
+			byte = 0;
+			for (ii=0; ii < size; ii++, ptr++) {
+				/* Base pattern - first 32 bytes
+				 */
+				if (ii & 0x01) {
+					*ptr = (1 << byte);
+					byte++;
+					byte &= 0x07;
+				} else {
+					*ptr = (char) (~(1 << byte));
+				}
+
+				/* Flip the pattern every 32 bytes
+				 */
+				if (ii & 0x20)
+					*ptr = ~(*ptr);
+			}
+		}
+		break;
+	}
+}
+#endif /* ~MPTSCSIH_ENABLE_DOMAIN_VALIDATION */
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+
+module_init(mptscsih_init);
+module_exit(mptscsih_exit);
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
new file mode 100644
index 0000000..5cb2fd4
--- /dev/null
+++ b/drivers/message/fusion/mptscsih.h
@@ -0,0 +1,94 @@
+/*
+ *  linux/drivers/message/fusion/mptscsih.h
+ *      High performance SCSI / Fibre Channel SCSI Host device driver.
+ *      For use with PCI chip/adapter(s):
+ *          LSIFC9xx/LSI409xx Fibre Channel
+ *      running LSI Logic Fusion MPT (Message Passing Technology) firmware.
+ *
+ *  Credits:
+ *      This driver would not exist if not for Alan Cox's development
+ *      of the linux i2o driver.
+ *
+ *      A huge debt of gratitude is owed to David S. Miller (DaveM)
+ *      for fixing much of the stupid and broken stuff in the early
+ *      driver while porting to sparc64 platform.  THANK YOU!
+ *
+ *      (see also mptbase.c)
+ *
+ *  Copyright (c) 1999-2004 LSI Logic Corporation
+ *  Originally By: Steven J. Ralston
+ *  (mailto:netscape.net)
+ *  (mailto:mpt_linux_developer@lsil.com)
+ *
+ *  $Id: mptscsih.h,v 1.21 2002/12/03 21:26:35 pdelaney Exp $
+ */
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; version 2 of the License.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    NO WARRANTY
+    THE PROGRAM IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OR
+    CONDITIONS OF ANY KIND, EITHER EXPRESS OR IMPLIED INCLUDING, WITHOUT
+    LIMITATION, ANY WARRANTIES OR CONDITIONS OF TITLE, NON-INFRINGEMENT,
+    MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. Each Recipient is
+    solely responsible for determining the appropriateness of using and
+    distributing the Program and assumes all risks associated with its
+    exercise of rights under this Agreement, including but not limited to
+    the risks and costs of program errors, damage to or loss of data,
+    programs or equipment, and unavailability or interruption of operations.
+
+    DISCLAIMER OF LIABILITY
+    NEITHER RECIPIENT NOR ANY CONTRIBUTORS SHALL HAVE ANY LIABILITY FOR ANY
+    DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+    DAMAGES (INCLUDING WITHOUT LIMITATION LOST PROFITS), HOWEVER CAUSED AND
+    ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
+    TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
+    USE OR DISTRIBUTION OF THE PROGRAM OR THE EXERCISE OF ANY RIGHTS GRANTED
+    HEREUNDER, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGES
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+*/
+
+#ifndef SCSIHOST_H_INCLUDED
+#define SCSIHOST_H_INCLUDED
+
+/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
+/*
+ *	SCSI Public stuff...
+ */
+
+#define MPT_SCSI_CMD_PER_DEV_HIGH	31
+#define MPT_SCSI_CMD_PER_DEV_LOW	7
+
+#define MPT_SCSI_CMD_PER_LUN		7
+
+#define MPT_SCSI_MAX_SECTORS    8192
+
+/* To disable domain validation, comment out the
+ * following line. No effect for FC devices.
+ * For SCSI devices, driver will negotiate to
+ * NVRAM settings (if available) or to maximum adapter
+ * capabilities.
+ */
+
+#define MPTSCSIH_ENABLE_DOMAIN_VALIDATION
+
+
+/* SCSI driver setup structure. Settings can be overridden
+ * by command line options.
+ */
+#define MPTSCSIH_DOMAIN_VALIDATION      1
+#define MPTSCSIH_MAX_WIDTH              1
+#define MPTSCSIH_MIN_SYNC               0x08
+#define MPTSCSIH_SAF_TE                 0
+
+#endif
diff --git a/drivers/message/i2o/Kconfig b/drivers/message/i2o/Kconfig
new file mode 100644
index 0000000..8d132b0
--- /dev/null
+++ b/drivers/message/i2o/Kconfig
@@ -0,0 +1,75 @@
+
+menu "I2O device support"
+
+config I2O
+	tristate "I2O support"
+	depends on PCI
+	---help---
+	  The Intelligent Input/Output (I2O) architecture allows hardware
+	  drivers to be split into two parts: an operating-system-specific
+	  module called the OSM and a hardware-specific module called the
+	  HDM. The OSM can talk to a whole range of HDMs, and ideally the
+	  HDMs are not OS dependent. This allows for the same HDM driver to
+	  be used under different operating systems if the relevant OSM is in
+	  place. In order for this to work, you need to have an I2O interface
+	  adapter card in your computer. This card contains a special I/O
+	  processor (IOP), thus allowing high speeds since the CPU does not
+	  have to deal with I/O.
+
+	  If you say Y here, you will get a choice of interface adapter
+	  drivers and OSMs with the following questions.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_core.
+
+	  If unsure, say N.
+
+config I2O_CONFIG
+	tristate "I2O Configuration support"
+	depends on PCI && I2O
+	help
+	  Say Y for support of the configuration interface for the I2O adapters.
+	  If you have a RAID controller from Adaptec and you want to use the
+	  raidutils to manage your RAID array, you have to say Y here.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_config.
+
+config I2O_BLOCK
+	tristate "I2O Block OSM"
+	depends on I2O
+	help
+	  Include support for the I2O Block OSM. The Block OSM presents disk
+	  and other structured block devices to the operating system. If you
+	  are using a RAID controller, the array can only be accessed through
+	  the Block OSM driver. It is still possible to access the individual
+	  disks through the SCSI OSM driver, for example to monitor the disks.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_block.
+
+config I2O_SCSI
+	tristate "I2O SCSI OSM"
+	depends on I2O && SCSI
+	help
+	  Allows direct SCSI access to SCSI devices on a SCSI or FibreChannel
+	  I2O controller. You can use both the SCSI and Block OSM together if
+	  you wish. To access a RAID array, you must use the Block OSM driver.
+	  You can still use the SCSI OSM driver to monitor the individual disks.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_scsi.
+
+config I2O_PROC
+	tristate "I2O /proc support"
+	depends on I2O
+	help
+	  If you say Y here and to "/proc file system support", you will be
+	  able to read I2O related information from the virtual directory
+	  /proc/i2o.
+
+	  To compile this support as a module, choose M here: the
+	  module will be called i2o_proc.
+
+endmenu
+
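+# Example (illustrative only, not part of the original Kconfig help text):
+# a .config fragment that builds the whole I2O stack as modules might look
+# like the following; adjust the OSM selection to your hardware.
+#
+#   CONFIG_I2O=m
+#   CONFIG_I2O_CONFIG=m
+#   CONFIG_I2O_BLOCK=m
+#   CONFIG_I2O_SCSI=m
+#   CONFIG_I2O_PROC=m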
diff --git a/drivers/message/i2o/Makefile b/drivers/message/i2o/Makefile
new file mode 100644
index 0000000..aabc6cd
--- /dev/null
+++ b/drivers/message/i2o/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the kernel I2O OSM.
+#
+# Note : at this point, these files are compiled on all systems.
+# In the future, some of these should be built conditionally.
+#
+
+i2o_core-y		+= iop.o driver.o device.o debug.o pci.o exec-osm.o
+obj-$(CONFIG_I2O)	+= i2o_core.o
+obj-$(CONFIG_I2O_CONFIG)+= i2o_config.o
+obj-$(CONFIG_I2O_BLOCK)	+= i2o_block.o
+obj-$(CONFIG_I2O_SCSI)	+= i2o_scsi.o
+obj-$(CONFIG_I2O_PROC)	+= i2o_proc.o
diff --git a/drivers/message/i2o/README b/drivers/message/i2o/README
new file mode 100644
index 0000000..a81f851
--- /dev/null
+++ b/drivers/message/i2o/README
@@ -0,0 +1,98 @@
+
+	Linux I2O Support	(c) Copyright 1999 Red Hat Software
+					and others.
+
+	This program is free software; you can redistribute it and/or
+	modify it under the terms of the GNU General Public License
+	as published by the Free Software Foundation; either version
+	2 of the License, or (at your option) any later version.
+
+AUTHORS (so far)
+
+Alan Cox, Building Number Three Ltd.
+	Core code, SCSI and Block OSMs
+
+Steve Ralston, LSI Logic Corp.
+	Debugging SCSI and Block OSM
+
+Deepak Saxena, Intel Corp.
+	Various core/block extensions
+	/proc interface, bug fixes
+	Ioctl interfaces for control
+	Debugging LAN OSM
+
+Philip Rumpf
+	Fixed assorted dumb SMP locking bugs
+
+Juha Sievanen, University of Helsinki Finland
+	LAN OSM code
+	/proc interface to LAN class
+	Bug fixes
+	Core code extensions
+
+Auvo Häkkinen, University of Helsinki Finland
+	LAN OSM code
+	/proc interface to LAN class
+	Bug fixes
+	Core code extensions
+
+Taneli Vähäkangas, University of Helsinki Finland
+	Fixes to i2o_config
+
+CREDITS
+
+	This work was made possible by 
+
+Red Hat Software
+	Funding for the Building #3 part of the project
+
+Symbios Logic (Now LSI)
+	Host adapters, hints, known to work platforms when I hit
+	compatibility problems
+
+BoxHill Corporation
+	Loan of initial FibreChannel disk array used for development work.
+
+European Commission
+	Funding the work done by the University of Helsinki
+
+SysKonnect
+        Loan of FDDI and Gigabit Ethernet cards
+
+ASUSTeK
+        Loan of I2O motherboard 
+
+STATUS:
+
+o	The core setup works within limits.
+o	The scsi layer seems to almost work. 
+           I'm still chasing down the hang bug.
+o	The block OSM is mostly functional
+o	LAN OSM works with FDDI and Ethernet cards.
+
+TO DO:
+
+General:
+o	Provide hidden address space if asked
+o	Long term message flow control
+o	PCI IOPs without interrupts are not supported yet
+o	Push FAIL handling into the core
+o	DDM control interfaces for module load etc
+o       Add I2O 2.0 support (Deferred to 2.5 kernel)
+
+Block:
+o	Multiple major numbers
+o	Read ahead and cache handling stuff. Talk to Ingo and people
+o	Power management
+o	Finish Media changers
+
+SCSI:
+o	Find the right way to associate drives/luns/busses
+
+Lan:	
+o	Performance tuning
+o	Test Fibre Channel code
+
+Tape:
+o	Anyone seen anything implementing this ?
+           (D.S: Will attempt to do so if spare cycles permit)
diff --git a/drivers/message/i2o/README.ioctl b/drivers/message/i2o/README.ioctl
new file mode 100644
index 0000000..73dd084
--- /dev/null
+++ b/drivers/message/i2o/README.ioctl
@@ -0,0 +1,394 @@
+
+Linux I2O User Space Interface
+rev 0.3 - 04/20/99
+
+=============================================================================
+Originally written by Deepak Saxena(deepak@plexity.net)
+Currently maintained by Deepak Saxena(deepak@plexity.net)
+=============================================================================
+
+I. Introduction
+
+The Linux I2O subsystem provides a set of ioctl() commands that can be
+utilized by user space applications to communicate with IOPs and devices
+on individual IOPs. This document defines the specific ioctl() commands
+that are available to the user and provides examples of their uses.
+
+This document assumes the reader is familiar with or has access to the 
+I2O specification as no I2O message parameters are outlined.  For information 
+on the specification, see http://www.i2osig.org
+
+This document and the I2O user space interface are currently maintained
+by Deepak Saxena.  Please send all comments, errata, and bug fixes to
+deepak@csociety.purdue.edu
+
+II. IOP Access
+
+Access to the I2O subsystem is provided through the device file named 
+/dev/i2o/ctl.  This file is a character file with major number 10 and minor
+number 166.  It can be created through the following command:
+
+   mknod /dev/i2o/ctl c 10 166
+
+III. Determining the IOP Count
+
+   SYNOPSIS 
+
+   ioctl(fd, I2OGETIOPS,  int *count);
+
+   u8 count[MAX_I2O_CONTROLLERS];
+
+   DESCRIPTION
+
+   This function returns the system's active IOP table.  count should
+   point to a buffer containing MAX_I2O_CONTROLLERS entries.  Upon 
+   returning, each entry will contain a non-zero value if the given
+   IOP unit is active, and zero if it is inactive or non-existent.
+
+   RETURN VALUE.
+
+   Returns 0 if no errors occur, and -1 otherwise.  If an error occurs,
+   errno is set appropriately:
+
+     EFAULT   Invalid user space pointer was passed
+
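+   EXAMPLE
+
+   A minimal sketch of how this call might be used; the header providing
+   MAX_I2O_CONTROLLERS and the ioctl numbers is assumed to be the I2O
+   user-space header shipped with the kernel:
+
+      u8 count[MAX_I2O_CONTROLLERS];
+      int fd, i;
+
+      fd = open("/dev/i2o/ctl", O_RDWR);
+      if (fd < 0 || ioctl(fd, I2OGETIOPS, count) < 0)
+         perror("I2OGETIOPS");
+      else
+         for (i = 0; i < MAX_I2O_CONTROLLERS; i++)
+            if (count[i])
+               printf("IOP %d is active\n", i);
+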
+IV. Getting Hardware Resource Table
+
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OHRTGET, struct i2o_cmd_hrtlct *hrt);
+
+      struct i2o_cmd_hrtlct
+      {
+         u32   iop;      /* IOP unit number */
+         void  *resbuf;  /* Buffer for result */
+         u32   *reslen;  /* Buffer length in bytes */
+      };
+
+   DESCRIPTION
+
+   This function returns the Hardware Resource Table of the IOP specified 
+   by hrt->iop in the buffer pointed to by hrt->resbuf. The actual size of 
+   the data is written into *(hrt->reslen).
+
+   RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1 
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ENOBUFS     Buffer not large enough.  If this occurs, the required
+                  buffer length is written into *(hrt->reslen)
+  
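+   EXAMPLE
+
+   An illustrative sketch (not part of the original interface description)
+   that retries with the length reported through ENOBUFS:
+
+      struct i2o_cmd_hrtlct hrt;
+      u32 len = 4096;
+      void *buf = malloc(len);
+
+      hrt.iop    = 0;
+      hrt.resbuf = buf;
+      hrt.reslen = &len;
+
+      if (ioctl(fd, I2OHRTGET, &hrt) < 0 && errno == ENOBUFS) {
+         buf = realloc(buf, len);      /* len now holds the required size */
+         hrt.resbuf = buf;
+         if (ioctl(fd, I2OHRTGET, &hrt) < 0)
+            perror("I2OHRTGET");
+      }
+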
+V. Getting Logical Configuration Table
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OLCTGET, struct i2o_cmd_hrtlct *lct);
+
+      struct i2o_cmd_hrtlct
+      {
+         u32   iop;      /* IOP unit number */
+         void  *resbuf;  /* Buffer for result */
+         u32   *reslen;  /* Buffer length in bytes */
+      };
+
+   DESCRIPTION
+
+   This function returns the Logical Configuration Table of the IOP specified
+   by lct->iop in the buffer pointed to by lct->resbuf. The actual size of 
+   the data is written into *(lct->reslen).
+
+   RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1 
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ENOBUFS     Buffer not large enough.  If this occurs, the required
+                  buffer length is written into *(lct->reslen)
+
+VI. Setting Parameters
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OPARMSET, struct i2o_parm_setget *ops);
+
+      struct i2o_cmd_psetget
+      {
+         u32   iop;      /* IOP unit number */
+         u32   tid;      /* Target device TID */
+         void  *opbuf;   /* Operation List buffer */
+         u32   oplen;    /* Operation List buffer length in bytes */
+         void  *resbuf;  /* Result List buffer */
+         u32   *reslen;  /* Result List buffer length in bytes */
+      };
+
+   DESCRIPTION
+
+   This function posts a UtilParamsSet message to the device identified
+   by ops->iop and ops->tid.  The operation list for the message is 
+   sent through the ops->opbuf buffer, and the result list is written
+   into the buffer pointed to by ops->resbuf.  The number of bytes 
+   written is placed into *(ops->reslen). 
+
+   RETURNS
+
+   The return value is the size in bytes of the data written into
+   ops->resbuf if no errors occur.  If an error occurs, -1 is returned 
+   and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ENOBUFS     Buffer not large enough.  If this occurs, the required
+                  buffer length is written into *(ops->reslen)
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+
+   A return value of 0 does not mean that the value was actually
+   changed properly on the IOP.  The user should check the result
+   list to determine the specific status of the transaction.
+
+VII. Getting Parameters
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OPARMGET, struct i2o_parm_setget *ops);
+
+      struct i2o_parm_setget
+      {
+         u32   iop;      /* IOP unit number */
+         u32   tid;      /* Target device TID */
+         void  *opbuf;   /* Operation List buffer */
+         u32   oplen;    /* Operation List buffer length in bytes */
+         void  *resbuf;  /* Result List buffer */
+         u32   *reslen;  /* Result List buffer length in bytes */
+      };
+
+   DESCRIPTION
+
+   This function posts a UtilParamsGet message to the device identified
+   by ops->iop and ops->tid.  The operation list for the message is 
+   sent through the ops->opbuf buffer, and the result list is written
+   into the buffer pointed to by ops->resbuf.  The actual size of data
+   written is placed into *(ops->reslen).
+
+   RETURNS
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ENOBUFS     Buffer not large enough.  If this occurs, the required
+                  buffer length is written into *(ops->reslen)
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+
+   A return value of 0 does not mean that the value was actually
+   properly retrieved.  The user should check the result list
+   to determine the specific status of the transaction.
+
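+   EXAMPLE
+
+   A sketch of the calling convention only; the contents of the operation
+   list and the layout of the result list are defined by the I2O
+   specification and are not shown here.  OPLEN, RESLEN and tid are
+   placeholders:
+
+      struct i2o_parm_setget ops;
+      u8 opbuf[OPLEN];              /* operation list built per the I2O spec */
+      u8 resbuf[RESLEN];
+      u32 reslen = sizeof(resbuf);
+
+      ops.iop    = 0;
+      ops.tid    = tid;             /* target device TID from the LCT */
+      ops.opbuf  = opbuf;
+      ops.oplen  = sizeof(opbuf);
+      ops.resbuf = resbuf;
+      ops.reslen = &reslen;
+
+      if (ioctl(fd, I2OPARMGET, &ops) < 0)
+         perror("I2OPARMGET");
+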
+VIII. Downloading Software
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OSWDL, struct i2o_sw_xfer *sw);
+
+      struct i2o_sw_xfer
+      {
+         u32   iop;       /* IOP unit number */
+         u8    flags;     /* DownloadFlags field */
+         u8    sw_type;   /* Software type */
+         u32   sw_id;     /* Software ID */
+         void  *buf;      /* Pointer to software buffer */
+         u32   *swlen;    /* Length of software buffer */        
+         u32   *maxfrag;  /* Number of fragments */
+         u32   *curfrag;  /* Current fragment number */
+      };
+
+   DESCRIPTION
+
+   This function downloads a software fragment pointed to by sw->buf
+   to the iop identified by sw->iop. The DownloadFlags, SwID, SwType
+   and SwSize fields of the ExecSwDownload message are filled in with
+   the values of sw->flags, sw->sw_id, sw->sw_type and *(sw->swlen).
+
+   The fragments _must_ be sent in order and be 8K in size. The last
+   fragment _may_ be shorter, however. The kernel will compute its
+   size based on information in the sw->swlen field.
+
+   Please note that SW transfers can take a long time.
+
+   RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+
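+   EXAMPLE
+
+   An illustrative sketch of the 8K fragment loop described above.  Error
+   handling is trimmed, image/image_size/type/id are the caller's data, and
+   the exact maxfrag/curfrag convention should be checked against the I2O
+   specification:
+
+      struct i2o_sw_xfer sw;
+      u32 swlen = image_size;
+      u32 maxfrag = (image_size + 8191) / 8192;
+      u32 curfrag;
+
+      sw.iop = 0;  sw.flags = 0;  sw.sw_type = type;  sw.sw_id = id;
+      sw.swlen = &swlen;  sw.maxfrag = &maxfrag;  sw.curfrag = &curfrag;
+
+      for (curfrag = 0; curfrag < maxfrag; curfrag++) {
+         sw.buf = image + curfrag * 8192;    /* last fragment may be shorter */
+         if (ioctl(fd, I2OSWDL, &sw) < 0) {
+            perror("I2OSWDL");
+            break;
+         }
+      }
+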
+IX. Uploading Software
+   
+   SYNOPSIS 
+
+   ioctl(fd, I2OSWUL, struct i2o_sw_xfer *sw);
+
+      struct i2o_sw_xfer
+      {
+         u32   iop;      /* IOP unit number */
+         u8    flags; 	 /* UploadFlags */
+         u8    sw_type;  /* Software type */
+         u32   sw_id;    /* Software ID */
+         void  *buf;     /* Pointer to software buffer */
+         u32   *swlen;   /* Length of software buffer */        
+         u32   *maxfrag; /* Number of fragments */
+         u32   *curfrag; /* Current fragment number */
+      };
+
+   DESCRIPTION
+
+   This function uploads a software fragment from the IOP identified
+   by sw->iop, sw->sw_type, sw->sw_id and optionally sw->swlen fields.
+   The UploadFlags, SwID, SwType and SwSize fields of the ExecSwUpload
+   message are filled in with the values of sw->flags, sw->sw_id,
+   sw->sw_type and *(sw->swlen).
+
+   The fragments _must_ be requested in order and be 8K in size. The
+   user is responsible for allocating memory pointed by sw->buf. The
+   last fragment _may_ be shorter.
+
+   Please note that SW transfers can take a long time.
+
+   RETURNS
+
+   This function returns 0 if no errors occur.  If an error occurs, -1
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+         
+X. Removing Software
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OSWDEL, struct i2o_sw_xfer *sw);
+
+      struct i2o_sw_xfer
+      {
+         u32   iop;      /* IOP unit number */
+         u8    flags; 	 /* RemoveFlags */
+         u8    sw_type;  /* Software type */
+         u32   sw_id;    /* Software ID */
+         void  *buf;     /* Unused */
+         u32   *swlen;   /* Length of the software data */        
+         u32   *maxfrag; /* Unused */
+         u32   *curfrag; /* Unused */
+      };
+
+   DESCRIPTION
+
+   This function removes software from the IOP identified by sw->iop.
+   The RemoveFlags, SwID, SwType and SwSize fields of the ExecSwRemove message 
+   are filled in with the values of sw->flags, sw->sw_id, sw->sw_type and 
+   *(sw->swlen). Give zero in *(sw->swlen) if the value is unknown. The IOP
+   uses the *(sw->swlen) value to verify correct identification of the
+   module to remove.
+   The actual size of the module is written into *(sw->swlen).
+
+   RETURNS
+
+   This function returns 0 if no errors occur.  If an error occurs, -1
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+
+XI. Validating Configuration
+
+   SYNOPSIS
+
+   ioctl(fd, I2OVALIDATE, int *iop);
+	u32 iop;
+
+   DESCRIPTION
+
+   This function posts an ExecConfigValidate message to the controller
+   identified by iop. This message indicates that the current
+   configuration is accepted. The IOP changes the status of suspect drivers
+   to valid and may delete old drivers from its store.
+
+   RETURNS
+
+   This function returns 0 if no errors occur.  If an error occurs, -1 is
+   returned and errno is set appropriately:
+
+      ETIMEDOUT   Timeout waiting for reply message
+      ENXIO       Invalid IOP number
+
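+   EXAMPLE
+
+   A one-line sketch; iop is the unit number of the controller whose
+   configuration should be validated:
+
+      u32 iop = 0;
+
+      if (ioctl(fd, I2OVALIDATE, &iop) < 0)
+         perror("I2OVALIDATE");
+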
+XII. Configuration Dialog
+   
+   SYNOPSIS 
+ 
+   ioctl(fd, I2OHTML, struct i2o_html *htquery);
+      struct i2o_html
+      {
+         u32   iop;      /* IOP unit number */
+         u32   tid;      /* Target device ID */
+         u32   page;     /* HTML page */
+         void  *resbuf;  /* Buffer for reply HTML page */
+         u32   *reslen;  /* Length in bytes of reply buffer */
+         void  *qbuf;    /* Pointer to HTTP query string */
+         u32   qlen;     /* Length in bytes of query string buffer */        
+      };
+
+   DESCRIPTION
+
+   This function posts an UtilConfigDialog message to the device identified
+   by htquery->iop and htquery->tid.  The requested HTML page number is 
+   provided by the htquery->page field, and the resultant data is stored 
+   in the buffer pointed to by htquery->resbuf.  If there is an HTTP query 
+   string that is to be sent to the device, it should be sent in the buffer
+   pointed to by htquery->qbuf.  If there is no query string, this field
+   should be set to NULL. The actual size of the reply received is written
+   into *(htquery->reslen).
+  
+   RETURNS
+
+   This function returns 0 if no errors occur. If an error occurs, -1
+   is returned and errno is set appropriately:
+
+      EFAULT      Invalid user space pointer was passed
+      ENXIO       Invalid IOP number
+      ENOBUFS     Buffer not large enough.  If this occurs, the required
+                  buffer length is written into *(htquery->reslen)
+      ETIMEDOUT   Timeout waiting for reply message
+      ENOMEM      Kernel memory allocation error
+
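+   EXAMPLE
+
+   An illustrative sketch requesting page 0 with no query string; the buffer
+   size and tid are placeholders:
+
+      struct i2o_html htquery;
+      char page[8192];
+      u32 reslen = sizeof(page);
+
+      htquery.iop    = 0;
+      htquery.tid    = tid;         /* device TID from the LCT */
+      htquery.page   = 0;
+      htquery.resbuf = page;
+      htquery.reslen = &reslen;
+      htquery.qbuf   = NULL;        /* no HTTP query string */
+      htquery.qlen   = 0;
+
+      if (ioctl(fd, I2OHTML, &htquery) < 0)
+         perror("I2OHTML");
+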
+XIII. Events
+
+    This is still being determined.  The current idea is to use
+    the select() interface to allow user apps to periodically poll
+    the /dev/i2o/ctl device for events.  When select() notifies the user
+    that an event is available, the user would call read() to retrieve
+    a list of all the events that are pending for the specific device.
+
+=============================================================================
+Revision History
+=============================================================================
+
+Rev 0.1 - 04/01/99
+- Initial revision
+
+Rev 0.2 - 04/06/99
+- Changed return values to match UNIX ioctl() standard.  Only return values
+  are 0 and -1.  All errors are reported through errno.
+- Added summary of proposed possible event interfaces
+
+Rev 0.3 - 04/20/99
+- Changed all ioctls() to use pointers to user data instead of actual data
+- Updated error values to match the code
diff --git a/drivers/message/i2o/debug.c b/drivers/message/i2o/debug.c
new file mode 100644
index 0000000..2a5d478
--- /dev/null
+++ b/drivers/message/i2o/debug.c
@@ -0,0 +1,481 @@
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+
+extern struct i2o_driver **i2o_drivers;
+extern unsigned int i2o_max_drivers;
+static void i2o_report_util_cmd(u8 cmd);
+static void i2o_report_exec_cmd(u8 cmd);
+static void i2o_report_fail_status(u8 req_status, u32 * msg);
+static void i2o_report_common_status(u8 req_status);
+static void i2o_report_common_dsc(u16 detailed_status);
+
+/*
+ * Used for error reporting/debugging purposes.
+ * Report Cmd name, Request status, Detailed Status.
+ */
+void i2o_report_status(const char *severity, const char *str,
+		       struct i2o_message *m)
+{
+	u32 *msg = (u32 *) m;
+	u8 cmd = (msg[1] >> 24) & 0xFF;
+	u8 req_status = (msg[4] >> 24) & 0xFF;
+	u16 detailed_status = msg[4] & 0xFFFF;
+	//struct i2o_driver *h = i2o_drivers[msg[2] & (i2o_max_drivers-1)];
+
+	if (cmd == I2O_CMD_UTIL_EVT_REGISTER)
+		return;		// No status in this reply
+
+	printk(KERN_DEBUG "%s%s: ", severity, str);
+
+	if (cmd < 0x1F)		// Utility cmd
+		i2o_report_util_cmd(cmd);
+
+	else if (cmd >= 0xA0 && cmd <= 0xEF)	// Executive cmd
+		i2o_report_exec_cmd(cmd);
+	else
+		printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);	// Other cmds
+
+	if (msg[0] & MSG_FAIL) {
+		i2o_report_fail_status(req_status, msg);
+		return;
+	}
+
+	i2o_report_common_status(req_status);
+
+	if (cmd < 0x1F || (cmd >= 0xA0 && cmd <= 0xEF))
+		i2o_report_common_dsc(detailed_status);
+	else
+		printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+		       detailed_status);
+}
+
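+/*
+ * Example (illustrative only): a reply handler that wants to log an
+ * unexpected reply could use something like
+ *
+ *	i2o_report_status(KERN_INFO, "i2o_block", msg);
+ *
+ * where msg points to the received struct i2o_message and "i2o_block" is
+ * just a sample caller name.
+ */
+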
+/* Used to dump a message to syslog during debugging */
+void i2o_dump_message(struct i2o_message *m)
+{
+#ifdef DEBUG
+	u32 *msg = (u32 *) m;
+	int i;
+	printk(KERN_INFO "Dumping I2O message size %d @ %p\n",
+	       msg[0] >> 16 & 0xffff, msg);
+	for (i = 0; i < ((msg[0] >> 16) & 0xffff); i++)
+		printk(KERN_INFO "  msg[%d] = %0#10x\n", i, msg[i]);
+#endif
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * The following fail statuses are common to all classes.
+ * The preserved message must be handled in the reply handler.
+ */
+static void i2o_report_fail_status(u8 req_status, u32 * msg)
+{
+	static char *FAIL_STATUS[] = {
+		"0x80",		/* not used */
+		"SERVICE_SUSPENDED",	/* 0x81 */
+		"SERVICE_TERMINATED",	/* 0x82 */
+		"CONGESTION",
+		"FAILURE",
+		"STATE_ERROR",
+		"TIME_OUT",
+		"ROUTING_FAILURE",
+		"INVALID_VERSION",
+		"INVALID_OFFSET",
+		"INVALID_MSG_FLAGS",
+		"FRAME_TOO_SMALL",
+		"FRAME_TOO_LARGE",
+		"INVALID_TARGET_ID",
+		"INVALID_INITIATOR_ID",
+		"INVALID_INITIATOR_CONTEXT",	/* 0x8F */
+		"UNKNOWN_FAILURE"	/* 0xFF */
+	};
+
+	if (req_status == I2O_FSC_TRANSPORT_UNKNOWN_FAILURE)
+		printk(KERN_DEBUG "TRANSPORT_UNKNOWN_FAILURE (%0#2x).\n",
+		       req_status);
+	else
+		printk(KERN_DEBUG "TRANSPORT_%s.\n",
+		       FAIL_STATUS[req_status & 0x0F]);
+
+	/* Dump some details */
+
+	printk(KERN_ERR "  InitiatorId = %d, TargetId = %d\n",
+	       (msg[1] >> 12) & 0xFFF, msg[1] & 0xFFF);
+	printk(KERN_ERR "  LowestVersion = 0x%02X, HighestVersion = 0x%02X\n",
+	       (msg[4] >> 8) & 0xFF, msg[4] & 0xFF);
+	printk(KERN_ERR "  FailingHostUnit = 0x%04X,  FailingIOP = 0x%03X\n",
+	       msg[5] >> 16, msg[5] & 0xFFF);
+
+	printk(KERN_ERR "  Severity:  0x%02X ", (msg[4] >> 16) & 0xFF);
+	if (msg[4] & (1 << 16))
+		printk(KERN_DEBUG "(FormatError), "
+		       "this msg can never be delivered/processed.\n");
+	if (msg[4] & (1 << 17))
+		printk(KERN_DEBUG "(PathError), "
+		       "this msg can no longer be delivered/processed.\n");
+	if (msg[4] & (1 << 18))
+		printk(KERN_DEBUG "(PathState), "
+		       "the system state does not allow delivery.\n");
+	if (msg[4] & (1 << 19))
+		printk(KERN_DEBUG
+		       "(Congestion), resources temporarily not available;"
+		       "do not retry immediately.\n");
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * The following reply statuses are common to all classes.
+ */
+static void i2o_report_common_status(u8 req_status)
+{
+	static char *REPLY_STATUS[] = {
+		"SUCCESS",
+		"ABORT_DIRTY",
+		"ABORT_NO_DATA_TRANSFER",
+		"ABORT_PARTIAL_TRANSFER",
+		"ERROR_DIRTY",
+		"ERROR_NO_DATA_TRANSFER",
+		"ERROR_PARTIAL_TRANSFER",
+		"PROCESS_ABORT_DIRTY",
+		"PROCESS_ABORT_NO_DATA_TRANSFER",
+		"PROCESS_ABORT_PARTIAL_TRANSFER",
+		"TRANSACTION_ERROR",
+		"PROGRESS_REPORT"
+	};
+
+	if (req_status >= ARRAY_SIZE(REPLY_STATUS))
+		printk(KERN_DEBUG "RequestStatus = %0#2x", req_status);
+	else
+		printk(KERN_DEBUG "%s", REPLY_STATUS[req_status]);
+}
+
+/*
+ * Used for error reporting/debugging purposes.
+ * The following detailed statuses are valid for executive class,
+ * utility class, DDM class and for transaction error replies.
+ */
+static void i2o_report_common_dsc(u16 detailed_status)
+{
+	static char *COMMON_DSC[] = {
+		"SUCCESS",
+		"0x01",		// not used
+		"BAD_KEY",
+		"TCL_ERROR",
+		"REPLY_BUFFER_FULL",
+		"NO_SUCH_PAGE",
+		"INSUFFICIENT_RESOURCE_SOFT",
+		"INSUFFICIENT_RESOURCE_HARD",
+		"0x08",		// not used
+		"CHAIN_BUFFER_TOO_LARGE",
+		"UNSUPPORTED_FUNCTION",
+		"DEVICE_LOCKED",
+		"DEVICE_RESET",
+		"INAPPROPRIATE_FUNCTION",
+		"INVALID_INITIATOR_ADDRESS",
+		"INVALID_MESSAGE_FLAGS",
+		"INVALID_OFFSET",
+		"INVALID_PARAMETER",
+		"INVALID_REQUEST",
+		"INVALID_TARGET_ADDRESS",
+		"MESSAGE_TOO_LARGE",
+		"MESSAGE_TOO_SMALL",
+		"MISSING_PARAMETER",
+		"TIMEOUT",
+		"UNKNOWN_ERROR",
+		"UNKNOWN_FUNCTION",
+		"UNSUPPORTED_VERSION",
+		"DEVICE_BUSY",
+		"DEVICE_NOT_AVAILABLE"
+	};
+
+	if (detailed_status > I2O_DSC_DEVICE_NOT_AVAILABLE)
+		printk(KERN_DEBUG " / DetailedStatus = %0#4x.\n",
+		       detailed_status);
+	else
+		printk(KERN_DEBUG " / %s.\n", COMMON_DSC[detailed_status]);
+}
+
+/*
+ * Used for error reporting/debugging purposes
+ */
+static void i2o_report_util_cmd(u8 cmd)
+{
+	switch (cmd) {
+	case I2O_CMD_UTIL_NOP:
+		printk(KERN_DEBUG "UTIL_NOP, ");
+		break;
+	case I2O_CMD_UTIL_ABORT:
+		printk(KERN_DEBUG "UTIL_ABORT, ");
+		break;
+	case I2O_CMD_UTIL_CLAIM:
+		printk(KERN_DEBUG "UTIL_CLAIM, ");
+		break;
+	case I2O_CMD_UTIL_RELEASE:
+		printk(KERN_DEBUG "UTIL_CLAIM_RELEASE, ");
+		break;
+	case I2O_CMD_UTIL_CONFIG_DIALOG:
+		printk(KERN_DEBUG "UTIL_CONFIG_DIALOG, ");
+		break;
+	case I2O_CMD_UTIL_DEVICE_RESERVE:
+		printk(KERN_DEBUG "UTIL_DEVICE_RESERVE, ");
+		break;
+	case I2O_CMD_UTIL_DEVICE_RELEASE:
+		printk(KERN_DEBUG "UTIL_DEVICE_RELEASE, ");
+		break;
+	case I2O_CMD_UTIL_EVT_ACK:
+		printk(KERN_DEBUG "UTIL_EVENT_ACKNOWLEDGE, ");
+		break;
+	case I2O_CMD_UTIL_EVT_REGISTER:
+		printk(KERN_DEBUG "UTIL_EVENT_REGISTER, ");
+		break;
+	case I2O_CMD_UTIL_LOCK:
+		printk(KERN_DEBUG "UTIL_LOCK, ");
+		break;
+	case I2O_CMD_UTIL_LOCK_RELEASE:
+		printk(KERN_DEBUG "UTIL_LOCK_RELEASE, ");
+		break;
+	case I2O_CMD_UTIL_PARAMS_GET:
+		printk(KERN_DEBUG "UTIL_PARAMS_GET, ");
+		break;
+	case I2O_CMD_UTIL_PARAMS_SET:
+		printk(KERN_DEBUG "UTIL_PARAMS_SET, ");
+		break;
+	case I2O_CMD_UTIL_REPLY_FAULT_NOTIFY:
+		printk(KERN_DEBUG "UTIL_REPLY_FAULT_NOTIFY, ");
+		break;
+	default:
+		printk(KERN_DEBUG "Cmd = %0#2x, ", cmd);
+	}
+}
+
+/*
+ * Used for error reporting/debugging purposes
+ */
+static void i2o_report_exec_cmd(u8 cmd)
+{
+	switch (cmd) {
+	case I2O_CMD_ADAPTER_ASSIGN:
+		printk(KERN_DEBUG "EXEC_ADAPTER_ASSIGN, ");
+		break;
+	case I2O_CMD_ADAPTER_READ:
+		printk(KERN_DEBUG "EXEC_ADAPTER_READ, ");
+		break;
+	case I2O_CMD_ADAPTER_RELEASE:
+		printk(KERN_DEBUG "EXEC_ADAPTER_RELEASE, ");
+		break;
+	case I2O_CMD_BIOS_INFO_SET:
+		printk(KERN_DEBUG "EXEC_BIOS_INFO_SET, ");
+		break;
+	case I2O_CMD_BOOT_DEVICE_SET:
+		printk(KERN_DEBUG "EXEC_BOOT_DEVICE_SET, ");
+		break;
+	case I2O_CMD_CONFIG_VALIDATE:
+		printk(KERN_DEBUG "EXEC_CONFIG_VALIDATE, ");
+		break;
+	case I2O_CMD_CONN_SETUP:
+		printk(KERN_DEBUG "EXEC_CONN_SETUP, ");
+		break;
+	case I2O_CMD_DDM_DESTROY:
+		printk(KERN_DEBUG "EXEC_DDM_DESTROY, ");
+		break;
+	case I2O_CMD_DDM_ENABLE:
+		printk(KERN_DEBUG "EXEC_DDM_ENABLE, ");
+		break;
+	case I2O_CMD_DDM_QUIESCE:
+		printk(KERN_DEBUG "EXEC_DDM_QUIESCE, ");
+		break;
+	case I2O_CMD_DDM_RESET:
+		printk(KERN_DEBUG "EXEC_DDM_RESET, ");
+		break;
+	case I2O_CMD_DDM_SUSPEND:
+		printk(KERN_DEBUG "EXEC_DDM_SUSPEND, ");
+		break;
+	case I2O_CMD_DEVICE_ASSIGN:
+		printk(KERN_DEBUG "EXEC_DEVICE_ASSIGN, ");
+		break;
+	case I2O_CMD_DEVICE_RELEASE:
+		printk(KERN_DEBUG "EXEC_DEVICE_RELEASE, ");
+		break;
+	case I2O_CMD_HRT_GET:
+		printk(KERN_DEBUG "EXEC_HRT_GET, ");
+		break;
+	case I2O_CMD_ADAPTER_CLEAR:
+		printk(KERN_DEBUG "EXEC_IOP_CLEAR, ");
+		break;
+	case I2O_CMD_ADAPTER_CONNECT:
+		printk(KERN_DEBUG "EXEC_IOP_CONNECT, ");
+		break;
+	case I2O_CMD_ADAPTER_RESET:
+		printk(KERN_DEBUG "EXEC_IOP_RESET, ");
+		break;
+	case I2O_CMD_LCT_NOTIFY:
+		printk(KERN_DEBUG "EXEC_LCT_NOTIFY, ");
+		break;
+	case I2O_CMD_OUTBOUND_INIT:
+		printk(KERN_DEBUG "EXEC_OUTBOUND_INIT, ");
+		break;
+	case I2O_CMD_PATH_ENABLE:
+		printk(KERN_DEBUG "EXEC_PATH_ENABLE, ");
+		break;
+	case I2O_CMD_PATH_QUIESCE:
+		printk(KERN_DEBUG "EXEC_PATH_QUIESCE, ");
+		break;
+	case I2O_CMD_PATH_RESET:
+		printk(KERN_DEBUG "EXEC_PATH_RESET, ");
+		break;
+	case I2O_CMD_STATIC_MF_CREATE:
+		printk(KERN_DEBUG "EXEC_STATIC_MF_CREATE, ");
+		break;
+	case I2O_CMD_STATIC_MF_RELEASE:
+		printk(KERN_DEBUG "EXEC_STATIC_MF_RELEASE, ");
+		break;
+	case I2O_CMD_STATUS_GET:
+		printk(KERN_DEBUG "EXEC_STATUS_GET, ");
+		break;
+	case I2O_CMD_SW_DOWNLOAD:
+		printk(KERN_DEBUG "EXEC_SW_DOWNLOAD, ");
+		break;
+	case I2O_CMD_SW_UPLOAD:
+		printk(KERN_DEBUG "EXEC_SW_UPLOAD, ");
+		break;
+	case I2O_CMD_SW_REMOVE:
+		printk(KERN_DEBUG "EXEC_SW_REMOVE, ");
+		break;
+	case I2O_CMD_SYS_ENABLE:
+		printk(KERN_DEBUG "EXEC_SYS_ENABLE, ");
+		break;
+	case I2O_CMD_SYS_MODIFY:
+		printk(KERN_DEBUG "EXEC_SYS_MODIFY, ");
+		break;
+	case I2O_CMD_SYS_QUIESCE:
+		printk(KERN_DEBUG "EXEC_SYS_QUIESCE, ");
+		break;
+	case I2O_CMD_SYS_TAB_SET:
+		printk(KERN_DEBUG "EXEC_SYS_TAB_SET, ");
+		break;
+	default:
+		printk(KERN_DEBUG "Cmd = %#02x, ", cmd);
+	}
+}
+
+void i2o_debug_state(struct i2o_controller *c)
+{
+	printk(KERN_INFO "%s: State = ", c->name);
+	switch (((i2o_status_block *) c->status_block.virt)->iop_state) {
+	case 0x01:
+		printk(KERN_DEBUG "INIT\n");
+		break;
+	case 0x02:
+		printk(KERN_DEBUG "RESET\n");
+		break;
+	case 0x04:
+		printk(KERN_DEBUG "HOLD\n");
+		break;
+	case 0x05:
+		printk(KERN_DEBUG "READY\n");
+		break;
+	case 0x08:
+		printk(KERN_DEBUG "OPERATIONAL\n");
+		break;
+	case 0x10:
+		printk(KERN_DEBUG "FAILED\n");
+		break;
+	case 0x11:
+		printk(KERN_DEBUG "FAULTED\n");
+		break;
+	default:
+		printk(KERN_DEBUG "%x (unknown !!)\n",
+		       ((i2o_status_block *) c->status_block.virt)->iop_state);
+	}
+};
+
+void i2o_dump_hrt(struct i2o_controller *c)
+{
+	u32 *rows = (u32 *) c->hrt.virt;
+	u8 *p = (u8 *) c->hrt.virt;
+	u8 *d;
+	int count;
+	int length;
+	int i;
+	int state;
+
+	if (p[3] != 0) {
+		printk(KERN_ERR
+		       "%s: HRT table for controller is too new a version.\n",
+		       c->name);
+		return;
+	}
+
+	count = p[0] | (p[1] << 8);
+	length = p[2];
+
+	printk(KERN_INFO "%s: HRT has %d entries of %d bytes each.\n",
+	       c->name, count, length << 2);
+
+	rows += 2;
+
+	for (i = 0; i < count; i++) {
+		printk(KERN_INFO "Adapter %08X: ", rows[0]);
+		p = (u8 *) (rows + 1);
+		d = (u8 *) (rows + 2);
+		state = p[1] << 8 | p[0];
+
+		printk(KERN_DEBUG "TID %04X:[", state & 0xFFF);
+		state >>= 12;
+		if (state & (1 << 0))
+			printk(KERN_DEBUG "H");	/* Hidden */
+		if (state & (1 << 2)) {
+			printk(KERN_DEBUG "P");	/* Present */
+			if (state & (1 << 1))
+				printk(KERN_DEBUG "C");	/* Controlled */
+		}
+		if (state > 9)
+			printk(KERN_DEBUG "*");	/* Hard */
+
+		printk(KERN_DEBUG "]:");
+
+		switch (p[3] & 0xFFFF) {
+		case 0:
+			/* Adapter private bus - easy */
+			printk(KERN_DEBUG
+			       "Local bus %d: I/O at 0x%04X Mem 0x%08X", p[2],
+			       d[1] << 8 | d[0], *(u32 *) (d + 4));
+			break;
+		case 1:
+			/* ISA bus */
+			printk(KERN_DEBUG
+			       "ISA %d: CSN %d I/O at 0x%04X Mem 0x%08X", p[2],
+			       d[2], d[1] << 8 | d[0], *(u32 *) (d + 4));
+			break;
+
+		case 2:	/* EISA bus */
+			printk(KERN_DEBUG
+			       "EISA %d: Slot %d I/O at 0x%04X Mem 0x%08X",
+			       p[2], d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
+			break;
+
+		case 3:	/* MCA bus */
+			printk(KERN_DEBUG
+			       "MCA %d: Slot %d I/O at 0x%04X Mem 0x%08X", p[2],
+			       d[3], d[1] << 8 | d[0], *(u32 *) (d + 4));
+			break;
+
+		case 4:	/* PCI bus */
+			printk(KERN_DEBUG
+			       "PCI %d: Bus %d Device %d Function %d", p[2],
+			       d[2], d[1], d[0]);
+			break;
+
+		case 0x80:	/* Other */
+		default:
+			printk(KERN_DEBUG "Unsupported bus type.");
+			break;
+		}
+		printk(KERN_DEBUG "\n");
+		rows += length;
+	}
+}
+
+EXPORT_SYMBOL(i2o_dump_message);
diff --git a/drivers/message/i2o/device.c b/drivers/message/i2o/device.c
new file mode 100644
index 0000000..eb907e8
--- /dev/null
+++ b/drivers/message/i2o/device.c
@@ -0,0 +1,634 @@
+/*
+ *	Functions to handle I2O devices
+ *
+ *	Copyright (C) 2004	Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	Fixes/additions:
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *			initial version.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+
+/* Exec OSM functions */
+extern struct bus_type i2o_bus_type;
+
+/**
+ *	i2o_device_issue_claim - claim or release a device
+ *	@dev: I2O device to claim or release
+ *	@cmd: claim or release command
+ *	@type: type of claim
+ *
+ *	Issue I2O UTIL_CLAIM or UTIL_RELEASE messages. The message to be sent
+ *	is set by cmd. dev is the I2O device which should be claimed or
+ *	released and the type is the claim type (see the I2O spec).
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
+					 u32 type)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid, &msg->u.head[1]);
+	writel(type, &msg->body[0]);
+
+	return i2o_msg_post_wait(dev->iop, m, 60);
+};
+
+/**
+ * 	i2o_device_claim - claim a device for use by an OSM
+ *	@dev: I2O device to claim
+ *	@drv: I2O driver which wants to claim the device
+ *
+ *	Do the leg work to assign a device to a given OSM. If the claim
+ *	succeeds, the OSM becomes the primary user of the device. If the
+ *	attempt fails a negative errno code is returned. On success zero
+ *	is returned.
+ */
+int i2o_device_claim(struct i2o_device *dev)
+{
+	int rc = 0;
+
+	down(&dev->lock);
+
+	rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
+	if (!rc)
+		pr_debug("i2o: claim of device %d succeeded\n",
+			 dev->lct_data.tid);
+	else
+		pr_debug("i2o: claim of device %d failed %d\n",
+			 dev->lct_data.tid, rc);
+
+	up(&dev->lock);
+
+	return rc;
+};
+
+/**
+ *	i2o_device_claim_release - release a device that the OSM is using
+ *	@dev: device to release
+ *	@drv: driver which claimed the device
+ *
+ *	Drop a claim by an OSM on a given I2O device.
+ *
+ *	AC - some devices seem to want to refuse an unclaim until they have
+ *	finished internal processing. It makes sense since you don't want a
+ *	new device to go reconfiguring the entire system until you are done.
+ *	Thus we are prepared to wait briefly.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_device_claim_release(struct i2o_device *dev)
+{
+	int tries;
+	int rc = 0;
+
+	down(&dev->lock);
+
+	/*
+	 *      If the controller takes a nonblocking approach to
+	 *      releases we have to sleep/poll for a few times.
+	 */
+	for (tries = 0; tries < 10; tries++) {
+		rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE,
+					    I2O_CLAIM_PRIMARY);
+		if (!rc)
+			break;
+
+		ssleep(1);
+	}
+
+	if (!rc)
+		pr_debug("i2o: claim release of device %d succeeded\n",
+			 dev->lct_data.tid);
+	else
+		pr_debug("i2o: claim release of device %d failed %d\n",
+			 dev->lct_data.tid, rc);
+
+	up(&dev->lock);
+
+	return rc;
+};
+
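+/*
+ * Example (illustrative only): an OSM typically claims a device before
+ * using it and drops the claim again when it is done, e.g.
+ *
+ *	rc = i2o_device_claim(i2o_dev);
+ *	if (rc)
+ *		return rc;
+ *	...
+ *	i2o_device_claim_release(i2o_dev);
+ */
+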
+/**
+ *	i2o_device_release - release the memory for an I2O device
+ *	@dev: I2O device which should be released
+ *
+ *	Release the allocated memory. This function is called automatically
+ *	when the refcount of the device reaches 0.
+ */
+static void i2o_device_release(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+	pr_debug("i2o: device %s released\n", dev->bus_id);
+
+	kfree(i2o_dev);
+};
+
+/**
+ *	i2o_device_class_release - Remove I2O device attributes
+ *	@cd: I2O class device which is added to the I2O device class
+ *
+ *	Removes attributes from the I2O device again. Also searches each device
+ *	on the controller for I2O devices which refer to this device as parent
+ *	or user and removes these links as well.
+ */
+static void i2o_device_class_release(struct class_device *cd)
+{
+	struct i2o_device *i2o_dev, *tmp;
+	struct i2o_controller *c;
+
+	i2o_dev = to_i2o_device(cd->dev);
+	c = i2o_dev->iop;
+
+	sysfs_remove_link(&i2o_dev->device.kobj, "parent");
+	sysfs_remove_link(&i2o_dev->device.kobj, "user");
+
+	list_for_each_entry(tmp, &c->devices, list) {
+		if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+			sysfs_remove_link(&tmp->device.kobj, "parent");
+		if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+			sysfs_remove_link(&tmp->device.kobj, "user");
+	}
+};
+
+/* I2O device class */
+static struct class i2o_device_class = {
+	.name = "i2o_device",
+	.release = i2o_device_class_release
+};
+
+/**
+ *	i2o_device_alloc - Allocate an I2O device and initialize it
+ *
+ *	Allocate the memory for an I2O device and initialize locks and lists
+ *
+ *	Returns the allocated I2O device or a negative error code if the device
+ *	could not be allocated.
+ */
+static struct i2o_device *i2o_device_alloc(void)
+{
+	struct i2o_device *dev;
+
+	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return ERR_PTR(-ENOMEM);
+
+	memset(dev, 0, sizeof(*dev));
+
+	INIT_LIST_HEAD(&dev->list);
+	init_MUTEX(&dev->lock);
+
+	dev->device.bus = &i2o_bus_type;
+	dev->device.release = &i2o_device_release;
+	dev->classdev.class = &i2o_device_class;
+	dev->classdev.dev = &dev->device;
+
+	return dev;
+};
+
+/**
+ *	i2o_device_add - allocate a new I2O device and add it to the IOP
+ *	@c: I2O controller the device is on
+ *	@entry: LCT entry of the I2O device
+ *
+ *	Allocate a new I2O device and initialize it with the LCT entry. The
+ *	device is appended to the device list of the controller.
+ *
+ *	Returns a pointer to the I2O device on success or negative error code
+ *	on failure.
+ */
+static struct i2o_device *i2o_device_add(struct i2o_controller *c,
+					 i2o_lct_entry * entry)
+{
+	struct i2o_device *dev;
+
+	dev = i2o_device_alloc();
+	if (IS_ERR(dev)) {
+		printk(KERN_ERR "i2o: unable to allocate i2o device\n");
+		return dev;
+	}
+
+	dev->lct_data = *entry;
+
+	snprintf(dev->device.bus_id, BUS_ID_SIZE, "%d:%03x", c->unit,
+		 dev->lct_data.tid);
+
+	snprintf(dev->classdev.class_id, BUS_ID_SIZE, "%d:%03x", c->unit,
+		 dev->lct_data.tid);
+
+	dev->iop = c;
+	dev->device.parent = &c->device;
+
+	device_register(&dev->device);
+
+	list_add_tail(&dev->list, &c->devices);
+
+	class_device_register(&dev->classdev);
+
+	i2o_driver_notify_device_add_all(dev);
+
+	pr_debug("i2o: device %s added\n", dev->device.bus_id);
+
+	return dev;
+};
+
+/**
+ *	i2o_device_remove - remove an I2O device from the I2O core
+ *	@dev: I2O device which should be released
+ *
+ *	Is used on I2O controller removal or LCT modification, when the device
+ *	is removed from the system. Note that the device could still hang
+ *	around until the refcount reaches 0.
+ */
+void i2o_device_remove(struct i2o_device *i2o_dev)
+{
+	i2o_driver_notify_device_remove_all(i2o_dev);
+	class_device_unregister(&i2o_dev->classdev);
+	list_del(&i2o_dev->list);
+	device_unregister(&i2o_dev->device);
+};
+
+/**
+ *	i2o_device_parse_lct - Parse a previously fetched LCT and create devices
+ *	@c: I2O controller from which the LCT should be parsed.
+ *
+ *	The Logical Configuration Table tells us what we can talk to on the
+ *	board. For every entry we create an I2O device, which is registered in
+ *	the I2O core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_device_parse_lct(struct i2o_controller *c)
+{
+	struct i2o_device *dev, *tmp;
+	i2o_lct *lct;
+	int i;
+	int max;
+
+	down(&c->lct_lock);
+
+	if (c->lct)
+		kfree(c->lct);
+
+	lct = c->dlct.virt;
+
+	c->lct = kmalloc(lct->table_size * 4, GFP_KERNEL);
+	if (!c->lct) {
+		up(&c->lct_lock);
+		return -ENOMEM;
+	}
+
+	if (lct->table_size * 4 > c->dlct.len) {
+		memcpy_fromio(c->lct, c->dlct.virt, c->dlct.len);
+		up(&c->lct_lock);
+		return -EAGAIN;
+	}
+
+	memcpy_fromio(c->lct, c->dlct.virt, lct->table_size * 4);
+
+	lct = c->lct;
+
+	max = (lct->table_size - 3) / 9;
+
+	pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name, max,
+		 lct->table_size);
+
+	/* remove devices, which are not in the LCT anymore */
+	list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+		int found = 0;
+
+		for (i = 0; i < max; i++) {
+			if (lct->lct_entry[i].tid == dev->lct_data.tid) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found)
+			i2o_device_remove(dev);
+	}
+
+	/* add new devices, which are new in the LCT */
+	for (i = 0; i < max; i++) {
+		int found = 0;
+
+		list_for_each_entry_safe(dev, tmp, &c->devices, list) {
+			if (lct->lct_entry[i].tid == dev->lct_data.tid) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found)
+			i2o_device_add(c, &lct->lct_entry[i]);
+	}
+	up(&c->lct_lock);
+
+	return 0;
+};
+
+/**
+ *	i2o_device_class_show_class_id - Displays class id of I2O device
+ *	@cd: class device of which the class id should be displayed
+ *	@buf: buffer into which the class id should be printed
+ *
+ *	Returns the number of bytes which are printed into the buffer.
+ */
+static ssize_t i2o_device_class_show_class_id(struct class_device *cd,
+					      char *buf)
+{
+	struct i2o_device *dev = to_i2o_device(cd->dev);
+
+	sprintf(buf, "%03x\n", dev->lct_data.class_id);
+	return strlen(buf) + 1;
+};
+
+/**
+ *	i2o_device_class_show_tid - Displays TID of I2O device
+ *	@cd: class device of which the TID should be displayed
+ *	@buf: buffer into which the TID should be printed
+ *
+ *	Returns the number of bytes which are printed into the buffer.
+ */
+static ssize_t i2o_device_class_show_tid(struct class_device *cd, char *buf)
+{
+	struct i2o_device *dev = to_i2o_device(cd->dev);
+
+	sprintf(buf, "%03x\n", dev->lct_data.tid);
+	return strlen(buf) + 1;
+};
+
+/* I2O device class attributes */
+static CLASS_DEVICE_ATTR(class_id, S_IRUGO, i2o_device_class_show_class_id,
+			 NULL);
+static CLASS_DEVICE_ATTR(tid, S_IRUGO, i2o_device_class_show_tid, NULL);
+
+/**
+ *	i2o_device_class_add - Adds attributes to the I2O device
+ *	@cd: I2O class device which is added to the I2O device class
+ *
+ *	This function gets called when an I2O device is added to the class. It
+ *	creates the attributes for each device and creates user/parent symlinks
+ *	if necessary.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_device_class_add(struct class_device *cd)
+{
+	struct i2o_device *i2o_dev, *tmp;
+	struct i2o_controller *c;
+
+	i2o_dev = to_i2o_device(cd->dev);
+	c = i2o_dev->iop;
+
+	class_device_create_file(cd, &class_device_attr_class_id);
+	class_device_create_file(cd, &class_device_attr_tid);
+
+	/* create user entries for this device */
+	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
+	if (tmp)
+		sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+				  "user");
+
+	/* create user entries referring to this device */
+	list_for_each_entry(tmp, &c->devices, list)
+	    if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
+		sysfs_create_link(&tmp->device.kobj,
+				  &i2o_dev->device.kobj, "user");
+
+	/* create parent entries for this device */
+	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
+	if (tmp)
+		sysfs_create_link(&i2o_dev->device.kobj, &tmp->device.kobj,
+				  "parent");
+
+	/* create parent entries referring to this device */
+	list_for_each_entry(tmp, &c->devices, list)
+	    if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
+		sysfs_create_link(&tmp->device.kobj,
+				  &i2o_dev->device.kobj, "parent");
+
+	return 0;
+};
+
+/* I2O device class interface */
+static struct class_interface i2o_device_class_interface = {
+	.class = &i2o_device_class,
+	.add = i2o_device_class_add
+};
+
+/*
+ *	Run time support routines
+ */
+
+/*	Issue UTIL_PARAMS_GET or UTIL_PARAMS_SET
+ *
+ *	This function can be used for all UtilParamsGet/Set operations.
+ *	The OperationList is given in oplist-buffer,
+ *	and results are returned in reslist-buffer.
+ *	Note that the minimum sized reslist is 8 bytes and contains
+ *	ResultCount, ErrorInfoSize, BlockStatus and BlockSize.
+ */
+
+int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
+		   int oplen, void *reslist, int reslen)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	u32 *res32 = (u32 *) reslist;
+	u32 *restmp = (u32 *) reslist;
+	int len = 0;
+	int i = 0;
+	int rc;
+	struct i2o_dma res;
+	struct i2o_controller *c = i2o_dev->iop;
+	struct device *dev = &c->pdev->dev;
+
+	res.virt = NULL;
+
+	if (i2o_dma_alloc(dev, &res, reslen, GFP_KERNEL))
+		return -ENOMEM;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY) {
+		i2o_dma_free(dev, &res);
+		return -ETIMEDOUT;
+	}
+
+	i = 0;
+	writel(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid,
+	       &msg->u.head[1]);
+	writel(0, &msg->body[i++]);
+	writel(0x4C000000 | oplen, &msg->body[i++]);	/* OperationList */
+	memcpy_toio(&msg->body[i], oplist, oplen);
+	i += (oplen / 4 + (oplen % 4 ? 1 : 0));
+	writel(0xD0000000 | res.len, &msg->body[i++]);	/* ResultList */
+	writel(res.phys, &msg->body[i++]);
+
+	writel(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
+	       SGL_OFFSET_5, &msg->u.head[0]);
+
+	rc = i2o_msg_post_wait_mem(c, m, 10, &res);
+
+	/* This only looks like a memory leak - don't "fix" it. */
+	if (rc == -ETIMEDOUT)
+		return rc;
+
+	memcpy_fromio(reslist, res.virt, res.len);
+	i2o_dma_free(dev, &res);
+
+	/* Query failed */
+	if (rc)
+		return rc;
+	/*
+	 * Calculate number of bytes of Result LIST
+	 * We need to loop through each Result BLOCK and grab the length
+	 */
+	restmp = res32 + 1;
+	len = 1;
+	for (i = 0; i < (res32[0] & 0X0000FFFF); i++) {
+		if (restmp[0] & 0x00FF0000) {	/* BlockStatus != SUCCESS */
+			printk(KERN_WARNING
+			       "%s - Error:\n  ErrorInfoSize = 0x%02x, "
+			       "BlockStatus = 0x%02x, BlockSize = 0x%04x\n",
+			       (cmd ==
+				I2O_CMD_UTIL_PARAMS_SET) ? "PARAMS_SET" :
+			       "PARAMS_GET", res32[1] >> 24,
+			       (res32[1] >> 16) & 0xFF, res32[1] & 0xFFFF);
+
+			/*
+			 *      If this is the only request, then we return an error
+			 */
+			if ((res32[0] & 0x0000FFFF) == 1) {
+				return -((res32[1] >> 16) & 0xFF);	/* -BlockStatus */
+			}
+		}
+		len += restmp[0] & 0x0000FFFF;	/* Length of res BLOCK */
+		restmp += restmp[0] & 0x0000FFFF;	/* Skip to next BLOCK */
+	}
+	return (len << 2);	/* bytes used by result list */
+}
+
+/*
+ *	 Query one field group value or a whole scalar group.
+ */
+int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
+		       void *buf, int buflen)
+{
+	u16 opblk[] = { 1, 0, I2O_PARAMS_FIELD_GET, group, 1, field };
+	u8 resblk[8 + buflen];	/* 8 bytes for header */
+	int size;
+
+	if (field == -1)	/* whole group */
+		opblk[4] = -1;
+
+	size = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+			      sizeof(opblk), resblk, sizeof(resblk));
+
+	memcpy(buf, resblk + 8, buflen);	/* cut off header */
+
+	if (size > buflen)
+		return buflen;
+
+	return size;
+}
+
+/*
+ * 	if oper == I2O_PARAMS_TABLE_GET, get from all rows
+ * 		if fieldcount == -1 return all fields
+ *			ibuf and ibuflen are unused (use NULL, 0)
+ * 		else return specific fields
+ *  			ibuf contains fieldindexes
+ *
+ * 	if oper == I2O_PARAMS_LIST_GET, get from specific rows
+ * 		if fieldcount == -1 return all fields
+ *			ibuf contains rowcount, keyvalues
+ * 		else return specific fields
+ *			fieldcount is # of fieldindexes
+ *  			ibuf contains fieldindexes, rowcount, keyvalues
+ *
+ *	You could also use directly function i2o_issue_params().
+ */
+int i2o_parm_table_get(struct i2o_device *dev, int oper, int group,
+		       int fieldcount, void *ibuf, int ibuflen, void *resblk,
+		       int reslen)
+{
+	u16 *opblk;
+	int size;
+
+	size = 10 + ibuflen;
+	if (size % 4)
+		size += 4 - size % 4;
+
+	opblk = kmalloc(size, GFP_KERNEL);
+	if (opblk == NULL) {
+		printk(KERN_ERR "i2o: no memory for query buffer.\n");
+		return -ENOMEM;
+	}
+
+	opblk[0] = 1;		/* operation count */
+	opblk[1] = 0;		/* pad */
+	opblk[2] = oper;
+	opblk[3] = group;
+	opblk[4] = fieldcount;
+	memcpy(opblk + 5, ibuf, ibuflen);	/* other params */
+
+	size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
+			      size, resblk, reslen);
+
+	kfree(opblk);
+	if (size > reslen)
+		return reslen;
+
+	return size;
+}
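+
+/*
+ *	Illustrative use (the group value is a placeholder): as described
+ *	above, all fields of all rows of a table group can be fetched by
+ *	passing fieldcount == -1 and no input buffer:
+ *
+ *		i2o_parm_table_get(i2o_dev, I2O_PARAMS_TABLE_GET, group,
+ *				   -1, NULL, 0, resblk, sizeof(resblk));
+ */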
+
+/**
+ *	i2o_device_init - Initialize I2O devices
+ *
+ *	Registers the I2O device class.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_device_init(void)
+{
+	int rc;
+
+	rc = class_register(&i2o_device_class);
+	if (rc)
+		return rc;
+
+	return class_interface_register(&i2o_device_class_interface);
+};
+
+/**
+ *	i2o_device_exit - I2O devices exit function
+ *
+ *	Unregisters the I2O device class.
+ */
+void i2o_device_exit(void)
+{
+	class_interface_unregister(&i2o_device_class_interface);
+	class_unregister(&i2o_device_class);
+};
+
+EXPORT_SYMBOL(i2o_device_claim);
+EXPORT_SYMBOL(i2o_device_claim_release);
+EXPORT_SYMBOL(i2o_parm_field_get);
+EXPORT_SYMBOL(i2o_parm_table_get);
+EXPORT_SYMBOL(i2o_parm_issue);
diff --git a/drivers/message/i2o/driver.c b/drivers/message/i2o/driver.c
new file mode 100644
index 0000000..91f4edb
--- /dev/null
+++ b/drivers/message/i2o/driver.c
@@ -0,0 +1,374 @@
+/*
+ *	Functions to handle I2O drivers (OSMs) and I2O bus type for sysfs
+ *
+ *	Copyright (C) 2004	Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	Fixes/additions:
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *			initial version.
+ */
+
+#include <linux/device.h>
+#include <linux/module.h>
+#include <linux/rwsem.h>
+#include <linux/i2o.h>
+
+/* max_drivers - Maximum I2O drivers (OSMs) which could be registered */
+unsigned int i2o_max_drivers = I2O_MAX_DRIVERS;
+module_param_named(max_drivers, i2o_max_drivers, uint, 0);
+MODULE_PARM_DESC(max_drivers, "maximum number of OSM's to support");
+
+/* I2O drivers lock and array */
+static spinlock_t i2o_drivers_lock;
+static struct i2o_driver **i2o_drivers;
+
+/**
+ *	i2o_bus_match - Tell if an I2O device class id matches the class ids
+ *			of the I2O driver (OSM)
+ *
+ *	@dev: device which should be verified
+ *	@drv: the driver to match against
+ *
+ *	Used by the bus to check if the driver wants to handle the device.
+ *
+ *	Returns 1 if the class ids of the driver match the class id of the
+ *	device, otherwise 0.
+ */
+static int i2o_bus_match(struct device *dev, struct device_driver *drv)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+	struct i2o_driver *i2o_drv = to_i2o_driver(drv);
+	struct i2o_class_id *ids = i2o_drv->classes;
+
+	if (ids)
+		while (ids->class_id != I2O_CLASS_END) {
+			if (ids->class_id == i2o_dev->lct_data.class_id)
+				return 1;
+			ids++;
+		}
+	return 0;
+};
+
+/* I2O bus type */
+struct bus_type i2o_bus_type = {
+	.name = "i2o",
+	.match = i2o_bus_match,
+};
+
+/**
+ *	i2o_driver_register - Register an I2O driver (OSM) in the I2O core
+ *	@drv: I2O driver which should be registered
+ *
+ *	Registers the OSM drv in the I2O core and creates an event queue if
+ *	necessary.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_driver_register(struct i2o_driver *drv)
+{
+	struct i2o_controller *c;
+	int i;
+	int rc = 0;
+	unsigned long flags;
+
+	pr_debug("i2o: Register driver %s\n", drv->name);
+
+	if (drv->event) {
+		drv->event_queue = create_workqueue(drv->name);
+		if (!drv->event_queue) {
+			printk(KERN_ERR "i2o: Could not initialize event queue "
+			       "for driver %s\n", drv->name);
+			return -EFAULT;
+		}
+		pr_debug("i2o: Event queue initialized for driver %s\n",
+			 drv->name);
+	} else
+		drv->event_queue = NULL;
+
+	drv->driver.name = drv->name;
+	drv->driver.bus = &i2o_bus_type;
+
+	spin_lock_irqsave(&i2o_drivers_lock, flags);
+
+	/* find a free slot without reading past the end of the array */
+	for (i = 0; i < i2o_max_drivers && i2o_drivers[i]; i++)
+		;
+
+	if (i >= i2o_max_drivers) {
+		printk(KERN_ERR "i2o: too many drivers registered, "
+		       "increase max_drivers\n");
+		spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+		return -EFAULT;
+	}
+
+	drv->context = i;
+	i2o_drivers[i] = drv;
+
+	spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+
+	pr_debug("i2o: driver %s gets context id %d\n", drv->name,
+		 drv->context);
+
+	list_for_each_entry(c, &i2o_controllers, list) {
+		struct i2o_device *i2o_dev;
+
+		i2o_driver_notify_controller_add(drv, c);
+		list_for_each_entry(i2o_dev, &c->devices, list)
+			i2o_driver_notify_device_add(drv, i2o_dev);
+	}
+
+	rc = driver_register(&drv->driver);
+	if (rc)
+		destroy_workqueue(drv->event_queue);
+
+	return rc;
+};
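+
+/*
+ *	Illustrative registration (all example_* names are placeholders; see
+ *	the Exec and Block OSMs later in this patch for real users): an OSM
+ *	fills in a struct i2o_driver and registers it from its init code:
+ *
+ *		static struct i2o_driver example_driver = {
+ *			.name    = "example-osm",
+ *			.reply   = example_reply,
+ *			.event   = example_event,
+ *			.classes = example_class_id,
+ *			.driver  = {
+ *				.probe  = example_probe,
+ *				.remove = example_remove,
+ *			},
+ *		};
+ *
+ *		rc = i2o_driver_register(&example_driver);
+ */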
+
+/**
+ *	i2o_driver_unregister - Unregister an I2O driver (OSM) from the I2O core
+ *	@drv: I2O driver which should be unregistered
+ *
+ *	Unregisters the OSM drv from the I2O core and cleans up the event queue if
+ *	necessary.
+ */
+void i2o_driver_unregister(struct i2o_driver *drv)
+{
+	struct i2o_controller *c;
+	unsigned long flags;
+
+	pr_debug("i2o: unregister driver %s\n", drv->name);
+
+	driver_unregister(&drv->driver);
+
+	list_for_each_entry(c, &i2o_controllers, list) {
+		struct i2o_device *i2o_dev;
+
+		list_for_each_entry(i2o_dev, &c->devices, list)
+			i2o_driver_notify_device_remove(drv, i2o_dev);
+
+		i2o_driver_notify_controller_remove(drv, c);
+	}
+
+	spin_lock_irqsave(&i2o_drivers_lock, flags);
+	i2o_drivers[drv->context] = NULL;
+	spin_unlock_irqrestore(&i2o_drivers_lock, flags);
+
+	if (drv->event_queue) {
+		destroy_workqueue(drv->event_queue);
+		drv->event_queue = NULL;
+		pr_debug("i2o: event queue removed for %s\n", drv->name);
+	}
+};
+
+/**
+ *	i2o_driver_dispatch - dispatch an I2O reply message
+ *	@c: I2O controller of the message
+ *	@m: I2O message number
+ *	@msg: I2O message to be delivered
+ *
+ *	The reply is delivered to the driver from which the original message
+ *	was sent. This function is only called from interrupt context.
+ *
+ *	Returns 0 on success and the message should not be flushed. Returns > 0
+ *	on success and the message should be flushed afterwards. Returns a
+ *	negative error code on failure (the message will be flushed too).
+ */
+int i2o_driver_dispatch(struct i2o_controller *c, u32 m,
+			struct i2o_message __iomem *msg)
+{
+	struct i2o_driver *drv;
+	u32 context = readl(&msg->u.s.icntxt);
+
+	if (likely(context < i2o_max_drivers)) {
+		spin_lock(&i2o_drivers_lock);
+		drv = i2o_drivers[context];
+		spin_unlock(&i2o_drivers_lock);
+
+		if (unlikely(!drv)) {
+			printk(KERN_WARNING "%s: Spurious reply to unknown "
+			       "driver %d\n", c->name, context);
+			return -EIO;
+		}
+
+		if ((readl(&msg->u.head[1]) >> 24) == I2O_CMD_UTIL_EVT_REGISTER) {
+			struct i2o_device *dev, *tmp;
+			struct i2o_event *evt;
+			u16 size;
+			u16 tid;
+
+			tid = readl(&msg->u.head[1]) & 0x1fff;
+
+			pr_debug("%s: event received from device %d\n", c->name,
+				 tid);
+
+			/* cut off header from message size (in 32-bit words) */
+			size = (readl(&msg->u.head[0]) >> 16) - 5;
+
+			evt = kmalloc(size * 4 + sizeof(*evt), GFP_ATOMIC);
+			if (!evt)
+				return -ENOMEM;
+			memset(evt, 0, size * 4 + sizeof(*evt));
+
+			evt->size = size;
+			memcpy_fromio(&evt->tcntxt, &msg->u.s.tcntxt,
+				      (size + 2) * 4);
+
+			list_for_each_entry_safe(dev, tmp, &c->devices, list)
+				if (dev->lct_data.tid == tid) {
+					evt->i2o_dev = dev;
+					break;
+				}
+
+			INIT_WORK(&evt->work, (void (*)(void *))drv->event,
+				  evt);
+			queue_work(drv->event_queue, &evt->work);
+			return 1;
+		}
+
+		if (likely(drv->reply))
+			return drv->reply(c, m, msg);
+		else
+			pr_debug("%s: Reply to driver %s, but no reply function"
+				 " defined!\n", c->name, drv->name);
+		return -EIO;
+	} else
+		printk(KERN_WARNING "%s: Spurious reply to unknown driver "
+		       "%d\n", c->name, readl(&msg->u.s.icntxt));
+	return -EIO;
+}
+
+/**
+ *	i2o_driver_notify_controller_add_all - Send notify of added controller
+ *					       to all I2O drivers
+ *	@c: I2O controller which was added
+ *
+ *	Send notifications to all registered drivers that a new controller was
+ *	added.
+ */
+void i2o_driver_notify_controller_add_all(struct i2o_controller *c)
+{
+	int i;
+	struct i2o_driver *drv;
+
+	for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+		drv = i2o_drivers[i];
+
+		if (drv)
+			i2o_driver_notify_controller_add(drv, c);
+	}
+}
+
+/**
+ *	i2o_driver_notify_controller_remove_all - Send notify of removed
+ *						  controller to all I2O drivers
+ *	@c: I2O controller which was removed
+ *
+ *	Send notifications to all registered drivers that a controller was
+ *	removed.
+ */
+void i2o_driver_notify_controller_remove_all(struct i2o_controller *c)
+{
+	int i;
+	struct i2o_driver *drv;
+
+	for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+		drv = i2o_drivers[i];
+
+		if (drv)
+			i2o_driver_notify_controller_remove(drv, c);
+	}
+}
+
+/**
+ *	i2o_driver_notify_device_add_all - Send notify of added device to all
+ *					   I2O drivers
+ *	@i2o_dev: I2O device which was added
+ *
+ *	Send notifications to all registered drivers that a device was added.
+ */
+void i2o_driver_notify_device_add_all(struct i2o_device *i2o_dev)
+{
+	int i;
+	struct i2o_driver *drv;
+
+	for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+		drv = i2o_drivers[i];
+
+		if (drv)
+			i2o_driver_notify_device_add(drv, i2o_dev);
+	}
+}
+
+/**
+ *	i2o_driver_notify_device_remove_all - Send notify of removed device to
+ *					      all I2O drivers
+ *	@i2o_dev: I2O device which was removed
+ *
+ *	Send notifications to all registered drivers that a device was removed.
+ */
+void i2o_driver_notify_device_remove_all(struct i2o_device *i2o_dev)
+{
+	int i;
+	struct i2o_driver *drv;
+
+	for (i = 0; i < I2O_MAX_DRIVERS; i++) {
+		drv = i2o_drivers[i];
+
+		if (drv)
+			i2o_driver_notify_device_remove(drv, i2o_dev);
+	}
+}
+
+/**
+ *	i2o_driver_init - initialize I2O drivers (OSMs)
+ *
+ *	Registers the I2O bus and allocates memory for the array of OSMs.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int __init i2o_driver_init(void)
+{
+	int rc = 0;
+
+	spin_lock_init(&i2o_drivers_lock);
+
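+	/*
+	 * Sanity check the module parameter: for a power of two n,
+	 * n ^ (n - 1) has every bit up to and including the top bit of n
+	 * set, i.e. it equals 2n - 1 (for example 8 ^ 7 == 15), so the
+	 * test below rejects anything that is not a power of two in the
+	 * range 2..64 and falls back to I2O_MAX_DRIVERS.
+	 */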
+	if ((i2o_max_drivers < 2) || (i2o_max_drivers > 64) ||
+	    ((i2o_max_drivers ^ (i2o_max_drivers - 1)) !=
+	     (2 * i2o_max_drivers - 1))) {
+		printk(KERN_WARNING "i2o: max_drivers set to %d, but must be "
+		       ">=2 and <= 64 and a power of 2\n", i2o_max_drivers);
+		i2o_max_drivers = I2O_MAX_DRIVERS;
+	}
+	printk(KERN_INFO "i2o: max drivers = %d\n", i2o_max_drivers);
+
+	i2o_drivers =
+	    kmalloc(i2o_max_drivers * sizeof(*i2o_drivers), GFP_KERNEL);
+	if (!i2o_drivers)
+		return -ENOMEM;
+
+	memset(i2o_drivers, 0, i2o_max_drivers * sizeof(*i2o_drivers));
+
+	rc = bus_register(&i2o_bus_type);
+
+	if (rc < 0)
+		kfree(i2o_drivers);
+
+	return rc;
+};
+
+/**
+ *	i2o_driver_exit - clean up I2O drivers (OSMs)
+ *
+ *	Unregisters the I2O bus and frees the driver array.
+ */
+void __exit i2o_driver_exit(void)
+{
+	bus_unregister(&i2o_bus_type);
+	kfree(i2o_drivers);
+};
+
+EXPORT_SYMBOL(i2o_driver_register);
+EXPORT_SYMBOL(i2o_driver_unregister);
+EXPORT_SYMBOL(i2o_driver_notify_controller_add_all);
+EXPORT_SYMBOL(i2o_driver_notify_controller_remove_all);
+EXPORT_SYMBOL(i2o_driver_notify_device_add_all);
+EXPORT_SYMBOL(i2o_driver_notify_device_remove_all);
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
new file mode 100644
index 0000000..79c1cbf
--- /dev/null
+++ b/drivers/message/i2o/exec-osm.c
@@ -0,0 +1,507 @@
+/*
+ *	Executive OSM
+ *
+ * 	Copyright (C) 1999-2002	Red Hat Software
+ *
+ *	Written by Alan Cox, Building Number Three Ltd
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	A lot of the I2O message side code from this is taken from the Red
+ *	Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ *	Fixes/additions:
+ *		Philipp Rumpf
+ *		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *		Deepak Saxena <deepak@plexity.net>
+ *		Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ *		Alan Cox <alan@redhat.com>:
+ *			Ported to Linux 2.5.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Minor fixes for 2.6.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Support for sysfs included.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+
+#define OSM_NAME "exec-osm"
+
+struct i2o_driver i2o_exec_driver;
+
+static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind);
+
+/* Module internal functions from other sources */
+extern int i2o_device_parse_lct(struct i2o_controller *);
+
+/* global wait list for POST WAIT */
+static LIST_HEAD(i2o_exec_wait_list);
+
+/* Wait struct needed for POST WAIT */
+struct i2o_exec_wait {
+	wait_queue_head_t *wq;	/* Pointer to Wait queue */
+	struct i2o_dma dma;	/* DMA buffers to free on failure */
+	u32 tcntxt;		/* transaction context from reply */
+	int complete;		/* 1 if reply received otherwise 0 */
+	u32 m;			/* message id */
+	struct i2o_message __iomem *msg;	/* pointer to the reply message */
+	struct list_head list;	/* node in global wait list */
+};
+
+/* Exec OSM class handling definition */
+static struct i2o_class_id i2o_exec_class_id[] = {
+	{I2O_CLASS_EXECUTIVE},
+	{I2O_CLASS_END}
+};
+
+/**
+ *	i2o_exec_wait_alloc - Allocate an i2o_exec_wait struct and initialize it
+ *
+ *	Allocate the i2o_exec_wait struct and initialize the wait.
+ *
+ *	Returns i2o_exec_wait pointer on success or negative error code on
+ *	failure.
+ */
+static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
+{
+	struct i2o_exec_wait *wait;
+
+	wait = kmalloc(sizeof(*wait), GFP_KERNEL);
+	if (!wait)
+		return ERR_PTR(-ENOMEM);
+
+	memset(wait, 0, sizeof(*wait));
+
+	INIT_LIST_HEAD(&wait->list);
+
+	return wait;
+};
+
+/**
+ *	i2o_exec_wait_free - Free an i2o_exec_wait struct
+ *	@wait: I2O wait data which should be cleaned up
+ */
+static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
+{
+	kfree(wait);
+};
+
+/**
+ * 	i2o_msg_post_wait_mem - Post and wait a message with DMA buffers
+ *	@c: controller
+ *	@m: message to post
+ *	@timeout: time in seconds to wait
+ *	@dma: i2o_dma struct of the DMA buffer to free on failure
+ *
+ * 	This API allows an OSM to post a message and then be told whether or
+ *	not the system received a successful reply. If the message times out
+ *	then the value '-ETIMEDOUT' is returned. This is a special case. In
+ *	this situation the message may (should) complete at an indefinite time
+ *	in the future. When it completes it will use the memory buffer
+ *	attached to the request. If -ETIMEDOUT is returned then the memory
+ *	buffers must not be freed. Instead the event completion will free them
+ *	for you. In all other cases the buffers are your problem.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_msg_post_wait_mem(struct i2o_controller *c, u32 m, unsigned long
+			  timeout, struct i2o_dma *dma)
+{
+	DECLARE_WAIT_QUEUE_HEAD(wq);
+	struct i2o_exec_wait *wait;
+	static u32 tcntxt = 0x80000000;
+	struct i2o_message __iomem *msg = c->in_queue.virt + m;
+	int rc = 0;
+
+	wait = i2o_exec_wait_alloc();
+	if (!wait)
+		return -ENOMEM;
+
+	if (tcntxt == 0xffffffff)
+		tcntxt = 0x80000000;
+
+	if (dma)
+		wait->dma = *dma;
+
+	/*
+	 * Fill in the message initiator context and transaction context.
+	 * We will only use transaction contexts >= 0x80000000 for POST WAIT,
+	 * so we can find a POST WAIT reply more easily in the reply handler.
+	 */
+	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+	wait->tcntxt = tcntxt++;
+	writel(wait->tcntxt, &msg->u.s.tcntxt);
+
+	/*
+	 * Post the message to the controller. At some point later it will
+	 * return. If we time out before it returns then complete will be zero.
+	 */
+	i2o_msg_post(c, m);
+
+	if (!wait->complete) {
+		wait->wq = &wq;
+		/*
+		 * we add elements at the head: an entry that is never removed
+		 * (e.g. a timed-out request) would otherwise have to be walked
+		 * over on every lookup
+		 */
+		list_add(&wait->list, &i2o_exec_wait_list);
+
+		wait_event_interruptible_timeout(wq, wait->complete,
+			timeout * HZ);
+
+		wait->wq = NULL;
+	}
+
+	barrier();
+
+	if (wait->complete) {
+		if (readl(&wait->msg->body[0]) >> 24)
+			rc = readl(&wait->msg->body[0]) & 0xff;
+		i2o_flush_reply(c, wait->m);
+		i2o_exec_wait_free(wait);
+	} else {
+		/*
+		 * We cannot remove it now. This is important. When it does
+		 * terminate (which it must do if the controller has not
+		 * died...) then it will otherwise scribble on stuff.
+		 *
+		 * FIXME: try abort message
+		 */
+		if (dma)
+			dma->virt = NULL;
+
+		rc = -ETIMEDOUT;
+	}
+
+	return rc;
+};
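+
+/*
+ *	Illustrative use: i2o_parm_issue() earlier in this patch follows this
+ *	pattern - get a message frame, fill it in, then post and wait:
+ *
+ *		rc = i2o_msg_post_wait_mem(c, m, 10, &res);
+ *
+ *	On -ETIMEDOUT the DMA buffer in res must not be freed by the caller;
+ *	the late reply handler frees it instead (see the comment above).
+ */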
+
+/**
+ *	i2o_msg_post_wait_complete - Reply to a i2o_msg_post request from IOP
+ *	@c: I2O controller which answers
+ *	@m: message id
+ *	@msg: pointer to the I2O reply message
+ *
+ *	This function is called in interrupt context only. If the reply arrived
+ *	before the timeout, the i2o_exec_wait struct is filled with the message
+ *	and the task will be woken up. The task is then responsible for returning
+ *	the message m back to the controller! If the message reaches us after
+ *	the timeout, clean up the i2o_exec_wait struct (including the allocated
+ *	DMA buffer).
+ *
+ *	Return 0 on success and if the message m should not be given back to the
+ *	I2O controller, or >0 on success and if the message should be given back
+ *	afterwards. Returns a negative error code on failure. In this case the
+ *	message must also be given back to the controller.
+ */
+static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
+				      struct i2o_message __iomem *msg)
+{
+	struct i2o_exec_wait *wait, *tmp;
+	static spinlock_t lock = SPIN_LOCK_UNLOCKED;
+	int rc = 1;
+	u32 context;
+
+	context = readl(&msg->u.s.tcntxt);
+
+	/*
+	 * We need to search through the i2o_exec_wait_list to see if the given
+	 * message is still outstanding. If not, it means that the IOP took
+	 * longer to respond to the message than we had allowed and timer has
+	 * already expired. Not much we can do about that except log it for
+	 * debug purposes, increase timeout, and recompile.
+	 */
+	spin_lock(&lock);
+	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
+		if (wait->tcntxt == context) {
+			list_del(&wait->list);
+
+			wait->m = m;
+			wait->msg = msg;
+			wait->complete = 1;
+
+			barrier();
+
+			if (wait->wq) {
+				wake_up_interruptible(wait->wq);
+				rc = 0;
+			} else {
+				struct device *dev;
+
+				dev = &c->pdev->dev;
+
+				pr_debug("%s: timedout reply received!\n",
+					 c->name);
+				i2o_dma_free(dev, &wait->dma);
+				i2o_exec_wait_free(wait);
+				rc = -1;
+			}
+
+			spin_unlock(&lock);
+
+			return rc;
+		}
+	}
+
+	spin_unlock(&lock);
+
+	pr_debug("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
+		 context);
+
+	return -1;
+};
+
+/**
+ *	i2o_exec_probe - Called if a new I2O device (executive class) appears
+ *	@dev: I2O device which should be probed
+ *
+ *	Registers event notification for every event from the Executive device.
+ *	The return is always 0, because we want all devices of class Executive.
+ *
+ *	Returns 0 on success.
+ */
+static int i2o_exec_probe(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+
+	i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
+
+	i2o_dev->iop->exec = i2o_dev;
+
+	return 0;
+};
+
+/**
+ *	i2o_exec_remove - Called on I2O device removal
+ *	@dev: I2O device which was removed
+ *
+ *	Unregisters event notification from Executive I2O device.
+ *
+ *	Returns 0 on success.
+ */
+static int i2o_exec_remove(struct device *dev)
+{
+	i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
+
+	return 0;
+};
+
+/**
+ *	i2o_exec_lct_modified - Called on LCT NOTIFY reply
+ *	@c: I2O controller on which the LCT has been modified
+ *
+ *	This function handles asynchronous LCT NOTIFY replies. It parses the
+ *	new LCT and, if the buffer for the LCT was too small, sends an LCT NOTIFY
+ *	again.
+ */
+static void i2o_exec_lct_modified(struct i2o_controller *c)
+{
+	if (i2o_device_parse_lct(c) == -EAGAIN)
+		i2o_exec_lct_notify(c, 0);
+};
+
+/**
+ *	i2o_exec_reply -  I2O Executive reply handler
+ *	@c: I2O controller from which the reply comes
+ *	@m: message id
+ *	@msg: pointer to the I2O reply message
+ *
+ *	This function is always called from interrupt context. If a POST WAIT
+ *	reply was received, pass it to the complete function. If a LCT NOTIFY
+ *	reply was received, a new event is created to handle the update.
+ *
+ *	Returns 0 on success and if the reply should not be flushed or > 0
+ *	on success and if the reply should be flushed. Returns negative error
+ *	code on failure and if the reply should be flushed.
+ */
+static int i2o_exec_reply(struct i2o_controller *c, u32 m,
+			  struct i2o_message *msg)
+{
+	if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {	// Fail bit is set
+		struct i2o_message __iomem *pmsg;	/* preserved message */
+		u32 pm;
+
+		pm = le32_to_cpu(msg->body[3]);
+
+		pmsg = i2o_msg_in_to_virt(c, pm);
+
+		i2o_report_status(KERN_INFO, "i2o_core", msg);
+
+		/* Release the preserved msg by resubmitting it as a NOP */
+		i2o_msg_nop(c, pm);
+
+		/* If reply to i2o_post_wait failed, return causes a timeout */
+		return -1;
+	}
+
+	if (le32_to_cpu(msg->u.s.tcntxt) & 0x80000000)
+		return i2o_msg_post_wait_complete(c, m, msg);
+
+	if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
+		struct work_struct *work;
+
+		pr_debug("%s: LCT notify received\n", c->name);
+
+		work = kmalloc(sizeof(*work), GFP_ATOMIC);
+		if (!work)
+			return -ENOMEM;
+
+		INIT_WORK(work, (void (*)(void *))i2o_exec_lct_modified, c);
+		queue_work(i2o_exec_driver.event_queue, work);
+		return 1;
+	}
+
+	/*
+	 * If this happens, we want to dump the message to the syslog so
+	 * it can be sent back to the card manufacturer by the end user
+	 * to aid in debugging.
+	 *
+	 */
+	printk(KERN_WARNING "%s: Unsolicited message reply sent to core! "
+	       "Message dumped to syslog\n", c->name);
+	i2o_dump_message(msg);
+
+	return -EFAULT;
+}
+
+/**
+ *	i2o_exec_event - Event handling function
+ *	@evt: Event which occurs
+ *
+ *	Handles events sent by the Executive device. At the moment it does not do
+ *	anything useful.
+ */
+static void i2o_exec_event(struct i2o_event *evt)
+{
+	osm_info("Event received from device: %d\n",
+		 evt->i2o_dev->lct_data.tid);
+	kfree(evt);
+};
+
+/**
+ *	i2o_exec_lct_get - Get the IOP's Logical Configuration Table
+ *	@c: I2O controller from which the LCT should be fetched
+ *
+ *	Send an LCT NOTIFY request to the controller, and wait up to
+ *	I2O_TIMEOUT_LCT_GET seconds for the response to arrive. If the LCT is
+ *	too large, retry the request.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_exec_lct_get(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	int i = 0;
+	int rc = -EAGAIN;
+
+	for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
+		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+		if (m == I2O_QUEUE_EMPTY)
+			return -ETIMEDOUT;
+
+		writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
+		writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
+		       &msg->u.head[1]);
+		writel(0xffffffff, &msg->body[0]);
+		writel(0x00000000, &msg->body[1]);
+		writel(0xd0000000 | c->dlct.len, &msg->body[2]);
+		writel(c->dlct.phys, &msg->body[3]);
+
+		rc = i2o_msg_post_wait(c, m, I2O_TIMEOUT_LCT_GET);
+		if (rc < 0)
+			break;
+
+		rc = i2o_device_parse_lct(c);
+		if (rc != -EAGAIN)
+			break;
+	}
+
+	return rc;
+}
+
+/**
+ *	i2o_exec_lct_notify - Send an asynchronous LCT NOTIFY request
+ *	@c: I2O controller to which the request should be sent
+ *	@change_ind: change indicator
+ *
+ *	This function sends an LCT NOTIFY request to the I2O controller with
+ *	the change indicator change_ind. If change_ind == 0 the controller
+ *	replies immediately after the request. If change_ind > 0 the reply is
+ *	sent once the change indicator of the LCT exceeds change_ind.
+ */
+static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
+{
+	i2o_status_block *sb = c->status_block.virt;
+	struct device *dev;
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	dev = &c->pdev->dev;
+
+	if (i2o_dma_realloc(dev, &c->dlct, sb->expected_lct_size, GFP_KERNEL))
+		return -ENOMEM;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6, &msg->u.head[0]);
+	writel(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+	writel(0, &msg->u.s.tcntxt);	/* FIXME */
+	writel(0xffffffff, &msg->body[0]);
+	writel(change_ind, &msg->body[1]);
+	writel(0xd0000000 | c->dlct.len, &msg->body[2]);
+	writel(c->dlct.phys, &msg->body[3]);
+
+	i2o_msg_post(c, m);
+
+	return 0;
+};
+
+/* Exec OSM driver struct */
+struct i2o_driver i2o_exec_driver = {
+	.name = OSM_NAME,
+	.reply = i2o_exec_reply,
+	.event = i2o_exec_event,
+	.classes = i2o_exec_class_id,
+	.driver = {
+		   .probe = i2o_exec_probe,
+		   .remove = i2o_exec_remove,
+		   },
+};
+
+/**
+ *	i2o_exec_init - Registers the Exec OSM
+ *
+ *	Registers the Exec OSM in the I2O core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int __init i2o_exec_init(void)
+{
+	return i2o_driver_register(&i2o_exec_driver);
+};
+
+/**
+ *	i2o_exec_exit - Removes the Exec OSM
+ *
+ *	Unregisters the Exec OSM from the I2O core.
+ */
+void __exit i2o_exec_exit(void)
+{
+	i2o_driver_unregister(&i2o_exec_driver);
+};
+
+EXPORT_SYMBOL(i2o_msg_post_wait_mem);
+EXPORT_SYMBOL(i2o_exec_lct_get);
diff --git a/drivers/message/i2o/i2o_block.c b/drivers/message/i2o/i2o_block.c
new file mode 100644
index 0000000..7b74c87
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.c
@@ -0,0 +1,1247 @@
+/*
+ *	Block OSM
+ *
+ * 	Copyright (C) 1999-2002	Red Hat Software
+ *
+ *	Written by Alan Cox, Building Number Three Ltd
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful, but
+ *	WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *	General Public License for more details.
+ *
+ *	For the purpose of avoiding doubt the preferred form of the work
+ *	for making modifications shall be a standards compliant form such as
+ *	gzipped tar and not one requiring a proprietary or patent encumbered
+ *	tool to unpack.
+ *
+ *	Fixes/additions:
+ *		Steve Ralston:
+ *			Multiple device handling error fixes,
+ *			Added a queue depth.
+ *		Alan Cox:
+ *			FC920 has an rmw bug. Don't OR in the end marker.
+ *			Removed queue walk, fixed for 64bitness.
+ *			Rewrote much of the code over time
+ *			Added indirect block lists
+ *			Handle 64K limits on many controllers
+ *			Don't use indirects on the Promise (breaks)
+ *			Heavily chop down the queue depths
+ *		Deepak Saxena:
+ *			Independent queues per IOP
+ *			Support for dynamic device creation/deletion
+ *			Code cleanup
+ *	    		Support for larger I/Os through merge* functions
+ *			(taken from DAC960 driver)
+ *		Boji T Kannanthanam:
+ *			Set the I2O Block devices to be detected in increasing
+ *			order of TIDs during boot.
+ *			Search and set the I2O block device that we boot off
+ *			from as the first device to be claimed (as /dev/i2o/hda)
+ *			Properly attach/detach I2O gendisk structure from the
+ *			system gendisk list. The I2O block devices now appear in
+ *			/proc/partitions.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Minor bugfixes for 2.6.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+
+#include <linux/mempool.h>
+
+#include <linux/genhd.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+
+#include "i2o_block.h"
+
+#define OSM_NAME	"block-osm"
+#define OSM_VERSION	"$Rev$"
+#define OSM_DESCRIPTION	"I2O Block Device OSM"
+
+static struct i2o_driver i2o_block_driver;
+
+/* global Block OSM request mempool */
+static struct i2o_block_mempool i2o_blk_req_pool;
+
+/* Block OSM class handling definition */
+static struct i2o_class_id i2o_block_class_id[] = {
+	{I2O_CLASS_RANDOM_BLOCK_STORAGE},
+	{I2O_CLASS_END}
+};
+
+/**
+ *	i2o_block_device_free - free the memory of the I2O Block device
+ *	@dev: I2O Block device, which should be cleaned up
+ *
+ *	Frees the request queue, gendisk and the i2o_block_device structure.
+ */
+static void i2o_block_device_free(struct i2o_block_device *dev)
+{
+	blk_cleanup_queue(dev->gd->queue);
+
+	put_disk(dev->gd);
+
+	kfree(dev);
+};
+
+/**
+ *	i2o_block_remove - remove the I2O Block device from the system again
+ *	@dev: I2O Block device which should be removed
+ *
+ *	Remove gendisk from system and free all allocated memory.
+ *
+ *	Always returns 0.
+ */
+static int i2o_block_remove(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+	struct i2o_block_device *i2o_blk_dev = dev_get_drvdata(dev);
+
+	osm_info("Device removed %s\n", i2o_blk_dev->gd->disk_name);
+
+	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0);
+
+	del_gendisk(i2o_blk_dev->gd);
+
+	dev_set_drvdata(dev, NULL);
+
+	i2o_device_claim_release(i2o_dev);
+
+	i2o_block_device_free(i2o_blk_dev);
+
+	return 0;
+};
+
+/**
+ *	i2o_block_device_flush - Flush all dirty data of I2O device dev
+ *	@dev: I2O device which should be flushed
+ *
+ *	Flushes all dirty data on device dev.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_flush(struct i2o_device *dev)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_BLOCK_CFLUSH << 24 | HOST_TID << 12 | dev->lct_data.tid,
+	       &msg->u.head[1]);
+	writel(60 << 16, &msg->body[0]);
+	osm_debug("Flushing...\n");
+
+	return i2o_msg_post_wait(dev->iop, m, 60);
+};
+
+/**
+ *	i2o_block_device_mount - Mount (load) the media of device dev
+ *	@dev: I2O device which should receive the mount request
+ *	@media_id: Media Identifier
+ *
+ *	Load a media into drive. Identifier should be set to -1, because the
+ *	spec does not support any other value.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_mount(struct i2o_device *dev, u32 media_id)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_BLOCK_MMOUNT << 24 | HOST_TID << 12 | dev->lct_data.tid,
+	       &msg->u.head[1]);
+	writel(-1, &msg->body[0]);
+	writel(0, &msg->body[1]);
+	osm_debug("Mounting...\n");
+
+	return i2o_msg_post_wait(dev->iop, m, 2);
+};
+
+/**
+ *	i2o_block_device_lock - Locks the media of device dev
+ *	@dev: I2O device which should receive the lock request
+ *	@media_id: Media Identifier
+ *
+ *	Lock media of device dev to prevent removal. The media identifier
+ *	should be set to -1, because the spec does not support any other value.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_lock(struct i2o_device *dev, u32 media_id)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_BLOCK_MLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
+	       &msg->u.head[1]);
+	writel(-1, &msg->body[0]);
+	osm_debug("Locking...\n");
+
+	return i2o_msg_post_wait(dev->iop, m, 2);
+};
+
+/**
+ *	i2o_block_device_unlock - Unlocks the media of device dev
+ *	@dev: I2O device which should receive the unlocked request
+ *	@media_id: Media Identifier
+ *
+ *	Unlocks the media in device dev. The media identifier should be set to
+ *	-1, because the spec does not support any other value.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_unlock(struct i2o_device *dev, u32 media_id)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(dev->iop, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_BLOCK_MUNLOCK << 24 | HOST_TID << 12 | dev->lct_data.tid,
+	       &msg->u.head[1]);
+	writel(media_id, &msg->body[0]);
+	osm_debug("Unlocking...\n");
+
+	return i2o_msg_post_wait(dev->iop, m, 2);
+};
+
+/**
+ *	i2o_block_device_power - Power management for device dev
+ *	@dev: I2O device which should receive the power management request
+ *	@op: Operation which should be sent
+ *
+ *	Send a power management request to the device dev.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_device_power(struct i2o_block_device *dev, u8 op)
+{
+	struct i2o_device *i2o_dev = dev->i2o_dev;
+	struct i2o_controller *c = i2o_dev->iop;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	int rc;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_BLOCK_POWER << 24 | HOST_TID << 12 | i2o_dev->lct_data.
+	       tid, &msg->u.head[1]);
+	writel(op << 24, &msg->body[0]);
+	osm_debug("Power...\n");
+
+	rc = i2o_msg_post_wait(c, m, 60);
+	if (!rc)
+		dev->power = op;
+
+	return rc;
+};
+
+/**
+ *	i2o_block_request_alloc - Allocate an I2O block request struct
+ *
+ *	Allocates an I2O block request struct and initializes the list.
+ *
+ *	Returns a i2o_block_request pointer on success or negative error code
+ *	on failure.
+ */
+static inline struct i2o_block_request *i2o_block_request_alloc(void)
+{
+	struct i2o_block_request *ireq;
+
+	ireq = mempool_alloc(i2o_blk_req_pool.pool, GFP_ATOMIC);
+	if (!ireq)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&ireq->queue);
+
+	return ireq;
+};
+
+/**
+ *	i2o_block_request_free - Free an I2O block request
+ *	@ireq: I2O block request which should be freed
+ *
+ *	Frees the allocated memory (gives it back to the request mempool).
+ */
+static inline void i2o_block_request_free(struct i2o_block_request *ireq)
+{
+	mempool_free(ireq, i2o_blk_req_pool.pool);
+};
+
+/**
+ *	i2o_block_sglist_alloc - Allocate the SG list and map it
+ *	@ireq: I2O block request
+ *
+ *	Builds the SG list and maps it so that it is accessible by the controller.
+ *
+ *	Returns the number of elements in the SG list or 0 on failure.
+ */
+static inline int i2o_block_sglist_alloc(struct i2o_block_request *ireq)
+{
+	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
+	int nents;
+
+	nents = blk_rq_map_sg(ireq->req->q, ireq->req, ireq->sg_table);
+
+	if (rq_data_dir(ireq->req) == READ)
+		ireq->sg_dma_direction = PCI_DMA_FROMDEVICE;
+	else
+		ireq->sg_dma_direction = PCI_DMA_TODEVICE;
+
+	ireq->sg_nents = dma_map_sg(dev, ireq->sg_table, nents,
+				    ireq->sg_dma_direction);
+
+	return ireq->sg_nents;
+};
+
+/**
+ *	i2o_block_sglist_free - Frees the SG list
+ *	@ireq: I2O block request from which the SG should be freed
+ *
+ *	Frees the SG list from the I2O block request.
+ */
+static inline void i2o_block_sglist_free(struct i2o_block_request *ireq)
+{
+	struct device *dev = &ireq->i2o_blk_dev->i2o_dev->iop->pdev->dev;
+
+	dma_unmap_sg(dev, ireq->sg_table, ireq->sg_nents,
+		     ireq->sg_dma_direction);
+};
+
+/**
+ *	i2o_block_prep_req_fn - Allocates I2O block device specific struct
+ *	@q: request queue for the request
+ *	@req: the request to prepare
+ *
+ *	Allocate the necessary i2o_block_request struct and connect it to
+ *	the request. This is needed so that we do not lose the SG list later on.
+ *
+ *	Returns BLKPREP_OK on success or BLKPREP_DEFER on failure.
+ */
+static int i2o_block_prep_req_fn(struct request_queue *q, struct request *req)
+{
+	struct i2o_block_device *i2o_blk_dev = q->queuedata;
+	struct i2o_block_request *ireq;
+
+	/* request is already processed by us, so return */
+	if (req->flags & REQ_SPECIAL) {
+		osm_debug("REQ_SPECIAL already set!\n");
+		req->flags |= REQ_DONTPREP;
+		return BLKPREP_OK;
+	}
+
+	/* connect the i2o_block_request to the request */
+	if (!req->special) {
+		ireq = i2o_block_request_alloc();
+		if (unlikely(IS_ERR(ireq))) {
+			osm_debug("unable to allocate i2o_block_request!\n");
+			return BLKPREP_DEFER;
+		}
+
+		ireq->i2o_blk_dev = i2o_blk_dev;
+		req->special = ireq;
+		ireq->req = req;
+	} else
+		ireq = req->special;
+
+	/* do not come back here */
+	req->flags |= REQ_DONTPREP | REQ_SPECIAL;
+
+	return BLKPREP_OK;
+};
+
+/**
+ *	i2o_block_delayed_request_fn - delayed request queue function
+ *	@delayed_request: the delayed request with the queue to start
+ *
+ *	If the request queue is stopped for a disk, and there is no open
+ *	request, a new event is created, which calls this function to start
+ *	the queue after I2O_BLOCK_REQUEST_TIME. Otherwise the queue will never
+ *	be started again.
+ */
+static void i2o_block_delayed_request_fn(void *delayed_request)
+{
+	struct i2o_block_delayed_request *dreq = delayed_request;
+	struct request_queue *q = dreq->queue;
+	unsigned long flags;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	blk_start_queue(q);
+	spin_unlock_irqrestore(q->queue_lock, flags);
+	kfree(dreq);
+};
+
+/**
+ *	i2o_block_reply - Block OSM reply handler.
+ *	@c: I2O controller from which the message arrives
+ *	@m: message id of reply
+ *	@msg: the actual I2O message reply
+ *
+ *	This function handles all message replies for the Block OSM.
+ */
+static int i2o_block_reply(struct i2o_controller *c, u32 m,
+			   struct i2o_message *msg)
+{
+	struct i2o_block_request *ireq;
+	struct request *req;
+	struct i2o_block_device *dev;
+	struct request_queue *q;
+	u8 st;
+	unsigned long flags;
+
+	/* FAILed message */
+	if (unlikely(le32_to_cpu(msg->u.head[0]) & (1 << 13))) {
+		struct i2o_message *pmsg;
+		u32 pm;
+
+		/*
+		 * FAILed message from controller
+		 * We increment the error count and abort it
+		 *
+		 * In theory this will never happen.  The I2O block class
+		 * specification states that block devices never return
+		 * FAILs but instead use the REQ status field...but
+		 * better be on the safe side since no one really follows
+		 * the spec to the book :)
+		 */
+		pm = le32_to_cpu(msg->body[3]);
+		pmsg = i2o_msg_in_to_virt(c, pm);
+
+		req = i2o_cntxt_list_get(c, le32_to_cpu(pmsg->u.s.tcntxt));
+		if (unlikely(!req)) {
+			osm_err("NULL reply received!\n");
+			return -1;
+		}
+
+		ireq = req->special;
+		dev = ireq->i2o_blk_dev;
+		q = dev->gd->queue;
+
+		req->errors++;
+
+		spin_lock_irqsave(q->queue_lock, flags);
+
+		while (end_that_request_chunk(req, !req->errors,
+					      le32_to_cpu(pmsg->body[1]))) ;
+		end_that_request_last(req);
+
+		dev->open_queue_depth--;
+		list_del(&ireq->queue);
+		blk_start_queue(q);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		/* Now flush the message by making it a NOP */
+		i2o_msg_nop(c, pm);
+
+		return -1;
+	}
+
+	req = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+	if (unlikely(!req)) {
+		osm_err("NULL reply received!\n");
+		return -1;
+	}
+
+	ireq = req->special;
+	dev = ireq->i2o_blk_dev;
+	q = dev->gd->queue;
+
+	if (unlikely(!dev->i2o_dev)) {
+		/*
+		 * This is a HACK, but Intel Integrated RAID allows the user
+		 * to delete a volume that is claimed, locked, and in use
+		 * by the OS. We have to check for a reply from a
+		 * non-existent device and flag it as an error or the system
+		 * goes kaput...
+		 */
+		req->errors++;
+		osm_warn("Data transfer to deleted device!\n");
+		spin_lock_irqsave(q->queue_lock, flags);
+		while (end_that_request_chunk
+		       (req, !req->errors, le32_to_cpu(msg->body[1]))) ;
+		end_that_request_last(req);
+
+		dev->open_queue_depth--;
+		list_del(&ireq->queue);
+		blk_start_queue(q);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
+		return -1;
+	}
+
+	/*
+	 *      Lets see what is cooking. We stuffed the
+	 *      request in the context.
+	 */
+
+	st = le32_to_cpu(msg->body[0]) >> 24;
+
+	if (st != 0) {
+		int err;
+		char *bsa_errors[] = {
+			"Success",
+			"Media Error",
+			"Failure communicating to device",
+			"Device Failure",
+			"Device is not ready",
+			"Media not present",
+			"Media is locked by another user",
+			"Media has failed",
+			"Failure communicating to device",
+			"Device bus failure",
+			"Device is locked by another user",
+			"Device is write protected",
+			"Device has reset",
+			"Volume has changed, waiting for acknowledgement"
+		};
+
+		err = le32_to_cpu(msg->body[0]) & 0xffff;
+
+		/*
+		 *      Device not ready means two things. One is that the
+		 *      device went offline (but is not removable media)
+		 *
+		 *      The second is that you have a SuperTrak 100 and the
+		 *      firmware got constipated. Unlike standard i2o card
+		 *      setups the supertrak returns an error rather than
+		 *      blocking for the timeout in these cases.
+		 *
+		 *      Don't stick a supertrak100 into cache aggressive modes
+		 */
+
+		osm_err("block-osm: /dev/%s error: %s", dev->gd->disk_name,
+			bsa_errors[le32_to_cpu(msg->body[0]) & 0xffff]);
+		if (le32_to_cpu(msg->body[0]) & 0x00ff0000)
+			printk(KERN_ERR " - DDM attempted %d retries",
+			       (le32_to_cpu(msg->body[0]) >> 16) & 0x00ff);
+		printk(KERN_ERR ".\n");
+		req->errors++;
+	} else
+		req->errors = 0;
+
+	if (!end_that_request_chunk
+	    (req, !req->errors, le32_to_cpu(msg->body[1]))) {
+		add_disk_randomness(req->rq_disk);
+		spin_lock_irqsave(q->queue_lock, flags);
+
+		end_that_request_last(req);
+
+		dev->open_queue_depth--;
+		list_del(&ireq->queue);
+		blk_start_queue(q);
+
+		spin_unlock_irqrestore(q->queue_lock, flags);
+
+		i2o_block_sglist_free(ireq);
+		i2o_block_request_free(ireq);
+	} else
+		osm_err("still remaining chunks\n");
+
+	return 1;
+};
+
+static void i2o_block_event(struct i2o_event *evt)
+{
+	osm_info("block-osm: event received\n");
+};
+
+/*
+ *	SCSI-CAM for ioctl geometry mapping
+ *	Duplicated with SCSI - this should be moved into somewhere common
+ *	perhaps genhd ?
+ *
+ * LBA -> CHS mapping table taken from:
+ *
+ * "Incorporating the I2O Architecture into BIOS for Intel Architecture
+ *  Platforms"
+ *
+ * This is an I2O document that is only available to I2O members,
+ * not developers.
+ *
+ * From my understanding, this is how all the I2O cards do this
+ *
+ * Disk Size      | Sectors | Heads | Cylinders
+ * ---------------+---------+-------+-------------------
+ * 1 < X <= 528M  | 63      | 16    | X/(63 * 16 * 512)
+ * 528M < X <= 1G | 63      | 32    | X/(63 * 32 * 512)
+ * 1G < X <= 21G  | 63      | 64    | X/(63 * 64 * 512)
+ * 21G < X <= 42G | 63      | 128   | X/(63 * 128 * 512)
+ * 42G < X        | 63      | 255   | X/(63 * 255 * 512)
+ *
+ */
+#define	BLOCK_SIZE_528M		1081344
+#define	BLOCK_SIZE_1G		2097152
+#define	BLOCK_SIZE_21G		4403200
+#define	BLOCK_SIZE_42G		8806400
+#define	BLOCK_SIZE_84G		17612800
+
+static void i2o_block_biosparam(unsigned long capacity, unsigned short *cyls,
+				unsigned char *hds, unsigned char *secs)
+{
+	unsigned long heads, sectors, cylinders;
+
+	sectors = 63L;		/* Maximize sectors per track */
+	if (capacity <= BLOCK_SIZE_528M)
+		heads = 16;
+	else if (capacity <= BLOCK_SIZE_1G)
+		heads = 32;
+	else if (capacity <= BLOCK_SIZE_21G)
+		heads = 64;
+	else if (capacity <= BLOCK_SIZE_42G)
+		heads = 128;
+	else
+		heads = 255;
+
+	cylinders = (unsigned long)capacity / (heads * sectors);
+
+	*cyls = (unsigned short)cylinders;	/* Stuff return values */
+	*secs = (unsigned char)sectors;
+	*hds = (unsigned char)heads;
+}
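+
+/*
+ *	Worked example of the mapping above (capacity is in 512-byte sectors,
+ *	as passed in from get_capacity()): a disk of 2,000,000 sectors is
+ *	below BLOCK_SIZE_1G (2,097,152), so heads = 32, sectors = 63 and
+ *	cylinders = 2000000 / (32 * 63) = 992.
+ */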
+
+/**
+ *	i2o_block_open - Open the block device
+ *
+ *	Power up the device, mount and lock the media. This function is called
+ *	if the block device is opened for access.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_open(struct inode *inode, struct file *file)
+{
+	struct i2o_block_device *dev = inode->i_bdev->bd_disk->private_data;
+
+	if (!dev->i2o_dev)
+		return -ENODEV;
+
+	if (dev->power > 0x1f)
+		i2o_block_device_power(dev, 0x02);
+
+	i2o_block_device_mount(dev->i2o_dev, -1);
+
+	i2o_block_device_lock(dev->i2o_dev, -1);
+
+	osm_debug("Ready.\n");
+
+	return 0;
+};
+
+/**
+ *	i2o_block_release - Release the I2O block device
+ *
+ *	Unlock and unmount the media, and power down the device. Gets called if
+ *	the block device is closed.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_release(struct inode *inode, struct file *file)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct i2o_block_device *dev = disk->private_data;
+	u8 operation;
+
+	/*
+	 * This is to deal with the case of an application
+	 * opening a device and then the device disappears while
+	 * it's in use, and then the application tries to release
+	 * it.  ex: Unmounting a deleted RAID volume at reboot.
+	 * If we send messages, it will just cause FAILs since
+	 * the TID no longer exists.
+	 */
+	if (!dev->i2o_dev)
+		return 0;
+
+	i2o_block_device_flush(dev->i2o_dev);
+
+	i2o_block_device_unlock(dev->i2o_dev, -1);
+
+	if (dev->flags & (1 << 3 | 1 << 4))	/* Removable */
+		operation = 0x21;
+	else
+		operation = 0x24;
+
+	i2o_block_device_power(dev, operation);
+
+	return 0;
+}
+
+/**
+ *	i2o_block_ioctl - Issue device specific ioctl calls.
+ *	@cmd: ioctl command
+ *	@arg: arg
+ *
+ *	Handles ioctl request for the block device.
+ *
+ *	Return 0 on success or negative error on failure.
+ */
+static int i2o_block_ioctl(struct inode *inode, struct file *file,
+			   unsigned int cmd, unsigned long arg)
+{
+	struct gendisk *disk = inode->i_bdev->bd_disk;
+	struct i2o_block_device *dev = disk->private_data;
+	void __user *argp = (void __user *)arg;
+
+	/* Anyone capable of this syscall can do *real bad* things */
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	switch (cmd) {
+	case HDIO_GETGEO:
+		{
+			struct hd_geometry g;
+			i2o_block_biosparam(get_capacity(disk),
+					    &g.cylinders, &g.heads, &g.sectors);
+			g.start = get_start_sect(inode->i_bdev);
+			return copy_to_user(argp, &g, sizeof(g)) ? -EFAULT : 0;
+		}
+
+	case BLKI2OGRSTRAT:
+		return put_user(dev->rcache, (int __user *)arg);
+	case BLKI2OGWSTRAT:
+		return put_user(dev->wcache, (int __user *)arg);
+	case BLKI2OSRSTRAT:
+		if (arg < 0 || arg > CACHE_SMARTFETCH)
+			return -EINVAL;
+		dev->rcache = arg;
+		return 0;
+	case BLKI2OSWSTRAT:
+		if (arg != 0
+		    && (arg < CACHE_WRITETHROUGH || arg > CACHE_SMARTBACK))
+			return -EINVAL;
+		dev->wcache = arg;
+		return 0;
+	}
+	return -ENOTTY;
+};
+
+/**
+ *	i2o_block_media_changed - Have we seen a media change?
+ *	@disk: gendisk which should be verified
+ *
+ *	Verifies if the media has changed.
+ *
+ *	Returns 1 if the media was changed or 0 otherwise.
+ */
+static int i2o_block_media_changed(struct gendisk *disk)
+{
+	struct i2o_block_device *p = disk->private_data;
+
+	if (p->media_change_flag) {
+		p->media_change_flag = 0;
+		return 1;
+	}
+	return 0;
+}
+
+/**
+ *	i2o_block_transfer - Transfer a request to/from the I2O controller
+ *	@req: the request which should be transferred
+ *
+ *	This function converts the request into an I2O message. The necessary
+ *	DMA buffers are allocated and, after everything is set up, the message
+ *	is posted to the I2O controller. No cleanup is done by this function;
+ *	it is done on the interrupt side when the reply arrives.
+ *
+ *	Return 0 on success or negative error code on failure.
+ */
+static int i2o_block_transfer(struct request *req)
+{
+	struct i2o_block_device *dev = req->rq_disk->private_data;
+	struct i2o_controller *c = dev->i2o_dev->iop;
+	int tid = dev->i2o_dev->lct_data.tid;
+	struct i2o_message __iomem *msg;
+	void __iomem *mptr;
+	struct i2o_block_request *ireq = req->special;
+	struct scatterlist *sg;
+	int sgnum;
+	int i;
+	u32 m;
+	u32 tcntxt;
+	u32 sg_flags;
+	int rc;
+
+	m = i2o_msg_get(c, &msg);
+	if (m == I2O_QUEUE_EMPTY) {
+		rc = -EBUSY;
+		goto exit;
+	}
+
+	tcntxt = i2o_cntxt_list_add(c, req);
+	if (!tcntxt) {
+		rc = -ENOMEM;
+		goto nop_msg;
+	}
+
+	if ((sgnum = i2o_block_sglist_alloc(ireq)) <= 0) {
+		rc = -ENOMEM;
+		goto context_remove;
+	}
+
+	/* Build the message based on the request. */
+	writel(i2o_block_driver.context, &msg->u.s.icntxt);
+	writel(tcntxt, &msg->u.s.tcntxt);
+	writel(req->nr_sectors << 9, &msg->body[1]);
+
+	writel((((u64) req->sector) << 9) & 0xffffffff, &msg->body[2]);
+	writel(req->sector >> 23, &msg->body[3]);
+
+	mptr = &msg->body[4];
+
+	sg = ireq->sg_table;
+
+	if (rq_data_dir(req) == READ) {
+		writel(I2O_CMD_BLOCK_READ << 24 | HOST_TID << 12 | tid,
+		       &msg->u.head[1]);
+		sg_flags = 0x10000000;
+		switch (dev->rcache) {
+		case CACHE_NULL:
+			writel(0, &msg->body[0]);
+			break;
+		case CACHE_PREFETCH:
+			writel(0x201F0008, &msg->body[0]);
+			break;
+		case CACHE_SMARTFETCH:
+			if (req->nr_sectors > 16)
+				writel(0x201F0008, &msg->body[0]);
+			else
+				writel(0x001F0000, &msg->body[0]);
+			break;
+		}
+	} else {
+		writel(I2O_CMD_BLOCK_WRITE << 24 | HOST_TID << 12 | tid,
+		       &msg->u.head[1]);
+		sg_flags = 0x14000000;
+		switch (dev->wcache) {
+		case CACHE_NULL:
+			writel(0, &msg->body[0]);
+			break;
+		case CACHE_WRITETHROUGH:
+			writel(0x001F0008, &msg->body[0]);
+			break;
+		case CACHE_WRITEBACK:
+			writel(0x001F0010, &msg->body[0]);
+			break;
+		case CACHE_SMARTBACK:
+			if (req->nr_sectors > 16)
+				writel(0x001F0004, &msg->body[0]);
+			else
+				writel(0x001F0010, &msg->body[0]);
+			break;
+		case CACHE_SMARTTHROUGH:
+			if (req->nr_sectors > 16)
+				writel(0x001F0004, &msg->body[0]);
+			else
+				writel(0x001F0010, &msg->body[0]);
+		}
+	}
+
+	for (i = sgnum; i > 0; i--) {
+		if (i == 1)
+			sg_flags |= 0x80000000;
+		writel(sg_flags | sg_dma_len(sg), mptr);
+		writel(sg_dma_address(sg), mptr + 4);
+		mptr += 8;
+		sg++;
+	}
+
+	writel(I2O_MESSAGE_SIZE
+	       (((unsigned long)mptr -
+		 (unsigned long)&msg->u.head[0]) >> 2) | SGL_OFFSET_8,
+	       &msg->u.head[0]);
+
+	list_add_tail(&ireq->queue, &dev->open_queue);
+	dev->open_queue_depth++;
+
+	i2o_msg_post(c, m);
+
+	return 0;
+
+      context_remove:
+	i2o_cntxt_list_remove(c, req);
+
+      nop_msg:
+	i2o_msg_nop(c, m);
+
+      exit:
+	return rc;
+};
+
+/**
+ *	i2o_block_request_fn - request queue handling function
+ *	@q: request queue from which the request could be fetched
+ *
+ *	Takes the next request from the queue, transfers it and, if no error
+ *	occurs, dequeues it from the queue. On arrival of the reply the message
+ *	will be processed further. If an error occurs the request is retried later.
+ */
+static void i2o_block_request_fn(struct request_queue *q)
+{
+	struct request *req;
+
+	while (!blk_queue_plugged(q)) {
+		req = elv_next_request(q);
+		if (!req)
+			break;
+
+		if (blk_fs_request(req)) {
+			struct i2o_block_delayed_request *dreq;
+			struct i2o_block_request *ireq = req->special;
+			unsigned int queue_depth;
+
+			queue_depth = ireq->i2o_blk_dev->open_queue_depth;
+
+			if (queue_depth < I2O_BLOCK_MAX_OPEN_REQUESTS)
+				if (!i2o_block_transfer(req)) {
+					blkdev_dequeue_request(req);
+					continue;
+				}
+
+			if (queue_depth)
+				break;
+
+			/* stop the queue and retry later */
+			dreq = kmalloc(sizeof(*dreq), GFP_ATOMIC);
+			if (!dreq)
+				continue;
+
+			dreq->queue = q;
+			INIT_WORK(&dreq->work, i2o_block_delayed_request_fn,
+				  dreq);
+
+			osm_info("transfer error\n");
+			if (!queue_delayed_work(i2o_block_driver.event_queue,
+						&dreq->work,
+						I2O_BLOCK_RETRY_TIME))
+				kfree(dreq);
+			else {
+				blk_stop_queue(q);
+				break;
+			}
+		} else
+			end_request(req, 0);
+	}
+};
+
+/* I2O Block device operations definition */
+static struct block_device_operations i2o_block_fops = {
+	.owner = THIS_MODULE,
+	.open = i2o_block_open,
+	.release = i2o_block_release,
+	.ioctl = i2o_block_ioctl,
+	.media_changed = i2o_block_media_changed
+};
+
+/**
+ *	i2o_block_device_alloc - Allocate memory for a I2O Block device
+ *
+ *	Allocate memory for the i2o_block_device struct, gendisk and request
+ *	queue, and initialize them as far as possible without additional
+ *	device information.
+ *
+ *	Returns a pointer to the allocated I2O Block device on success or a
+ *	negative error code on failure.
+ */
+static struct i2o_block_device *i2o_block_device_alloc(void)
+{
+	struct i2o_block_device *dev;
+	struct gendisk *gd;
+	struct request_queue *queue;
+	int rc;
+
+	dev = kmalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev) {
+		osm_err("Insufficient memory to allocate I2O Block disk.\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+	memset(dev, 0, sizeof(*dev));
+
+	INIT_LIST_HEAD(&dev->open_queue);
+	spin_lock_init(&dev->lock);
+	dev->rcache = CACHE_PREFETCH;
+	dev->wcache = CACHE_WRITEBACK;
+
+	/* allocate a gendisk with 16 partitions */
+	gd = alloc_disk(16);
+	if (!gd) {
+		osm_err("Insufficient memory to allocate gendisk.\n");
+		rc = -ENOMEM;
+		goto cleanup_dev;
+	}
+
+	/* initialize the request queue */
+	queue = blk_init_queue(i2o_block_request_fn, &dev->lock);
+	if (!queue) {
+		osm_err("Insufficient memory to allocate request queue.\n");
+		rc = -ENOMEM;
+		goto cleanup_queue;
+	}
+
+	blk_queue_prep_rq(queue, i2o_block_prep_req_fn);
+
+	gd->major = I2O_MAJOR;
+	gd->queue = queue;
+	gd->fops = &i2o_block_fops;
+	gd->private_data = dev;
+
+	dev->gd = gd;
+
+	return dev;
+
+      cleanup_queue:
+	put_disk(gd);
+
+      cleanup_dev:
+	kfree(dev);
+
+      exit:
+	return ERR_PTR(rc);
+};
+
+/**
+ *	i2o_block_probe - verify if dev is a I2O Block device and install it
+ *	@dev: device to verify if it is a I2O Block device
+ *
+ *	We only verify if the user_tid of the device is 0xfff and then install
+ *	the device. Otherwise it is used by some other device (e.g. RAID).
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_block_probe(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+	struct i2o_block_device *i2o_blk_dev;
+	struct i2o_controller *c = i2o_dev->iop;
+	struct gendisk *gd;
+	struct request_queue *queue;
+	static int unit = 0;
+	int rc;
+	u64 size;
+	u32 blocksize;
+	u16 power;
+	u32 flags, status;
+	int segments;
+
+	/* skip devices which are used by IOP */
+	if (i2o_dev->lct_data.user_tid != 0xfff) {
+		osm_debug("skipping used device %03x\n", i2o_dev->lct_data.tid);
+		return -ENODEV;
+	}
+
+	osm_info("New device detected (TID: %03x)\n", i2o_dev->lct_data.tid);
+
+	if (i2o_device_claim(i2o_dev)) {
+		osm_warn("Unable to claim device. Installation aborted\n");
+		rc = -EFAULT;
+		goto exit;
+	}
+
+	i2o_blk_dev = i2o_block_device_alloc();
+	if (IS_ERR(i2o_blk_dev)) {
+		osm_err("could not alloc a new I2O block device");
+		rc = PTR_ERR(i2o_blk_dev);
+		goto claim_release;
+	}
+
+	i2o_blk_dev->i2o_dev = i2o_dev;
+	dev_set_drvdata(dev, i2o_blk_dev);
+
+	/* setup gendisk */
+	gd = i2o_blk_dev->gd;
+	gd->first_minor = unit << 4;
+	sprintf(gd->disk_name, "i2o/hd%c", 'a' + unit);
+	sprintf(gd->devfs_name, "i2o/hd%c", 'a' + unit);
+	gd->driverfs_dev = &i2o_dev->device;
+
+	/* setup request queue */
+	queue = gd->queue;
+	queue->queuedata = i2o_blk_dev;
+
+	blk_queue_max_phys_segments(queue, I2O_MAX_SEGMENTS);
+	blk_queue_max_sectors(queue, I2O_MAX_SECTORS);
+
+	if (c->short_req)
+		segments = 8;
+	else {
+		i2o_status_block *sb;
+
+		sb = c->status_block.virt;
+
+		segments = (sb->inbound_frame_size -
+			    sizeof(struct i2o_message) / 4 - 4) / 2;
+	}
+
+	blk_queue_max_hw_segments(queue, segments);
+
+	osm_debug("max sectors = %d\n", I2O_MAX_SECTORS);
+	osm_debug("phys segments = %d\n", I2O_MAX_SEGMENTS);
+	osm_debug("hw segments = %d\n", segments);
+
+	/*
+	 *      Ask for the current media data. If that isn't supported
+	 *      then we ask for the device capacity data
+	 */
+	if (i2o_parm_field_get(i2o_dev, 0x0004, 1, &blocksize, 4) != 0
+	    || i2o_parm_field_get(i2o_dev, 0x0004, 0, &size, 8) != 0) {
+		i2o_parm_field_get(i2o_dev, 0x0000, 3, &blocksize, 4);
+		i2o_parm_field_get(i2o_dev, 0x0000, 4, &size, 8);
+	}
+	osm_debug("blocksize = %d\n", blocksize);
+
+	if (i2o_parm_field_get(i2o_dev, 0x0000, 2, &power, 2))
+		power = 0;
+	i2o_parm_field_get(i2o_dev, 0x0000, 5, &flags, 4);
+	i2o_parm_field_get(i2o_dev, 0x0000, 6, &status, 4);
+
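+	/* size is in bytes here; set_capacity() wants 512-byte sectors */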
+	set_capacity(gd, size >> 9);
+
+	i2o_event_register(i2o_dev, &i2o_block_driver, 0, 0xffffffff);
+
+	add_disk(gd);
+
+	unit++;
+
+	return 0;
+
+      claim_release:
+	i2o_device_claim_release(i2o_dev);
+
+      exit:
+	return rc;
+};
+
+/* Block OSM driver struct */
+static struct i2o_driver i2o_block_driver = {
+	.name = OSM_NAME,
+	.event = i2o_block_event,
+	.reply = i2o_block_reply,
+	.classes = i2o_block_class_id,
+	.driver = {
+		   .probe = i2o_block_probe,
+		   .remove = i2o_block_remove,
+		   },
+};
+
+/**
+ *	i2o_block_init - Block OSM initialization function
+ *
+ *	Allocates the slab and mempool for request structs, registers the
+ *	i2o_block block device and finally registers the Block OSM with the
+ *	I2O core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_block_init(void)
+{
+	int rc;
+	int size;
+
+	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+	/* Allocate request mempool and slab */
+	size = sizeof(struct i2o_block_request);
+	i2o_blk_req_pool.slab = kmem_cache_create("i2o_block_req", size, 0,
+						  SLAB_HWCACHE_ALIGN, NULL,
+						  NULL);
+	if (!i2o_blk_req_pool.slab) {
+		osm_err("can't init request slab\n");
+		rc = -ENOMEM;
+		goto exit;
+	}
+
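+	/*
+	 * The mempool keeps a minimum number of request structs preallocated so
+	 * that block I/O can still make progress under memory pressure.
+	 */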
+	i2o_blk_req_pool.pool = mempool_create(I2O_REQ_MEMPOOL_SIZE,
+					       mempool_alloc_slab,
+					       mempool_free_slab,
+					       i2o_blk_req_pool.slab);
+	if (!i2o_blk_req_pool.pool) {
+		osm_err("can't init request mempool\n");
+		rc = -ENOMEM;
+		goto free_slab;
+	}
+
+	/* Register the block device interfaces */
+	rc = register_blkdev(I2O_MAJOR, "i2o_block");
+	if (rc) {
+		osm_err("unable to register block device\n");
+		goto free_mempool;
+	}
+#ifdef MODULE
+	osm_info("registered device at major %d\n", I2O_MAJOR);
+#endif
+
+	/* Register Block OSM into I2O core */
+	rc = i2o_driver_register(&i2o_block_driver);
+	if (rc) {
+		osm_err("Could not register Block driver\n");
+		goto unregister_blkdev;
+	}
+
+	return 0;
+
+      unregister_blkdev:
+	unregister_blkdev(I2O_MAJOR, "i2o_block");
+
+      free_mempool:
+	mempool_destroy(i2o_blk_req_pool.pool);
+
+      free_slab:
+	kmem_cache_destroy(i2o_blk_req_pool.slab);
+
+      exit:
+	return rc;
+};
+
+/**
+ *	i2o_block_exit - Block OSM exit function
+ *
+ *	Unregisters Block OSM from I2O core, unregisters i2o_block block device
+ *	and frees the mempool and slab.
+ */
+static void __exit i2o_block_exit(void)
+{
+	/* Unregister I2O Block OSM from I2O core */
+	i2o_driver_unregister(&i2o_block_driver);
+
+	/* Unregister block device */
+	unregister_blkdev(I2O_MAJOR, "i2o_block");
+
+	/* Free request mempool and slab */
+	mempool_destroy(i2o_blk_req_pool.pool);
+	kmem_cache_destroy(i2o_blk_req_pool.slab);
+};
+
+MODULE_AUTHOR("Red Hat");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_block_init);
+module_exit(i2o_block_exit);
diff --git a/drivers/message/i2o/i2o_block.h b/drivers/message/i2o/i2o_block.h
new file mode 100644
index 0000000..ddd9a15
--- /dev/null
+++ b/drivers/message/i2o/i2o_block.h
@@ -0,0 +1,99 @@
+/*
+ *	Block OSM structures/API
+ *
+ * 	Copyright (C) 1999-2002	Red Hat Software
+ *
+ *	Written by Alan Cox, Building Number Three Ltd
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful, but
+ *	WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *	General Public License for more details.
+ *
+ *	For the purpose of avoiding doubt the preferred form of the work
+ *	for making modifications shall be a standards compliant form such as
+ *	gzipped tar and not one requiring a proprietary or patent encumbered
+ *	tool to unpack.
+ *
+ *	Fixes/additions:
+ *		Steve Ralston:
+ *			Multiple device handling error fixes,
+ *			Added a queue depth.
+ *		Alan Cox:
+ *			FC920 has an RMW bug. Don't OR in the end marker.
+ *			Removed queue walk, fixed for 64bitness.
+ *			Rewrote much of the code over time
+ *			Added indirect block lists
+ *			Handle 64K limits on many controllers
+ *			Don't use indirects on the Promise (breaks)
+ *			Heavily chop down the queue depths
+ *		Deepak Saxena:
+ *			Independent queues per IOP
+ *			Support for dynamic device creation/deletion
+ *			Code cleanup
+ *	    		Support for larger I/Os through merge* functions
+ *			(taken from DAC960 driver)
+ *		Boji T Kannanthanam:
+ *			Set the I2O Block devices to be detected in increasing
+ *			order of TIDs during boot.
+ *			Search and set the I2O block device that we boot off
+ *			from as the first device to be claimed (as /dev/i2o/hda)
+ *			Properly attach/detach I2O gendisk structure from the
+ *			system gendisk list. The I2O block devices now appear in
+ *			/proc/partitions.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Minor bugfixes for 2.6.
+ */
+
+#ifndef I2O_BLOCK_OSM_H
+#define I2O_BLOCK_OSM_H
+
+#define I2O_BLOCK_RETRY_TIME (HZ/4)
+#define I2O_BLOCK_MAX_OPEN_REQUESTS 50
+
+/* I2O Block OSM mempool struct */
+struct i2o_block_mempool {
+	kmem_cache_t	*slab;
+	mempool_t	*pool;
+};
+
+/* I2O Block device descriptor */
+struct i2o_block_device {
+	struct i2o_device *i2o_dev;	/* pointer to I2O device */
+	struct gendisk *gd;
+	spinlock_t lock;		/* queue lock */
+	struct list_head open_queue;	/* list of transferred, but unfinished
+					   requests */
+	unsigned int open_queue_depth;	/* number of requests in the queue */
+
+	int rcache;			/* read cache flags */
+	int wcache;			/* write cache flags */
+	int flags;
+	int power;			/* power state */
+	int media_change_flag;		/* media changed flag */
+};
+
+/* I2O Block device request */
+struct i2o_block_request
+{
+	struct list_head queue;
+	struct request *req;		/* corresponding request */
+	struct i2o_block_device *i2o_blk_dev;	/* I2O block device */
+	int sg_dma_direction;		/* direction of DMA buffer read/write */
+	int sg_nents;			/* number of SG elements */
+	struct scatterlist sg_table[I2O_MAX_SEGMENTS]; /* SG table */
+};
+
+/* I2O Block device delayed request */
+struct i2o_block_delayed_request
+{
+	struct work_struct work;
+	struct request_queue *queue;
+};
+
+#endif
diff --git a/drivers/message/i2o/i2o_config.c b/drivers/message/i2o/i2o_config.c
new file mode 100644
index 0000000..5fc5004
--- /dev/null
+++ b/drivers/message/i2o/i2o_config.c
@@ -0,0 +1,1160 @@
+/*
+ * I2O Configuration Interface Driver
+ *
+ * (C) Copyright 1999-2002  Red Hat
+ *
+ * Written by Alan Cox, Building Number Three Ltd
+ *
+ * Fixes/additions:
+ *	Deepak Saxena (04/20/1999):
+ *		Added basic ioctl() support
+ *	Deepak Saxena (06/07/1999):
+ *		Added software download ioctl (still testing)
+ *	Auvo Häkkinen (09/10/1999):
+ *		Changes to i2o_cfg_reply(), ioctl_parms()
+ *		Added ioct_validate()
+ *	Taneli Vähäkangas (09/30/1999):
+ *		Fixed ioctl_swdl()
+ *	Taneli Vähäkangas (10/04/1999):
+ *		Changed ioctl_swdl(), implemented ioctl_swul() and ioctl_swdel()
+ *	Deepak Saxena (11/18/1999):
+ *		Added event management support
+ *	Alan Cox <alan@redhat.com>:
+ *		2.4 rewrite ported to 2.5
+ *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *		Added pass-thru support for Adaptec's raidutils
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/spinlock.h>
+#include <linux/smp_lock.h>
+#include <linux/ioctl32.h>
+#include <linux/compat.h>
+#include <linux/syscalls.h>
+
+#include <asm/uaccess.h>
+#include <asm/io.h>
+
+#define OSM_NAME	"config-osm"
+#define OSM_VERSION	"$Rev$"
+#define OSM_DESCRIPTION	"I2O Configuration OSM"
+
+extern int i2o_parm_issue(struct i2o_device *, int, void *, int, void *, int);
+
+static spinlock_t i2o_config_lock;
+
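+/* advance a circular event-queue index x, wrapping around at y */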
+#define MODINC(x,y) ((x) = ((x) + 1) % (y))
+
+struct sg_simple_element {
+	u32 flag_count;
+	u32 addr_bus;
+};
+
+struct i2o_cfg_info {
+	struct file *fp;
+	struct fasync_struct *fasync;
+	struct i2o_evt_info event_q[I2O_EVT_Q_LEN];
+	u16 q_in;		// Queue head index
+	u16 q_out;		// Queue tail index
+	u16 q_len;		// Queue length
+	u16 q_lost;		// Number of lost events
+	ulong q_id;		// Event queue ID...used as tx_context
+	struct i2o_cfg_info *next;
+};
+static struct i2o_cfg_info *open_files = NULL;
+static ulong i2o_cfg_info_id = 0;
+
+/*
+ *	Each of these describes an i2o message handler. They are
+ *	multiplexed by the i2o_core code
+ */
+
+static struct i2o_driver i2o_config_driver = {
+	.name = OSM_NAME
+};
+
+static int i2o_cfg_getiops(unsigned long arg)
+{
+	struct i2o_controller *c;
+	u8 __user *user_iop_table = (void __user *)arg;
+	u8 tmp[MAX_I2O_CONTROLLERS];
+	int ret = 0;
+
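+	/* build a one-byte-per-IOP presence map and copy it out to user space */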
+	memset(tmp, 0, MAX_I2O_CONTROLLERS);
+
+	list_for_each_entry(c, &i2o_controllers, list)
+	    tmp[c->unit] = 1;
+
+	if (copy_to_user(user_iop_table, tmp, MAX_I2O_CONTROLLERS))
+		ret = -EFAULT;
+
+	return ret;
+};
+
+static int i2o_cfg_gethrt(unsigned long arg)
+{
+	struct i2o_controller *c;
+	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
+	struct i2o_cmd_hrtlct kcmd;
+	i2o_hrt *hrt;
+	int len;
+	u32 reslen;
+	int ret = 0;
+
+	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+		return -EFAULT;
+
+	if (get_user(reslen, kcmd.reslen) < 0)
+		return -EFAULT;
+
+	if (kcmd.resbuf == NULL)
+		return -EFAULT;
+
+	c = i2o_find_iop(kcmd.iop);
+	if (!c)
+		return -ENXIO;
+
+	hrt = (i2o_hrt *) c->hrt.virt;
+
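+	/* the HRT header is 8 bytes; entry_len is counted in 32-bit words */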
+	len = 8 + ((hrt->entry_len * hrt->num_entries) << 2);
+
+	/* We already did a get_user() on this address, so assume it is safe to write back */
+	put_user(len, kcmd.reslen);
+	if (len > reslen)
+		ret = -ENOBUFS;
+	if (copy_to_user(kcmd.resbuf, (void *)hrt, len))
+		ret = -EFAULT;
+
+	return ret;
+};
+
+static int i2o_cfg_getlct(unsigned long arg)
+{
+	struct i2o_controller *c;
+	struct i2o_cmd_hrtlct __user *cmd = (struct i2o_cmd_hrtlct __user *)arg;
+	struct i2o_cmd_hrtlct kcmd;
+	i2o_lct *lct;
+	int len;
+	int ret = 0;
+	u32 reslen;
+
+	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_hrtlct)))
+		return -EFAULT;
+
+	if (get_user(reslen, kcmd.reslen) < 0)
+		return -EFAULT;
+
+	if (kcmd.resbuf == NULL)
+		return -EFAULT;
+
+	c = i2o_find_iop(kcmd.iop);
+	if (!c)
+		return -ENXIO;
+
+	lct = (i2o_lct *) c->lct;
+
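+	/* table_size is in 32-bit words; convert to bytes */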
+	len = (unsigned int)lct->table_size << 2;
+	put_user(len, kcmd.reslen);
+	if (len > reslen)
+		ret = -ENOBUFS;
+	else if (copy_to_user(kcmd.resbuf, lct, len))
+		ret = -EFAULT;
+
+	return ret;
+};
+
+static int i2o_cfg_parms(unsigned long arg, unsigned int type)
+{
+	int ret = 0;
+	struct i2o_controller *c;
+	struct i2o_device *dev;
+	struct i2o_cmd_psetget __user *cmd =
+	    (struct i2o_cmd_psetget __user *)arg;
+	struct i2o_cmd_psetget kcmd;
+	u32 reslen;
+	u8 *ops;
+	u8 *res;
+	int len = 0;
+
+	u32 i2o_cmd = (type == I2OPARMGET ?
+		       I2O_CMD_UTIL_PARAMS_GET : I2O_CMD_UTIL_PARAMS_SET);
+
+	if (copy_from_user(&kcmd, cmd, sizeof(struct i2o_cmd_psetget)))
+		return -EFAULT;
+
+	if (get_user(reslen, kcmd.reslen))
+		return -EFAULT;
+
+	c = i2o_find_iop(kcmd.iop);
+	if (!c)
+		return -ENXIO;
+
+	dev = i2o_iop_find_device(c, kcmd.tid);
+	if (!dev)
+		return -ENXIO;
+
+	ops = (u8 *) kmalloc(kcmd.oplen, GFP_KERNEL);
+	if (!ops)
+		return -ENOMEM;
+
+	if (copy_from_user(ops, kcmd.opbuf, kcmd.oplen)) {
+		kfree(ops);
+		return -EFAULT;
+	}
+
+	/*
+	 * It's possible to have a _very_ large table
+	 * and that the user asks for all of it at once...
+	 */
+	res = (u8 *) kmalloc(65536, GFP_KERNEL);
+	if (!res) {
+		kfree(ops);
+		return -ENOMEM;
+	}
+
+	len = i2o_parm_issue(dev, i2o_cmd, ops, kcmd.oplen, res, 65536);
+	kfree(ops);
+
+	if (len < 0) {
+		kfree(res);
+		return -EAGAIN;
+	}
+
+	put_user(len, kcmd.reslen);
+	if (len > reslen)
+		ret = -ENOBUFS;
+	else if (copy_to_user(kcmd.resbuf, res, len))
+		ret = -EFAULT;
+
+	kfree(res);
+
+	return ret;
+};
+
+static int i2o_cfg_swdl(unsigned long arg)
+{
+	struct i2o_sw_xfer kxfer;
+	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+	unsigned char maxfrag = 0, curfrag = 1;
+	struct i2o_dma buffer;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	unsigned int status = 0, swlen = 0, fragsize = 8192;
+	struct i2o_controller *c;
+
+	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+		return -EFAULT;
+
+	if (get_user(swlen, kxfer.swlen) < 0)
+		return -EFAULT;
+
+	if (get_user(maxfrag, kxfer.maxfrag) < 0)
+		return -EFAULT;
+
+	if (get_user(curfrag, kxfer.curfrag) < 0)
+		return -EFAULT;
+
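+	/* the last fragment carries whatever is left over from the full 8 kB chunks */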
+	if (curfrag == maxfrag)
+		fragsize = swlen - (maxfrag - 1) * 8192;
+
+	if (!kxfer.buf || !access_ok(VERIFY_READ, kxfer.buf, fragsize))
+		return -EFAULT;
+
+	c = i2o_find_iop(kxfer.iop);
+	if (!c)
+		return -ENXIO;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+		i2o_msg_nop(c, m);
+		return -ENOMEM;
+	}
+
+	__copy_from_user(buffer.virt, kxfer.buf, fragsize);
+
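+	/*
+	 * Build a nine-word SW_DOWNLOAD request with a single SG element
+	 * pointing at the DMA buffer filled above.
+	 */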
+	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
+	writel(I2O_CMD_SW_DOWNLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_config_driver.context, &msg->u.head[2]);
+	writel(0, &msg->u.head[3]);
+	writel((((u32) kxfer.flags) << 24) | (((u32) kxfer.sw_type) << 16) |
+	       (((u32) maxfrag) << 8) | (((u32) curfrag)), &msg->body[0]);
+	writel(swlen, &msg->body[1]);
+	writel(kxfer.sw_id, &msg->body[2]);
+	writel(0xD0000000 | fragsize, &msg->body[3]);
+	writel(buffer.phys, &msg->body[4]);
+
+	osm_debug("swdl frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+
+	if (status != -ETIMEDOUT)
+		i2o_dma_free(&c->pdev->dev, &buffer);
+
+	if (status != I2O_POST_WAIT_OK) {
+		// it fails if you try to send fragments out of order,
+		// and for some as yet unknown reasons as well
+		osm_info("swdl failed, DetailedStatus = %d\n", status);
+		return status;
+	}
+
+	return 0;
+};
+
+static int i2o_cfg_swul(unsigned long arg)
+{
+	struct i2o_sw_xfer kxfer;
+	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+	unsigned char maxfrag = 0, curfrag = 1;
+	struct i2o_dma buffer;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	unsigned int status = 0, swlen = 0, fragsize = 8192;
+	struct i2o_controller *c;
+	int ret = 0;
+
+	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+		goto return_fault;
+
+	if (get_user(swlen, kxfer.swlen) < 0)
+		goto return_fault;
+
+	if (get_user(maxfrag, kxfer.maxfrag) < 0)
+		goto return_fault;
+
+	if (get_user(curfrag, kxfer.curfrag) < 0)
+		goto return_fault;
+
+	if (curfrag == maxfrag)
+		fragsize = swlen - (maxfrag - 1) * 8192;
+
+	if (!kxfer.buf)
+		goto return_fault;
+
+	c = i2o_find_iop(kxfer.iop);
+	if (!c)
+		return -ENXIO;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	if (i2o_dma_alloc(&c->pdev->dev, &buffer, fragsize, GFP_KERNEL)) {
+		i2o_msg_nop(c, m);
+		return -ENOMEM;
+	}
+
+	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_7, &msg->u.head[0]);
+	writel(I2O_CMD_SW_UPLOAD << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_config_driver.context, &msg->u.head[2]);
+	writel(0, &msg->u.head[3]);
+	writel((u32) kxfer.flags << 24 | (u32) kxfer.
+	       sw_type << 16 | (u32) maxfrag << 8 | (u32) curfrag,
+	       &msg->body[0]);
+	writel(swlen, &msg->body[1]);
+	writel(kxfer.sw_id, &msg->body[2]);
+	writel(0xD0000000 | fragsize, &msg->body[3]);
+	writel(buffer.phys, &msg->body[4]);
+
+	osm_debug("swul frag %d/%d (size %d)\n", curfrag, maxfrag, fragsize);
+	status = i2o_msg_post_wait_mem(c, m, 60, &buffer);
+
+	if (status != I2O_POST_WAIT_OK) {
+		if (status != -ETIMEDOUT)
+			i2o_dma_free(&c->pdev->dev, &buffer);
+
+		osm_info("swul failed, DetailedStatus = %d\n", status);
+		return status;
+	}
+
+	if (copy_to_user(kxfer.buf, buffer.virt, fragsize))
+		ret = -EFAULT;
+
+	i2o_dma_free(&c->pdev->dev, &buffer);
+
+return_ret:
+	return ret;
+return_fault:
+	ret = -EFAULT;
+	goto return_ret;
+};
+
+static int i2o_cfg_swdel(unsigned long arg)
+{
+	struct i2o_controller *c;
+	struct i2o_sw_xfer kxfer;
+	struct i2o_sw_xfer __user *pxfer = (struct i2o_sw_xfer __user *)arg;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	unsigned int swlen;
+	int token;
+
+	if (copy_from_user(&kxfer, pxfer, sizeof(struct i2o_sw_xfer)))
+		return -EFAULT;
+
+	if (get_user(swlen, kxfer.swlen) < 0)
+		return -EFAULT;
+
+	c = i2o_find_iop(kxfer.iop);
+	if (!c)
+		return -ENXIO;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	writel(SEVEN_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_SW_REMOVE << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_config_driver.context, &msg->u.head[2]);
+	writel(0, &msg->u.head[3]);
+	writel((u32) kxfer.flags << 24 | (u32) kxfer.sw_type << 16,
+	       &msg->body[0]);
+	writel(swlen, &msg->body[1]);
+	writel(kxfer.sw_id, &msg->body[2]);
+
+	token = i2o_msg_post_wait(c, m, 10);
+
+	if (token != I2O_POST_WAIT_OK) {
+		osm_info("swdel failed, DetailedStatus = %d\n", token);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+};
+
+static int i2o_cfg_validate(unsigned long arg)
+{
+	int token;
+	int iop = (int)arg;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	struct i2o_controller *c;
+
+	c = i2o_find_iop(iop);
+	if (!c)
+		return -ENXIO;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_CONFIG_VALIDATE << 24 | HOST_TID << 12 | iop,
+	       &msg->u.head[1]);
+	writel(i2o_config_driver.context, &msg->u.head[2]);
+	writel(0, &msg->u.head[3]);
+
+	token = i2o_msg_post_wait(c, m, 10);
+
+	if (token != I2O_POST_WAIT_OK) {
+		osm_info("Can't validate configuration, ErrorStatus = %d\n",
+			 token);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+};
+
+static int i2o_cfg_evt_reg(unsigned long arg, struct file *fp)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	struct i2o_evt_id __user *pdesc = (struct i2o_evt_id __user *)arg;
+	struct i2o_evt_id kdesc;
+	struct i2o_controller *c;
+	struct i2o_device *d;
+
+	if (copy_from_user(&kdesc, pdesc, sizeof(struct i2o_evt_id)))
+		return -EFAULT;
+
+	/* IOP exists? */
+	c = i2o_find_iop(kdesc.iop);
+	if (!c)
+		return -ENXIO;
+
+	/* Device exists? */
+	d = i2o_iop_find_device(c, kdesc.tid);
+	if (!d)
+		return -ENODEV;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | kdesc.tid,
+	       &msg->u.head[1]);
+	writel(i2o_config_driver.context, &msg->u.head[2]);
+	writel(i2o_cntxt_list_add(c, fp->private_data), &msg->u.head[3]);
+	writel(kdesc.evt_mask, &msg->body[0]);
+
+	i2o_msg_post(c, m);
+
+	return 0;
+}
+
+static int i2o_cfg_evt_get(unsigned long arg, struct file *fp)
+{
+	struct i2o_cfg_info *p = NULL;
+	struct i2o_evt_get __user *uget = (struct i2o_evt_get __user *)arg;
+	struct i2o_evt_get kget;
+	unsigned long flags;
+
+	for (p = open_files; p; p = p->next)
+		if (p->q_id == (ulong) fp->private_data)
+			break;
+
+	if (!p || !p->q_len)
+		return -ENOENT;
+
+	memcpy(&kget.info, &p->event_q[p->q_out], sizeof(struct i2o_evt_info));
+	MODINC(p->q_out, I2O_EVT_Q_LEN);
+	spin_lock_irqsave(&i2o_config_lock, flags);
+	p->q_len--;
+	kget.pending = p->q_len;
+	kget.lost = p->q_lost;
+	spin_unlock_irqrestore(&i2o_config_lock, flags);
+
+	if (copy_to_user(uget, &kget, sizeof(struct i2o_evt_get)))
+		return -EFAULT;
+	return 0;
+}
+
+#ifdef CONFIG_COMPAT
+static int i2o_cfg_passthru32(unsigned fd, unsigned cmnd, unsigned long arg,
+			      struct file *file)
+{
+	struct i2o_cmd_passthru32 __user *cmd;
+	struct i2o_controller *c;
+	u32 __user *user_msg;
+	u32 *reply = NULL;
+	u32 __user *user_reply = NULL;
+	u32 size = 0;
+	u32 reply_size = 0;
+	u32 rcode = 0;
+	struct i2o_dma sg_list[SG_TABLESIZE];
+	u32 sg_offset = 0;
+	u32 sg_count = 0;
+	u32 i = 0;
+	i2o_status_block *sb;
+	struct i2o_message *msg;
+	u32 m;
+	unsigned int iop;
+
+	cmd = (struct i2o_cmd_passthru32 __user *)arg;
+
+	if (get_user(iop, &cmd->iop) || get_user(i, &cmd->msg))
+		return -EFAULT;
+
+	user_msg = compat_ptr(i);
+
+	c = i2o_find_iop(iop);
+	if (!c) {
+		osm_debug("controller %d not found\n", iop);
+		return -ENXIO;
+	}
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	sb = c->status_block.virt;
+
+	if (get_user(size, &user_msg[0])) {
+		osm_warn("unable to get size!\n");
+		return -EFAULT;
+	}
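+	/* the upper 16 bits of the first message word hold the frame size in 32-bit words */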
+	size = size >> 16;
+
+	if (size > sb->inbound_frame_size) {
+		osm_warn("size of message > inbound_frame_size\n");
+		return -EFAULT;
+	}
+
+	user_reply = &user_msg[size];
+
+	size <<= 2;		// Convert to bytes
+
+	/* Copy in the user's I2O command */
+	if (copy_from_user(msg, user_msg, size)) {
+		osm_warn("unable to copy user message\n");
+		return -EFAULT;
+	}
+	i2o_dump_message(msg);
+
+	if (get_user(reply_size, &user_reply[0]) < 0)
+		return -EFAULT;
+
+	reply_size >>= 16;
+	reply_size <<= 2;
+
+	reply = kmalloc(reply_size, GFP_KERNEL);
+	if (!reply) {
+		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
+		       c->name);
+		return -ENOMEM;
+	}
+	memset(reply, 0, reply_size);
+
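+	/* the SGL offset (in 32-bit words) lives in bits 4-7 of the message header */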
+	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
+
+	writel(i2o_config_driver.context, &msg->u.s.icntxt);
+	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
+
+	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
+	if (sg_offset) {
+		struct sg_simple_element *sg;
+
+		if (sg_offset * 4 >= size) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		// TODO 64bit fix
+		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
+						  sg_offset);
+		sg_count =
+		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+		if (sg_count > SG_TABLESIZE) {
+			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
+			       c->name, sg_count);
+			kfree(reply);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < sg_count; i++) {
+			int sg_size;
+			struct i2o_dma *p;
+
+			if (!(sg[i].flag_count & 0x10000000
+			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
+				printk(KERN_DEBUG
+				       "%s:Bad SG element %d - not simple (%x)\n",
+				       c->name, i, sg[i].flag_count);
+				rcode = -EINVAL;
+				goto cleanup;
+			}
+			sg_size = sg[i].flag_count & 0xffffff;
+			p = &(sg_list[i]);
+			/* Allocate memory for the transfer */
+			if (i2o_dma_alloc
+			    (&c->pdev->dev, p, sg_size,
+			     PCI_DMA_BIDIRECTIONAL)) {
+				printk(KERN_DEBUG
+				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+				       c->name, sg_size, i, sg_count);
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+			/* Copy in the user's SG buffer if necessary */
+			if (sg[i].
+			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
+				// TODO 64bit fix
+				if (copy_from_user
+				    (p->virt, (void __user *)(unsigned long)sg[i].addr_bus,
+				     sg_size)) {
+					printk(KERN_DEBUG
+					       "%s: Could not copy SG buf %d FROM user\n",
+					       c->name, i);
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+			//TODO 64bit fix
+			sg[i].addr_bus = (u32) p->phys;
+		}
+	}
+
+	rcode = i2o_msg_post_wait(c, m, 60);
+	if (rcode)
+		goto cleanup;
+
+	if (sg_offset) {
+		u32 msg[128];
+		/* Copy the scatter-gather buffers back to user space */
+		u32 j;
+		// TODO 64bit fix
+		struct sg_simple_element *sg;
+		int sg_size;
+
+		// re-read the original message to correctly handle the SG copy operation
+		memset(&msg, 0, MSG_FRAME_SIZE * 4);
+		// get user msg size in u32s
+		if (get_user(size, &user_msg[0])) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		size = size >> 16;
+		size *= 4;
+		/* Copy in the user's I2O command */
+		if (copy_from_user(msg, user_msg, size)) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		sg_count =
+		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+
+		// TODO 64bit fix
+		sg = (struct sg_simple_element *)(msg + sg_offset);
+		for (j = 0; j < sg_count; j++) {
+			/* Copy out the SG list to user's buffer if necessary */
+			if (!
+			    (sg[j].
+			     flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
+				sg_size = sg[j].flag_count & 0xffffff;
+				// TODO 64bit fix
+				if (copy_to_user
+				    ((void __user *)(u64) sg[j].addr_bus,
+				     sg_list[j].virt, sg_size)) {
+					printk(KERN_WARNING
+					       "%s: Could not copy %p TO user %x\n",
+					       c->name, sg_list[j].virt,
+					       sg[j].addr_bus);
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+		}
+	}
+
+	/* Copy back the reply to user space */
+	if (reply_size) {
+		// we wrote our own values for context - now restore the user supplied ones
+		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
+			printk(KERN_WARNING
+			       "%s: Could not copy message context FROM user\n",
+			       c->name);
+			rcode = -EFAULT;
+		}
+		if (copy_to_user(user_reply, reply, reply_size)) {
+			printk(KERN_WARNING
+			       "%s: Could not copy reply TO user\n", c->name);
+			rcode = -EFAULT;
+		}
+	}
+
+      cleanup:
+	kfree(reply);
+	return rcode;
+}
+
+#else
+
+static int i2o_cfg_passthru(unsigned long arg)
+{
+	struct i2o_cmd_passthru __user *cmd =
+	    (struct i2o_cmd_passthru __user *)arg;
+	struct i2o_controller *c;
+	u32 __user *user_msg;
+	u32 *reply = NULL;
+	u32 __user *user_reply = NULL;
+	u32 size = 0;
+	u32 reply_size = 0;
+	u32 rcode = 0;
+	void *sg_list[SG_TABLESIZE];
+	u32 sg_offset = 0;
+	u32 sg_count = 0;
+	int sg_index = 0;
+	u32 i = 0;
+	void *p = NULL;
+	i2o_status_block *sb;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	unsigned int iop;
+
+	if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
+		return -EFAULT;
+
+	c = i2o_find_iop(iop);
+	if (!c) {
+		osm_warn("controller %d not found\n", iop);
+		return -ENXIO;
+	}
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -EBUSY;
+
+	sb = c->status_block.virt;
+
+	if (get_user(size, &user_msg[0]))
+		return -EFAULT;
+	size = size >> 16;
+
+	if (size > sb->inbound_frame_size) {
+		osm_warn("size of message > inbound_frame_size\n");
+		return -EFAULT;
+	}
+
+	user_reply = &user_msg[size];
+
+	size <<= 2;		// Convert to bytes
+
+	/* Copy in the user's I2O command */
+	if (copy_from_user(msg, user_msg, size))
+		return -EFAULT;
+
+	if (get_user(reply_size, &user_reply[0]) < 0)
+		return -EFAULT;
+
+	reply_size >>= 16;
+	reply_size <<= 2;
+
+	reply = kmalloc(reply_size, GFP_KERNEL);
+	if (!reply) {
+		printk(KERN_WARNING "%s: Could not allocate reply buffer\n",
+		       c->name);
+		return -ENOMEM;
+	}
+	memset(reply, 0, reply_size);
+
+	sg_offset = (msg->u.head[0] >> 4) & 0x0f;
+
+	writel(i2o_config_driver.context, &msg->u.s.icntxt);
+	writel(i2o_cntxt_list_add(c, reply), &msg->u.s.tcntxt);
+
+	memset(sg_list, 0, sizeof(sg_list[0]) * SG_TABLESIZE);
+	if (sg_offset) {
+		struct sg_simple_element *sg;
+
+		if (sg_offset * 4 >= size) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		// TODO 64bit fix
+		sg = (struct sg_simple_element *)((&msg->u.head[0]) +
+						  sg_offset);
+		sg_count =
+		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+		if (sg_count > SG_TABLESIZE) {
+			printk(KERN_DEBUG "%s:IOCTL SG List too large (%u)\n",
+			       c->name, sg_count);
+			kfree(reply);
+			return -EINVAL;
+		}
+
+		for (i = 0; i < sg_count; i++) {
+			int sg_size;
+
+			if (!(sg[i].flag_count & 0x10000000
+			      /*I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT */ )) {
+				printk(KERN_DEBUG
+				       "%s:Bad SG element %d - not simple (%x)\n",
+				       c->name, i, sg[i].flag_count);
+				rcode = -EINVAL;
+				goto cleanup;
+			}
+			sg_size = sg[i].flag_count & 0xffffff;
+			/* Allocate memory for the transfer */
+			p = kmalloc(sg_size, GFP_KERNEL);
+			if (!p) {
+				printk(KERN_DEBUG
+				       "%s: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+				       c->name, sg_size, i, sg_count);
+				rcode = -ENOMEM;
+				goto cleanup;
+			}
+			sg_list[sg_index++] = p;	// sg_list is indexed by the input frame's SG order, not our internal frame's
+			/* Copy in the user's SG buffer if necessary */
+			if (sg[i].
+			    flag_count & 0x04000000 /*I2O_SGL_FLAGS_DIR */ ) {
+				// TODO 64bit fix
+				if (copy_from_user
+				    (p, (void __user *)sg[i].addr_bus,
+				     sg_size)) {
+					printk(KERN_DEBUG
+					       "%s: Could not copy SG buf %d FROM user\n",
+					       c->name, i);
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+			//TODO 64bit fix
+			sg[i].addr_bus = virt_to_bus(p);
+		}
+	}
+
+	rcode = i2o_msg_post_wait(c, m, 60);
+	if (rcode)
+		goto cleanup;
+
+	if (sg_offset) {
+		u32 msg[128];
+		/* Copy the scatter-gather buffers back to user space */
+		u32 j;
+		// TODO 64bit fix
+		struct sg_simple_element *sg;
+		int sg_size;
+
+		// re-read the original message to correctly handle the SG copy operation
+		memset(&msg, 0, MSG_FRAME_SIZE * 4);
+		// get user msg size in u32s
+		if (get_user(size, &user_msg[0])) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		size = size >> 16;
+		size *= 4;
+		/* Copy in the user's I2O command */
+		if (copy_from_user(msg, user_msg, size)) {
+			rcode = -EFAULT;
+			goto cleanup;
+		}
+		sg_count =
+		    (size - sg_offset * 4) / sizeof(struct sg_simple_element);
+
+		// TODO 64bit fix
+		sg = (struct sg_simple_element *)(msg + sg_offset);
+		for (j = 0; j < sg_count; j++) {
+			/* Copy out the SG list to user's buffer if necessary */
+			if (!
+			    (sg[j].
+			     flag_count & 0x4000000 /*I2O_SGL_FLAGS_DIR */ )) {
+				sg_size = sg[j].flag_count & 0xffffff;
+				// TODO 64bit fix
+				if (copy_to_user
+				    ((void __user *)sg[j].addr_bus, sg_list[j],
+				     sg_size)) {
+					printk(KERN_WARNING
+					       "%s: Could not copy %p TO user %x\n",
+					       c->name, sg_list[j],
+					       sg[j].addr_bus);
+					rcode = -EFAULT;
+					goto cleanup;
+				}
+			}
+		}
+	}
+
+	/* Copy back the reply to user space */
+	if (reply_size) {
+		// we wrote our own values for context - now restore the user supplied ones
+		if (copy_from_user(reply + 2, user_msg + 2, sizeof(u32) * 2)) {
+			printk(KERN_WARNING
+			       "%s: Could not copy message context FROM user\n",
+			       c->name);
+			rcode = -EFAULT;
+		}
+		if (copy_to_user(user_reply, reply, reply_size)) {
+			printk(KERN_WARNING
+			       "%s: Could not copy reply TO user\n", c->name);
+			rcode = -EFAULT;
+		}
+	}
+
+      cleanup:
+	kfree(reply);
+	return rcode;
+}
+#endif
+
+/*
+ * IOCTL Handler
+ */
+static int i2o_cfg_ioctl(struct inode *inode, struct file *fp, unsigned int cmd,
+			 unsigned long arg)
+{
+	int ret;
+
+	switch (cmd) {
+	case I2OGETIOPS:
+		ret = i2o_cfg_getiops(arg);
+		break;
+
+	case I2OHRTGET:
+		ret = i2o_cfg_gethrt(arg);
+		break;
+
+	case I2OLCTGET:
+		ret = i2o_cfg_getlct(arg);
+		break;
+
+	case I2OPARMSET:
+		ret = i2o_cfg_parms(arg, I2OPARMSET);
+		break;
+
+	case I2OPARMGET:
+		ret = i2o_cfg_parms(arg, I2OPARMGET);
+		break;
+
+	case I2OSWDL:
+		ret = i2o_cfg_swdl(arg);
+		break;
+
+	case I2OSWUL:
+		ret = i2o_cfg_swul(arg);
+		break;
+
+	case I2OSWDEL:
+		ret = i2o_cfg_swdel(arg);
+		break;
+
+	case I2OVALIDATE:
+		ret = i2o_cfg_validate(arg);
+		break;
+
+	case I2OEVTREG:
+		ret = i2o_cfg_evt_reg(arg, fp);
+		break;
+
+	case I2OEVTGET:
+		ret = i2o_cfg_evt_get(arg, fp);
+		break;
+
+#ifndef CONFIG_COMPAT
+	case I2OPASSTHRU:
+		ret = i2o_cfg_passthru(arg);
+		break;
+#endif
+
+	default:
+		osm_debug("unknown ioctl called!\n");
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int cfg_open(struct inode *inode, struct file *file)
+{
+	struct i2o_cfg_info *tmp =
+	    (struct i2o_cfg_info *)kmalloc(sizeof(struct i2o_cfg_info),
+					   GFP_KERNEL);
+	unsigned long flags;
+
+	if (!tmp)
+		return -ENOMEM;
+
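+	/*
+	 * A simple increasing id is stored in private_data and later used to
+	 * look this entry up in the open_files list.
+	 */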
+	file->private_data = (void *)(i2o_cfg_info_id++);
+	tmp->fp = file;
+	tmp->fasync = NULL;
+	tmp->q_id = (ulong) file->private_data;
+	tmp->q_len = 0;
+	tmp->q_in = 0;
+	tmp->q_out = 0;
+	tmp->q_lost = 0;
+	tmp->next = open_files;
+
+	spin_lock_irqsave(&i2o_config_lock, flags);
+	open_files = tmp;
+	spin_unlock_irqrestore(&i2o_config_lock, flags);
+
+	return 0;
+}
+
+static int cfg_fasync(int fd, struct file *fp, int on)
+{
+	ulong id = (ulong) fp->private_data;
+	struct i2o_cfg_info *p;
+
+	for (p = open_files; p; p = p->next)
+		if (p->q_id == id)
+			break;
+
+	if (!p)
+		return -EBADF;
+
+	return fasync_helper(fd, fp, on, &p->fasync);
+}
+
+static int cfg_release(struct inode *inode, struct file *file)
+{
+	ulong id = (ulong) file->private_data;
+	struct i2o_cfg_info *p1, *p2;
+	unsigned long flags;
+
+	lock_kernel();
+	p1 = p2 = NULL;
+
+	spin_lock_irqsave(&i2o_config_lock, flags);
+	for (p1 = open_files; p1;) {
+		if (p1->q_id == id) {
+
+			if (p1->fasync)
+				cfg_fasync(-1, file, 0);
+			if (p2)
+				p2->next = p1->next;
+			else
+				open_files = p1->next;
+
+			kfree(p1);
+			break;
+		}
+		p2 = p1;
+		p1 = p1->next;
+	}
+	spin_unlock_irqrestore(&i2o_config_lock, flags);
+	unlock_kernel();
+
+	return 0;
+}
+
+static struct file_operations config_fops = {
+	.owner = THIS_MODULE,
+	.llseek = no_llseek,
+	.ioctl = i2o_cfg_ioctl,
+	.open = cfg_open,
+	.release = cfg_release,
+	.fasync = cfg_fasync,
+};
+
+static struct miscdevice i2o_miscdev = {
+	I2O_MINOR,
+	"i2octl",
+	&config_fops
+};
+
+static int __init i2o_config_init(void)
+{
+	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+	spin_lock_init(&i2o_config_lock);
+
+	if (misc_register(&i2o_miscdev) < 0) {
+		osm_err("can't register device.\n");
+		return -EBUSY;
+	}
+	/*
+	 *      Install our handler
+	 */
+	if (i2o_driver_register(&i2o_config_driver)) {
+		osm_err("handler register failed.\n");
+		misc_deregister(&i2o_miscdev);
+		return -EBUSY;
+	}
+#ifdef CONFIG_COMPAT
+	register_ioctl32_conversion(I2OPASSTHRU32, i2o_cfg_passthru32);
+	register_ioctl32_conversion(I2OGETIOPS, (void *)sys_ioctl);
+#endif
+	return 0;
+}
+
+static void i2o_config_exit(void)
+{
+#ifdef CONFIG_COMPAT
+	unregister_ioctl32_conversion(I2OPASSTHRU32);
+	unregister_ioctl32_conversion(I2OGETIOPS);
+#endif
+	misc_deregister(&i2o_miscdev);
+	i2o_driver_unregister(&i2o_config_driver);
+}
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_config_init);
+module_exit(i2o_config_exit);
diff --git a/drivers/message/i2o/i2o_lan.h b/drivers/message/i2o/i2o_lan.h
new file mode 100644
index 0000000..561d633
--- /dev/null
+++ b/drivers/message/i2o/i2o_lan.h
@@ -0,0 +1,159 @@
+/*
+ *   	i2o_lan.h			I2O LAN Class definitions
+ *
+ *      I2O LAN CLASS OSM       	May 26th 2000
+ *
+ *      (C) Copyright 1999, 2000	University of Helsinki,
+ *					Department of Computer Science
+ *
+ *      This code is still under development / test.
+ *
+ *	Author:		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *			Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *			Taneli Vähäkangas <Taneli.Vahakangas@cs.Helsinki.FI>
+ */
+
+#ifndef _I2O_LAN_H
+#define _I2O_LAN_H
+
+/* Default values for tunable parameters first */
+
+#define I2O_LAN_MAX_BUCKETS_OUT 96
+#define I2O_LAN_BUCKET_THRESH	18	/* 9 buckets in one message */
+#define I2O_LAN_RX_COPYBREAK	200
+#define I2O_LAN_TX_TIMEOUT 	(1*HZ)
+#define I2O_LAN_TX_BATCH_MODE	2	/* 2=automatic, 1=on, 0=off */
+#define I2O_LAN_EVENT_MASK	0	/* 0=None, 0xFFC00002=All */
+
+/* LAN types */
+#define I2O_LAN_ETHERNET	0x0030
+#define I2O_LAN_100VG		0x0040
+#define I2O_LAN_TR		0x0050
+#define I2O_LAN_FDDI		0x0060
+#define I2O_LAN_FIBRE_CHANNEL	0x0070
+#define I2O_LAN_UNKNOWN		0x00000000
+
+/* Connector types */
+
+/* Ethernet */
+#define I2O_LAN_AUI		((I2O_LAN_ETHERNET << 4) + 0x00000001)
+#define I2O_LAN_10BASE5		((I2O_LAN_ETHERNET << 4) + 0x00000002)
+#define I2O_LAN_FIORL		((I2O_LAN_ETHERNET << 4) + 0x00000003)
+#define I2O_LAN_10BASE2		((I2O_LAN_ETHERNET << 4) + 0x00000004)
+#define I2O_LAN_10BROAD36	((I2O_LAN_ETHERNET << 4) + 0x00000005)
+#define I2O_LAN_10BASE_T	((I2O_LAN_ETHERNET << 4) + 0x00000006)
+#define I2O_LAN_10BASE_FP	((I2O_LAN_ETHERNET << 4) + 0x00000007)
+#define I2O_LAN_10BASE_FB	((I2O_LAN_ETHERNET << 4) + 0x00000008)
+#define I2O_LAN_10BASE_FL	((I2O_LAN_ETHERNET << 4) + 0x00000009)
+#define I2O_LAN_100BASE_TX	((I2O_LAN_ETHERNET << 4) + 0x0000000A)
+#define I2O_LAN_100BASE_FX	((I2O_LAN_ETHERNET << 4) + 0x0000000B)
+#define I2O_LAN_100BASE_T4	((I2O_LAN_ETHERNET << 4) + 0x0000000C)
+#define I2O_LAN_1000BASE_SX	((I2O_LAN_ETHERNET << 4) + 0x0000000D)
+#define I2O_LAN_1000BASE_LX	((I2O_LAN_ETHERNET << 4) + 0x0000000E)
+#define I2O_LAN_1000BASE_CX	((I2O_LAN_ETHERNET << 4) + 0x0000000F)
+#define I2O_LAN_1000BASE_T	((I2O_LAN_ETHERNET << 4) + 0x00000010)
+
+/* AnyLAN */
+#define I2O_LAN_100VG_ETHERNET	((I2O_LAN_100VG << 4) + 0x00000001)
+#define I2O_LAN_100VG_TR	((I2O_LAN_100VG << 4) + 0x00000002)
+
+/* Token Ring */
+#define I2O_LAN_4MBIT		((I2O_LAN_TR << 4) + 0x00000001)
+#define I2O_LAN_16MBIT		((I2O_LAN_TR << 4) + 0x00000002)
+
+/* FDDI */
+#define I2O_LAN_125MBAUD	((I2O_LAN_FDDI << 4) + 0x00000001)
+
+/* Fibre Channel */
+#define I2O_LAN_POINT_POINT	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000001)
+#define I2O_LAN_ARB_LOOP	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000002)
+#define I2O_LAN_PUBLIC_LOOP	((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000003)
+#define I2O_LAN_FABRIC		((I2O_LAN_FIBRE_CHANNEL << 4) + 0x00000004)
+
+#define I2O_LAN_EMULATION	0x00000F00
+#define I2O_LAN_OTHER		0x00000F01
+#define I2O_LAN_DEFAULT		0xFFFFFFFF
+
+/* LAN class functions */
+
+#define LAN_PACKET_SEND		0x3B
+#define LAN_SDU_SEND		0x3D
+#define LAN_RECEIVE_POST	0x3E
+#define LAN_RESET		0x35
+#define LAN_SUSPEND		0x37
+
+/* LAN DetailedStatusCode defines */
+#define I2O_LAN_DSC_SUCCESS			0x00
+#define I2O_LAN_DSC_DEVICE_FAILURE		0x01
+#define I2O_LAN_DSC_DESTINATION_NOT_FOUND	0x02
+#define	I2O_LAN_DSC_TRANSMIT_ERROR		0x03
+#define I2O_LAN_DSC_TRANSMIT_ABORTED		0x04
+#define I2O_LAN_DSC_RECEIVE_ERROR		0x05
+#define I2O_LAN_DSC_RECEIVE_ABORTED		0x06
+#define I2O_LAN_DSC_DMA_ERROR			0x07
+#define I2O_LAN_DSC_BAD_PACKET_DETECTED		0x08
+#define I2O_LAN_DSC_OUT_OF_MEMORY		0x09
+#define I2O_LAN_DSC_BUCKET_OVERRUN		0x0A
+#define I2O_LAN_DSC_IOP_INTERNAL_ERROR		0x0B
+#define I2O_LAN_DSC_CANCELED			0x0C
+#define I2O_LAN_DSC_INVALID_TRANSACTION_CONTEXT	0x0D
+#define I2O_LAN_DSC_DEST_ADDRESS_DETECTED	0x0E
+#define I2O_LAN_DSC_DEST_ADDRESS_OMITTED	0x0F
+#define I2O_LAN_DSC_PARTIAL_PACKET_RETURNED	0x10
+#define I2O_LAN_DSC_SUSPENDED			0x11
+
+struct i2o_packet_info {
+	u32 offset : 24;
+	u32 flags  : 8;
+	u32 len    : 24;
+	u32 status : 8;
+};
+
+struct i2o_bucket_descriptor {
+	u32 context; 			/* FIXME: 64bit support */
+	struct i2o_packet_info packet_info[1];
+};
+
+/* Event Indicator Mask Flags for LAN OSM */
+
+#define I2O_LAN_EVT_LINK_DOWN		0x01
+#define I2O_LAN_EVT_LINK_UP		0x02
+#define I2O_LAN_EVT_MEDIA_CHANGE 	0x04
+
+#include <linux/netdevice.h>
+#include <linux/fddidevice.h>
+
+struct i2o_lan_local {
+	u8 unit;
+	struct i2o_device *i2o_dev;
+
+	struct fddi_statistics stats;   /* see also struct net_device_stats */
+	unsigned short (*type_trans)(struct sk_buff *, struct net_device *);
+	atomic_t buckets_out;  		/* nbr of unused buckets on DDM */
+	atomic_t tx_out;		/* outstanding TXes */
+	u8 tx_count;  			/* packets in one TX message frame */
+	u16 tx_max_out;	   		/* DDM's Tx queue len */
+	u8 sgl_max;			/* max SGLs in one message frame */
+	u32 m;				/* IOP address of the batch msg frame */
+
+	struct work_struct i2o_batch_send_task;
+	int send_active;
+	struct sk_buff **i2o_fbl;	/* Free bucket list (to reuse skbs) */
+	int i2o_fbl_tail;
+	spinlock_t fbl_lock;
+
+	spinlock_t tx_lock;
+
+	u32 max_size_mc_table;		/* max number of multicast addresses */
+
+	/* LAN OSM configurable parameters are here: */
+
+	u16 max_buckets_out;		/* max nbr of buckets to send to DDM */
+	u16 bucket_thresh;		/* send more when this many used */
+	u16 rx_copybreak;
+
+	u8  tx_batch_mode;		/* Set when using batch mode sends */
+	u32 i2o_event_mask;		/* To turn on interesting event flags */
+};
+
+#endif /* _I2O_LAN_H */
diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
new file mode 100644
index 0000000..b176d0e
--- /dev/null
+++ b/drivers/message/i2o/i2o_proc.c
@@ -0,0 +1,2112 @@
+/*
+ *	procfs handler for Linux I2O subsystem
+ *
+ *	(c) Copyright 1999	Deepak Saxena
+ *
+ *	Originally written by Deepak Saxena(deepak@plexity.net)
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	This is an initial test release. The code is based on the design of the
+ *	ide procfs system (drivers/block/ide-proc.c). Some code taken from
+ *	i2o-core module by Alan Cox.
+ *
+ *	DISCLAIMER: This code is still under development/test and may cause
+ *	your system to behave unpredictably.  Use at your own discretion.
+ *
+ *
+ *	Fixes/additions:
+ *		Juha Sievänen (Juha.Sievanen@cs.Helsinki.FI),
+ *		Auvo Häkkinen (Auvo.Hakkinen@cs.Helsinki.FI)
+ *		University of Helsinki, Department of Computer Science
+ *			LAN entries
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>
+ *			Changes for new I2O API
+ */
+
+#define OSM_NAME	"proc-osm"
+#define OSM_VERSION	"$Rev$"
+#define OSM_DESCRIPTION	"I2O ProcFS OSM"
+
+#define I2O_MAX_MODULES 4
+// FIXME!
+#define FMT_U64_HEX "0x%08x%08x"
+#define U64_VAL(pu64) *((u32*)(pu64)+1), *((u32*)(pu64))
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/i2o.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/byteorder.h>
+
+/* Structure used to define /proc entries */
+typedef struct _i2o_proc_entry_t {
+	char *name;		/* entry name */
+	mode_t mode;		/* mode */
+	struct file_operations *fops;	/* file operations */
+} i2o_proc_entry;
+
+/* global I2O /proc/i2o entry */
+static struct proc_dir_entry *i2o_proc_dir_root;
+
+/* proc OSM driver struct */
+static struct i2o_driver i2o_proc_driver = {
+	.name = OSM_NAME,
+};
+
+static int print_serial_number(struct seq_file *seq, u8 * serialno, int max_len)
+{
+	int i;
+
+	/* 19990419 -sralston
+	 *      The I2O v1.5 (and v2.0 so far) "official specification"
+	 *      got serial numbers WRONG!
+	 *      Apparently, and despite what Section 3.4.4 says and
+	 *      Figure 3-35 shows (pg 3-39 in the pdf doc),
+	 *      the convention / consensus seems to be:
+	 *        + First byte is SNFormat
+	 *        + Second byte is SNLen (but only if SNFormat==7 (?))
+	 *        + (v2.0) SCSI+BS may use IEEE Registered (64 or 128 bit) format
+	 */
+	switch (serialno[0]) {
+	case I2O_SNFORMAT_BINARY:	/* Binary */
+		seq_printf(seq, "0x");
+		for (i = 0; i < serialno[1]; i++) {
+			seq_printf(seq, "%02X", serialno[2 + i]);
+		}
+		break;
+
+	case I2O_SNFORMAT_ASCII:	/* ASCII */
+		if (serialno[1] < ' ') {	/* printable or SNLen? */
+			/* sanity */
+			max_len =
+			    (max_len < serialno[1]) ? max_len : serialno[1];
+			serialno[1 + max_len] = '\0';
+
+			/* just print it */
+			seq_printf(seq, "%s", &serialno[2]);
+		} else {
+			/* print chars for specified length */
+			for (i = 0; i < serialno[1]; i++) {
+				seq_printf(seq, "%c", serialno[2 + i]);
+			}
+		}
+		break;
+
+	case I2O_SNFORMAT_UNICODE:	/* UNICODE */
+		seq_printf(seq, "UNICODE Format.  Can't Display\n");
+		break;
+
+	case I2O_SNFORMAT_LAN48_MAC:	/* LAN-48 MAC Address */
+		seq_printf(seq,
+			   "LAN-48 MAC address @ %02X:%02X:%02X:%02X:%02X:%02X",
+			   serialno[2], serialno[3],
+			   serialno[4], serialno[5], serialno[6], serialno[7]);
+		break;
+
+	case I2O_SNFORMAT_WAN:	/* WAN MAC Address */
+		/* FIXME: Figure out what a WAN access address looks like?? */
+		seq_printf(seq, "WAN Access Address");
+		break;
+
+/* plus new in v2.0 */
+	case I2O_SNFORMAT_LAN64_MAC:	/* LAN-64 MAC Address */
+		/* FIXME: Figure out what a LAN-64 address really looks like?? */
+		seq_printf(seq,
+			   "LAN-64 MAC address @ [?:%02X:%02X:?] %02X:%02X:%02X:%02X:%02X:%02X",
+			   serialno[8], serialno[9],
+			   serialno[2], serialno[3],
+			   serialno[4], serialno[5], serialno[6], serialno[7]);
+		break;
+
+	case I2O_SNFORMAT_DDM:	/* I2O DDM */
+		seq_printf(seq,
+			   "DDM: Tid=%03Xh, Rsvd=%04Xh, OrgId=%04Xh",
+			   *(u16 *) & serialno[2],
+			   *(u16 *) & serialno[4], *(u16 *) & serialno[6]);
+		break;
+
+	case I2O_SNFORMAT_IEEE_REG64:	/* IEEE Registered (64-bit) */
+	case I2O_SNFORMAT_IEEE_REG128:	/* IEEE Registered (128-bit) */
+		/* FIXME: Figure if this is even close?? */
+		seq_printf(seq,
+			   "IEEE NodeName(hi,lo)=(%08Xh:%08Xh), PortName(hi,lo)=(%08Xh:%08Xh)\n",
+			   *(u32 *) & serialno[2],
+			   *(u32 *) & serialno[6],
+			   *(u32 *) & serialno[10], *(u32 *) & serialno[14]);
+		break;
+
+	case I2O_SNFORMAT_UNKNOWN:	/* Unknown 0    */
+	case I2O_SNFORMAT_UNKNOWN2:	/* Unknown 0xff */
+	default:
+		seq_printf(seq, "Unknown data format (0x%02x)", serialno[0]);
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *	i2o_get_class_name - 	do i2o class name lookup
+ *	@class: class number
+ *
+ *	Return a descriptive string for an i2o class
+ */
+static const char *i2o_get_class_name(int class)
+{
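+	/* index 16 selects the trailing "Unknown" entry of the table below */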
+	int idx = 16;
+	static char *i2o_class_name[] = {
+		"Executive",
+		"Device Driver Module",
+		"Block Device",
+		"Tape Device",
+		"LAN Interface",
+		"WAN Interface",
+		"Fibre Channel Port",
+		"Fibre Channel Device",
+		"SCSI Device",
+		"ATE Port",
+		"ATE Device",
+		"Floppy Controller",
+		"Floppy Device",
+		"Secondary Bus Port",
+		"Peer Transport Agent",
+		"Peer Transport",
+		"Unknown"
+	};
+
+	switch (class & 0xfff) {
+	case I2O_CLASS_EXECUTIVE:
+		idx = 0;
+		break;
+	case I2O_CLASS_DDM:
+		idx = 1;
+		break;
+	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+		idx = 2;
+		break;
+	case I2O_CLASS_SEQUENTIAL_STORAGE:
+		idx = 3;
+		break;
+	case I2O_CLASS_LAN:
+		idx = 4;
+		break;
+	case I2O_CLASS_WAN:
+		idx = 5;
+		break;
+	case I2O_CLASS_FIBRE_CHANNEL_PORT:
+		idx = 6;
+		break;
+	case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
+		idx = 7;
+		break;
+	case I2O_CLASS_SCSI_PERIPHERAL:
+		idx = 8;
+		break;
+	case I2O_CLASS_ATE_PORT:
+		idx = 9;
+		break;
+	case I2O_CLASS_ATE_PERIPHERAL:
+		idx = 10;
+		break;
+	case I2O_CLASS_FLOPPY_CONTROLLER:
+		idx = 11;
+		break;
+	case I2O_CLASS_FLOPPY_DEVICE:
+		idx = 12;
+		break;
+	case I2O_CLASS_BUS_ADAPTER_PORT:
+		idx = 13;
+		break;
+	case I2O_CLASS_PEER_TRANSPORT_AGENT:
+		idx = 14;
+		break;
+	case I2O_CLASS_PEER_TRANSPORT:
+		idx = 15;
+		break;
+	}
+
+	return i2o_class_name[idx];
+}
+
+#define SCSI_TABLE_SIZE	13
+static char *scsi_devices[] = {
+	"Direct-Access Read/Write",
+	"Sequential-Access Storage",
+	"Printer",
+	"Processor",
+	"WORM Device",
+	"CD-ROM Device",
+	"Scanner Device",
+	"Optical Memory Device",
+	"Medium Changer Device",
+	"Communications Device",
+	"Graphics Art Pre-Press Device",
+	"Graphics Art Pre-Press Device",
+	"Array Controller Device"
+};
+
+static char *chtostr(u8 * chars, int n)
+{
+	char tmp[256];
+	tmp[0] = 0;
+	return strncat(tmp, (char *)chars, n);
+}
+
+static int i2o_report_query_status(struct seq_file *seq, int block_status,
+				   char *group)
+{
+	switch (block_status) {
+	case -ETIMEDOUT:
+		return seq_printf(seq, "Timeout reading group %s.\n", group);
+	case -ENOMEM:
+		return seq_printf(seq, "No free memory to read the table.\n");
+	case -I2O_PARAMS_STATUS_INVALID_GROUP_ID:
+		return seq_printf(seq, "Group %s not supported.\n", group);
+	default:
+		return seq_printf(seq,
+				  "Error reading group %s. BlockStatus 0x%02X\n",
+				  group, -block_status);
+	}
+}
+
+static char *bus_strings[] = {
+	"Local Bus",
+	"ISA",
+	"EISA",
+	"MCA",
+	"PCI",
+	"PCMCIA",
+	"NUBUS",
+	"CARDBUS"
+};
+
+static int i2o_seq_show_hrt(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	i2o_hrt *hrt = (i2o_hrt *) c->hrt.virt;
+	u32 bus;
+	int i;
+
+	if (hrt->hrt_version) {
+		seq_printf(seq,
+			   "HRT table for controller is too new a version.\n");
+		return 0;
+	}
+
+	seq_printf(seq, "HRT has %d entries of %d bytes each.\n",
+		   hrt->num_entries, hrt->entry_len << 2);
+
+	for (i = 0; i < hrt->num_entries; i++) {
+		seq_printf(seq, "Entry %d:\n", i);
+		seq_printf(seq, "   Adapter ID: %0#10x\n",
+			   hrt->hrt_entry[i].adapter_id);
+		seq_printf(seq, "   Controlling tid: %0#6x\n",
+			   hrt->hrt_entry[i].parent_tid);
+
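+		/* a bus_type of 0x80 marks an unknown bus, handled in the else branch */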
+		if (hrt->hrt_entry[i].bus_type != 0x80) {
+			bus = hrt->hrt_entry[i].bus_type;
+			seq_printf(seq, "   %s Information\n",
+				   bus_strings[bus]);
+
+			switch (bus) {
+			case I2O_BUS_LOCAL:
+				seq_printf(seq, "     IOBase: %0#6x,",
+					   hrt->hrt_entry[i].bus.local_bus.
+					   LbBaseIOPort);
+				seq_printf(seq, " MemoryBase: %0#10x\n",
+					   hrt->hrt_entry[i].bus.local_bus.
+					   LbBaseMemoryAddress);
+				break;
+
+			case I2O_BUS_ISA:
+				seq_printf(seq, "     IOBase: %0#6x,",
+					   hrt->hrt_entry[i].bus.isa_bus.
+					   IsaBaseIOPort);
+				seq_printf(seq, " MemoryBase: %0#10x,",
+					   hrt->hrt_entry[i].bus.isa_bus.
+					   IsaBaseMemoryAddress);
+				seq_printf(seq, " CSN: %0#4x,",
+					   hrt->hrt_entry[i].bus.isa_bus.CSN);
+				break;
+
+			case I2O_BUS_EISA:
+				seq_printf(seq, "     IOBase: %0#6x,",
+					   hrt->hrt_entry[i].bus.eisa_bus.
+					   EisaBaseIOPort);
+				seq_printf(seq, " MemoryBase: %0#10x,",
+					   hrt->hrt_entry[i].bus.eisa_bus.
+					   EisaBaseMemoryAddress);
+				seq_printf(seq, " Slot: %0#4x,",
+					   hrt->hrt_entry[i].bus.eisa_bus.
+					   EisaSlotNumber);
+				break;
+
+			case I2O_BUS_MCA:
+				seq_printf(seq, "     IOBase: %0#6x,",
+					   hrt->hrt_entry[i].bus.mca_bus.
+					   McaBaseIOPort);
+				seq_printf(seq, " MemoryBase: %0#10x,",
+					   hrt->hrt_entry[i].bus.mca_bus.
+					   McaBaseMemoryAddress);
+				seq_printf(seq, " Slot: %0#4x,",
+					   hrt->hrt_entry[i].bus.mca_bus.
+					   McaSlotNumber);
+				break;
+
+			case I2O_BUS_PCI:
+				seq_printf(seq, "     Bus: %0#4x",
+					   hrt->hrt_entry[i].bus.pci_bus.
+					   PciBusNumber);
+				seq_printf(seq, " Dev: %0#4x",
+					   hrt->hrt_entry[i].bus.pci_bus.
+					   PciDeviceNumber);
+				seq_printf(seq, " Func: %0#4x",
+					   hrt->hrt_entry[i].bus.pci_bus.
+					   PciFunctionNumber);
+				seq_printf(seq, " Vendor: %0#6x",
+					   hrt->hrt_entry[i].bus.pci_bus.
+					   PciVendorID);
+				seq_printf(seq, " Device: %0#6x\n",
+					   hrt->hrt_entry[i].bus.pci_bus.
+					   PciDeviceID);
+				break;
+
+			default:
+				seq_printf(seq, "      Unsupported Bus Type\n");
+			}
+		} else
+			seq_printf(seq, "   Unknown Bus Type\n");
+	}
+
+	return 0;
+}
+
+static int i2o_seq_show_lct(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	i2o_lct *lct = (i2o_lct *) c->lct;
+	int entries;
+	int i;
+
+#define BUS_TABLE_SIZE 3
+	static char *bus_ports[] = {
+		"Generic Bus",
+		"SCSI Bus",
+		"Fibre Channel Bus"
+	};
+
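+	/* the LCT header occupies 3 words and each table entry 9 words */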
+	entries = (lct->table_size - 3) / 9;
+
+	seq_printf(seq, "LCT contains %d %s\n", entries,
+		   entries == 1 ? "entry" : "entries");
+	if (lct->boot_tid)
+		seq_printf(seq, "Boot Device @ ID %d\n", lct->boot_tid);
+
+	seq_printf(seq, "Current Change Indicator: %#10x\n", lct->change_ind);
+
+	for (i = 0; i < entries; i++) {
+		seq_printf(seq, "Entry %d\n", i);
+		seq_printf(seq, "  Class, SubClass  : %s",
+			   i2o_get_class_name(lct->lct_entry[i].class_id));
+
+		/*
+		 *      Classes which we'll print subclass info for
+		 */
+		switch (lct->lct_entry[i].class_id & 0xFFF) {
+		case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+			switch (lct->lct_entry[i].sub_class) {
+			case 0x00:
+				seq_printf(seq, ", Direct-Access Read/Write");
+				break;
+
+			case 0x04:
+				seq_printf(seq, ", WORM Drive");
+				break;
+
+			case 0x05:
+				seq_printf(seq, ", CD-ROM Drive");
+				break;
+
+			case 0x07:
+				seq_printf(seq, ", Optical Memory Device");
+				break;
+
+			default:
+				seq_printf(seq, ", Unknown (0x%02x)",
+					   lct->lct_entry[i].sub_class);
+				break;
+			}
+			break;
+
+		case I2O_CLASS_LAN:
+			switch (lct->lct_entry[i].sub_class & 0xFF) {
+			case 0x30:
+				seq_printf(seq, ", Ethernet");
+				break;
+
+			case 0x40:
+				seq_printf(seq, ", 100base VG");
+				break;
+
+			case 0x50:
+				seq_printf(seq, ", IEEE 802.5/Token-Ring");
+				break;
+
+			case 0x60:
+				seq_printf(seq, ", ANSI X3T9.5 FDDI");
+				break;
+
+			case 0x70:
+				seq_printf(seq, ", Fibre Channel");
+				break;
+
+			default:
+				seq_printf(seq, ", Unknown Sub-Class (0x%02x)",
+					   lct->lct_entry[i].sub_class & 0xFF);
+				break;
+			}
+			break;
+
+		case I2O_CLASS_SCSI_PERIPHERAL:
+			if (lct->lct_entry[i].sub_class < SCSI_TABLE_SIZE)
+				seq_printf(seq, ", %s",
+					   scsi_devices[lct->lct_entry[i].
+							sub_class]);
+			else
+				seq_printf(seq, ", Unknown Device Type");
+			break;
+
+		case I2O_CLASS_BUS_ADAPTER_PORT:
+			if (lct->lct_entry[i].sub_class < BUS_TABLE_SIZE)
+				seq_printf(seq, ", %s",
+					   bus_ports[lct->lct_entry[i].
+						     sub_class]);
+			else
+				seq_printf(seq, ", Unknown Bus Type");
+			break;
+		}
+		seq_printf(seq, "\n");
+
+		seq_printf(seq, "  Local TID        : 0x%03x\n",
+			   lct->lct_entry[i].tid);
+		seq_printf(seq, "  User TID         : 0x%03x\n",
+			   lct->lct_entry[i].user_tid);
+		seq_printf(seq, "  Parent TID       : 0x%03x\n",
+			   lct->lct_entry[i].parent_tid);
+		seq_printf(seq, "  Identity Tag     : 0x%x%x%x%x%x%x%x%x\n",
+			   lct->lct_entry[i].identity_tag[0],
+			   lct->lct_entry[i].identity_tag[1],
+			   lct->lct_entry[i].identity_tag[2],
+			   lct->lct_entry[i].identity_tag[3],
+			   lct->lct_entry[i].identity_tag[4],
+			   lct->lct_entry[i].identity_tag[5],
+			   lct->lct_entry[i].identity_tag[6],
+			   lct->lct_entry[i].identity_tag[7]);
+		seq_printf(seq, "  Change Indicator : %0#10x\n",
+			   lct->lct_entry[i].change_ind);
+		seq_printf(seq, "  Event Capab Mask : %0#10x\n",
+			   lct->lct_entry[i].device_flags);
+	}
+
+	return 0;
+}
+
+static int i2o_seq_show_status(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	char prodstr[25];
+	int version;
+	i2o_status_block *sb = c->status_block.virt;
+
+	i2o_status_get(c);	// reread the status block
+
+	seq_printf(seq, "Organization ID        : %0#6x\n", sb->org_id);
+
+	version = sb->i2o_version;
+
+/* FIXME for Spec 2.0
+	if (version == 0x02) {
+		seq_printf(seq, "Lowest I2O version supported: ");
+		switch(workspace[2]) {
+			case 0x00:
+				seq_printf(seq, "1.0\n");
+				break;
+			case 0x01:
+				seq_printf(seq, "1.5\n");
+				break;
+			case 0x02:
+				seq_printf(seq, "2.0\n");
+				break;
+		}
+
+		seq_printf(seq, "Highest I2O version supported: ");
+		switch(workspace[3]) {
+			case 0x00:
+				seq_printf(seq, "1.0\n");
+				break;
+			case 0x01:
+				seq_printf(seq, "1.5\n");
+				break;
+			case 0x02:
+				seq_printf(seq, "2.0\n");
+				break;
+		}
+	}
+*/
+	seq_printf(seq, "IOP ID                 : %0#5x\n", sb->iop_id);
+	seq_printf(seq, "Host Unit ID           : %0#6x\n", sb->host_unit_id);
+	seq_printf(seq, "Segment Number         : %0#5x\n", sb->segment_number);
+
+	seq_printf(seq, "I2O version            : ");
+	switch (version) {
+	case 0x00:
+		seq_printf(seq, "1.0\n");
+		break;
+	case 0x01:
+		seq_printf(seq, "1.5\n");
+		break;
+	case 0x02:
+		seq_printf(seq, "2.0\n");
+		break;
+	default:
+		seq_printf(seq, "Unknown version\n");
+	}
+
+	seq_printf(seq, "IOP State              : ");
+	switch (sb->iop_state) {
+	case 0x01:
+		seq_printf(seq, "INIT\n");
+		break;
+
+	case 0x02:
+		seq_printf(seq, "RESET\n");
+		break;
+
+	case 0x04:
+		seq_printf(seq, "HOLD\n");
+		break;
+
+	case 0x05:
+		seq_printf(seq, "READY\n");
+		break;
+
+	case 0x08:
+		seq_printf(seq, "OPERATIONAL\n");
+		break;
+
+	case 0x10:
+		seq_printf(seq, "FAILED\n");
+		break;
+
+	case 0x11:
+		seq_printf(seq, "FAULTED\n");
+		break;
+
+	default:
+		seq_printf(seq, "Unknown\n");
+		break;
+	}
+
+	seq_printf(seq, "Messenger Type         : ");
+	switch (sb->msg_type) {
+	case 0x00:
+		seq_printf(seq, "Memory mapped\n");
+		break;
+	case 0x01:
+		seq_printf(seq, "Memory mapped only\n");
+		break;
+	case 0x02:
+		seq_printf(seq, "Remote only\n");
+		break;
+	case 0x03:
+		seq_printf(seq, "Memory mapped and remote\n");
+		break;
+	default:
+		seq_printf(seq, "Unknown\n");
+	}
+
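+	/* inbound_frame_size is reported in 32-bit words, hence the << 2 */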
+	seq_printf(seq, "Inbound Frame Size     : %d bytes\n",
+		   sb->inbound_frame_size << 2);
+	seq_printf(seq, "Max Inbound Frames     : %d\n",
+		   sb->max_inbound_frames);
+	seq_printf(seq, "Current Inbound Frames : %d\n",
+		   sb->cur_inbound_frames);
+	seq_printf(seq, "Max Outbound Frames    : %d\n",
+		   sb->max_outbound_frames);
+
+	/* Spec doesn't say if NULL terminated or not... */
+	memcpy(prodstr, sb->product_id, 24);
+	prodstr[24] = '\0';
+	seq_printf(seq, "Product ID             : %s\n", prodstr);
+	seq_printf(seq, "Expected LCT Size      : %d bytes\n",
+		   sb->expected_lct_size);
+
+	seq_printf(seq, "IOP Capabilities\n");
+	seq_printf(seq, "    Context Field Size Support : ");
+	switch (sb->iop_capabilities & 0x00000003) {
+	case 0:
+		seq_printf(seq, "Supports only 32-bit context fields\n");
+		break;
+	case 1:
+		seq_printf(seq, "Supports only 64-bit context fields\n");
+		break;
+	case 2:
+		seq_printf(seq, "Supports 32-bit and 64-bit context fields, "
+			   "but not concurrently\n");
+		break;
+	case 3:
+		seq_printf(seq, "Supports 32-bit and 64-bit context fields "
+			   "concurrently\n");
+		break;
+	default:
+		seq_printf(seq, "0x%08x\n", sb->iop_capabilities);
+	}
+	seq_printf(seq, "    Current Context Field Size : ");
+	switch (sb->iop_capabilities & 0x0000000C) {
+	case 0:
+		seq_printf(seq, "not configured\n");
+		break;
+	case 4:
+		seq_printf(seq, "Supports only 32-bit context fields\n");
+		break;
+	case 8:
+		seq_printf(seq, "Supports only 64-bit context fields\n");
+		break;
+	case 12:
+		seq_printf(seq, "Supports both 32-bit and 64-bit context fields "
+			   "concurrently\n");
+		break;
+	default:
+		seq_printf(seq, "\n");
+	}
+	seq_printf(seq, "    Inbound Peer Support       : %s\n",
+		   (sb->
+		    iop_capabilities & 0x00000010) ? "Supported" :
+		   "Not supported");
+	seq_printf(seq, "    Outbound Peer Support      : %s\n",
+		   (sb->
+		    iop_capabilities & 0x00000020) ? "Supported" :
+		   "Not supported");
+	seq_printf(seq, "    Peer to Peer Support       : %s\n",
+		   (sb->
+		    iop_capabilities & 0x00000040) ? "Supported" :
+		   "Not supported");
+
+	seq_printf(seq, "Desired private memory size   : %d kB\n",
+		   sb->desired_mem_size >> 10);
+	seq_printf(seq, "Allocated private memory size : %d kB\n",
+		   sb->current_mem_size >> 10);
+	seq_printf(seq, "Private memory base address   : %0#10x\n",
+		   sb->current_mem_base);
+	seq_printf(seq, "Desired private I/O size      : %d kB\n",
+		   sb->desired_io_size >> 10);
+	seq_printf(seq, "Allocated private I/O size    : %d kB\n",
+		   sb->current_io_size >> 10);
+	seq_printf(seq, "Private I/O base address      : %0#10x\n",
+		   sb->current_io_base);
+
+	return 0;
+}
+
+static int i2o_seq_show_hw(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	static u32 work32[5];
+	static u8 *work8 = (u8 *) work32;
+	static u16 *work16 = (u16 *) work32;
+	int token;
+	u32 hwcap;
+
+	static char *cpu_table[] = {
+		"Intel 80960 series",
+		"AMD2900 series",
+		"Motorola 68000 series",
+		"ARM series",
+		"MIPS series",
+		"Sparc series",
+		"PowerPC series",
+		"Intel x86 series"
+	};
+
+	token =
+	    i2o_parm_field_get(c->exec, 0x0000, -1, &work32, sizeof(work32));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0x0000 IOP Hardware");
+		return 0;
+	}
+
+	seq_printf(seq, "I2O Vendor ID    : %0#6x\n", work16[0]);
+	seq_printf(seq, "Product ID       : %0#6x\n", work16[1]);
+	seq_printf(seq, "CPU              : ");
+	if (work8[16] > 8)
+		seq_printf(seq, "Unknown\n");
+	else
+		seq_printf(seq, "%s\n", cpu_table[work8[16]]);
+	/* Anyone using ProcessorVersion? */
+
+	seq_printf(seq, "RAM              : %dkB\n", work32[1] >> 10);
+	seq_printf(seq, "Non-Volatile Mem : %dkB\n", work32[2] >> 10);
+
+	hwcap = work32[3];
+	seq_printf(seq, "Capabilities : 0x%08x\n", hwcap);
+	seq_printf(seq, "   [%s] Self booting\n",
+		   (hwcap & 0x00000001) ? "+" : "-");
+	seq_printf(seq, "   [%s] Upgradable IRTOS\n",
+		   (hwcap & 0x00000002) ? "+" : "-");
+	seq_printf(seq, "   [%s] Supports downloading DDMs\n",
+		   (hwcap & 0x00000004) ? "+" : "-");
+	seq_printf(seq, "   [%s] Supports installing DDMs\n",
+		   (hwcap & 0x00000008) ? "+" : "-");
+	seq_printf(seq, "   [%s] Battery-backed RAM\n",
+		   (hwcap & 0x00000010) ? "+" : "-");
+
+	return 0;
+}
+
+/* Executive group 0003h - Executing DDM List (table) */
+static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	int token;
+	int i;
+
+	typedef struct _i2o_exec_execute_ddm_table {
+		u16 ddm_tid;
+		u8 module_type;
+		u8 reserved;
+		u16 i2o_vendor_id;
+		u16 module_id;
+		u8 module_name_version[28];
+		u32 data_size;
+		u32 code_size;
+	} i2o_exec_execute_ddm_table;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		i2o_exec_execute_ddm_table ddm_table[I2O_MAX_MODULES];
+	} *result;
+
+	i2o_exec_execute_ddm_table ddm_table;
+
+	result = kmalloc(sizeof(*result), GFP_KERNEL);
+	if (!result)
+		return -ENOMEM;
+
+	token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0003, -1,
+				   NULL, 0, result, sizeof(*result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0x0003 Executing DDM List");
+		goto out;
+	}
+
+	seq_printf(seq,
+		   "Tid   Module_type     Vendor Mod_id  Module_name             Vrs  Data_size Code_size\n");
+	ddm_table = result->ddm_table[0];
+
+	for (i = 0; i < result->row_count; ddm_table = result->ddm_table[++i]) {
+		seq_printf(seq, "0x%03x ", ddm_table.ddm_tid & 0xFFF);
+
+		switch (ddm_table.module_type) {
+		case 0x01:
+			seq_printf(seq, "Downloaded DDM  ");
+			break;
+		case 0x22:
+			seq_printf(seq, "Embedded DDM    ");
+			break;
+		default:
+			seq_printf(seq, "                ");
+		}
+
+		seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
+		seq_printf(seq, "%-#8x", ddm_table.module_id);
+		seq_printf(seq, "%-29s",
+			   chtostr(ddm_table.module_name_version, 28));
+		seq_printf(seq, "%9d  ", ddm_table.data_size);
+		seq_printf(seq, "%8d", ddm_table.code_size);
+
+		seq_printf(seq, "\n");
+	}
+      out:
+	kfree(result);
+	return 0;
+}
+
+/* Executive group 0004h - Driver Store (scalar) */
+static int i2o_seq_show_driver_store(struct seq_file *seq, void *v)
+{
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	u32 work32[8];
+	int token;
+
+	token =
+	    i2o_parm_field_get(c->exec, 0x0004, -1, &work32, sizeof(work32));
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0x0004 Driver Store");
+		return 0;
+	}
+
+	seq_printf(seq, "Module limit  : %d\n"
+		   "Module count  : %d\n"
+		   "Current space : %d kB\n"
+		   "Free space    : %d kB\n",
+		   work32[0], work32[1], work32[2] >> 10, work32[3] >> 10);
+
+	return 0;
+}
+
+/* Executive group 0005h - Driver Store Table (table) */
+static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
+{
+	typedef struct _i2o_driver_store {
+		u16 stored_ddm_index;
+		u8 module_type;
+		u8 reserved;
+		u16 i2o_vendor_id;
+		u16 module_id;
+		u8 module_name_version[28];
+		u8 date[8];
+		u32 module_size;
+		u32 mpb_size;
+		u32 module_flags;
+	} i2o_driver_store_table;
+
+	struct i2o_controller *c = (struct i2o_controller *)seq->private;
+	int token;
+	int i;
+
+	typedef struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		i2o_driver_store_table dst[I2O_MAX_MODULES];
+	} i2o_driver_result_table;
+
+	i2o_driver_result_table *result;
+	i2o_driver_store_table *dst;
+
+	result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
+	if (result == NULL)
+		return -ENOMEM;
+
+	token = i2o_parm_table_get(c->exec, I2O_PARAMS_TABLE_GET, 0x0005, -1,
+				   NULL, 0, result, sizeof(*result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0x0005 DRIVER STORE TABLE");
+		kfree(result);
+		return 0;
+	}
+
+	seq_printf(seq,
+		   "#  Module_type     Vendor Mod_id  Module_name             Vrs  "
+		   "Date     Mod_size Par_size Flags\n");
+	for (i = 0, dst = &result->dst[0]; i < result->row_count;
+	     dst = &result->dst[++i]) {
+		seq_printf(seq, "%-3d", dst->stored_ddm_index);
+		switch (dst->module_type) {
+		case 0x01:
+			seq_printf(seq, "Downloaded DDM  ");
+			break;
+		case 0x22:
+			seq_printf(seq, "Embedded DDM    ");
+			break;
+		default:
+			seq_printf(seq, "                ");
+		}
+
+		seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
+		seq_printf(seq, "%-#8x", dst->module_id);
+		seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
+		seq_printf(seq, "%-9s", chtostr(dst->date, 8));
+		seq_printf(seq, "%8d ", dst->module_size);
+		seq_printf(seq, "%8d ", dst->mpb_size);
+		seq_printf(seq, "0x%04x", dst->module_flags);
+		seq_printf(seq, "\n");
+	}
+
+	kfree(result);
+	return 0;
+}
+
+/* Generic group F000h - Params Descriptor (table) */
+static int i2o_seq_show_groups(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+	u8 properties;
+
+	typedef struct _i2o_group_info {
+		u16 group_number;
+		u16 field_count;
+		u16 row_count;
+		u8 properties;
+		u8 reserved;
+	} i2o_group_info;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		i2o_group_info group[256];
+	} *result;
+
+	result = kmalloc(sizeof(*result), GFP_KERNEL);
+	if (!result)
+		return -ENOMEM;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF000, -1, NULL, 0,
+				   result, sizeof(*result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF000 Params Descriptor");
+		goto out;
+	}
+
+	seq_printf(seq,
+		   "#  Group   FieldCount RowCount Type   Add Del Clear\n");
+
+	for (i = 0; i < result->row_count; i++) {
+		seq_printf(seq, "%-3d", i);
+		seq_printf(seq, "0x%04X ", result->group[i].group_number);
+		seq_printf(seq, "%10d ", result->group[i].field_count);
+		seq_printf(seq, "%8d ", result->group[i].row_count);
+
+		properties = result->group[i].properties;
+		if (properties & 0x1)
+			seq_printf(seq, "Table  ");
+		else
+			seq_printf(seq, "Scalar ");
+		if (properties & 0x2)
+			seq_printf(seq, " + ");
+		else
+			seq_printf(seq, " - ");
+		if (properties & 0x4)
+			seq_printf(seq, "  + ");
+		else
+			seq_printf(seq, "  - ");
+		if (properties & 0x8)
+			seq_printf(seq, "  + ");
+		else
+			seq_printf(seq, "  - ");
+
+		seq_printf(seq, "\n");
+	}
+
+	if (result->more_flag)
+		seq_printf(seq, "There is more...\n");
+      out:
+	kfree(result);
+	return 0;
+}
+
+/* Generic group F001h - Physical Device Table (table) */
+static int i2o_seq_show_phys_device(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		u32 adapter_id[64];
+	} result;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF001, -1, NULL, 0,
+				   &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0xF001 Physical Device Table");
+		return 0;
+	}
+
+	if (result.row_count)
+		seq_printf(seq, "#  AdapterId\n");
+
+	for (i = 0; i < result.row_count; i++) {
+		seq_printf(seq, "%-2d", i);
+		seq_printf(seq, "%#7x\n", result.adapter_id[i]);
+	}
+
+	if (result.more_flag)
+		seq_printf(seq, "There is more...\n");
+
+	return 0;
+}
+
+/* Generic group F002h - Claimed Table (table) */
+static int i2o_seq_show_claimed(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		u16 claimed_tid[64];
+	} result;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF002, -1, NULL, 0,
+				   &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF002 Claimed Table");
+		return 0;
+	}
+
+	if (result.row_count)
+		seq_printf(seq, "#  ClaimedTid\n");
+
+	for (i = 0; i < result.row_count; i++) {
+		seq_printf(seq, "%-2d", i);
+		seq_printf(seq, "%#7x\n", result.claimed_tid[i]);
+	}
+
+	if (result.more_flag)
+		seq_printf(seq, "There is more...\n");
+
+	return 0;
+}
+
+/* Generic group F003h - User Table (table) */
+static int i2o_seq_show_users(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+
+	typedef struct _i2o_user_table {
+		u16 instance;
+		u16 user_tid;
+		u8 claim_type;
+		u8 reserved1;
+		u16 reserved2;
+	} i2o_user_table;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		i2o_user_table user[64];
+	} *result;
+
+	result = kmalloc(sizeof(*result), GFP_KERNEL);
+	if (!result)
+		return -ENOMEM;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF003, -1, NULL, 0,
+				   result, sizeof(*result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF003 User Table");
+		goto out;
+	}
+
+	seq_printf(seq, "#  Instance UserTid ClaimType\n");
+
+	for (i = 0; i < result->row_count; i++) {
+		seq_printf(seq, "%-3d", i);
+		seq_printf(seq, "%#8x ", result->user[i].instance);
+		seq_printf(seq, "%#7x ", result->user[i].user_tid);
+		seq_printf(seq, "%#9x\n", result->user[i].claim_type);
+	}
+
+	if (result->more_flag)
+		seq_printf(seq, "There is more...\n");
+      out:
+	kfree(result);
+	return 0;
+}
+
+/* Generic group F005h - Private message extensions (table) (optional) */
+static int i2o_seq_show_priv_msgs(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+
+	typedef struct _i2o_private {
+		u16 ext_instance;
+		u16 organization_id;
+		u16 x_function_code;
+	} i2o_private;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		i2o_private extension[64];
+	} result;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF005, -1, NULL, 0,
+				   &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0xF005 Private Message Extensions (optional)");
+		return 0;
+	}
+
+	seq_printf(seq, "Instance#  OrgId  FunctionCode\n");
+
+	for (i = 0; i < result.row_count; i++) {
+		seq_printf(seq, "%0#9x ", result.extension[i].ext_instance);
+		seq_printf(seq, "%0#6x ", result.extension[i].organization_id);
+		seq_printf(seq, "%0#6x", result.extension[i].x_function_code);
+
+		seq_printf(seq, "\n");
+	}
+
+	if (result.more_flag)
+		seq_printf(seq, "There is more...\n");
+
+	return 0;
+}
+
+/* Generic group F006h - Authorized User Table (table) */
+static int i2o_seq_show_authorized_users(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+	int i;
+
+	struct {
+		u16 result_count;
+		u16 pad;
+		u16 block_size;
+		u8 block_status;
+		u8 error_info_size;
+		u16 row_count;
+		u16 more_flag;
+		u32 alternate_tid[64];
+	} result;
+
+	token = i2o_parm_table_get(d, I2O_PARAMS_TABLE_GET, 0xF006, -1, NULL, 0,
+				   &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0xF006 Authorized User Table");
+		return 0;
+	}
+
+	if (result.row_count)
+		seq_printf(seq, "#  AlternateTid\n");
+
+	for (i = 0; i < result.row_count; i++) {
+		seq_printf(seq, "%-2d", i);
+		seq_printf(seq, "%#7x\n", result.alternate_tid[i]);
+	}
+
+	if (result.more_flag)
+		seq_printf(seq, "There is more...\n");
+
+	return 0;
+}
+
+/* Generic group F100h - Device Identity (scalar) */
+static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	static u32 work32[128];	// allow for "stuff" + up to 256 byte (max) serial number
+	// == (allow) 512d bytes (max)
+	static u16 *work16 = (u16 *) work32;
+	int token;
+
+	token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF100 Device Identity");
+		return 0;
+	}
+
+	seq_printf(seq, "Device Class  : %s\n", i2o_get_class_name(work16[0]));
+	seq_printf(seq, "Owner TID     : %0#5x\n", work16[2]);
+	seq_printf(seq, "Parent TID    : %0#5x\n", work16[3]);
+	seq_printf(seq, "Vendor info   : %s\n",
+		   chtostr((u8 *) (work32 + 2), 16));
+	seq_printf(seq, "Product info  : %s\n",
+		   chtostr((u8 *) (work32 + 6), 16));
+	seq_printf(seq, "Description   : %s\n",
+		   chtostr((u8 *) (work32 + 10), 16));
+	seq_printf(seq, "Product rev.  : %s\n",
+		   chtostr((u8 *) (work32 + 14), 8));
+
+	seq_printf(seq, "Serial number : ");
+	print_serial_number(seq, (u8 *) (work32 + 16),
+			    /* allow for SNLen plus
+			     * possible trailing '\0'
+			     */
+			    sizeof(work32) - (16 * sizeof(u32)) - 2);
+	seq_printf(seq, "\n");
+
+	return 0;
+}
+
+static int i2o_seq_show_dev_name(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+
+	seq_printf(seq, "%s\n", d->device.bus_id);
+
+	return 0;
+}
+
+/* Generic group F101h - DDM Identity (scalar) */
+static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+
+	struct {
+		u16 ddm_tid;
+		u8 module_name[24];
+		u8 module_rev[8];
+		u8 sn_format;
+		u8 serial_number[12];
+		u8 pad[256];	// allow up to 256 byte (max) serial number
+	} result;
+
+	token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF101 DDM Identity");
+		return 0;
+	}
+
+	seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
+	seq_printf(seq, "Module name         : %s\n",
+		   chtostr(result.module_name, 24));
+	seq_printf(seq, "Module revision     : %s\n",
+		   chtostr(result.module_rev, 8));
+
+	seq_printf(seq, "Serial number       : ");
+	print_serial_number(seq, result.serial_number, sizeof(result) - 36);
+	/* allow for SNLen plus possible trailing '\0' */
+
+	seq_printf(seq, "\n");
+
+	return 0;
+}
+
+/* Generic group F102h - User Information (scalar) */
+static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+
+	struct {
+		u8 device_name[64];
+		u8 service_name[64];
+		u8 physical_location[64];
+		u8 instance_number[4];
+	} result;
+
+	token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token, "0xF102 User Information");
+		return 0;
+	}
+
+	seq_printf(seq, "Device name     : %s\n",
+		   chtostr(result.device_name, 64));
+	seq_printf(seq, "Service name    : %s\n",
+		   chtostr(result.service_name, 64));
+	seq_printf(seq, "Physical name   : %s\n",
+		   chtostr(result.physical_location, 64));
+	seq_printf(seq, "Instance number : %s\n",
+		   chtostr(result.instance_number, 4));
+
+	return 0;
+}
+
+/* Generic group F103h - SGL Operating Limits (scalar) */
+static int i2o_seq_show_sgl_limits(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	static u32 work32[12];
+	static u16 *work16 = (u16 *) work32;
+	static u8 *work8 = (u8 *) work32;
+	int token;
+
+	token = i2o_parm_field_get(d, 0xF103, -1, &work32, sizeof(work32));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0xF103 SGL Operating Limits");
+		return 0;
+	}
+
+	seq_printf(seq, "SGL chain size        : %d\n", work32[0]);
+	seq_printf(seq, "Max SGL chain size    : %d\n", work32[1]);
+	seq_printf(seq, "SGL chain size target : %d\n", work32[2]);
+	seq_printf(seq, "SGL frag count        : %d\n", work16[6]);
+	seq_printf(seq, "Max SGL frag count    : %d\n", work16[7]);
+	seq_printf(seq, "SGL frag count target : %d\n", work16[8]);
+
+/* FIXME
+	if (d->i2oversion == 0x02)
+	{
+*/
+	seq_printf(seq, "SGL data alignment    : %d\n", work16[8]);
+	seq_printf(seq, "SGL addr limit        : %d\n", work8[20]);
+	seq_printf(seq, "SGL addr sizes supported : ");
+	if (work8[21] & 0x01)
+		seq_printf(seq, "32 bit ");
+	if (work8[21] & 0x02)
+		seq_printf(seq, "64 bit ");
+	if (work8[21] & 0x04)
+		seq_printf(seq, "96 bit ");
+	if (work8[21] & 0x08)
+		seq_printf(seq, "128 bit ");
+	seq_printf(seq, "\n");
+/*
+	}
+*/
+
+	return 0;
+}
+
+/* Generic group F200h - Sensors (scalar) */
+static int i2o_seq_show_sensors(struct seq_file *seq, void *v)
+{
+	struct i2o_device *d = (struct i2o_device *)seq->private;
+	int token;
+
+	struct {
+		u16 sensor_instance;
+		u8 component;
+		u16 component_instance;
+		u8 sensor_class;
+		u8 sensor_type;
+		u8 scaling_exponent;
+		u32 actual_reading;
+		u32 minimum_reading;
+		u32 low2lowcat_treshold;
+		u32 lowcat2low_treshold;
+		u32 lowwarn2low_treshold;
+		u32 low2lowwarn_treshold;
+		u32 norm2lowwarn_treshold;
+		u32 lowwarn2norm_treshold;
+		u32 nominal_reading;
+		u32 hiwarn2norm_treshold;
+		u32 norm2hiwarn_treshold;
+		u32 high2hiwarn_treshold;
+		u32 hiwarn2high_treshold;
+		u32 hicat2high_treshold;
+		u32 hi2hicat_treshold;
+		u32 maximum_reading;
+		u8 sensor_state;
+		u16 event_enable;
+	} result;
+
+	token = i2o_parm_field_get(d, 0xF200, -1, &result, sizeof(result));
+
+	if (token < 0) {
+		i2o_report_query_status(seq, token,
+					"0xF200 Sensors (optional)");
+		return 0;
+	}
+
+	seq_printf(seq, "Sensor instance       : %d\n", result.sensor_instance);
+
+	seq_printf(seq, "Component             : %d = ", result.component);
+	switch (result.component) {
+	case 0:
+		seq_printf(seq, "Other");
+		break;
+	case 1:
+		seq_printf(seq, "Planar logic Board");
+		break;
+	case 2:
+		seq_printf(seq, "CPU");
+		break;
+	case 3:
+		seq_printf(seq, "Chassis");
+		break;
+	case 4:
+		seq_printf(seq, "Power Supply");
+		break;
+	case 5:
+		seq_printf(seq, "Storage");
+		break;
+	case 6:
+		seq_printf(seq, "External");
+		break;
+	}
+	seq_printf(seq, "\n");
+
+	seq_printf(seq, "Component instance    : %d\n",
+		   result.component_instance);
+	seq_printf(seq, "Sensor class          : %s\n",
+		   result.sensor_class ? "Analog" : "Digital");
+
+	seq_printf(seq, "Sensor type           : %d = ", result.sensor_type);
+	switch (result.sensor_type) {
+	case 0:
+		seq_printf(seq, "Other\n");
+		break;
+	case 1:
+		seq_printf(seq, "Thermal\n");
+		break;
+	case 2:
+		seq_printf(seq, "DC voltage (DC volts)\n");
+		break;
+	case 3:
+		seq_printf(seq, "AC voltage (AC volts)\n");
+		break;
+	case 4:
+		seq_printf(seq, "DC current (DC amps)\n");
+		break;
+	case 5:
+		seq_printf(seq, "AC current (AC amps)\n");
+		break;
+	case 6:
+		seq_printf(seq, "Door open\n");
+		break;
+	case 7:
+		seq_printf(seq, "Fan operational\n");
+		break;
+	}
+
+	seq_printf(seq, "Scaling exponent      : %d\n",
+		   result.scaling_exponent);
+	seq_printf(seq, "Actual reading        : %d\n", result.actual_reading);
+	seq_printf(seq, "Minimum reading       : %d\n", result.minimum_reading);
+	seq_printf(seq, "Low2LowCat threshold  : %d\n",
+		   result.low2lowcat_treshold);
+	seq_printf(seq, "LowCat2Low threshold  : %d\n",
+		   result.lowcat2low_treshold);
+	seq_printf(seq, "LowWarn2Low threshold : %d\n",
+		   result.lowwarn2low_treshold);
+	seq_printf(seq, "Low2LowWarn threshold : %d\n",
+		   result.low2lowwarn_treshold);
+	seq_printf(seq, "Norm2LowWarn threshold: %d\n",
+		   result.norm2lowwarn_treshold);
+	seq_printf(seq, "LowWarn2Norm threshold: %d\n",
+		   result.lowwarn2norm_treshold);
+	seq_printf(seq, "Nominal reading       : %d\n", result.nominal_reading);
+	seq_printf(seq, "HiWarn2Norm threshold : %d\n",
+		   result.hiwarn2norm_treshold);
+	seq_printf(seq, "Norm2HiWarn threshold : %d\n",
+		   result.norm2hiwarn_treshold);
+	seq_printf(seq, "High2HiWarn threshold : %d\n",
+		   result.high2hiwarn_treshold);
+	seq_printf(seq, "HiWarn2High threshold : %d\n",
+		   result.hiwarn2high_treshold);
+	seq_printf(seq, "HiCat2High threshold  : %d\n",
+		   result.hicat2high_treshold);
+	seq_printf(seq, "High2HiCat threshold  : %d\n",
+		   result.hi2hicat_treshold);
+	seq_printf(seq, "Maximum reading       : %d\n", result.maximum_reading);
+
+	seq_printf(seq, "Sensor state          : %d = ", result.sensor_state);
+	switch (result.sensor_state) {
+	case 0:
+		seq_printf(seq, "Normal\n");
+		break;
+	case 1:
+		seq_printf(seq, "Abnormal\n");
+		break;
+	case 2:
+		seq_printf(seq, "Unknown\n");
+		break;
+	case 3:
+		seq_printf(seq, "Low Catastrophic (LoCat)\n");
+		break;
+	case 4:
+		seq_printf(seq, "Low (Low)\n");
+		break;
+	case 5:
+		seq_printf(seq, "Low Warning (LoWarn)\n");
+		break;
+	case 6:
+		seq_printf(seq, "High Warning (HiWarn)\n");
+		break;
+	case 7:
+		seq_printf(seq, "High (High)\n");
+		break;
+	case 8:
+		seq_printf(seq, "High Catastrophic (HiCat)\n");
+		break;
+	}
+
+	seq_printf(seq, "Event_enable : 0x%02X\n", result.event_enable);
+	seq_printf(seq, "    [%s] Operational state change. \n",
+		   (result.event_enable & 0x01) ? "+" : "-");
+	seq_printf(seq, "    [%s] Low catastrophic. \n",
+		   (result.event_enable & 0x02) ? "+" : "-");
+	seq_printf(seq, "    [%s] Low reading. \n",
+		   (result.event_enable & 0x04) ? "+" : "-");
+	seq_printf(seq, "    [%s] Low warning. \n",
+		   (result.event_enable & 0x08) ? "+" : "-");
+	seq_printf(seq,
+		   "    [%s] Change back to normal from out of range state. \n",
+		   (result.event_enable & 0x10) ? "+" : "-");
+	seq_printf(seq, "    [%s] High warning. \n",
+		   (result.event_enable & 0x20) ? "+" : "-");
+	seq_printf(seq, "    [%s] High reading. \n",
+		   (result.event_enable & 0x40) ? "+" : "-");
+	seq_printf(seq, "    [%s] High catastrophic. \n",
+		   (result.event_enable & 0x80) ? "+" : "-");
+
+	return 0;
+}
+
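+/*
+ * proc plumbing: each file below is a read-only single_open() seq_file whose
+ * show routine receives the i2o_controller or i2o_device pointer that
+ * i2o_proc_create_entries() stored in the proc entry's ->data field.
+ */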
+static int i2o_seq_open_hrt(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_hrt, PDE(inode)->data);
+};
+
+static int i2o_seq_open_lct(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_lct, PDE(inode)->data);
+};
+
+static int i2o_seq_open_status(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_status, PDE(inode)->data);
+};
+
+static int i2o_seq_open_hw(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_hw, PDE(inode)->data);
+};
+
+static int i2o_seq_open_ddm_table(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_ddm_table, PDE(inode)->data);
+};
+
+static int i2o_seq_open_driver_store(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_driver_store, PDE(inode)->data);
+};
+
+static int i2o_seq_open_drivers_stored(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_drivers_stored, PDE(inode)->data);
+};
+
+static int i2o_seq_open_groups(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_groups, PDE(inode)->data);
+};
+
+static int i2o_seq_open_phys_device(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_phys_device, PDE(inode)->data);
+};
+
+static int i2o_seq_open_claimed(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_claimed, PDE(inode)->data);
+};
+
+static int i2o_seq_open_users(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_users, PDE(inode)->data);
+};
+
+static int i2o_seq_open_priv_msgs(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_priv_msgs, PDE(inode)->data);
+};
+
+static int i2o_seq_open_authorized_users(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_authorized_users,
+			   PDE(inode)->data);
+};
+
+static int i2o_seq_open_dev_identity(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_dev_identity, PDE(inode)->data);
+};
+
+static int i2o_seq_open_ddm_identity(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_ddm_identity, PDE(inode)->data);
+};
+
+static int i2o_seq_open_uinfo(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_uinfo, PDE(inode)->data);
+};
+
+static int i2o_seq_open_sgl_limits(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_sgl_limits, PDE(inode)->data);
+};
+
+static int i2o_seq_open_sensors(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_sensors, PDE(inode)->data);
+};
+
+static int i2o_seq_open_dev_name(struct inode *inode, struct file *file)
+{
+	return single_open(file, i2o_seq_show_dev_name, PDE(inode)->data);
+};
+
+static struct file_operations i2o_seq_fops_lct = {
+	.open = i2o_seq_open_lct,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_hrt = {
+	.open = i2o_seq_open_hrt,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_status = {
+	.open = i2o_seq_open_status,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_hw = {
+	.open = i2o_seq_open_hw,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_ddm_table = {
+	.open = i2o_seq_open_ddm_table,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_driver_store = {
+	.open = i2o_seq_open_driver_store,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_drivers_stored = {
+	.open = i2o_seq_open_drivers_stored,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_groups = {
+	if (work8[16] >= 8)
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_phys_device = {
+	.open = i2o_seq_open_phys_device,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_claimed = {
+	.open = i2o_seq_open_claimed,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_users = {
+	.open = i2o_seq_open_users,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_priv_msgs = {
+	.open = i2o_seq_open_priv_msgs,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_authorized_users = {
+	.open = i2o_seq_open_authorized_users,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_dev_name = {
+	.open = i2o_seq_open_dev_name,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_dev_identity = {
+	.open = i2o_seq_open_dev_identity,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_ddm_identity = {
+	.open = i2o_seq_open_ddm_identity,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_uinfo = {
+	.open = i2o_seq_open_uinfo,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_sgl_limits = {
+	.open = i2o_seq_open_sgl_limits,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+static struct file_operations i2o_seq_fops_sensors = {
+	.open = i2o_seq_open_sensors,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = single_release,
+};
+
+/*
+ * IOP specific entries...write field just in case someone
+ * ever wants one.
+ */
+static i2o_proc_entry i2o_proc_generic_iop_entries[] = {
+	{"hrt", S_IFREG | S_IRUGO, &i2o_seq_fops_hrt},
+	{"lct", S_IFREG | S_IRUGO, &i2o_seq_fops_lct},
+	{"status", S_IFREG | S_IRUGO, &i2o_seq_fops_status},
+	{"hw", S_IFREG | S_IRUGO, &i2o_seq_fops_hw},
+	{"ddm_table", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_table},
+	{"driver_store", S_IFREG | S_IRUGO, &i2o_seq_fops_driver_store},
+	{"drivers_stored", S_IFREG | S_IRUGO, &i2o_seq_fops_drivers_stored},
+	{NULL, 0, NULL}
+};
+
+/*
+ * Device specific entries
+ */
+static i2o_proc_entry generic_dev_entries[] = {
+	{"groups", S_IFREG | S_IRUGO, &i2o_seq_fops_groups},
+	{"phys_dev", S_IFREG | S_IRUGO, &i2o_seq_fops_phys_device},
+	{"claimed", S_IFREG | S_IRUGO, &i2o_seq_fops_claimed},
+	{"users", S_IFREG | S_IRUGO, &i2o_seq_fops_users},
+	{"priv_msgs", S_IFREG | S_IRUGO, &i2o_seq_fops_priv_msgs},
+	{"authorized_users", S_IFREG | S_IRUGO, &i2o_seq_fops_authorized_users},
+	{"dev_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_identity},
+	{"ddm_identity", S_IFREG | S_IRUGO, &i2o_seq_fops_ddm_identity},
+	{"user_info", S_IFREG | S_IRUGO, &i2o_seq_fops_uinfo},
+	{"sgl_limits", S_IFREG | S_IRUGO, &i2o_seq_fops_sgl_limits},
+	{"sensors", S_IFREG | S_IRUGO, &i2o_seq_fops_sensors},
+	{NULL, 0, NULL}
+};
+
+/*
+ *  Storage unit specific entries (SCSI Periph, BS) with device names
+ */
+static i2o_proc_entry rbs_dev_entries[] = {
+	{"dev_name", S_IFREG | S_IRUGO, &i2o_seq_fops_dev_name},
+	{NULL, 0, NULL}
+};
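+
+/*
+ * Together the tables above yield a proc tree of roughly this shape
+ * (controller and TID names are illustrative):
+ *
+ *   /proc/i2o/<iop>/lct                 - i2o_proc_generic_iop_entries
+ *   /proc/i2o/<iop>/<tid>/dev_identity  - generic_dev_entries
+ *   /proc/i2o/<iop>/<tid>/dev_name      - rbs_dev_entries (SCSI/block only)
+ */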
+
+/**
+ *	i2o_proc_create_entries - Creates proc dir entries
+ *	@dir: proc dir entry under which the entries should be placed
+ *	@i2o_pe: pointer to the entries which should be added
+ *	@data: pointer to I2O controller or device
+ *
+ *	Create proc dir entries for an I2O controller or I2O device.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_proc_create_entries(struct proc_dir_entry *dir,
+				   i2o_proc_entry * i2o_pe, void *data)
+{
+	struct proc_dir_entry *tmp;
+
+	while (i2o_pe->name) {
+		tmp = create_proc_entry(i2o_pe->name, i2o_pe->mode, dir);
+		if (!tmp)
+			return -1;
+
+		tmp->data = data;
+		tmp->proc_fops = i2o_pe->fops;
+
+		i2o_pe++;
+	}
+
+	return 0;
+}
+
+/**
+ *	i2o_proc_subdir_remove - Remove child entries from a proc entry
+ *	@dir: proc dir entry from which the children should be removed
+ *
+ *	Iterate over each i2o proc entry under dir and remove it. If the child
+ *	also has entries, remove them too.
+ */
+static void i2o_proc_subdir_remove(struct proc_dir_entry *dir)
+{
+	struct proc_dir_entry *pe, *tmp;
+	pe = dir->subdir;
+	while (pe) {
+		tmp = pe->next;
+		i2o_proc_subdir_remove(pe);
+		remove_proc_entry(pe->name, dir);
+		pe = tmp;
+	}
+};
+
+/**
+ *	i2o_proc_device_add - Add an I2O device to the proc dir
+ *	@dir: proc dir entry to which the device should be added
+ *	@dev: I2O device which should be added
+ *
+ *	Add an I2O device to the proc dir entry dir and create the entries for
+ *	the device depending on the class of the I2O device.
+ */
+static void i2o_proc_device_add(struct proc_dir_entry *dir,
+				struct i2o_device *dev)
+{
+	char buff[10];
+	struct proc_dir_entry *devdir;
+	i2o_proc_entry *i2o_pe = NULL;
+
+	sprintf(buff, "%03x", dev->lct_data.tid);
+
+	osm_debug("adding device /proc/i2o/%s/%s\n", dev->iop->name, buff);
+
+	devdir = proc_mkdir(buff, dir);
+	if (!devdir) {
+		osm_warn("Could not allocate procdir!\n");
+		return;
+	}
+
+	devdir->data = dev;
+
+	i2o_proc_create_entries(devdir, generic_dev_entries, dev);
+
+	/* Inform core that we want updates about this device's status */
+	switch (dev->lct_data.class_id) {
+	case I2O_CLASS_SCSI_PERIPHERAL:
+	case I2O_CLASS_RANDOM_BLOCK_STORAGE:
+		i2o_pe = rbs_dev_entries;
+		break;
+	default:
+		break;
+	}
+	if (i2o_pe)
+		i2o_proc_create_entries(devdir, i2o_pe, dev);
+}
+
+/**
+ *	i2o_proc_iop_add - Add an I2O controller to the i2o proc tree
+ *	@dir: parent proc dir entry
+ *	@c: I2O controller which should be added
+ *
+ *	Add the entries to the parent proc dir entry. Also each device is added
+ *	to the controller's proc dir entry.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_proc_iop_add(struct proc_dir_entry *dir,
+			    struct i2o_controller *c)
+{
+	struct proc_dir_entry *iopdir;
+	struct i2o_device *dev;
+
+	osm_debug("adding IOP /proc/i2o/%s\n", c->name);
+
+	iopdir = proc_mkdir(c->name, dir);
+	if (!iopdir)
+		return -1;
+
+	iopdir->data = c;
+
+	i2o_proc_create_entries(iopdir, i2o_proc_generic_iop_entries, c);
+
+	list_for_each_entry(dev, &c->devices, list)
+	    i2o_proc_device_add(iopdir, dev);
+
+	return 0;
+}
+
+/**
+ *	i2o_proc_iop_remove - Removes an I2O controller from the i2o proc tree
+ *	@dir: parent proc dir entry
+ *	@c: I2O controller which should be removed
+ *
+ *	Iterate over each i2o proc entry and search for controller c. If it is
+ *	found, remove it from the tree.
+ */
+static void i2o_proc_iop_remove(struct proc_dir_entry *dir,
+				struct i2o_controller *c)
+{
+	struct proc_dir_entry *pe, *tmp;
+
+	pe = dir->subdir;
+	while (pe) {
+		tmp = pe->next;
+		if (pe->data == c) {
+			osm_debug("removing IOP /proc/i2o/%s\n", c->name);
+			i2o_proc_subdir_remove(pe);
+			remove_proc_entry(pe->name, dir);
+		}
+		pe = tmp;
+	}
+}
+
+/**
+ *	i2o_proc_fs_create - Create the i2o proc fs.
+ *
+ *	Iterate over each I2O controller and create the entries for it.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_proc_fs_create(void)
+{
+	struct i2o_controller *c;
+
+	i2o_proc_dir_root = proc_mkdir("i2o", NULL);
+	if (!i2o_proc_dir_root)
+		return -1;
+
+	i2o_proc_dir_root->owner = THIS_MODULE;
+
+	list_for_each_entry(c, &i2o_controllers, list)
+	    i2o_proc_iop_add(i2o_proc_dir_root, c);
+
+	return 0;
+};
+
+/**
+ *	i2o_proc_fs_destroy - Clean up all i2o proc entries
+ *
+ *	Iterate over each I2O controller and remove the entries for it.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __exit i2o_proc_fs_destroy(void)
+{
+	struct i2o_controller *c;
+
+	list_for_each_entry(c, &i2o_controllers, list)
+	    i2o_proc_iop_remove(i2o_proc_dir_root, c);
+
+	remove_proc_entry("i2o", NULL);
+
+	return 0;
+};
+
+/**
+ *	i2o_proc_init - Init function for procfs
+ *
+ *	Registers Proc OSM and creates procfs entries.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_proc_init(void)
+{
+	int rc;
+
+	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+	rc = i2o_driver_register(&i2o_proc_driver);
+	if (rc)
+		return rc;
+
+	rc = i2o_proc_fs_create();
+	if (rc) {
+		i2o_driver_unregister(&i2o_proc_driver);
+		return rc;
+	}
+
+	return 0;
+};
+
+/**
+ *	i2o_proc_exit - Exit function for procfs
+ *
+ *	Unregisters Proc OSM and removes procfs entries.
+ */
+static void __exit i2o_proc_exit(void)
+{
+	i2o_driver_unregister(&i2o_proc_driver);
+	i2o_proc_fs_destroy();
+};
+
+MODULE_AUTHOR("Deepak Saxena");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_proc_init);
+module_exit(i2o_proc_exit);
diff --git a/drivers/message/i2o/i2o_scsi.c b/drivers/message/i2o/i2o_scsi.c
new file mode 100644
index 0000000..43f5875
--- /dev/null
+++ b/drivers/message/i2o/i2o_scsi.c
@@ -0,0 +1,830 @@
+/*
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2, or (at your option) any
+ * later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * For the avoidance of doubt the "preferred form" of this code is one which
+ * is in an open non patent encumbered format. Where cryptographic key signing
+ * forms part of the process of creating an executable the information
+ * including keys needed to generate an equivalently functional executable
+ * are deemed to be part of the source code.
+ *
+ *  Complications for I2O scsi
+ *
+ *	o	Each (bus,lun) is a logical device in I2O. We keep a map
+ *		table. We spoof failed selection for unmapped units
+ *	o	Request sense buffers can come back for free.
+ *	o	Scatter gather is a bit dynamic. We have to investigate at
+ *		setup time.
+ *	o	Some of our resources are dynamically shared. The i2o core
+ *		needs a message reservation protocol to avoid swap v net
+ *		deadlocking. We need to back off queue requests.
+ *
+ *	In general the firmware wants to help. Where its help isn't useful for
+ *	performance we just ignore the aid. It's not worth the code in truth.
+ *
+ * Fixes/additions:
+ *	Steve Ralston:
+ *		Scatter gather now works
+ *	Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *		Minor fixes for 2.6.
+ *
+ * To Do:
+ *	64bit cleanups
+ *	Fix the resource management problems.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/jiffies.h>
+#include <linux/interrupt.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <linux/proc_fs.h>
+#include <linux/prefetch.h>
+#include <linux/pci.h>
+#include <linux/blkdev.h>
+#include <linux/i2o.h>
+
+#include <asm/dma.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/atomic.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_device.h>
+#include <scsi/scsi_cmnd.h>
+
+#define OSM_NAME	"scsi-osm"
+#define OSM_VERSION	"$Rev$"
+#define OSM_DESCRIPTION	"I2O SCSI Peripheral OSM"
+
+static struct i2o_driver i2o_scsi_driver;
+
+static int i2o_scsi_max_id = 16;
+static int i2o_scsi_max_lun = 8;
+
+struct i2o_scsi_host {
+	struct Scsi_Host *scsi_host;	/* pointer to the SCSI host */
+	struct i2o_controller *iop;	/* pointer to the I2O controller */
+	struct i2o_device *channel[0];	/* channel->i2o_dev mapping table */
+};
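+/*
+ * channel[] is a zero-length array: i2o_scsi_host_alloc() below asks
+ * scsi_host_alloc() for sizeof(struct i2o_scsi_host) plus one pointer per
+ * bus adapter port found, so the channel map lives directly in the
+ * Scsi_Host hostdata allocation.
+ */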
+
+static struct scsi_host_template i2o_scsi_host_template;
+
+#define I2O_SCSI_CAN_QUEUE	4
+
+/* SCSI OSM class handling definition */
+static struct i2o_class_id i2o_scsi_class_id[] = {
+	{I2O_CLASS_SCSI_PERIPHERAL},
+	{I2O_CLASS_END}
+};
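+/*
+ * Only devices of the classes listed above are routed to this OSM's
+ * probe/remove callbacks through the i2o_scsi_driver registration below.
+ */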
+
+static struct i2o_scsi_host *i2o_scsi_host_alloc(struct i2o_controller *c)
+{
+	struct i2o_scsi_host *i2o_shost;
+	struct i2o_device *i2o_dev;
+	struct Scsi_Host *scsi_host;
+	int max_channel = 0;
+	u8 type;
+	int i;
+	size_t size;
+	i2o_status_block *sb;
+
+	list_for_each_entry(i2o_dev, &c->devices, list)
+	    if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
+		if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1))	/* SCSI bus */
+			max_channel++;
+	}
+
+	if (!max_channel) {
+		osm_warn("no channels found on %s\n", c->name);
+		return ERR_PTR(-EFAULT);
+	}
+
+	size = max_channel * sizeof(struct i2o_device *)
+	    + sizeof(struct i2o_scsi_host);
+
+	scsi_host = scsi_host_alloc(&i2o_scsi_host_template, size);
+	if (!scsi_host) {
+		osm_warn("Could not allocate SCSI host\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	scsi_host->max_channel = max_channel - 1;
+	scsi_host->max_id = i2o_scsi_max_id;
+	scsi_host->max_lun = i2o_scsi_max_lun;
+	scsi_host->this_id = c->unit;
+
+	sb = c->status_block.virt;
+
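+	/*
+	 * inbound_frame_size is in 32-bit words while sizeof() yields bytes,
+	 * hence the / 4; a simple SG element takes two words (flags/length
+	 * plus address), hence the / 2.  The 6 words are assumed to be the
+	 * fixed part of the SCSI_EXEC message body.
+	 */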
+	scsi_host->sg_tablesize = (sb->inbound_frame_size -
+				   sizeof(struct i2o_message) / 4 - 6) / 2;
+
+	i2o_shost = (struct i2o_scsi_host *)scsi_host->hostdata;
+	i2o_shost->scsi_host = scsi_host;
+	i2o_shost->iop = c;
+
+	i = 0;
+	list_for_each_entry(i2o_dev, &c->devices, list)
+	    if (i2o_dev->lct_data.class_id == I2O_CLASS_BUS_ADAPTER_PORT) {
+		if (i2o_parm_field_get(i2o_dev, 0x0000, 0, &type, 1) || (type == 1))	/* only SCSI bus */
+			i2o_shost->channel[i++] = i2o_dev;
+
+		if (i >= max_channel)
+			break;
+	}
+
+	return i2o_shost;
+};
+
+/**
+ *	i2o_scsi_get_host - Get an I2O SCSI host
+ *	@c: I2O controller for which to get the SCSI host
+ *
+ *	If the I2O controller already exists as a SCSI host, that SCSI host
+ *	is returned, otherwise the I2O controller is added to the SCSI
+ *	core.
+ *
+ *	Returns pointer to the I2O SCSI host on success or NULL on failure.
+ */
+static struct i2o_scsi_host *i2o_scsi_get_host(struct i2o_controller *c)
+{
+	return c->driver_data[i2o_scsi_driver.context];
+};
+
+/**
+ *	i2o_scsi_remove - Remove I2O device from SCSI core
+ *	@dev: device which should be removed
+ *
+ *	Removes the I2O device from the SCSI core.
+ *
+ *	Returns 0 on success.
+ */
+static int i2o_scsi_remove(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+	struct i2o_controller *c = i2o_dev->iop;
+	struct i2o_scsi_host *i2o_shost;
+	struct scsi_device *scsi_dev;
+
+	i2o_shost = i2o_scsi_get_host(c);
+
+	shost_for_each_device(scsi_dev, i2o_shost->scsi_host)
+	    if (scsi_dev->hostdata == i2o_dev) {
+		scsi_remove_device(scsi_dev);
+		scsi_device_put(scsi_dev);
+		break;
+	}
+
+	return 0;
+};
+
+/**
+ *	i2o_scsi_probe - verify if dev is an I2O SCSI device and install it
+ *	@dev: device to verify if it is an I2O SCSI device
+ *
+ *	Retrieve channel, id and lun for the I2O device. If everything goes well
+ *	register the I2O device as SCSI device on the I2O SCSI controller.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_scsi_probe(struct device *dev)
+{
+	struct i2o_device *i2o_dev = to_i2o_device(dev);
+	struct i2o_controller *c = i2o_dev->iop;
+	struct i2o_scsi_host *i2o_shost;
+	struct Scsi_Host *scsi_host;
+	struct i2o_device *parent;
+	struct scsi_device *scsi_dev;
+	u32 id;
+	u64 lun;
+	int channel = -1;
+	int i;
+
+	i2o_shost = i2o_scsi_get_host(c);
+	if (!i2o_shost)
+		return -EFAULT;
+
+	scsi_host = i2o_shost->scsi_host;
+
+	if (i2o_parm_field_get(i2o_dev, 0, 3, &id, 4) < 0)
+		return -EFAULT;
+
+	if (id >= scsi_host->max_id) {
+		osm_warn("SCSI device id (%d) >= max_id of I2O host (%d)", id,
+			 scsi_host->max_id);
+		return -EFAULT;
+	}
+
+	if (i2o_parm_field_get(i2o_dev, 0, 4, &lun, 8) < 0)
+		return -EFAULT;
+	if (lun >= scsi_host->max_lun) {
+		osm_warn("SCSI device lun (%d) >= max_lun of I2O host (%d)",
+			 (unsigned int)lun, scsi_host->max_lun);
+		return -EFAULT;
+	}
+
+	parent = i2o_iop_find_device(c, i2o_dev->lct_data.parent_tid);
+	if (!parent) {
+		osm_warn("can not find parent of device %03x\n",
+			 i2o_dev->lct_data.tid);
+		return -EFAULT;
+	}
+
+	for (i = 0; i <= i2o_shost->scsi_host->max_channel; i++)
+		if (i2o_shost->channel[i] == parent)
+			channel = i;
+
+	if (channel == -1) {
+		osm_warn("can not find channel of device %03x\n",
+			 i2o_dev->lct_data.tid);
+		return -EFAULT;
+	}
+
+	scsi_dev =
+	    __scsi_add_device(i2o_shost->scsi_host, channel, id, lun, i2o_dev);
+
+	if (!scsi_dev) {
+		osm_warn("can not add SCSI device %03x\n",
+			 i2o_dev->lct_data.tid);
+		return -EFAULT;
+	}
+
+	osm_debug("added new SCSI device %03x (channel: %d, id: %d, lun: %d)\n",
+		  i2o_dev->lct_data.tid, channel, id, (unsigned int)lun);
+
+	return 0;
+};
+
+static const char *i2o_scsi_info(struct Scsi_Host *SChost)
+{
+	struct i2o_scsi_host *hostdata;
+	hostdata = (struct i2o_scsi_host *)SChost->hostdata;
+	return hostdata->iop->name;
+}
+
+/**
+ *	i2o_scsi_reply - SCSI OSM message reply handler
+ *	@c: controller issuing the reply
+ *	@m: message id for flushing
+ *	@msg: the message from the controller
+ *
+ *	Process reply messages (interrupts in normal SCSI controller terms).
+ *	We can get a variety of messages to process. The normal path is
+ *	scsi command completions. We must also deal with IOP failures,
+ *	the reply to a bus reset and the reply to a LUN query.
+ *
+ *	Returns 0 on success and if the reply should not be flushed or > 0
+ *	on success and if the reply should be flushed. Returns negative error
+ *	code on failure and if the reply should be flushed.
+ */
+static int i2o_scsi_reply(struct i2o_controller *c, u32 m,
+			  struct i2o_message *msg)
+{
+	struct scsi_cmnd *cmd;
+	struct device *dev;
+	u8 as, ds, st;
+
+	cmd = i2o_cntxt_list_get(c, le32_to_cpu(msg->u.s.tcntxt));
+
+	if (msg->u.head[0] & (1 << 13)) {
+		struct i2o_message __iomem *pmsg;	/* preserved message */
+		u32 pm;
+		int err = DID_ERROR;
+
+		pm = le32_to_cpu(msg->body[3]);
+
+		pmsg = i2o_msg_in_to_virt(c, pm);
+
+		osm_err("IOP fail.\n");
+		osm_err("From %d To %d Cmd %d.\n",
+			(msg->u.head[1] >> 12) & 0xFFF,
+			msg->u.head[1] & 0xFFF, msg->u.head[1] >> 24);
+		osm_err("Failure Code %d.\n", msg->body[0] >> 24);
+		if (msg->body[0] & (1 << 16))
+			osm_err("Format error.\n");
+		if (msg->body[0] & (1 << 17))
+			osm_err("Path error.\n");
+		if (msg->body[0] & (1 << 18))
+			osm_err("Path State.\n");
+		if (msg->body[0] & (1 << 18))
+		{
+			osm_err("Congestion.\n");
+			err = DID_BUS_BUSY;
+		}
+
+		osm_debug("Failing message is %p.\n", pmsg);
+
+		cmd = i2o_cntxt_list_get(c, readl(&pmsg->u.s.tcntxt));
+		if (!cmd)
+			return 1;
+
+		cmd->result = err << 16;
+		cmd->scsi_done(cmd);
+
+		/* Now flush the message by making it a NOP */
+		i2o_msg_nop(c, pm);
+
+		return 1;
+	}
+
+	/*
+	 *      Low byte is device status, next is adapter status,
+	 *      (then one byte reserved), then request status.
+	 */
+	ds = (u8) le32_to_cpu(msg->body[0]);
+	as = (u8) (le32_to_cpu(msg->body[0]) >> 8);
+	st = (u8) (le32_to_cpu(msg->body[0]) >> 24);
+
+	/*
+	 *      Is this a control request coming back - eg an abort ?
+	 */
+
+	if (!cmd) {
+		if (st)
+			osm_warn("SCSI abort: %08X", le32_to_cpu(msg->body[0]));
+		osm_info("SCSI abort completed.\n");
+		return -EFAULT;
+	}
+
+	osm_debug("Completed %ld\n", cmd->serial_number);
+
+	if (st) {
+		u32 count, error;
+		/* An error has occurred */
+
+		switch (st) {
+		case 0x06:
+			count = le32_to_cpu(msg->body[1]);
+			if (count < cmd->underflow) {
+				int i;
+
+				osm_err("SCSI underflow 0x%08X 0x%08X\n", count,
+					cmd->underflow);
+				osm_debug("Cmd: ");
+				for (i = 0; i < 15; i++)
+					pr_debug("%02X ", cmd->cmnd[i]);
+				pr_debug(".\n");
+				cmd->result = (DID_ERROR << 16);
+			}
+			break;
+
+		default:
+			error = le32_to_cpu(msg->body[0]);
+
+			osm_err("SCSI error %08x\n", error);
+
+			if ((error & 0xff) == 0x02 /*CHECK_CONDITION */ ) {
+				int i;
+				u32 len = sizeof(cmd->sense_buffer);
+				len = (len > 40) ? 40 : len;
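+				/* the reply frame is assumed to carry at
+				 * most 40 bytes of sense data */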
+				// Copy over the sense data
+				memcpy(cmd->sense_buffer, (void *)&msg->body[3],
+				       len);
+				for (i = 0; i < len; i++)
+					osm_info("%02x\n",
+						 cmd->sense_buffer[i]);
+				if (cmd->sense_buffer[0] == 0x70
+				    && cmd->sense_buffer[2] == DATA_PROTECT) {
+					/* This is to handle an array failed */
+					cmd->result = (DID_TIME_OUT << 16);
+					printk(KERN_WARNING "%s: SCSI Data "
+					       "Protect-Device (%d,%d,%d) "
+					       "hba_status=0x%x, dev_status="
+					       "0x%x, cmd=0x%x\n", c->name,
+					       (u32) cmd->device->channel,
+					       (u32) cmd->device->id,
+					       (u32) cmd->device->lun,
+					       (error >> 8) & 0xff,
+					       error & 0xff, cmd->cmnd[0]);
+				} else
+					cmd->result = (DID_ERROR << 16);
+
+				break;
+			}
+
+			switch (as) {
+			case 0x0E:
+				/* SCSI Reset */
+				cmd->result = DID_RESET << 16;
+				break;
+
+			case 0x0F:
+				cmd->result = DID_PARITY << 16;
+				break;
+
+			default:
+				cmd->result = DID_ERROR << 16;
+				break;
+			}
+
+			break;
+		}
+
+		cmd->scsi_done(cmd);
+		return 1;
+	}
+
+	cmd->result = DID_OK << 16 | ds;
+
+	cmd->scsi_done(cmd);
+
+	dev = &c->pdev->dev;
+	if (cmd->use_sg)
+		dma_unmap_sg(dev, (struct scatterlist *)cmd->buffer,
+			     cmd->use_sg, cmd->sc_data_direction);
+	else if (cmd->request_bufflen)
+		dma_unmap_single(dev, (dma_addr_t) ((long)cmd->SCp.ptr),
+				 cmd->request_bufflen, cmd->sc_data_direction);
+
+	return 1;
+};
+
+/**
+ *	i2o_scsi_notify_controller_add - Retrieve notifications of added
+ *					 controllers
+ *	@c: the controller which was added
+ *
+ *	If an I2O controller is added, we catch the notification to add a
+ *	corresponding Scsi_Host.
+ */
+static void i2o_scsi_notify_controller_add(struct i2o_controller *c)
+{
+	struct i2o_scsi_host *i2o_shost;
+	int rc;
+
+	i2o_shost = i2o_scsi_host_alloc(c);
+	if (IS_ERR(i2o_shost)) {
+		osm_err("Could not initialize SCSI host\n");
+		return;
+	}
+
+	rc = scsi_add_host(i2o_shost->scsi_host, &c->device);
+	if (rc) {
+		osm_err("Could not add SCSI host\n");
+		scsi_host_put(i2o_shost->scsi_host);
+		return;
+	}
+
+	c->driver_data[i2o_scsi_driver.context] = i2o_shost;
+
+	osm_debug("new I2O SCSI host added\n");
+};
+
+/**
+ *	i2o_scsi_notify_controller_remove - Retrieve notifications of removed
+ *					    controllers
+ *	@c: the controller which was removed
+ *
+ *	If an I2O controller is removed, we catch the notification to remove the
+ *	corresponding Scsi_Host.
+ */
+static void i2o_scsi_notify_controller_remove(struct i2o_controller *c)
+{
+	struct i2o_scsi_host *i2o_shost;
+	i2o_shost = i2o_scsi_get_host(c);
+	if (!i2o_shost)
+		return;
+
+	c->driver_data[i2o_scsi_driver.context] = NULL;
+
+	scsi_remove_host(i2o_shost->scsi_host);
+	scsi_host_put(i2o_shost->scsi_host);
+	pr_info("I2O SCSI host removed\n");
+};
+
+/* SCSI OSM driver struct */
+static struct i2o_driver i2o_scsi_driver = {
+	.name = OSM_NAME,
+	.reply = i2o_scsi_reply,
+	.classes = i2o_scsi_class_id,
+	.notify_controller_add = i2o_scsi_notify_controller_add,
+	.notify_controller_remove = i2o_scsi_notify_controller_remove,
+	.driver = {
+		   .probe = i2o_scsi_probe,
+		   .remove = i2o_scsi_remove,
+		   },
+};
+
+/**
+ *	i2o_scsi_queuecommand - queue a SCSI command
+ *	@SCpnt: scsi command pointer
+ *	@done: callback for completion
+ *
+ *	Issue a scsi command asynchronously. Return 0 on success or 1 if
+ *	we hit an error (normally message queue congestion). The only
+ *	minor complication here is that I2O deals with the device addressing
+ *	so we have to map the bus/dev/lun back to an I2O handle as well
+ *	as faking absent devices ourselves.
+ *
+ *	Locks: takes the controller lock on error path only
+ */
+
+static int i2o_scsi_queuecommand(struct scsi_cmnd *SCpnt,
+				 void (*done) (struct scsi_cmnd *))
+{
+	struct i2o_controller *c;
+	struct Scsi_Host *host;
+	struct i2o_device *i2o_dev;
+	struct device *dev;
+	int tid;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	u32 scsi_flags, sg_flags;
+	u32 __iomem *mptr;
+	u32 __iomem *lenptr;
+	u32 len, reqlen;
+	int i;
+
+	/*
+	 *      Do the incoming paperwork
+	 */
+
+	i2o_dev = SCpnt->device->hostdata;
+	host = SCpnt->device->host;
+
+	SCpnt->scsi_done = done;
+
+	/* Check for the device before dereferencing it */
+	if (unlikely(!i2o_dev)) {
+		osm_warn("no I2O device in request\n");
+		SCpnt->result = DID_NO_CONNECT << 16;
+		done(SCpnt);
+		return 0;
+	}
+
+	c = i2o_dev->iop;
+	dev = &c->pdev->dev;
+
+	tid = i2o_dev->lct_data.tid;
+
+	osm_debug("qcmd: Tid = %03x\n", tid);
+	osm_debug("Real scsi messages.\n");
+
+	/*
+	 *      Obtain an I2O message. If there are none free then
+	 *      throw it back to the scsi layer
+	 */
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/*
+	 *      Put together a scsi execscb message
+	 */
+
+	len = SCpnt->request_bufflen;
+
+	switch (SCpnt->sc_data_direction) {
+	case PCI_DMA_NONE:
+		scsi_flags = 0x00000000;	// DATA NO XFER
+		sg_flags = 0x00000000;
+		break;
+
+	case PCI_DMA_TODEVICE:
+		scsi_flags = 0x80000000;	// DATA OUT (iop-->dev)
+		sg_flags = 0x14000000;
+		break;
+
+	case PCI_DMA_FROMDEVICE:
+		scsi_flags = 0x40000000;	// DATA IN  (iop<--dev)
+		sg_flags = 0x10000000;
+		break;
+
+	default:
+		/* Unknown - kill the command */
+		SCpnt->result = DID_NO_CONNECT << 16;
+		done(SCpnt);
+		return 0;
+	}
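+	/*
+	 * scsi_flags carries the transfer direction bits destined for
+	 * msg->body[0]; sg_flags is the per-element flag word for the SG
+	 * list built below, with 0xC0000000 OR'd into the final element to
+	 * mark the end of the buffer.
+	 */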
+
+	writel(I2O_CMD_SCSI_EXEC << 24 | HOST_TID << 12 | tid, &msg->u.head[1]);
+	writel(i2o_scsi_driver.context, &msg->u.s.icntxt);
+
+	/* We want the SCSI control block back */
+	writel(i2o_cntxt_list_add(c, SCpnt), &msg->u.s.tcntxt);
+
+	/* LSI_920_PCI_QUIRK
+	 *
+	 *      Intermittent msg frame word data corruption has been observed
+	 *      on msg[4] after:
+	 *        WRITE, READ-MODIFY-WRITE
+	 *      operations.  19990606 -sralston
+	 *
+	 *      (Hence we build this word via tag. It's good practice anyway;
+	 *       we don't want needless fetches over PCI.)
+	 */
+
+	/* Attach tags to the devices */
+	/*
+	   if(SCpnt->device->tagged_supported) {
+	   if(SCpnt->tag == HEAD_OF_QUEUE_TAG)
+	   scsi_flags |= 0x01000000;
+	   else if(SCpnt->tag == ORDERED_QUEUE_TAG)
+	   scsi_flags |= 0x01800000;
+	   }
+	 */
+
+	/* Direction, disconnect ok, tag, CDBLen */
+	writel(scsi_flags | 0x20200000 | SCpnt->cmd_len, &msg->body[0]);
+
+	mptr = &msg->body[1];
+
+	/* Write SCSI command into the message - always 16 byte block */
+	memcpy_toio(mptr, SCpnt->cmnd, 16);
+	mptr += 4;
+	lenptr = mptr++;	/* Remember me - fill in when we know */
+
+	reqlen = 12;		// SINGLE SGE
+
+	/* Now fill in the SGList and command */
+	if (SCpnt->use_sg) {
+		struct scatterlist *sg;
+		int sg_count;
+
+		sg = SCpnt->request_buffer;
+		len = 0;
+
+		sg_count = dma_map_sg(dev, sg, SCpnt->use_sg,
+				      SCpnt->sc_data_direction);
+
+		if (unlikely(sg_count <= 0))
+			return -ENOMEM;
+
+		for (i = SCpnt->use_sg; i > 0; i--) {
+			if (i == 1)
+				sg_flags |= 0xC0000000;
+			writel(sg_flags | sg_dma_len(sg), mptr++);
+			writel(sg_dma_address(sg), mptr++);
+			len += sg_dma_len(sg);
+			sg++;
+		}
+
+		reqlen = mptr - &msg->u.head[0];
+		writel(len, lenptr);
+	} else {
+		len = SCpnt->request_bufflen;
+
+		writel(len, lenptr);
+
+		if (len > 0) {
+			dma_addr_t dma_addr;
+
+			dma_addr = dma_map_single(dev, SCpnt->request_buffer,
+						  SCpnt->request_bufflen,
+						  SCpnt->sc_data_direction);
+			if (!dma_addr)
+				return -ENOMEM;
+
+			SCpnt->SCp.ptr = (void *)(unsigned long)dma_addr;
+			sg_flags |= 0xC0000000;
+			writel(sg_flags | SCpnt->request_bufflen, mptr++);
+			writel(dma_addr, mptr++);
+		} else
+			reqlen = 9;
+	}
+
+	/* Stick the headers on */
+	writel(reqlen << 16 | SGL_OFFSET_10, &msg->u.head[0]);
+
+	/* Queue the message */
+	i2o_msg_post(c, m);
+
+	osm_debug("Issued %ld\n", SCpnt->serial_number);
+
+	return 0;
+};
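+
+/*
+ * Illustrative sketch (not part of the driver): how the loop above lays out
+ * the SG elements for a two-element scatterlist on a read, where sg_flags is
+ * 0x10000000.  Lengths and addresses below are made-up example values:
+ *
+ *	mptr[0] = 0x10000000 | 0x1000;		   first element, 4096 bytes
+ *	mptr[1] = DMA address of element 0
+ *	mptr[2] = 0xC0000000 | 0x10000000 | 0x200; last element, 512 bytes
+ *	mptr[3] = DMA address of element 1
+ *
+ * The 0xC0000000 bits mark the final element so the IOP knows where the SG
+ * list ends, and lenptr receives the summed byte count (0x1200 here).
+ */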
+
+/**
+ *	i2o_scsi_abort - abort a running command
+ *	@SCpnt: command to abort
+ *
+ *	Ask the I2O controller to abort a command. This is an asynchronous
+ *	process and our callback handler will see the command complete with an
+ *	aborted message if it succeeds.
+ *
+ *	Returns 0 if the command is successfully aborted or negative error code
+ *	on failure.
+ */
+static int i2o_scsi_abort(struct scsi_cmnd *SCpnt)
+{
+	struct i2o_device *i2o_dev;
+	struct i2o_controller *c;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	int tid;
+	int status = FAILED;
+
+	osm_warn("Aborting command block.\n");
+
+	i2o_dev = SCpnt->device->hostdata;
+	c = i2o_dev->iop;
+	tid = i2o_dev->lct_data.tid;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_SCSI_ABORT << 24 | HOST_TID << 12 | tid,
+	       &msg->u.head[1]);
+	writel(i2o_cntxt_list_get_ptr(c, SCpnt), &msg->body[0]);
+
+	if (i2o_msg_post_wait(c, m, I2O_TIMEOUT_SCSI_SCB_ABORT))
+		status = SUCCESS;
+
+	return status;
+}
+
+/**
+ *	i2o_scsi_bios_param	-	Invent disk geometry
+ *	@sdev: scsi device
+ *	@dev: block layer device
+ *	@capacity: size in sectors
+ *	@ip: geometry array
+ *
+ *	This is anyone's guess quite frankly. We use the same rules everyone
+ *	else appears to and hope. It seems to work.
+ */
+
+static int i2o_scsi_bios_param(struct scsi_device *sdev,
+			       struct block_device *dev, sector_t capacity,
+			       int *ip)
+{
+	int size;
+
+	size = capacity;
+	ip[0] = 64;		/* heads                        */
+	ip[1] = 32;		/* sectors                      */
+	if ((ip[2] = size >> 11) > 1024) {	/* cylinders, test for big disk */
+		ip[0] = 255;	/* heads                        */
+		ip[1] = 63;	/* sectors                      */
+		ip[2] = size / (255 * 63);	/* cylinders                    */
+	}
+	return 0;
+}
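+
+/*
+ * Worked example (illustrative only): for an 8 GiB disk the capacity is
+ * 16777216 sectors, so size >> 11 gives 8192 cylinders with the 64/32
+ * layout.  That exceeds 1024, so the large-disk branch is taken and the
+ * reported geometry becomes 255 heads, 63 sectors and
+ * 16777216 / (255 * 63) = 1044 cylinders.
+ */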
+
+static struct scsi_host_template i2o_scsi_host_template = {
+	.proc_name = OSM_NAME,
+	.name = OSM_DESCRIPTION,
+	.info = i2o_scsi_info,
+	.queuecommand = i2o_scsi_queuecommand,
+	.eh_abort_handler = i2o_scsi_abort,
+	.bios_param = i2o_scsi_bios_param,
+	.can_queue = I2O_SCSI_CAN_QUEUE,
+	.sg_tablesize = 8,
+	.cmd_per_lun = 6,
+	.use_clustering = ENABLE_CLUSTERING,
+};
+
+/**
+ *	i2o_scsi_init - SCSI OSM initialization function
+ *
+ *	Register SCSI OSM into I2O core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_scsi_init(void)
+{
+	int rc;
+
+	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+	/* Register SCSI OSM into I2O core */
+	rc = i2o_driver_register(&i2o_scsi_driver);
+	if (rc) {
+		osm_err("Could not register SCSI driver\n");
+		return rc;
+	}
+
+	return 0;
+};
+
+/**
+ *	i2o_scsi_exit - SCSI OSM exit function
+ *
+ *	Unregisters SCSI OSM from I2O core.
+ */
+static void __exit i2o_scsi_exit(void)
+{
+	/* Unregister I2O SCSI OSM from I2O core */
+	i2o_driver_unregister(&i2o_scsi_driver);
+};
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+module_init(i2o_scsi_init);
+module_exit(i2o_scsi_exit);
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
new file mode 100644
index 0000000..50c8ced
--- /dev/null
+++ b/drivers/message/i2o/iop.c
@@ -0,0 +1,1327 @@
+/*
+ *	Functions to handle I2O controllers and I2O message handling
+ *
+ *	Copyright (C) 1999-2002	Red Hat Software
+ *
+ *	Written by Alan Cox, Building Number Three Ltd
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	A lot of the I2O message side code from this is taken from the
+ *	Red Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ *	Fixes/additions:
+ *		Philipp Rumpf
+ *		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *		Deepak Saxena <deepak@plexity.net>
+ *		Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ *		Alan Cox <alan@redhat.com>:
+ *			Ported to Linux 2.5.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Minor fixes for 2.6.
+ */
+
+#include <linux/module.h>
+#include <linux/i2o.h>
+#include <linux/delay.h>
+
+#define OSM_VERSION	"$Rev$"
+#define OSM_DESCRIPTION	"I2O subsystem"
+
+/* global I2O controller list */
+LIST_HEAD(i2o_controllers);
+
+/*
+ * global I2O System Table. Contains information about all the IOPs in the
+ * system. Used to inform IOPs about each other's existence.
+ */
+static struct i2o_dma i2o_systab;
+
+static int i2o_hrt_get(struct i2o_controller *c);
+
+/* Module internal functions from other sources */
+extern struct i2o_driver i2o_exec_driver;
+extern int i2o_exec_lct_get(struct i2o_controller *);
+extern void i2o_device_remove(struct i2o_device *);
+
+extern int __init i2o_driver_init(void);
+extern void __exit i2o_driver_exit(void);
+extern int __init i2o_exec_init(void);
+extern void __exit i2o_exec_exit(void);
+extern int __init i2o_pci_init(void);
+extern void __exit i2o_pci_exit(void);
+extern int i2o_device_init(void);
+extern void i2o_device_exit(void);
+
+/**
+ *	i2o_msg_nop - Return an unused message back to the IOP
+ *	@c: I2O controller from which the message was created
+ *	@m: message which should be returned
+ *
+ *	If you fetch a message via i2o_msg_get, and can't use it, you must
+ *	return the message with this function. Otherwise the message frame
+ *	is lost.
+ */
+void i2o_msg_nop(struct i2o_controller *c, u32 m)
+{
+	struct i2o_message __iomem *msg = c->in_queue.virt + m;
+
+	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(0, &msg->u.head[2]);
+	writel(0, &msg->u.head[3]);
+	i2o_msg_post(c, m);
+};
+
+/**
+ *	i2o_msg_get_wait - obtain an I2O message from the IOP
+ *	@c: I2O controller
+ *	@msg: pointer to a I2O message pointer
+ *	@wait: how long to wait until timeout
+ *
+ *	This function waits up to wait seconds for a message slot to be
+ *	available.
+ *
+ *	On success the message is returned and the pointer to the message is
+ *	set in msg. The returned message is the physical page frame offset
+ *	address from the read port (see the I2O spec). If no message is
+ *	available, I2O_QUEUE_EMPTY is returned and msg is left untouched.
+ */
+u32 i2o_msg_get_wait(struct i2o_controller *c, struct i2o_message __iomem **msg,
+		     int wait)
+{
+	unsigned long timeout = jiffies + wait * HZ;
+	u32 m;
+
+	while ((m = i2o_msg_get(c, msg)) == I2O_QUEUE_EMPTY) {
+		if (time_after(jiffies, timeout)) {
+			pr_debug("%s: Timeout waiting for message frame.\n",
+				 c->name);
+			return I2O_QUEUE_EMPTY;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+	}
+
+	return m;
+};
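+
+/*
+ * Typical usage (sketch only, not a driver function): an OSM fetches a
+ * frame, fills header and body with writel() and posts it - here the same
+ * NOP frame that i2o_msg_nop() above builds:
+ *
+ *	struct i2o_message __iomem *msg;
+ *	u32 m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+ *
+ *	if (m == I2O_QUEUE_EMPTY)
+ *		return -ETIMEDOUT;
+ *	writel(THREE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+ *	writel(I2O_CMD_UTIL_NOP << 24 | HOST_TID << 12 | ADAPTER_TID,
+ *	       &msg->u.head[1]);
+ *	i2o_msg_post(c, m);
+ */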
+
+#if BITS_PER_LONG == 64
+/**
+ *      i2o_cntxt_list_add - Append a pointer to context list and return an id
+ *	@c: controller to which the context list belong
+ *	@ptr: pointer to add to the context list
+ *
+ *	Because the context field in I2O is only 32 bits wide, on 64-bit
+ *	systems a pointer is too large to fit in the context field. The
+ *	i2o_cntxt_list functions therefore map pointers to context ids.
+ *
+ *	Returns context id > 0 on success or 0 on failure.
+ */
+u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
+{
+	struct i2o_context_list_element *entry;
+	unsigned long flags;
+
+	if (!ptr)
+		printk(KERN_ERR "%s: couldn't add NULL pointer to context list!"
+		       "\n", c->name);
+
+	entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
+	if (!entry) {
+		printk(KERN_ERR "%s: Could not allocate memory for context "
+		       "list element\n", c->name);
+		return 0;
+	}
+
+	entry->ptr = ptr;
+	entry->timestamp = jiffies;
+	INIT_LIST_HEAD(&entry->list);
+
+	spin_lock_irqsave(&c->context_list_lock, flags);
+
+	if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
+		atomic_inc(&c->context_list_counter);
+
+	entry->context = atomic_read(&c->context_list_counter);
+
+	list_add(&entry->list, &c->context_list);
+
+	spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+	pr_debug("%s: Add context to list %p -> %d\n", c->name, ptr, context);
+
+	return entry->context;
+};
+
+/**
+ *      i2o_cntxt_list_remove - Remove a pointer from the context list
+ *	@c: controller to which the context list belong
+ *	@ptr: pointer which should be removed from the context list
+ *
+ *	Removes a previously added pointer from the context list and returns
+ *	the matching context id.
+ *
+ *	Returns context id on success or 0 on failure.
+ */
+u32 i2o_cntxt_list_remove(struct i2o_controller * c, void *ptr)
+{
+	struct i2o_context_list_element *entry;
+	u32 context = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->context_list_lock, flags);
+	list_for_each_entry(entry, &c->context_list, list)
+	    if (entry->ptr == ptr) {
+		list_del(&entry->list);
+		context = entry->context;
+		kfree(entry);
+		break;
+	}
+	spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+	if (!context)
+		printk(KERN_WARNING "%s: Could not remove nonexistent ptr "
+		       "%p\n", c->name, ptr);
+
+	pr_debug("%s: remove ptr from context list %d -> %p\n", c->name,
+		 context, ptr);
+
+	return context;
+};
+
+/**
+ *      i2o_cntxt_list_get - Get a pointer from the context list and remove it
+ *	@c: controller to which the context list belong
+ *	@context: context id to which the pointer belong
+ *
+ *	Returns pointer to the matching context id on success or NULL on
+ *	failure.
+ */
+void *i2o_cntxt_list_get(struct i2o_controller *c, u32 context)
+{
+	struct i2o_context_list_element *entry;
+	unsigned long flags;
+	void *ptr = NULL;
+
+	spin_lock_irqsave(&c->context_list_lock, flags);
+	list_for_each_entry(entry, &c->context_list, list)
+	    if (entry->context == context) {
+		list_del(&entry->list);
+		ptr = entry->ptr;
+		kfree(entry);
+		break;
+	}
+	spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+	if (!ptr)
+		printk(KERN_WARNING "%s: context id %d not found\n", c->name,
+		       context);
+
+	pr_debug("%s: get ptr from context list %d -> %p\n", c->name, context,
+		 ptr);
+
+	return ptr;
+};
+
+/**
+ *      i2o_cntxt_list_get_ptr - Get a context id from the context list
+ *	@c: controller to which the context list belong
+ *	@ptr: pointer to which the context id should be fetched
+ *
+ *	Returns the context id which matches the pointer on success or 0 on
+ *	failure.
+ */
+u32 i2o_cntxt_list_get_ptr(struct i2o_controller * c, void *ptr)
+{
+	struct i2o_context_list_element *entry;
+	u32 context = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&c->context_list_lock, flags);
+	list_for_each_entry(entry, &c->context_list, list)
+	    if (entry->ptr == ptr) {
+		context = entry->context;
+		break;
+	}
+	spin_unlock_irqrestore(&c->context_list_lock, flags);
+
+	if (!context)
+		printk(KERN_WARNING "%s: Could not find nonexistent ptr "
+		       "%p\n", c->name, ptr);
+
+	pr_debug("%s: get context id from context list %p -> %d\n", c->name,
+		 ptr, context);
+
+	return context;
+};
+#endif
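+
+/*
+ * Round-trip sketch (illustrative only): on 64-bit a sender stores its
+ * pointer and puts the returned 32-bit id into the transaction context,
+ * and the reply handler later recovers the pointer from that id.  my_ptr
+ * and reply_tcntxt are placeholders:
+ *
+ *	u32 context = i2o_cntxt_list_add(c, my_ptr);
+ *	writel(context, &msg->u.s.tcntxt);
+ *	...
+ *	my_ptr = i2o_cntxt_list_get(c, reply_tcntxt);
+ *
+ * On 32-bit builds the pointer itself is small enough to be used directly
+ * as the transaction context, so these helpers are not compiled in.
+ */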
+
+/**
+ *	i2o_find_iop - Find an I2O controller by id
+ *	@unit: unit number of the I2O controller to search for
+ *
+ *	Lookup the I2O controller on the controller list.
+ *
+ *	Returns pointer to the I2O controller on success or NULL if not found.
+ */
+struct i2o_controller *i2o_find_iop(int unit)
+{
+	struct i2o_controller *c;
+
+	list_for_each_entry(c, &i2o_controllers, list) {
+		if (c->unit == unit)
+			return c;
+	}
+
+	return NULL;
+};
+
+/**
+ *	i2o_iop_find_device - Find an I2O device on an I2O controller
+ *	@c: I2O controller where the I2O device hangs on
+ *	@tid: TID of the I2O device to search for
+ *
+ *	Searches the devices of the I2O controller for a device with TID tid and
+ *	returns it.
+ *
+ *	Returns a pointer to the I2O device if found, otherwise NULL.
+ */
+struct i2o_device *i2o_iop_find_device(struct i2o_controller *c, u16 tid)
+{
+	struct i2o_device *dev;
+
+	list_for_each_entry(dev, &c->devices, list)
+	    if (dev->lct_data.tid == tid)
+		return dev;
+
+	return NULL;
+};
+
+/**
+ *	i2o_iop_quiesce - quiesce controller
+ *	@c: controller
+ *
+ *	Quiesce an IOP. Causes IOP to make external operation quiescent
+ *	(i2o 'READY' state). Internal operation of the IOP continues normally.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_quiesce(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	i2o_status_block *sb = c->status_block.virt;
+	int rc;
+
+	i2o_status_get(c);
+
+	/* SysQuiesce discarded if IOP not in READY or OPERATIONAL state */
+	if ((sb->iop_state != ADAPTER_STATE_READY) &&
+	    (sb->iop_state != ADAPTER_STATE_OPERATIONAL))
+		return 0;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_SYS_QUIESCE << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+
+	/* Long timeout needed for quiesce if lots of devices */
+	if ((rc = i2o_msg_post_wait(c, m, 240)))
+		printk(KERN_INFO "%s: Unable to quiesce (status=%#x).\n",
+		       c->name, -rc);
+	else
+		pr_debug("%s: Quiesced.\n", c->name);
+
+	i2o_status_get(c);	// Entered READY state
+
+	return rc;
+};
+
+/**
+ *	i2o_iop_enable - move controller from ready to OPERATIONAL
+ *	@c: I2O controller
+ *
+ *	Enable IOP. This allows the IOP to resume external operations and
+ *	reverses the effect of a quiesce. Returns zero or an error code if
+ *	an error occurs.
+ */
+static int i2o_iop_enable(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	i2o_status_block *sb = c->status_block.virt;
+	int rc;
+
+	i2o_status_get(c);
+
+	/* Enable only allowed on READY state */
+	if (sb->iop_state != ADAPTER_STATE_READY)
+		return -EINVAL;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_SYS_ENABLE << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+
+	/* How long of a timeout do we need? */
+	if ((rc = i2o_msg_post_wait(c, m, 240)))
+		printk(KERN_ERR "%s: Could not enable (status=%#x).\n",
+		       c->name, -rc);
+	else
+		pr_debug("%s: Enabled.\n", c->name);
+
+	i2o_status_get(c);	// entered OPERATIONAL state
+
+	return rc;
+};
+
+/**
+ *	i2o_iop_quiesce_all - Quiesce all I2O controllers on the system
+ *
+ *	Quiesce all I2O controllers which are connected to the system.
+ */
+static inline void i2o_iop_quiesce_all(void)
+{
+	struct i2o_controller *c, *tmp;
+
+	list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
+		if (!c->no_quiesce)
+			i2o_iop_quiesce(c);
+	}
+};
+
+/**
+ *	i2o_iop_enable_all - Enables all controllers on the system
+ *
+ *	Enables all I2O controllers which are connected to the system.
+ */
+static inline void i2o_iop_enable_all(void)
+{
+	struct i2o_controller *c, *tmp;
+
+	list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
+	    i2o_iop_enable(c);
+};
+
+/**
+ *	i2o_iop_clear - Bring I2O controller into HOLD state
+ *	@c: controller
+ *
+ *	Clear an IOP to HOLD state, ie. terminate external operations, clear all
+ *	input queues and prepare for a system restart. IOP's internal operation
+ *	continues normally and the outbound queue is alive. The IOP is not
+ *	expected to rebuild its LCT.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_clear(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	int rc;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	/* Quiesce all IOPs first */
+	i2o_iop_quiesce_all();
+
+	writel(FOUR_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_ADAPTER_CLEAR << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+
+	if ((rc = i2o_msg_post_wait(c, m, 30)))
+		printk(KERN_INFO "%s: Unable to clear (status=%#x).\n",
+		       c->name, -rc);
+	else
+		pr_debug("%s: Cleared.\n", c->name);
+
+	/* Enable all IOPs */
+	i2o_iop_enable_all();
+
+	i2o_status_get(c);
+
+	return rc;
+}
+
+/**
+ *	i2o_iop_reset - reset an I2O controller
+ *	@c: controller to reset
+ *
+ *	Reset the IOP into INIT state and wait until IOP gets into RESET state.
+ *	Terminate all external operations, clear IOP's inbound and outbound
+ *	queues, terminate all DDMs, and reload the IOP's operating environment
+ *	and all local DDMs. The IOP rebuilds its LCT.
+ */
+static int i2o_iop_reset(struct i2o_controller *c)
+{
+	u8 *status = c->status.virt;
+	struct i2o_message __iomem *msg;
+	u32 m;
+	unsigned long timeout;
+	i2o_status_block *sb = c->status_block.virt;
+	int rc = 0;
+
+	pr_debug("%s: Resetting controller\n", c->name);
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	memset(status, 0, 8);
+
+	/* Quiesce all IOPs first */
+	i2o_iop_quiesce_all();
+
+	writel(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_ADAPTER_RESET << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+	writel(0, &msg->u.s.tcntxt);	//FIXME: use reasonable transaction context
+	writel(0, &msg->body[0]);
+	writel(0, &msg->body[1]);
+	writel(i2o_ptr_low((void *)c->status.phys), &msg->body[2]);
+	writel(i2o_ptr_high((void *)c->status.phys), &msg->body[3]);
+
+	i2o_msg_post(c, m);
+
+	/* Wait for a reply */
+	timeout = jiffies + I2O_TIMEOUT_RESET * HZ;
+	while (!*status) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: IOP reset timeout.\n", c->name);
+			rc = -ETIMEDOUT;
+			goto exit;
+		}
+
+		/* Promise bug */
+		if (status[1] || status[4]) {
+			*status = 0;
+			break;
+		}
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+
+		rmb();
+	}
+
+	if (*status == I2O_CMD_IN_PROGRESS) {
+		/*
+		 * Once the reset is sent, the IOP goes into the INIT state
+		 * which is indeterminate.  We need to wait until the IOP
+		 * has rebooted before we can let the system talk to
+		 * it. We read the inbound Free_List until a message is
+		 * available. If we can't read one in the given amount of
+		 * time, we assume the IOP could not reboot properly.
+		 */
+		pr_debug("%s: Reset in progress, waiting for reboot...\n",
+			 c->name);
+
+		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
+		while (m == I2O_QUEUE_EMPTY) {
+			if (time_after(jiffies, timeout)) {
+				printk(KERN_ERR "%s: IOP reset timeout.\n",
+				       c->name);
+				rc = -ETIMEDOUT;
+				goto exit;
+			}
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(1);
+
+			m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_RESET);
+		}
+		i2o_msg_nop(c, m);
+	}
+
+	/* from here all quiesce commands are safe */
+	c->no_quiesce = 0;
+
+	/* If IopReset was rejected or didn't perform reset, try IopClear */
+	i2o_status_get(c);
+	if (*status == I2O_CMD_REJECTED || sb->iop_state != ADAPTER_STATE_RESET) {
+		printk(KERN_WARNING "%s: Reset rejected, trying to clear\n",
+		       c->name);
+		i2o_iop_clear(c);
+	} else
+		pr_debug("%s: Reset completed.\n", c->name);
+
+      exit:
+	/* Enable all IOPs */
+	i2o_iop_enable_all();
+
+	return rc;
+};
+
+/**
+ *	i2o_iop_init_outbound_queue - setup the outbound message queue
+ *	@c: I2O controller
+ *
+ *	Clear and (re)initialize IOP's outbound queue and post the message
+ *	frames to the IOP.
+ *
+ *	Returns 0 on success or a negative errno code on failure.
+ */
+static int i2o_iop_init_outbound_queue(struct i2o_controller *c)
+{
+	u8 *status = c->status.virt;
+	u32 m;
+	struct i2o_message __iomem *msg;
+	ulong timeout;
+	int i;
+
+	pr_debug("%s: Initializing Outbound Queue...\n", c->name);
+
+	memset(status, 0, 4);
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(EIGHT_WORD_MSG_SIZE | TRL_OFFSET_6, &msg->u.head[0]);
+	writel(I2O_CMD_OUTBOUND_INIT << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+	writel(0x0106, &msg->u.s.tcntxt);	/* FIXME: why 0x0106, maybe in
+						   Spec? */
+	writel(PAGE_SIZE, &msg->body[0]);
+	writel(MSG_FRAME_SIZE << 16 | 0x80, &msg->body[1]);	/* Outbound msg frame
+								   size in words and Initcode */
+	writel(0xd0000004, &msg->body[2]);
+	writel(i2o_ptr_low((void *)c->status.phys), &msg->body[3]);
+	writel(i2o_ptr_high((void *)c->status.phys), &msg->body[4]);
+
+	i2o_msg_post(c, m);
+
+	timeout = jiffies + I2O_TIMEOUT_INIT_OUTBOUND_QUEUE * HZ;
+	while (*status <= I2O_CMD_IN_PROGRESS) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_WARNING "%s: Timeout Initializing\n",
+			       c->name);
+			return -ETIMEDOUT;
+		}
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+
+		rmb();
+	}
+
+	m = c->out_queue.phys;
+
+	/* Post frames */
+	for (i = 0; i < NMBR_MSG_FRAMES; i++) {
+		i2o_flush_reply(c, m);
+		udelay(1);	/* Promise */
+		m += MSG_FRAME_SIZE * 4;
+	}
+
+	return 0;
+}
+
+/**
+ *	i2o_iop_send_nop - send a core NOP message
+ *	@c: controller
+ *
+ *	Send a no-operation message whose reply also causes no action.
+ *	Needed for bringing up Promise controllers.
+ */
+static int i2o_iop_send_nop(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m = i2o_msg_get_wait(c, &msg, HZ);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+	i2o_msg_nop(c, m);
+	return 0;
+}
+
+/**
+ *	i2o_iop_activate - Bring controller up to HOLD
+ *	@c: controller
+ *
+ *	This function brings an I2O controller into HOLD state. The adapter
+ *	is reset if necessary and then the queues and resource table are read.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_activate(struct i2o_controller *c)
+{
+	struct pci_dev *i960 = NULL;
+	i2o_status_block *sb = c->status_block.virt;
+	int rc;
+
+	if (c->promise) {
+		/* Beat up the hardware first of all */
+		i960 =
+		    pci_find_slot(c->pdev->bus->number,
+				  PCI_DEVFN(PCI_SLOT(c->pdev->devfn), 0));
+		if (i960)
+			pci_write_config_word(i960, 0x42, 0);
+
+		/* Follow this sequence precisely or the controller
+		   ceases to perform useful functions until reboot */
+		if ((rc = i2o_iop_send_nop(c)))
+			return rc;
+
+		if ((rc = i2o_iop_reset(c)))
+			return rc;
+	}
+
+	/* In INIT state, Wait Inbound Q to initialize (in i2o_status_get) */
+	/* In READY state, Get status */
+
+	rc = i2o_status_get(c);
+	if (rc) {
+		printk(KERN_INFO "%s: Unable to obtain status, "
+		       "attempting a reset.\n", c->name);
+		if (i2o_iop_reset(c))
+			return rc;
+	}
+
+	if (sb->i2o_version > I2OVER15) {
+		printk(KERN_ERR "%s: Not running version 1.5 of the I2O "
+		       "Specification.\n", c->name);
+		return -ENODEV;
+	}
+
+	switch (sb->iop_state) {
+	case ADAPTER_STATE_FAULTED:
+		printk(KERN_CRIT "%s: hardware fault\n", c->name);
+		return -ENODEV;
+
+	case ADAPTER_STATE_READY:
+	case ADAPTER_STATE_OPERATIONAL:
+	case ADAPTER_STATE_HOLD:
+	case ADAPTER_STATE_FAILED:
+		pr_debug("%s: already running, trying to reset...\n", c->name);
+		if (i2o_iop_reset(c))
+			return -ENODEV;
+	}
+
+	rc = i2o_iop_init_outbound_queue(c);
+	if (rc)
+		return rc;
+
+	if (c->promise) {
+		if ((rc = i2o_iop_send_nop(c)))
+			return rc;
+
+		if ((rc = i2o_status_get(c)))
+			return rc;
+
+		if (i960)
+			pci_write_config_word(i960, 0x42, 0x3FF);
+	}
+
+	/* In HOLD state */
+
+	rc = i2o_hrt_get(c);
+
+	return rc;
+};
+
+/**
+ *	i2o_iop_systab_set - Set the I2O System Table of the specified IOP
+ *	@c: I2O controller to which the system table should be sent
+ *
+ *	Before the systab can be set, i2o_systab_build() must be called.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_systab_set(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	i2o_status_block *sb = c->status_block.virt;
+	struct device *dev = &c->pdev->dev;
+	struct resource *root;
+	int rc;
+
+	if (sb->current_mem_size < sb->desired_mem_size) {
+		struct resource *res = &c->mem_resource;
+		res->name = c->pdev->bus->name;
+		res->flags = IORESOURCE_MEM;
+		res->start = 0;
+		res->end = 0;
+		printk(KERN_INFO "%s: requires private memory resources.\n",
+		       c->name);
+		root = pci_find_parent_resource(c->pdev, res);
+		if (root == NULL)
+			printk(KERN_WARNING "%s: Can't find parent resource!\n",
+			       c->name);
+		/* Alignment unspecified, so use 1 MB and play safe */
+		if (root && allocate_resource(root, res, sb->desired_mem_size,
+					      sb->desired_mem_size,
+					      sb->desired_mem_size, 1 << 20,
+					      NULL, NULL) >= 0) {
+			c->mem_alloc = 1;
+			sb->current_mem_size = 1 + res->end - res->start;
+			sb->current_mem_base = res->start;
+			printk(KERN_INFO "%s: allocated %ld bytes of PCI memory"
+			       " at 0x%08lX.\n", c->name,
+			       1 + res->end - res->start, res->start);
+		}
+	}
+
+	if (sb->current_io_size < sb->desired_io_size) {
+		struct resource *res = &c->io_resource;
+		res->name = c->pdev->bus->name;
+		res->flags = IORESOURCE_IO;
+		res->start = 0;
+		res->end = 0;
+		printk(KERN_INFO "%s: requires private memory resources.\n",
+		       c->name);
+		root = pci_find_parent_resource(c->pdev, res);
+		if (root == NULL)
+			printk(KERN_WARNING "%s: Can't find parent resource!\n",
+			       c->name);
+		/* Alignment unspecified, so use 1 MB and play safe */
+		if (root && allocate_resource(root, res, sb->desired_io_size,
+					      sb->desired_io_size,
+					      sb->desired_io_size, 1 << 20,
+					      NULL, NULL) >= 0) {
+			c->io_alloc = 1;
+			sb->current_io_size = 1 + res->end - res->start;
+			sb->current_io_base = res->start;
+			printk(KERN_INFO "%s: allocated %ld bytes of PCI I/O at"
+			       " 0x%08lX.\n", c->name,
+			       1 + res->end - res->start, res->start);
+		}
+	}
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	i2o_systab.phys = dma_map_single(dev, i2o_systab.virt, i2o_systab.len,
+					 PCI_DMA_TODEVICE);
+	if (!i2o_systab.phys) {
+		i2o_msg_nop(c, m);
+		return -ENOMEM;
+	}
+
+	writel(I2O_MESSAGE_SIZE(12) | SGL_OFFSET_6, &msg->u.head[0]);
+	writel(I2O_CMD_SYS_TAB_SET << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+
+	/*
+	 * Provide three SGL-elements:
+	 * System table (SysTab), Private memory space declaration and
+	 * Private i/o space declaration
+	 *
+	 * FIXME: is this still true?
+	 * Nasty one here. We can't use dma_alloc_coherent to send the
+	 * same table to everyone. We have to go remap it for them all
+	 */
+
+	writel(c->unit + 2, &msg->body[0]);
+	writel(0, &msg->body[1]);
+	writel(0x54000000 | i2o_systab.len, &msg->body[2]);
+	writel(i2o_systab.phys, &msg->body[3]);
+	writel(0x54000000 | sb->current_mem_size, &msg->body[4]);
+	writel(sb->current_mem_base, &msg->body[5]);
+	writel(0xd4000000 | sb->current_io_size, &msg->body[6]);
+	writel(sb->current_io_base, &msg->body[7]);
+
+	rc = i2o_msg_post_wait(c, m, 120);
+
+	dma_unmap_single(dev, i2o_systab.phys, i2o_systab.len,
+			 PCI_DMA_TODEVICE);
+
+	if (rc < 0)
+		printk(KERN_ERR "%s: Unable to set SysTab (status=%#x).\n",
+		       c->name, -rc);
+	else
+		pr_debug("%s: SysTab set.\n", c->name);
+
+	i2o_status_get(c);	// Entered READY state
+
+	return rc;
+}
+
+/**
+ *	i2o_iop_online - Bring a controller online into OPERATIONAL state.
+ *	@c: I2O controller
+ *
+ *	Send the system table and enable the I2O controller.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_iop_online(struct i2o_controller *c)
+{
+	int rc;
+
+	rc = i2o_iop_systab_set(c);
+	if (rc)
+		return rc;
+
+	/* In READY state */
+	pr_debug("%s: Attempting to enable...\n", c->name);
+	rc = i2o_iop_enable(c);
+	if (rc)
+		return rc;
+
+	return 0;
+};
+
+/**
+ *	i2o_iop_remove - Remove the I2O controller from the I2O core
+ *	@c: I2O controller
+ *
+ *	Remove the I2O controller from the I2O core. If devices are attached to
+ *	the controller remove these also and finally reset the controller.
+ */
+void i2o_iop_remove(struct i2o_controller *c)
+{
+	struct i2o_device *dev, *tmp;
+
+	pr_debug("%s: deleting controller\n", c->name);
+
+	i2o_driver_notify_controller_remove_all(c);
+
+	list_del(&c->list);
+
+	list_for_each_entry_safe(dev, tmp, &c->devices, list)
+	    i2o_device_remove(dev);
+
+	/* Ask the IOP to switch to RESET state */
+	i2o_iop_reset(c);
+}
+
+/**
+ *	i2o_systab_build - Build system table
+ *
+ *	The system table contains information about all the IOPs in the system
+ *	(duh) and is used by the Executives on the IOPs to establish peer2peer
+ *	connections. We're not supporting peer2peer at the moment, but this
+ *	will be needed down the road for things like lan2lan forwarding.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_systab_build(void)
+{
+	struct i2o_controller *c, *tmp;
+	int num_controllers = 0;
+	u32 change_ind = 0;
+	int count = 0;
+	struct i2o_sys_tbl *systab = i2o_systab.virt;
+
+	list_for_each_entry_safe(c, tmp, &i2o_controllers, list)
+	    num_controllers++;
+
+	if (systab) {
+		change_ind = systab->change_ind;
+		kfree(i2o_systab.virt);
+	}
+
+	/* Header + IOPs */
+	i2o_systab.len = sizeof(struct i2o_sys_tbl) + num_controllers *
+	    sizeof(struct i2o_sys_tbl_entry);
+
+	systab = i2o_systab.virt = kmalloc(i2o_systab.len, GFP_KERNEL);
+	if (!systab) {
+		printk(KERN_ERR "i2o: unable to allocate memory for System "
+		       "Table\n");
+		return -ENOMEM;
+	}
+	memset(systab, 0, i2o_systab.len);
+
+	systab->version = I2OVERSION;
+	systab->change_ind = change_ind + 1;
+
+	list_for_each_entry_safe(c, tmp, &i2o_controllers, list) {
+		i2o_status_block *sb;
+
+		if (count >= num_controllers) {
+			printk(KERN_ERR "i2o: controller added while building "
+			       "system table\n");
+			break;
+		}
+
+		sb = c->status_block.virt;
+
+		/*
+		 * Get updated IOP state so we have the latest information
+		 *
+		 * We should delete the controller at this point if it
+		 * doesn't respond since if it's not on the system table
+		 * it is technically not part of the I2O subsystem...
+		 */
+		if (unlikely(i2o_status_get(c))) {
+			printk(KERN_ERR "%s: Deleting b/c could not get status"
+			       " while attempting to build system table\n",
+			       c->name);
+			i2o_iop_remove(c);
+			continue;	// try the next one
+		}
+
+		systab->iops[count].org_id = sb->org_id;
+		systab->iops[count].iop_id = c->unit + 2;
+		systab->iops[count].seg_num = 0;
+		systab->iops[count].i2o_version = sb->i2o_version;
+		systab->iops[count].iop_state = sb->iop_state;
+		systab->iops[count].msg_type = sb->msg_type;
+		systab->iops[count].frame_size = sb->inbound_frame_size;
+		systab->iops[count].last_changed = change_ind;
+		systab->iops[count].iop_capabilities = sb->iop_capabilities;
+		systab->iops[count].inbound_low = i2o_ptr_low(c->post_port);
+		systab->iops[count].inbound_high = i2o_ptr_high(c->post_port);
+
+		count++;
+	}
+
+	systab->num_entries = count;
+
+	return 0;
+};
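+
+/*
+ * Note (editorial): IOP ids in the table are c->unit + 2 because units 0
+ * and 1 are reserved for the NULL IOP and the local host (see the comment
+ * in i2o_iop_alloc() below), so the first real controller appears as IOP 2.
+ */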
+
+/**
+ *	i2o_parse_hrt - Parse the hardware resource table.
+ *	@c: I2O controller
+ *
+ *	We don't do anything with it except dumping it (in debug mode).
+ *
+ *	Returns 0.
+ */
+static int i2o_parse_hrt(struct i2o_controller *c)
+{
+	i2o_dump_hrt(c);
+	return 0;
+};
+
+/**
+ *	i2o_status_get - Get the status block from the I2O controller
+ *	@c: I2O controller
+ *
+ *	Issue a status query on the controller. This updates the attached
+ *	status block. The status block could then be accessed through
+ *	c->status_block.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_status_get(struct i2o_controller *c)
+{
+	struct i2o_message __iomem *msg;
+	u32 m;
+	u8 *status_block;
+	unsigned long timeout;
+
+	status_block = (u8 *) c->status_block.virt;
+	memset(status_block, 0, sizeof(i2o_status_block));
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(NINE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_STATUS_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
+	       &msg->u.head[1]);
+	writel(i2o_exec_driver.context, &msg->u.s.icntxt);
+	writel(0, &msg->u.s.tcntxt);	// FIXME: use reasonable transaction context
+	writel(0, &msg->body[0]);
+	writel(0, &msg->body[1]);
+	writel(i2o_ptr_low((void *)c->status_block.phys), &msg->body[2]);
+	writel(i2o_ptr_high((void *)c->status_block.phys), &msg->body[3]);
+	writel(sizeof(i2o_status_block), &msg->body[4]);	/* always 88 bytes */
+
+	i2o_msg_post(c, m);
+
+	/* Wait for a reply */
+	timeout = jiffies + I2O_TIMEOUT_STATUS_GET * HZ;
+	while (status_block[87] != 0xFF) {
+		if (time_after(jiffies, timeout)) {
+			printk(KERN_ERR "%s: Get status timeout.\n", c->name);
+			return -ETIMEDOUT;
+		}
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1);
+
+		rmb();
+	}
+
+#ifdef DEBUG
+	i2o_debug_state(c);
+#endif
+
+	return 0;
+}
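+
+/*
+ * Usage sketch (illustrative only): callers refresh the status block and
+ * then inspect the cached copy, e.g. to check the IOP state before sending
+ * a state-changing request.  do_something() is a placeholder:
+ *
+ *	i2o_status_block *sb = c->status_block.virt;
+ *
+ *	if (!i2o_status_get(c) &&
+ *	    sb->iop_state == ADAPTER_STATE_OPERATIONAL)
+ *		do_something();		// IOP is fully up
+ */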
+
+/**
+ *	i2o_hrt_get - Get the Hardware Resource Table from the I2O controller
+ *	@c: I2O controller from which the HRT should be fetched
+ *
+ *	The HRT contains information about possible hidden devices but is
+ *	mostly useless to us.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_hrt_get(struct i2o_controller *c)
+{
+	int rc;
+	int i;
+	i2o_hrt *hrt = c->hrt.virt;
+	u32 size = sizeof(i2o_hrt);
+	struct device *dev = &c->pdev->dev;
+
+	for (i = 0; i < I2O_HRT_GET_TRIES; i++) {
+		struct i2o_message __iomem *msg;
+		u32 m;
+
+		m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+		if (m == I2O_QUEUE_EMPTY)
+			return -ETIMEDOUT;
+
+		writel(SIX_WORD_MSG_SIZE | SGL_OFFSET_4, &msg->u.head[0]);
+		writel(I2O_CMD_HRT_GET << 24 | HOST_TID << 12 | ADAPTER_TID,
+		       &msg->u.head[1]);
+		writel(0xd0000000 | c->hrt.len, &msg->body[0]);
+		writel(c->hrt.phys, &msg->body[1]);
+
+		rc = i2o_msg_post_wait_mem(c, m, 20, &c->hrt);
+
+		if (rc < 0) {
+			printk(KERN_ERR "%s: Unable to get HRT (status=%#x)\n",
+			       c->name, -rc);
+			return rc;
+		}
+
+		size = hrt->num_entries * hrt->entry_len << 2;
+		if (size > c->hrt.len) {
+			if (i2o_dma_realloc(dev, &c->hrt, size, GFP_KERNEL))
+				return -ENOMEM;
+			else
+				hrt = c->hrt.virt;
+		} else
+			return i2o_parse_hrt(c);
+	}
+
+	printk(KERN_ERR "%s: Unable to get HRT after %d tries, giving up\n",
+	       c->name, I2O_HRT_GET_TRIES);
+
+	return -EBUSY;
+}
+
+/**
+ *	i2o_iop_alloc - Allocate and initialize an i2o_controller struct
+ *
+ *	Allocate the necessary memory for an i2o_controller struct and
+ *	initialize the lists.
+ *
+ *	Returns a pointer to the I2O controller or a negative error code on
+ *	failure.
+ */
+struct i2o_controller *i2o_iop_alloc(void)
+{
+	static int unit = 0;	/* 0 and 1 are NULL IOP and Local Host */
+	struct i2o_controller *c;
+
+	c = kmalloc(sizeof(*c), GFP_KERNEL);
+	if (!c) {
+		printk(KERN_ERR "i2o: Insufficient memory to allocate a I2O "
+		       "controller.\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	memset(c, 0, sizeof(*c));
+
+	INIT_LIST_HEAD(&c->devices);
+	spin_lock_init(&c->lock);
+	init_MUTEX(&c->lct_lock);
+	c->unit = unit++;
+	sprintf(c->name, "iop%d", c->unit);
+
+#if BITS_PER_LONG == 64
+	spin_lock_init(&c->context_list_lock);
+	atomic_set(&c->context_list_counter, 0);
+	INIT_LIST_HEAD(&c->context_list);
+#endif
+
+	return c;
+};
+
+/**
+ *	i2o_iop_free - Free the i2o_controller struct
+ *	@c: I2O controller to free
+ */
+void i2o_iop_free(struct i2o_controller *c)
+{
+	kfree(c);
+};
+
+/**
+ *	i2o_iop_add - Initialize the I2O controller and add it to the I2O core
+ *	@c: controller
+ *
+ *	Initialize the I2O controller and, if no error occurs, add it to the
+ *	I2O core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_iop_add(struct i2o_controller *c)
+{
+	int rc;
+
+	printk(KERN_INFO "%s: Activating I2O controller...\n", c->name);
+	printk(KERN_INFO "%s: This may take a few minutes if there are many "
+	       "devices\n", c->name);
+
+	if ((rc = i2o_iop_activate(c))) {
+		printk(KERN_ERR "%s: could not activate controller\n",
+		       c->name);
+		i2o_iop_reset(c);
+		return rc;
+	}
+
+	pr_debug("%s: building sys table...\n", c->name);
+
+	if ((rc = i2o_systab_build())) {
+		i2o_iop_reset(c);
+		return rc;
+	}
+
+	pr_debug("%s: online controller...\n", c->name);
+
+	if ((rc = i2o_iop_online(c))) {
+		i2o_iop_reset(c);
+		return rc;
+	}
+
+	pr_debug("%s: getting LCT...\n", c->name);
+
+	if ((rc = i2o_exec_lct_get(c))) {
+		i2o_iop_reset(c);
+		return rc;
+	}
+
+	list_add(&c->list, &i2o_controllers);
+
+	i2o_driver_notify_controller_add_all(c);
+
+	printk(KERN_INFO "%s: Controller added\n", c->name);
+
+	return 0;
+};
+
+/**
+ *	i2o_event_register - Turn on/off event notification for an I2O device
+ *	@dev: I2O device which should receive the event registration request
+ *	@drv: driver which wants to get notified
+ *	@tcntxt: transaction context to use with this notifier
+ *	@evt_mask: mask of events
+ *
+ *	Creates and posts an event registration message to the task. No reply
+ *	is waited for, or expected. If you do not want further notifications,
+ *	call i2o_event_register again with an evt_mask of 0.
+ *
+ *	Returns 0 on success or -ETIMEDOUT if no message could be fetched for
+ *	sending the request.
+ */
+int i2o_event_register(struct i2o_device *dev, struct i2o_driver *drv,
+		       int tcntxt, u32 evt_mask)
+{
+	struct i2o_controller *c = dev->iop;
+	struct i2o_message __iomem *msg;
+	u32 m;
+
+	m = i2o_msg_get_wait(c, &msg, I2O_TIMEOUT_MESSAGE_GET);
+	if (m == I2O_QUEUE_EMPTY)
+		return -ETIMEDOUT;
+
+	writel(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0, &msg->u.head[0]);
+	writel(I2O_CMD_UTIL_EVT_REGISTER << 24 | HOST_TID << 12 | dev->lct_data.
+	       tid, &msg->u.head[1]);
+	writel(drv->context, &msg->u.s.icntxt);
+	writel(tcntxt, &msg->u.s.tcntxt);
+	writel(evt_mask, &msg->body[0]);
+
+	i2o_msg_post(c, m);
+
+	return 0;
+};
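+
+/*
+ * Usage sketch (illustrative only): an OSM that wants every event from a
+ * device registers with an all-ones mask and later switches notification
+ * off again.  my_osm_driver and tcntxt are placeholders:
+ *
+ *	i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, 0xffffffff);
+ *	...
+ *	i2o_event_register(i2o_dev, &my_osm_driver, tcntxt, 0);
+ *
+ * The events themselves arrive through the registered driver's reply
+ * handler.
+ */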
+
+/**
+ *	i2o_iop_init - I2O main initialization function
+ *
+ *	Initialize the I2O drivers (OSM) functions, register the Executive OSM,
+ *	initialize the I2O PCI part and finally initialize I2O device stuff.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __init i2o_iop_init(void)
+{
+	int rc = 0;
+
+	printk(KERN_INFO OSM_DESCRIPTION " v" OSM_VERSION "\n");
+
+	rc = i2o_device_init();
+	if (rc)
+		goto exit;
+
+	rc = i2o_driver_init();
+	if (rc)
+		goto device_exit;
+
+	rc = i2o_exec_init();
+	if (rc)
+		goto driver_exit;
+
+	rc = i2o_pci_init();
+	if (rc < 0)
+		goto exec_exit;
+
+	return 0;
+
+      exec_exit:
+	i2o_exec_exit();
+
+      driver_exit:
+	i2o_driver_exit();
+
+      device_exit:
+	i2o_device_exit();
+
+      exit:
+	return rc;
+}
+
+/**
+ *	i2o_iop_exit - I2O main exit function
+ *
+ *	Removes I2O controllers from PCI subsystem and shut down OSMs.
+ */
+static void __exit i2o_iop_exit(void)
+{
+	i2o_pci_exit();
+	i2o_exec_exit();
+	i2o_driver_exit();
+	i2o_device_exit();
+};
+
+module_init(i2o_iop_init);
+module_exit(i2o_iop_exit);
+
+MODULE_AUTHOR("Red Hat Software");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION(OSM_DESCRIPTION);
+MODULE_VERSION(OSM_VERSION);
+
+#if BITS_PER_LONG == 64
+EXPORT_SYMBOL(i2o_cntxt_list_add);
+EXPORT_SYMBOL(i2o_cntxt_list_get);
+EXPORT_SYMBOL(i2o_cntxt_list_remove);
+EXPORT_SYMBOL(i2o_cntxt_list_get_ptr);
+#endif
+EXPORT_SYMBOL(i2o_msg_get_wait);
+EXPORT_SYMBOL(i2o_msg_nop);
+EXPORT_SYMBOL(i2o_find_iop);
+EXPORT_SYMBOL(i2o_iop_find_device);
+EXPORT_SYMBOL(i2o_event_register);
+EXPORT_SYMBOL(i2o_status_get);
+EXPORT_SYMBOL(i2o_controllers);
diff --git a/drivers/message/i2o/pci.c b/drivers/message/i2o/pci.c
new file mode 100644
index 0000000..e772752
--- /dev/null
+++ b/drivers/message/i2o/pci.c
@@ -0,0 +1,528 @@
+/*
+ *	PCI handling of I2O controller
+ *
+ * 	Copyright (C) 1999-2002	Red Hat Software
+ *
+ *	Written by Alan Cox, Building Number Three Ltd
+ *
+ *	This program is free software; you can redistribute it and/or modify it
+ *	under the terms of the GNU General Public License as published by the
+ *	Free Software Foundation; either version 2 of the License, or (at your
+ *	option) any later version.
+ *
+ *	A lot of the I2O message side code from this is taken from the Red
+ *	Creek RCPCI45 adapter driver by Red Creek Communications
+ *
+ *	Fixes/additions:
+ *		Philipp Rumpf
+ *		Juha Sievänen <Juha.Sievanen@cs.Helsinki.FI>
+ *		Auvo Häkkinen <Auvo.Hakkinen@cs.Helsinki.FI>
+ *		Deepak Saxena <deepak@plexity.net>
+ *		Boji T Kannanthanam <boji.t.kannanthanam@intel.com>
+ *		Alan Cox <alan@redhat.com>:
+ *			Ported to Linux 2.5.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Minor fixes for 2.6.
+ *		Markus Lidel <Markus.Lidel@shadowconnect.com>:
+ *			Support for sysfs included.
+ */
+
+#include <linux/pci.h>
+#include <linux/interrupt.h>
+#include <linux/i2o.h>
+
+#ifdef CONFIG_MTRR
+#include <asm/mtrr.h>
+#endif				// CONFIG_MTRR
+
+/* Module internal functions from other sources */
+extern struct i2o_controller *i2o_iop_alloc(void);
+extern void i2o_iop_free(struct i2o_controller *);
+
+extern int i2o_iop_add(struct i2o_controller *);
+extern void i2o_iop_remove(struct i2o_controller *);
+
+extern int i2o_driver_dispatch(struct i2o_controller *, u32,
+			       struct i2o_message *);
+
+/* PCI device id table for all I2O controllers */
+static struct pci_device_id __devinitdata i2o_pci_ids[] = {
+	{PCI_DEVICE_CLASS(PCI_CLASS_INTELLIGENT_I2O << 8, 0xffff00)},
+	{PCI_DEVICE(PCI_VENDOR_ID_DPT, 0xa511)},
+	{0}
+};
+
+/**
+ *	i2o_dma_realloc - Realloc DMA memory
+ *	@dev: struct device pointer to the PCI device of the I2O controller
+ *	@addr: pointer to an i2o_dma struct DMA buffer
+ *	@len: new length of memory
+ *	@gfp_mask: GFP mask
+ *
+ *	If there was something allocated in addr, free it first. If len > 0
+ *	then try to allocate it and write the addresses back to the addr
+ *	structure. If len == 0 set the virtual address to NULL.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+int i2o_dma_realloc(struct device *dev, struct i2o_dma *addr, size_t len,
+		    unsigned int gfp_mask)
+{
+	i2o_dma_free(dev, addr);
+
+	if (len)
+		return i2o_dma_alloc(dev, addr, len, gfp_mask);
+
+	return 0;
+};
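+
+/*
+ * Usage sketch (illustrative only), mirroring the HRT handling in iop.c:
+ * if a table turns out to be larger than the current buffer, grow the
+ * buffer and retry the request:
+ *
+ *	if (size > c->hrt.len) {
+ *		if (i2o_dma_realloc(&c->pdev->dev, &c->hrt, size, GFP_KERNEL))
+ *			return -ENOMEM;
+ *	}
+ */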
+
+/**
+ *	i2o_pci_free - Frees the DMA memory for the I2O controller
+ *	@c: I2O controller to free
+ *
+ *	Remove all allocated DMA memory and unmap memory IO regions. If MTRR
+ *	is enabled, the MTRR regions are removed as well.
+ */
+static void i2o_pci_free(struct i2o_controller *c)
+{
+	struct device *dev;
+
+	dev = &c->pdev->dev;
+
+	i2o_dma_free(dev, &c->out_queue);
+	i2o_dma_free(dev, &c->status_block);
+	if (c->lct)
+		kfree(c->lct);
+	i2o_dma_free(dev, &c->dlct);
+	i2o_dma_free(dev, &c->hrt);
+	i2o_dma_free(dev, &c->status);
+
+#ifdef CONFIG_MTRR
+	if (c->mtrr_reg0 >= 0)
+		mtrr_del(c->mtrr_reg0, 0, 0);
+	if (c->mtrr_reg1 >= 0)
+		mtrr_del(c->mtrr_reg1, 0, 0);
+#endif
+
+	if (c->raptor && c->in_queue.virt)
+		iounmap(c->in_queue.virt);
+
+	if (c->base.virt)
+		iounmap(c->base.virt);
+}
+
+/**
+ *	i2o_pci_alloc - Allocate DMA memory, map IO memory for I2O controller
+ *	@c: I2O controller
+ *
+ *	Allocate DMA memory for a PCI (or in theory AGP) I2O controller. All
+ *	IO mappings are also done here. If MTRR is enabled, the MTRR regions
+ *	are also added here.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __devinit i2o_pci_alloc(struct i2o_controller *c)
+{
+	struct pci_dev *pdev = c->pdev;
+	struct device *dev = &pdev->dev;
+	int i;
+
+	for (i = 0; i < 6; i++) {
+		/* Skip I/O spaces */
+		if (!(pci_resource_flags(pdev, i) & IORESOURCE_IO)) {
+			if (!c->base.phys) {
+				c->base.phys = pci_resource_start(pdev, i);
+				c->base.len = pci_resource_len(pdev, i);
+
+				/*
+				 * If we know what card it is, set the size
+				 * correctly. Code is taken from dpt_i2o.c
+				 */
+				if (pdev->device == 0xa501) {
+					if (pdev->subsystem_device >= 0xc032 &&
+					    pdev->subsystem_device <= 0xc03b) {
+						if (c->base.len > 0x400000)
+							c->base.len = 0x400000;
+					} else {
+						if (c->base.len > 0x100000)
+							c->base.len = 0x100000;
+					}
+				}
+				if (!c->raptor)
+					break;
+			} else {
+				c->in_queue.phys = pci_resource_start(pdev, i);
+				c->in_queue.len = pci_resource_len(pdev, i);
+				break;
+			}
+		}
+	}
+
+	if (i == 6) {
+		printk(KERN_ERR "%s: I2O controller has no memory regions"
+		       " defined.\n", c->name);
+		i2o_pci_free(c);
+		return -EINVAL;
+	}
+
+	/* Map the I2O controller */
+	if (c->raptor) {
+		printk(KERN_INFO "%s: PCI I2O controller\n", c->name);
+		printk(KERN_INFO "     BAR0 at 0x%08lX size=%ld\n",
+		       (unsigned long)c->base.phys, (unsigned long)c->base.len);
+		printk(KERN_INFO "     BAR1 at 0x%08lX size=%ld\n",
+		       (unsigned long)c->in_queue.phys,
+		       (unsigned long)c->in_queue.len);
+	} else
+		printk(KERN_INFO "%s: PCI I2O controller at %08lX size=%ld\n",
+		       c->name, (unsigned long)c->base.phys,
+		       (unsigned long)c->base.len);
+
+	c->base.virt = ioremap(c->base.phys, c->base.len);
+	if (!c->base.virt) {
+		printk(KERN_ERR "%s: Unable to map controller.\n", c->name);
+		return -ENOMEM;
+	}
+
+	if (c->raptor) {
+		c->in_queue.virt = ioremap(c->in_queue.phys, c->in_queue.len);
+		if (!c->in_queue.virt) {
+			printk(KERN_ERR "%s: Unable to map controller.\n",
+			       c->name);
+			i2o_pci_free(c);
+			return -ENOMEM;
+		}
+	} else
+		c->in_queue = c->base;
+
+	c->irq_mask = c->base.virt + 0x34;
+	c->post_port = c->base.virt + 0x40;
+	c->reply_port = c->base.virt + 0x44;
+
+#ifdef CONFIG_MTRR
+	/* Enable Write Combining MTRR for IOP's memory region */
+	c->mtrr_reg0 = mtrr_add(c->in_queue.phys, c->in_queue.len,
+				MTRR_TYPE_WRCOMB, 1);
+	c->mtrr_reg1 = -1;
+
+	if (c->mtrr_reg0 < 0)
+		printk(KERN_WARNING "%s: could not enable write combining "
+		       "MTRR\n", c->name);
+	else
+		printk(KERN_INFO "%s: using write combining MTRR\n", c->name);
+
+	/*
+	 * If it is an INTEL i960 I/O processor then set the first 64K to
+	 * Uncacheable since the region contains the messaging unit which
+	 * shouldn't be cached.
+	 */
+	if ((pdev->vendor == PCI_VENDOR_ID_INTEL ||
+	     pdev->vendor == PCI_VENDOR_ID_DPT) && !c->raptor) {
+		printk(KERN_INFO "%s: MTRR workaround for Intel i960 processor"
+		       "\n", c->name);
+		c->mtrr_reg1 = mtrr_add(c->base.phys, 0x10000,
+					MTRR_TYPE_UNCACHABLE, 1);
+
+		if (c->mtrr_reg1 < 0) {
+			printk(KERN_WARNING "%s: Error in setting "
+			       "MTRR_TYPE_UNCACHABLE\n", c->name);
+			mtrr_del(c->mtrr_reg0, c->in_queue.phys,
+				 c->in_queue.len);
+			c->mtrr_reg0 = -1;
+		}
+	}
+#endif
+
+	if (i2o_dma_alloc(dev, &c->status, 8, GFP_KERNEL)) {
+		i2o_pci_free(c);
+		return -ENOMEM;
+	}
+
+	if (i2o_dma_alloc(dev, &c->hrt, sizeof(i2o_hrt), GFP_KERNEL)) {
+		i2o_pci_free(c);
+		return -ENOMEM;
+	}
+
+	if (i2o_dma_alloc(dev, &c->dlct, 8192, GFP_KERNEL)) {
+		i2o_pci_free(c);
+		return -ENOMEM;
+	}
+
+	if (i2o_dma_alloc(dev, &c->status_block, sizeof(i2o_status_block),
+			  GFP_KERNEL)) {
+		i2o_pci_free(c);
+		return -ENOMEM;
+	}
+
+	if (i2o_dma_alloc(dev, &c->out_queue, MSG_POOL_SIZE, GFP_KERNEL)) {
+		i2o_pci_free(c);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, c);
+
+	return 0;
+}
+
+/**
+ *	i2o_pci_interrupt - Interrupt handler for I2O controller
+ *	@irq: interrupt line
+ *	@dev_id: pointer to the I2O controller
+ *	@r: pointer to registers
+ *
+ *	Handle an interrupt from a PCI based I2O controller. This turns out
+ *	to be rather simple. We keep the controller pointer in the cookie.
+ */
+static irqreturn_t i2o_pci_interrupt(int irq, void *dev_id, struct pt_regs *r)
+{
+	struct i2o_controller *c = dev_id;
+	struct device *dev = &c->pdev->dev;
+	struct i2o_message *m;
+	u32 mv;
+
+	/*
+	 * Old 960 steppings had a bug in the I2O unit that caused
+	 * the queue to appear empty when it wasn't.
+	 */
+	mv = I2O_REPLY_READ32(c);
+	if (mv == I2O_QUEUE_EMPTY) {
+		mv = I2O_REPLY_READ32(c);
+		if (unlikely(mv == I2O_QUEUE_EMPTY)) {
+			return IRQ_NONE;
+		} else
+			pr_debug("%s: 960 bug detected\n", c->name);
+	}
+
+	while (mv != I2O_QUEUE_EMPTY) {
+		/*
+		 * Map the message from the page frame map to kernel virtual.
+		 * Because bus_to_virt is deprecated, we have to calculate the
+		 * location ourselves!
+		 */
+		m = i2o_msg_out_to_virt(c, mv);
+
+		/*
+		 *      Ensure this message is seen coherently but cacheably by
+		 *      the processor
+		 */
+		dma_sync_single_for_cpu(dev, mv, MSG_FRAME_SIZE * 4,
+					PCI_DMA_FROMDEVICE);
+
+		/* dispatch it */
+		if (i2o_driver_dispatch(c, mv, m))
+			/* flush it if result != 0 */
+			i2o_flush_reply(c, mv);
+
+		/*
+		 * That 960 bug again...
+		 */
+		mv = I2O_REPLY_READ32(c);
+		if (mv == I2O_QUEUE_EMPTY)
+			mv = I2O_REPLY_READ32(c);
+	}
+	return IRQ_HANDLED;
+}
+
+/**
+ *	i2o_pci_irq_enable - Allocate interrupt for I2O controller
+ *	@c: i2o_controller for which the interrupt should be installed
+ *
+ *	Allocate an interrupt for the I2O controller, and activate interrupts
+ *	on the I2O controller.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int i2o_pci_irq_enable(struct i2o_controller *c)
+{
+	struct pci_dev *pdev = c->pdev;
+	int rc;
+
+	I2O_IRQ_WRITE32(c, 0xffffffff);
+
+	if (pdev->irq) {
+		rc = request_irq(pdev->irq, i2o_pci_interrupt, SA_SHIRQ,
+				 c->name, c);
+		if (rc < 0) {
+			printk(KERN_ERR "%s: unable to allocate interrupt %d."
+			       "\n", c->name, pdev->irq);
+			return rc;
+		}
+	}
+
+	I2O_IRQ_WRITE32(c, 0x00000000);
+
+	printk(KERN_INFO "%s: Installed at IRQ %d\n", c->name, pdev->irq);
+
+	return 0;
+}
+
+/**
+ *	i2o_pci_irq_disable - Free interrupt for I2O controller
+ *	@c: I2O controller
+ *
+ *	Disable interrupts in I2O controller and then free interrupt.
+ */
+static void i2o_pci_irq_disable(struct i2o_controller *c)
+{
+	I2O_IRQ_WRITE32(c, 0xffffffff);
+
+	if (c->pdev->irq > 0)
+		free_irq(c->pdev->irq, c);
+}
+
+/**
+ *	i2o_pci_probe - Probe the PCI device for an I2O controller
+ *	@dev: PCI device to test
+ *	@id: id which matched with the PCI device id table
+ *
+ *	Probe the PCI device for any device which is a member of the
+ *	Intelligent I/O (I2O) class or an Adaptec Zero Channel Controller. We
+ *	attempt to set up each such device and register it with the core.
+ *
+ *	Returns 0 on success or negative error code on failure.
+ */
+static int __devinit i2o_pci_probe(struct pci_dev *pdev,
+				   const struct pci_device_id *id)
+{
+	struct i2o_controller *c;
+	int rc;
+
+	printk(KERN_INFO "i2o: Checking for PCI I2O controllers...\n");
+
+	if ((pdev->class & 0xff) > 1) {
+		printk(KERN_WARNING "i2o: I2O controller found but does not "
+		       "support I2O 1.5 (skipping).\n");
+		return -ENODEV;
+	}
+
+	if ((rc = pci_enable_device(pdev))) {
+		printk(KERN_WARNING "i2o: I2O controller found but could not be"
+		       " enabled.\n");
+		return rc;
+	}
+
+	printk(KERN_INFO "i2o: I2O controller found on bus %d at %d.\n",
+	       pdev->bus->number, pdev->devfn);
+
+	if (pci_set_dma_mask(pdev, DMA_32BIT_MASK)) {
+		printk(KERN_WARNING "i2o: I2O controller on bus %d at %d: No "
+		       "suitable DMA available!\n", pdev->bus->number,
+		       pdev->devfn);
+		rc = -ENODEV;
+		goto disable;
+	}
+
+	pci_set_master(pdev);
+
+	c = i2o_iop_alloc();
+	if (IS_ERR(c)) {
+		printk(KERN_ERR "i2o: memory for I2O controller could not be "
+		       "allocated\n");
+		rc = PTR_ERR(c);
+		goto disable;
+	}
+
+	c->pdev = pdev;
+	c->device = pdev->dev;
+
+	/* Cards that fall apart if you hit them with large I/O loads... */
+	if (pdev->vendor == PCI_VENDOR_ID_NCR && pdev->device == 0x0630) {
+		c->short_req = 1;
+		printk(KERN_INFO "%s: Symbios FC920 workarounds activated.\n",
+		       c->name);
+	}
+
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_PROMISE) {
+		c->promise = 1;
+		printk(KERN_INFO "%s: Promise workarounds activated.\n",
+		       c->name);
+	}
+
+	/* Cards that go bananas if you quiesce them before you reset them. */
+	if (pdev->vendor == PCI_VENDOR_ID_DPT) {
+		c->no_quiesce = 1;
+		if (pdev->device == 0xa511)
+			c->raptor = 1;
+	}
+
+	if ((rc = i2o_pci_alloc(c))) {
+		printk(KERN_ERR "%s: DMA / IO allocation for I2O controller "
+		       " failed\n", c->name);
+		goto free_controller;
+	}
+
+	if (i2o_pci_irq_enable(c)) {
+		printk(KERN_ERR "%s: unable to enable interrupts for I2O "
+		       "controller\n", c->name);
+		goto free_pci;
+	}
+
+	if ((rc = i2o_iop_add(c)))
+		goto uninstall;
+
+	return 0;
+
+      uninstall:
+	i2o_pci_irq_disable(c);
+
+      free_pci:
+	i2o_pci_free(c);
+
+      free_controller:
+	i2o_iop_free(c);
+
+      disable:
+	pci_disable_device(pdev);
+
+	return rc;
+}
+
+/**
+ *	i2o_pci_remove - Removes an I2O controller from the system
+ *	@pdev: PCI device of the I2O controller which should be removed
+ *
+ *	Reset the I2O controller, disable interrupts and remove all allocated
+ *	resources.
+ */
+static void __devexit i2o_pci_remove(struct pci_dev *pdev)
+{
+	struct i2o_controller *c;
+	c = pci_get_drvdata(pdev);
+
+	i2o_iop_remove(c);
+	i2o_pci_irq_disable(c);
+	i2o_pci_free(c);
+
+	printk(KERN_INFO "%s: Controller removed.\n", c->name);
+
+	i2o_iop_free(c);
+	pci_disable_device(pdev);
+};
+
+/* PCI driver for I2O controller */
+static struct pci_driver i2o_pci_driver = {
+	.name = "I2O controller",
+	.id_table = i2o_pci_ids,
+	.probe = i2o_pci_probe,
+	.remove = __devexit_p(i2o_pci_remove),
+};
+
+/**
+ *	i2o_pci_init - registers I2O PCI driver in PCI subsystem
+ *
+ *	Returns > 0 on success or negative error code on failure.
+ */
+int __init i2o_pci_init(void)
+{
+	return pci_register_driver(&i2o_pci_driver);
+};
+
+/**
+ *	i2o_pci_exit - unregisters I2O PCI driver from PCI subsystem
+ */
+void __exit i2o_pci_exit(void)
+{
+	pci_unregister_driver(&i2o_pci_driver);
+};
+
+EXPORT_SYMBOL(i2o_dma_realloc);
+MODULE_DEVICE_TABLE(pci, i2o_pci_ids);