Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6: (209 commits)
  [SCSI] fix oops during scsi scanning
  [SCSI] libsrp: fix memory leak in srp_ring_free()
  [SCSI] libiscsi, bnx2i: make bound ep check common
  [SCSI] libiscsi: add completion function for drivers that do not need pdu processing
  [SCSI] scsi_dh_rdac: changes for rdac debug logging
  [SCSI] scsi_dh_rdac: changes to collect the rdac debug information during the initialization
  [SCSI] scsi_dh_rdac: move the init code from rdac_activate to rdac_bus_attach
  [SCSI] sg: fix oops in the error path in sg_build_indirect()
  [SCSI] mptsas : Bump version to 3.04.12
  [SCSI] mptsas : FW event thread and scsi mid layer deadlock in SYNCHRONIZE CACHE command
  [SCSI] mptsas : Send DID_NO_CONNECT for pending IOs of removed device
  [SCSI] mptsas : PAE Kernel more than 4 GB kernel panic
  [SCSI] mptsas : NULL pointer on big endian systems causing Expander not to tear off
  [SCSI] mptsas : Sanity check for phyinfo is added
  [SCSI] scsi_dh_rdac: Add support for Sun StorageTek ST2500, ST2510 and ST2530
  [SCSI] pmcraid: PMC-Sierra MaxRAID driver to support 6Gb/s SAS RAID controller
  [SCSI] qla2xxx: Update version number to 8.03.01-k6.
  [SCSI] qla2xxx: Properly delete rports attached to a vport.
  [SCSI] qla2xxx: Correct various NPIV issues.
  [SCSI] qla2xxx: Correct qla2x00_eh_wait_on_command() to wait correctly.
  ...
diff --git a/MAINTAINERS b/MAINTAINERS
index 256d72a..837b598 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -4009,6 +4009,14 @@
 F:	drivers/block/pktcdvd.c
 F:	include/linux/pktcdvd.h
 
+PMC SIERRA MaxRAID DRIVER
+P:	Anil Ravindranath
+M:	anil_ravindranath@pmc-sierra.com
+L:	linux-scsi@vger.kernel.org
+W:	http://www.pmc-sierra.com/
+S:	Supported
+F:	drivers/scsi/pmcraid.*
+
 POSIX CLOCKS and TIMERS
 M:	Thomas Gleixner <tglx@linutronix.de>
 S:	Supported
@@ -4447,7 +4455,7 @@
 F:	include/scsi/sg.h
 
 SCSI SUBSYSTEM
-M:	"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
+M:	"James E.J. Bottomley" <James.Bottomley@suse.de>
 L:	linux-scsi@vger.kernel.org
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6.git
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-rc-fixes-2.6.git
diff --git a/arch/s390/include/asm/timex.h b/arch/s390/include/asm/timex.h
index 24aa1cd..68d9fea 100644
--- a/arch/s390/include/asm/timex.h
+++ b/arch/s390/include/asm/timex.h
@@ -88,6 +88,14 @@
 void init_cpu_timer(void);
 unsigned long long monotonic_clock(void);
 
+void tod_to_timeval(__u64, struct timespec *);
+
+static inline
+void stck_to_timespec(unsigned long long stck, struct timespec *ts)
+{
+	tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
+}
+
 extern u64 sched_clock_base_cc;
 
 /**
diff --git a/arch/s390/kernel/debug.c b/arch/s390/kernel/debug.c
index be8bcea..4c51256 100644
--- a/arch/s390/kernel/debug.c
+++ b/arch/s390/kernel/debug.c
@@ -63,8 +63,6 @@
 } debug_sprintf_entry_t;
 
 
-extern void tod_to_timeval(uint64_t todval, struct timespec *xtime);
-
 /* internal function prototyes */
 
 static int debug_init(void);
@@ -1450,17 +1448,13 @@
 			 int area, debug_entry_t * entry, char *out_buf)
 {
 	struct timespec time_spec;
-	unsigned long long time;
 	char *except_str;
 	unsigned long caller;
 	int rc = 0;
 	unsigned int level;
 
 	level = entry->id.fields.level;
-	time = entry->id.stck;
-	/* adjust todclock to 1970 */
-	time -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
-	tod_to_timeval(time, &time_spec);
+	stck_to_timespec(entry->id.stck, &time_spec);
 
 	if (entry->id.fields.exception)
 		except_str = "*";
diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c
index 54e327e..e3dc28b 100644
--- a/arch/s390/kernel/time.c
+++ b/arch/s390/kernel/time.c
@@ -91,6 +91,7 @@
 	todval -= (sec * 1000000) << 12;
 	xtime->tv_nsec = ((todval * 1000) >> 12);
 }
+EXPORT_SYMBOL(tod_to_timeval);
 
 void clock_comparator_work(void)
 {
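
The three s390 hunks above replace the open-coded TOD-epoch adjustment in debug.c with a shared stck_to_timespec() helper and export tod_to_timeval() for it. A minimal sketch (not part of the patch) of how a caller would now convert a raw store-clock value; get_clock() is the existing s390 primitive, while the trace code above passes entry->id.stck instead:

	struct timespec ts;
	unsigned long long stck = get_clock();	/* raw TOD clock value */

	/* subtracts TOD_UNIX_EPOCH, then tod_to_timeval() splits sec/nsec */
	stck_to_timespec(stck, &ts);
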
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 6f0d90d..32d0b87 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -64,6 +64,7 @@
 	spinlock_t lock;
 
 	const char *hw_handler_name;
+	char *hw_handler_params;
 	unsigned nr_priority_groups;
 	struct list_head priority_groups;
 	unsigned pg_init_required;	/* pg_init needs calling? */
@@ -219,6 +220,7 @@
 	}
 
 	kfree(m->hw_handler_name);
+	kfree(m->hw_handler_params);
 	mempool_destroy(m->mpio_pool);
 	kfree(m);
 }
@@ -615,6 +617,17 @@
 			dm_put_device(ti, p->path.dev);
 			goto bad;
 		}
+
+		if (m->hw_handler_params) {
+			r = scsi_dh_set_params(q, m->hw_handler_params);
+			if (r < 0) {
+				ti->error = "unable to set hardware "
+							"handler parameters";
+				scsi_dh_detach(q);
+				dm_put_device(ti, p->path.dev);
+				goto bad;
+			}
+		}
 	}
 
 	r = ps->type->add_path(ps, &p->path, as->argc, as->argv, &ti->error);
@@ -705,6 +718,7 @@
 static int parse_hw_handler(struct arg_set *as, struct multipath *m)
 {
 	unsigned hw_argc;
+	int ret;
 	struct dm_target *ti = m->ti;
 
 	static struct param _params[] = {
@@ -726,17 +740,33 @@
 	request_module("scsi_dh_%s", m->hw_handler_name);
 	if (scsi_dh_handler_exist(m->hw_handler_name) == 0) {
 		ti->error = "unknown hardware handler type";
-		kfree(m->hw_handler_name);
-		m->hw_handler_name = NULL;
-		return -EINVAL;
+		ret = -EINVAL;
+		goto fail;
 	}
 
-	if (hw_argc > 1)
-		DMWARN("Ignoring user-specified arguments for "
-		       "hardware handler \"%s\"", m->hw_handler_name);
+	if (hw_argc > 1) {
+		char *p;
+		int i, j, len = 4;
+
+		for (i = 0; i <= hw_argc - 2; i++)
+			len += strlen(as->argv[i]) + 1;
+		p = m->hw_handler_params = kzalloc(len, GFP_KERNEL);
+		if (!p) {
+			ti->error = "memory allocation failed";
+			ret = -ENOMEM;
+			goto fail;
+		}
+		j = sprintf(p, "%d", hw_argc - 1);
+		for (i = 0, p+=j+1; i <= hw_argc - 2; i++, p+=j+1)
+			j = sprintf(p, "%s", as->argv[i]);
+	}
 	consume(as, hw_argc - 1);
 
 	return 0;
+fail:
+	kfree(m->hw_handler_name);
+	m->hw_handler_name = NULL;
+	return ret;
 }
 
 static int parse_features(struct arg_set *as, struct multipath *m)
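
For reference, a user-space sketch (not part of the patch) of the packing that parse_hw_handler() now performs before handing the extra table arguments to scsi_dh_set_params(): the buffer carries a decimal argument count followed by each argument, all NUL-separated, so the two arguments "mode" and "1" become "2", "mode", "1" laid out back to back.

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Mirror of the kernel loop above: pack argv as "<argc>\0arg1\0arg2\0..." */
	static char *pack_hw_handler_params(int argc, char **argv)
	{
		int i, j, len = 4;		/* room for the decimal count */
		char *buf, *p;

		for (i = 0; i < argc; i++)
			len += strlen(argv[i]) + 1;
		p = buf = calloc(1, len);
		if (!buf)
			return NULL;
		j = sprintf(p, "%d", argc);
		for (i = 0, p += j + 1; i < argc; i++, p += j + 1)
			j = sprintf(p, "%s", argv[i]);
		return buf;			/* caller must free() */
	}
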
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index 5d0ba4f..76fa2ee 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1015,9 +1015,9 @@
 {
 	SGESimple64_t *pSge = (SGESimple64_t *) pAddr;
 	pSge->Address.Low = cpu_to_le32
-			(lower_32_bits((unsigned long)(dma_addr)));
+			(lower_32_bits(dma_addr));
 	pSge->Address.High = cpu_to_le32
-			(upper_32_bits((unsigned long)dma_addr));
+			(upper_32_bits(dma_addr));
 	pSge->FlagsLength = cpu_to_le32
 			((flagslength | MPT_SGE_FLAGS_64_BIT_ADDRESSING));
 }
@@ -1038,8 +1038,8 @@
 	u32 tmp;
 
 	pSge->Address.Low = cpu_to_le32
-			(lower_32_bits((unsigned long)(dma_addr)));
-	tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+			(lower_32_bits(dma_addr));
+	tmp = (u32)(upper_32_bits(dma_addr));
 
 	/*
 	 * 1078 errata workaround for the 36GB limitation
@@ -1101,7 +1101,7 @@
 		pChain->NextChainOffset = next;
 
 		pChain->Address.Low = cpu_to_le32(tmp);
-		tmp = (u32)(upper_32_bits((unsigned long)dma_addr));
+		tmp = (u32)(upper_32_bits(dma_addr));
 		pChain->Address.High = cpu_to_le32(tmp);
 }
 
@@ -1297,12 +1297,8 @@
 	psge = (char *)&ioc_init->HostPageBufferSGE;
 	flags_length = MPI_SGE_FLAGS_SIMPLE_ELEMENT |
 	    MPI_SGE_FLAGS_SYSTEM_ADDRESS |
-	    MPI_SGE_FLAGS_32_BIT_ADDRESSING |
 	    MPI_SGE_FLAGS_HOST_TO_IOC |
 	    MPI_SGE_FLAGS_END_OF_BUFFER;
-	if (sizeof(dma_addr_t) == sizeof(u64)) {
-	    flags_length |= MPI_SGE_FLAGS_64_BIT_ADDRESSING;
-	}
 	flags_length = flags_length << MPI_SGE_FLAGS_SHIFT;
 	flags_length |= ioc->HostPageBuffer_sz;
 	ioc->add_sge(psge, flags_length, ioc->HostPageBuffer_dma);
@@ -2224,8 +2220,6 @@
 	int	 hard;
 	int	 rc=0;
 	int	 ii;
-	u8	 cb_idx;
-	int	 handlers;
 	int	 ret = 0;
 	int	 reset_alt_ioc_active = 0;
 	int	 irq_allocated = 0;
@@ -2548,34 +2542,6 @@
 		mpt_get_manufacturing_pg_0(ioc);
 	}
 
-	/*
-	 * Call each currently registered protocol IOC reset handler
-	 * with post-reset indication.
-	 * NOTE: If we're doing _IOC_BRINGUP, there can be no
-	 * MptResetHandlers[] registered yet.
-	 */
-	if (hard_reset_done) {
-		rc = handlers = 0;
-		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
-			if ((ret == 0) && MptResetHandlers[cb_idx]) {
-				dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				    "Calling IOC post_reset handler #%d\n",
-				    ioc->name, cb_idx));
-				rc += mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
-				handlers++;
-			}
-
-			if (alt_ioc_ready && MptResetHandlers[cb_idx]) {
-				drsprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-				    "Calling IOC post_reset handler #%d\n",
-				    ioc->alt_ioc->name, cb_idx));
-				rc += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_POST_RESET);
-				handlers++;
-			}
-		}
-		/* FIXME?  Examine results here? */
-	}
-
  out:
 	if ((ret != 0) && irq_allocated) {
 		free_irq(ioc->pci_irq, ioc);
@@ -3938,6 +3904,7 @@
 	int count = 0;
 	u32 diag1val = 0;
 	MpiFwHeader_t *cached_fw;	/* Pointer to FW */
+	u8	 cb_idx;
 
 	/* Clear any existing interrupts */
 	CHIPREG_WRITE32(&ioc->chip->IntStatus, 0);
@@ -3956,6 +3923,18 @@
 		else
 			mdelay(1);
 
+		/*
+		 * Call each currently registered protocol IOC reset handler
+		 * with pre-reset indication.
+		 * NOTE: If we're doing _IOC_BRINGUP, there can be no
+		 * MptResetHandlers[] registered yet.
+		 */
+		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+			if (MptResetHandlers[cb_idx])
+				(*(MptResetHandlers[cb_idx]))(ioc,
+						MPT_IOC_PRE_RESET);
+		}
+
 		for (count = 0; count < 60; count ++) {
 			doorbell = CHIPREG_READ32(&ioc->chip->Doorbell);
 			doorbell &= MPI_IOC_STATE_MASK;
@@ -4052,25 +4031,15 @@
 		 * NOTE: If we're doing _IOC_BRINGUP, there can be no
 		 * MptResetHandlers[] registered yet.
 		 */
-		{
-			u8	 cb_idx;
-			int	 r = 0;
-
-			for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
-				if (MptResetHandlers[cb_idx]) {
-					dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-						"Calling IOC pre_reset handler #%d\n",
-						ioc->name, cb_idx));
-					r += mpt_signal_reset(cb_idx, ioc, MPT_IOC_PRE_RESET);
-					if (ioc->alt_ioc) {
-						dprintk(ioc, printk(MYIOC_s_DEBUG_FMT
-							"Calling alt-%s pre_reset handler #%d\n",
-							ioc->name, ioc->alt_ioc->name, cb_idx));
-						r += mpt_signal_reset(cb_idx, ioc->alt_ioc, MPT_IOC_PRE_RESET);
-					}
+		for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+			if (MptResetHandlers[cb_idx]) {
+				mpt_signal_reset(cb_idx,
+					ioc, MPT_IOC_PRE_RESET);
+				if (ioc->alt_ioc) {
+					mpt_signal_reset(cb_idx,
+					ioc->alt_ioc, MPT_IOC_PRE_RESET);
 				}
 			}
-			/* FIXME?  Examine results here? */
 		}
 
 		if (ioc->cached_fw)
@@ -6956,7 +6925,7 @@
 int
 mpt_HardResetHandler(MPT_ADAPTER *ioc, int sleepFlag)
 {
-	int		 rc;
+	int	 rc;
 	u8	 cb_idx;
 	unsigned long	 flags;
 	unsigned long	 time_count;
@@ -6982,8 +6951,6 @@
 		ioc->alt_ioc->ioc_reset_in_progress = 1;
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
-	/* FIXME: If do_ioc_recovery fails, repeat....
-	 */
 
 	/* The SCSI driver needs to adjust timeouts on all current
 	 * commands prior to the diagnostic reset being issued.
@@ -7020,6 +6987,15 @@
 	}
 	spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
 
+	for (cb_idx = MPT_MAX_PROTOCOL_DRIVERS-1; cb_idx; cb_idx--) {
+		if (MptResetHandlers[cb_idx]) {
+			mpt_signal_reset(cb_idx, ioc, MPT_IOC_POST_RESET);
+			if (ioc->alt_ioc)
+				mpt_signal_reset(cb_idx,
+					ioc->alt_ioc, MPT_IOC_POST_RESET);
+		}
+	}
+
 	dtmprintk(ioc,
 	    printk(MYIOC_s_DEBUG_FMT
 		"HardResetHandler: completed (%d seconds): %s\n", ioc->name,
diff --git a/drivers/message/fusion/mptbase.h b/drivers/message/fusion/mptbase.h
index 1c8514d..8dd4d21 100644
--- a/drivers/message/fusion/mptbase.h
+++ b/drivers/message/fusion/mptbase.h
@@ -76,8 +76,8 @@
 #define COPYRIGHT	"Copyright (c) 1999-2008 " MODULEAUTHOR
 #endif
 
-#define MPT_LINUX_VERSION_COMMON	"3.04.10"
-#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.09"
+#define MPT_LINUX_VERSION_COMMON	"3.04.12"
+#define MPT_LINUX_PACKAGE_NAME		"@(#)mptlinux-3.04.12"
 #define WHAT_MAGIC_STRING		"@" "(" "#" ")"
 
 #define show_mptmod_ver(s,ver)  \
@@ -157,8 +157,9 @@
 /*
  *	Try to keep these at 2^N-1
  */
-#define MPT_FC_CAN_QUEUE	127
+#define MPT_FC_CAN_QUEUE	1024
 #define MPT_SCSI_CAN_QUEUE	127
+#define MPT_SAS_CAN_QUEUE	127
 
 /*
  * Set the MAX_SGE value based on user input.
@@ -879,23 +880,9 @@
 
 typedef struct _MPT_SCSI_HOST {
 	MPT_ADAPTER		 *ioc;
-	int			  port;
-	u32			  pad0;
-	MPT_LOCAL_REPLY		 *pLocal;		/* used for internal commands */
-	struct timer_list	  timer;
-		/* Pool of memory for holding SCpnts before doing
-		 * OS callbacks. freeQ is the free pool.
-		 */
-	u8			  negoNvram;		/* DV disabled, nego NVRAM */
-	u8			  pad1;
-	u8			  rsvd[2];
-	MPT_FRAME_HDR		 *cmdPtr;		/* Ptr to nonOS request */
-	struct scsi_cmnd	 *abortSCpnt;
-	MPT_LOCAL_REPLY		  localReply;		/* internal cmd reply struct */
 	ushort			  sel_timeout[MPT_MAX_FC_DEVICES];
 	char 			  *info_kbuf;
 	long			  last_queue_full;
-	u16			  tm_iocstatus;
 	u16			  spi_pending;
 	struct list_head	  target_reset_list;
 } MPT_SCSI_HOST;
diff --git a/drivers/message/fusion/mptfc.c b/drivers/message/fusion/mptfc.c
index e61df13..ebf6ae0 100644
--- a/drivers/message/fusion/mptfc.c
+++ b/drivers/message/fusion/mptfc.c
@@ -1288,25 +1288,6 @@
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	hd->last_queue_full = 0;
 
 	sh->transportt = mptfc_transport_template;
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 55ff252..83873e3 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -72,6 +72,7 @@
  */
 #define MPTSAS_RAID_CHANNEL	1
 
+#define SAS_CONFIG_PAGE_TIMEOUT		30
 MODULE_AUTHOR(MODULEAUTHOR);
 MODULE_DESCRIPTION(my_NAME);
 MODULE_LICENSE("GPL");
@@ -324,7 +325,6 @@
 {
 	struct fw_event_work *fw_event, *next;
 	struct mptsas_target_reset_event *target_reset_list, *n;
-	u8	flush_q;
 	MPT_SCSI_HOST	*hd = shost_priv(ioc->sh);
 
 	/* flush the target_reset_list */
@@ -344,15 +344,10 @@
 	     !ioc->fw_event_q || in_interrupt())
 		return;
 
-	flush_q = 0;
 	list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
 		if (cancel_delayed_work(&fw_event->work))
 			mptsas_free_fw_event(ioc, fw_event);
-		else
-			flush_q = 1;
 	}
-	if (flush_q)
-		flush_workqueue(ioc->fw_event_q);
 }
 
 
@@ -661,7 +656,7 @@
 	cfg.pageAddr = starget->id;
 	cfg.cfghdr.hdr = &hdr;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	if (mpt_config(ioc, &cfg) != 0)
 		goto out;
@@ -851,7 +846,13 @@
 		port_details->num_phys--;
 		port_details->phy_bitmask &= ~ (1 << phy_info->phy_id);
 		memset(&phy_info->attached, 0, sizeof(struct mptsas_devinfo));
-		sas_port_delete_phy(port_details->port, phy_info->phy);
+		if (phy_info->phy) {
+			devtprintk(ioc, dev_printk(KERN_DEBUG,
+				&phy_info->phy->dev, MYIOC_s_FMT
+				"delete phy %d, phy-obj (0x%p)\n", ioc->name,
+				phy_info->phy_id, phy_info->phy));
+			sas_port_delete_phy(port_details->port, phy_info->phy);
+		}
 		phy_info->port_details = NULL;
 	}
 
@@ -1272,7 +1273,6 @@
 		}
 		mptsas_cleanup_fw_event_q(ioc);
 		mptsas_queue_rescan(ioc);
-		mptsas_fw_event_on(ioc);
 		break;
 	default:
 		break;
@@ -1318,7 +1318,7 @@
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -1592,6 +1592,7 @@
 		mptsas_scan_sas_topology(ioc);
 		ioc->in_rescan = 0;
 		mptsas_free_fw_event(ioc, fw_event);
+		mptsas_fw_event_on(ioc);
 		return;
 	}
 
@@ -1891,7 +1892,7 @@
 	.eh_bus_reset_handler		= mptscsih_bus_reset,
 	.eh_host_reset_handler		= mptscsih_host_reset,
 	.bios_param			= mptscsih_bios_param,
-	.can_queue			= MPT_FC_CAN_QUEUE,
+	.can_queue			= MPT_SAS_CAN_QUEUE,
 	.this_id			= -1,
 	.sg_tablesize			= MPT_SCSI_SG_DEPTH,
 	.max_sectors			= 8192,
@@ -1926,7 +1927,7 @@
 	cfg.pageAddr = phy->identify.phy_identifier;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;    /* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -2278,7 +2279,7 @@
 	cfg.pageAddr = 0;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -2349,7 +2350,7 @@
 
 	cfg.cfghdr.ehdr = &hdr;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 	cfg.cfghdr.ehdr->PageType = MPI_CONFIG_PAGETYPE_EXTENDED;
 	cfg.cfghdr.ehdr->ExtPageType = MPI_CONFIG_EXTPAGETYPE_SAS_IO_UNIT;
 	cfg.cfghdr.ehdr->PageVersion = MPI_SASIOUNITPAGE1_PAGEVERSION;
@@ -2411,7 +2412,7 @@
 
 	cfg.cfghdr.ehdr = &hdr;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	/* Get Phy Pg 0 for each Phy. */
 	cfg.physAddr = -1;
@@ -2479,7 +2480,7 @@
 	cfg.physAddr = -1;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	memset(device_info, 0, sizeof(struct mptsas_devinfo));
 	error = mpt_config(ioc, &cfg);
@@ -2554,7 +2555,7 @@
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	memset(port_info, 0, sizeof(struct mptsas_portinfo));
 	error = mpt_config(ioc, &cfg);
@@ -2635,7 +2636,7 @@
 	cfg.pageAddr = form + form_specific;
 	cfg.action = MPI_CONFIG_ACTION_PAGE_HEADER;
 	cfg.dir = 0;	/* read */
-	cfg.timeout = 10;
+	cfg.timeout = SAS_CONFIG_PAGE_TIMEOUT;
 
 	error = mpt_config(ioc, &cfg);
 	if (error)
@@ -3307,6 +3308,7 @@
 	expander_data = (MpiEventDataSasExpanderStatusChange_t *)
 	    fw_event->event_data;
 	memcpy(&sas_address, &expander_data->SASAddress, sizeof(__le64));
+	sas_address = le64_to_cpu(sas_address);
 	port_info = mptsas_find_portinfo_by_sas_address(ioc, sas_address);
 
 	if (expander_data->ReasonCode == MPI_EVENT_SAS_EXP_RC_ADDED) {
@@ -4760,10 +4762,9 @@
 
 	/* set 16 byte cdb's */
 	sh->max_cmd_len = 16;
-
-	sh->max_id = ioc->pfacts[0].PortSCSIID;
+	sh->can_queue = min_t(int, ioc->req_depth - 10, sh->can_queue);
+	sh->max_id = -1;
 	sh->max_lun = max_lun;
-
 	sh->transportt = mptsas_transport_template;
 
 	/* Required entry.
@@ -4821,25 +4822,6 @@
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	ioc->sas_data.ptClear = mpt_pt_clear;
 
 	hd->last_queue_full = 0;
diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
index 8440f78f..c295786 100644
--- a/drivers/message/fusion/mptscsih.c
+++ b/drivers/message/fusion/mptscsih.c
@@ -628,6 +628,16 @@
 		return 1;
 	}
 
+	if (ioc->bus_type == SAS) {
+		VirtDevice *vdevice = sc->device->hostdata;
+
+		if (!vdevice || !vdevice->vtarget ||
+		    vdevice->vtarget->deleted) {
+			sc->result = DID_NO_CONNECT << 16;
+			goto out;
+		}
+	}
+
 	sc->host_scribble = NULL;
 	sc->result = DID_OK << 16;		/* Set default reply as OK */
 	pScsiReq = (SCSIIORequest_t *) mf;
@@ -689,6 +699,7 @@
 
 		switch(status) {
 		case MPI_IOCSTATUS_BUSY:			/* 0x0002 */
+		case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
 			/* CHECKME!
 			 * Maybe: DRIVER_BUSY | SUGGEST_RETRY | DID_SOFT_ERROR (retry)
 			 * But not: DID_BUS_BUSY lest one risk
@@ -872,7 +883,6 @@
 		case MPI_IOCSTATUS_INVALID_SGL:			/* 0x0003 */
 		case MPI_IOCSTATUS_INTERNAL_ERROR:		/* 0x0004 */
 		case MPI_IOCSTATUS_RESERVED:			/* 0x0005 */
-		case MPI_IOCSTATUS_INSUFFICIENT_RESOURCES:	/* 0x0006 */
 		case MPI_IOCSTATUS_INVALID_FIELD:		/* 0x0007 */
 		case MPI_IOCSTATUS_INVALID_STATE:		/* 0x0008 */
 		case MPI_IOCSTATUS_SCSI_IO_DATA_ERROR:		/* 0x0046 */
@@ -892,7 +902,7 @@
 #endif
 
 	} /* end of address reply case */
-
+out:
 	/* Unmap the DMA buffers, if any. */
 	scsi_dma_unmap(sc);
 
@@ -1729,9 +1739,6 @@
 	 */
 	mf = MPT_INDEX_2_MFPTR(ioc, scpnt_idx);
 	ctx2abort = mf->u.frame.hwhdr.msgctxu.MsgContext;
-
-	hd->abortSCpnt = SCpnt;
-
 	retval = mptscsih_IssueTaskMgmt(hd,
 			 MPI_SCSITASKMGMT_TASKTYPE_ABORT_TASK,
 			 vdevice->vtarget->channel,
@@ -2293,7 +2300,10 @@
 		else
 			max_depth = MPT_SCSI_CMD_PER_DEV_LOW;
 	} else
-		max_depth = MPT_SCSI_CMD_PER_DEV_HIGH;
+		 max_depth = ioc->sh->can_queue;
+
+	if (!sdev->tagged_supported)
+		max_depth = 1;
 
 	if (qdepth > max_depth)
 		qdepth = max_depth;
@@ -2627,50 +2637,6 @@
 	return 1;
 }
 
-/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
-/*	mptscsih_timer_expired - Call back for timer process.
- *	Used only for dv functionality.
- *	@data: Pointer to MPT_SCSI_HOST recast as an unsigned long
- *
- */
-void
-mptscsih_timer_expired(unsigned long data)
-{
-	MPT_SCSI_HOST *hd = (MPT_SCSI_HOST *) data;
-	MPT_ADAPTER 	*ioc = hd->ioc;
-
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired! Cmd %p\n", ioc->name, hd->cmdPtr));
-
-	if (hd->cmdPtr) {
-		MPIHeader_t *cmd = (MPIHeader_t *)hd->cmdPtr;
-
-		if (cmd->Function == MPI_FUNCTION_SCSI_IO_REQUEST) {
-			/* Desire to issue a task management request here.
-			 * TM requests MUST be single threaded.
-			 * If old eh code and no TM current, issue request.
-			 * If new eh code, do nothing. Wait for OS cmd timeout
-			 *	for bus reset.
-			 */
-		} else {
-			/* Perform a FW reload */
-			if (mpt_HardResetHandler(ioc, NO_SLEEP) < 0) {
-				printk(MYIOC_s_WARN_FMT "Firmware Reload FAILED!\n", ioc->name);
-			}
-		}
-	} else {
-		/* This should NEVER happen */
-		printk(MYIOC_s_WARN_FMT "Null cmdPtr!!!!\n", ioc->name);
-	}
-
-	/* No more processing.
-	 * TM call will generate an interrupt for SCSI TM Management.
-	 * The FW will reply to all outstanding commands, callback will finish cleanup.
-	 * Hard reset clean-up will free all resources.
-	 */
-	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT "Timer Expired Complete!\n", ioc->name));
-
-	return;
-}
 
 /**
  *	mptscsih_get_completion_code -
@@ -3265,6 +3231,5 @@
 EXPORT_SYMBOL(mptscsih_event_process);
 EXPORT_SYMBOL(mptscsih_ioc_reset);
 EXPORT_SYMBOL(mptscsih_change_queue_depth);
-EXPORT_SYMBOL(mptscsih_timer_expired);
 
 /*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/fusion/mptscsih.h b/drivers/message/fusion/mptscsih.h
index eb3f677..e0b33e0 100644
--- a/drivers/message/fusion/mptscsih.h
+++ b/drivers/message/fusion/mptscsih.h
@@ -129,7 +129,6 @@
 extern int mptscsih_event_process(MPT_ADAPTER *ioc, EventNotificationReply_t *pEvReply);
 extern int mptscsih_ioc_reset(MPT_ADAPTER *ioc, int post_reset);
 extern int mptscsih_change_queue_depth(struct scsi_device *sdev, int qdepth);
-extern void mptscsih_timer_expired(unsigned long data);
 extern u8 mptscsih_raid_id_to_num(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern int mptscsih_is_phys_disk(MPT_ADAPTER *ioc, u8 channel, u8 id);
 extern struct device_attribute *mptscsih_host_attrs[];
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index c5b808f..69f4257 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1472,28 +1472,7 @@
 	dprintk(ioc, printk(MYIOC_s_DEBUG_FMT "ScsiLookup @ %p\n",
 		 ioc->name, ioc->ScsiLookup));
 
-	/* Clear the TM flags
-	 */
-	hd->abortSCpnt = NULL;
-
-	/* Clear the pointer used to store
-	 * single-threaded commands, i.e., those
-	 * issued during a bus scan, dv and
-	 * configuration pages.
-	 */
-	hd->cmdPtr = NULL;
-
-	/* Initialize this SCSI Hosts' timers
-	 * To use, set the timer expires field
-	 * and add_timer
-	 */
-	init_timer(&hd->timer);
-	hd->timer.data = (unsigned long) hd;
-	hd->timer.function = mptscsih_timer_expired;
-
 	ioc->spi_data.Saf_Te = mpt_saf_te;
-
-	hd->negoNvram = MPT_SCSICFG_USE_NVRAM;
 	ddvprintk(ioc, printk(MYIOC_s_DEBUG_FMT
 		"saf_te %x\n",
 		ioc->name,
diff --git a/drivers/misc/enclosure.c b/drivers/misc/enclosure.c
index 348443b..7b03930 100644
--- a/drivers/misc/enclosure.c
+++ b/drivers/misc/enclosure.c
@@ -33,24 +33,44 @@
 static struct class enclosure_class;
 
 /**
- * enclosure_find - find an enclosure given a device
- * @dev:	the device to find for
+ * enclosure_find - find an enclosure given a parent device
+ * @dev:	the parent to match against
+ * @start:	Optional enclosure device to start from (NULL if none)
  *
- * Looks through the list of registered enclosures to see
- * if it can find a match for a device.  Returns NULL if no
- * enclosure is found. Obtains a reference to the enclosure class
- * device which must be released with device_put().
+ * Looks through the list of registered enclosures to find all those
+ * with @dev as a parent.  Returns NULL if no enclosure is
+ * found. @start can be used as a starting point to obtain multiple
+ * enclosures per parent (should begin with NULL and then be set to
+ * each returned enclosure device). Obtains a reference to the
+ * enclosure class device which must be released with device_put().
+ * If @start is not NULL, a reference must be taken on it which is
+ * released before returning (this allows a loop through all
+ * enclosures to exit with only the reference on the enclosure of
+ * interest held).  Note that the @dev may correspond to the actual
+ * device housing the enclosure, in which case no iteration via @start
+ * is required.
  */
-struct enclosure_device *enclosure_find(struct device *dev)
+struct enclosure_device *enclosure_find(struct device *dev,
+					struct enclosure_device *start)
 {
 	struct enclosure_device *edev;
 
 	mutex_lock(&container_list_lock);
-	list_for_each_entry(edev, &container_list, node) {
-		if (edev->edev.parent == dev) {
-			get_device(&edev->edev);
-			mutex_unlock(&container_list_lock);
-			return edev;
+	edev = list_prepare_entry(start, &container_list, node);
+	if (start)
+		put_device(&start->edev);
+
+	list_for_each_entry_continue(edev, &container_list, node) {
+		struct device *parent = edev->edev.parent;
+		/* parent might not be immediate, so iterate up to
+		 * the root of the tree if necessary */
+		while (parent) {
+			if (parent == dev) {
+				get_device(&edev->edev);
+				mutex_unlock(&container_list_lock);
+				return edev;
+			}
+			parent = parent->parent;
 		}
 	}
 	mutex_unlock(&container_list_lock);
@@ -295,6 +315,9 @@
 
 	cdev = &edev->component[component];
 
+	if (cdev->dev == dev)
+		return -EEXIST;
+
 	if (cdev->dev)
 		enclosure_remove_links(cdev);
 
@@ -312,19 +335,25 @@
  * Returns zero on success or an error.
  *
  */
-int enclosure_remove_device(struct enclosure_device *edev, int component)
+int enclosure_remove_device(struct enclosure_device *edev, struct device *dev)
 {
 	struct enclosure_component *cdev;
+	int i;
 
-	if (!edev || component >= edev->components)
+	if (!edev || !dev)
 		return -EINVAL;
 
-	cdev = &edev->component[component];
-
-	device_del(&cdev->cdev);
-	put_device(cdev->dev);
-	cdev->dev = NULL;
-	return device_add(&cdev->cdev);
+	for (i = 0; i < edev->components; i++) {
+		cdev = &edev->component[i];
+		if (cdev->dev == dev) {
+			enclosure_remove_links(cdev);
+			device_del(&cdev->cdev);
+			put_device(dev);
+			cdev->dev = NULL;
+			return device_add(&cdev->cdev);
+		}
+	}
+	return -ENODEV;
 }
 EXPORT_SYMBOL_GPL(enclosure_remove_device);
 
diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c
index 2ccbd18..1be6bf7 100644
--- a/drivers/s390/scsi/zfcp_aux.c
+++ b/drivers/s390/scsi/zfcp_aux.c
@@ -42,6 +42,12 @@
 module_param_named(device, init_device, charp, 0400);
 MODULE_PARM_DESC(device, "specify initial device");
 
+static struct kmem_cache *zfcp_cache_hw_align(const char *name,
+					      unsigned long size)
+{
+	return kmem_cache_create(name, size, roundup_pow_of_two(size), 0, NULL);
+}
+
 static int zfcp_reqlist_alloc(struct zfcp_adapter *adapter)
 {
 	int idx;
@@ -78,7 +84,7 @@
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	read_lock_irq(&zfcp_data.config_lock);
 	adapter = zfcp_get_adapter_by_busid(busid);
 	if (adapter)
@@ -93,31 +99,23 @@
 	unit = zfcp_unit_enqueue(port, lun);
 	if (IS_ERR(unit))
 		goto out_unit;
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	ccw_device_set_online(adapter->ccw_device);
 
 	zfcp_erp_wait(adapter);
 	flush_work(&unit->scsi_work);
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	zfcp_unit_put(unit);
 out_unit:
 	zfcp_port_put(port);
 out_port:
 	zfcp_adapter_put(adapter);
 out_adapter:
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return;
 }
 
-static struct kmem_cache *zfcp_cache_create(int size, char *name)
-{
-	int align = 1;
-	while ((size - align) > 0)
-		align <<= 1;
-	return kmem_cache_create(name , size, align, 0, NULL);
-}
-
 static void __init zfcp_init_device_setup(char *devstr)
 {
 	char *token;
@@ -158,24 +156,27 @@
 {
 	int retval = -ENOMEM;
 
-	zfcp_data.fsf_req_qtcb_cache = zfcp_cache_create(
-			sizeof(struct zfcp_fsf_req_qtcb), "zfcp_fsf");
-	if (!zfcp_data.fsf_req_qtcb_cache)
+	zfcp_data.gpn_ft_cache = zfcp_cache_hw_align("zfcp_gpn",
+					sizeof(struct ct_iu_gpn_ft_req));
+	if (!zfcp_data.gpn_ft_cache)
 		goto out;
 
-	zfcp_data.sr_buffer_cache = zfcp_cache_create(
-			sizeof(struct fsf_status_read_buffer), "zfcp_sr");
+	zfcp_data.qtcb_cache = zfcp_cache_hw_align("zfcp_qtcb",
+					sizeof(struct fsf_qtcb));
+	if (!zfcp_data.qtcb_cache)
+		goto out_qtcb_cache;
+
+	zfcp_data.sr_buffer_cache = zfcp_cache_hw_align("zfcp_sr",
+					sizeof(struct fsf_status_read_buffer));
 	if (!zfcp_data.sr_buffer_cache)
 		goto out_sr_cache;
 
-	zfcp_data.gid_pn_cache = zfcp_cache_create(
-			sizeof(struct zfcp_gid_pn_data), "zfcp_gid");
+	zfcp_data.gid_pn_cache = zfcp_cache_hw_align("zfcp_gid",
+					sizeof(struct zfcp_gid_pn_data));
 	if (!zfcp_data.gid_pn_cache)
 		goto out_gid_cache;
 
-	zfcp_data.work_queue = create_singlethread_workqueue("zfcp_wq");
-
-	sema_init(&zfcp_data.config_sema, 1);
+	mutex_init(&zfcp_data.config_mutex);
 	rwlock_init(&zfcp_data.config_lock);
 
 	zfcp_data.scsi_transport_template =
@@ -209,7 +210,9 @@
 out_gid_cache:
 	kmem_cache_destroy(zfcp_data.sr_buffer_cache);
 out_sr_cache:
-	kmem_cache_destroy(zfcp_data.fsf_req_qtcb_cache);
+	kmem_cache_destroy(zfcp_data.qtcb_cache);
+out_qtcb_cache:
+	kmem_cache_destroy(zfcp_data.gpn_ft_cache);
 out:
 	return retval;
 }
@@ -263,7 +266,7 @@
  * @port: pointer to port where unit is added
  * @fcp_lun: FCP LUN of unit to be enqueued
  * Returns: pointer to enqueued unit on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the unit list
+ * Locks: config_mutex must be held to serialize changes to the unit list
  *
  * Sets up some unit internal structures and creates sysfs entry.
  */
@@ -271,6 +274,13 @@
 {
 	struct zfcp_unit *unit;
 
+	read_lock_irq(&zfcp_data.config_lock);
+	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
+		read_unlock_irq(&zfcp_data.config_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	read_unlock_irq(&zfcp_data.config_lock);
+
 	unit = kzalloc(sizeof(struct zfcp_unit), GFP_KERNEL);
 	if (!unit)
 		return ERR_PTR(-ENOMEM);
@@ -282,8 +292,11 @@
 	unit->port = port;
 	unit->fcp_lun = fcp_lun;
 
-	dev_set_name(&unit->sysfs_device, "0x%016llx",
-		     (unsigned long long) fcp_lun);
+	if (dev_set_name(&unit->sysfs_device, "0x%016llx",
+			 (unsigned long long) fcp_lun)) {
+		kfree(unit);
+		return ERR_PTR(-ENOMEM);
+	}
 	unit->sysfs_device.parent = &port->sysfs_device;
 	unit->sysfs_device.release = zfcp_sysfs_unit_release;
 	dev_set_drvdata(&unit->sysfs_device, unit);
@@ -299,20 +312,15 @@
 	unit->latencies.cmd.channel.min = 0xFFFFFFFF;
 	unit->latencies.cmd.fabric.min = 0xFFFFFFFF;
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_unit_by_lun(port, fcp_lun)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		goto err_out_free;
+	if (device_register(&unit->sysfs_device)) {
+		put_device(&unit->sysfs_device);
+		return ERR_PTR(-EINVAL);
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
-
-	if (device_register(&unit->sysfs_device))
-		goto err_out_free;
 
 	if (sysfs_create_group(&unit->sysfs_device.kobj,
 			       &zfcp_sysfs_unit_attrs)) {
 		device_unregister(&unit->sysfs_device);
-		return ERR_PTR(-EIO);
+		return ERR_PTR(-EINVAL);
 	}
 
 	zfcp_unit_get(unit);
@@ -327,10 +335,6 @@
 	zfcp_port_get(port);
 
 	return unit;
-
-err_out_free:
-	kfree(unit);
-	return ERR_PTR(-EINVAL);
 }
 
 /**
@@ -353,37 +357,47 @@
 
 static int zfcp_allocate_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* must only be called with zfcp_data.config_sema taken */
-	adapter->pool.fsf_req_erp =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_erp)
+	/* must only be called with zfcp_data.config_mutex taken */
+	adapter->pool.erp_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.erp_req)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_scsi =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_scsi)
+	adapter->pool.gid_pn_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.gid_pn_req)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_abort =
-		mempool_create_slab_pool(1, zfcp_data.fsf_req_qtcb_cache);
-	if (!adapter->pool.fsf_req_abort)
+	adapter->pool.scsi_req =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_req)
 		return -ENOMEM;
 
-	adapter->pool.fsf_req_status_read =
+	adapter->pool.scsi_abort =
+		mempool_create_kmalloc_pool(1, sizeof(struct zfcp_fsf_req));
+	if (!adapter->pool.scsi_abort)
+		return -ENOMEM;
+
+	adapter->pool.status_read_req =
 		mempool_create_kmalloc_pool(FSF_STATUS_READS_RECOM,
 					    sizeof(struct zfcp_fsf_req));
-	if (!adapter->pool.fsf_req_status_read)
+	if (!adapter->pool.status_read_req)
 		return -ENOMEM;
 
-	adapter->pool.data_status_read =
+	adapter->pool.qtcb_pool =
+		mempool_create_slab_pool(4, zfcp_data.qtcb_cache);
+	if (!adapter->pool.qtcb_pool)
+		return -ENOMEM;
+
+	adapter->pool.status_read_data =
 		mempool_create_slab_pool(FSF_STATUS_READS_RECOM,
 					 zfcp_data.sr_buffer_cache);
-	if (!adapter->pool.data_status_read)
+	if (!adapter->pool.status_read_data)
 		return -ENOMEM;
 
-	adapter->pool.data_gid_pn =
+	adapter->pool.gid_pn_data =
 		mempool_create_slab_pool(1, zfcp_data.gid_pn_cache);
-	if (!adapter->pool.data_gid_pn)
+	if (!adapter->pool.gid_pn_data)
 		return -ENOMEM;
 
 	return 0;
@@ -391,19 +405,21 @@
 
 static void zfcp_free_low_mem_buffers(struct zfcp_adapter *adapter)
 {
-	/* zfcp_data.config_sema must be held */
-	if (adapter->pool.fsf_req_erp)
-		mempool_destroy(adapter->pool.fsf_req_erp);
-	if (adapter->pool.fsf_req_scsi)
-		mempool_destroy(adapter->pool.fsf_req_scsi);
-	if (adapter->pool.fsf_req_abort)
-		mempool_destroy(adapter->pool.fsf_req_abort);
-	if (adapter->pool.fsf_req_status_read)
-		mempool_destroy(adapter->pool.fsf_req_status_read);
-	if (adapter->pool.data_status_read)
-		mempool_destroy(adapter->pool.data_status_read);
-	if (adapter->pool.data_gid_pn)
-		mempool_destroy(adapter->pool.data_gid_pn);
+	/* zfcp_data.config_mutex must be held */
+	if (adapter->pool.erp_req)
+		mempool_destroy(adapter->pool.erp_req);
+	if (adapter->pool.scsi_req)
+		mempool_destroy(adapter->pool.scsi_req);
+	if (adapter->pool.scsi_abort)
+		mempool_destroy(adapter->pool.scsi_abort);
+	if (adapter->pool.qtcb_pool)
+		mempool_destroy(adapter->pool.qtcb_pool);
+	if (adapter->pool.status_read_req)
+		mempool_destroy(adapter->pool.status_read_req);
+	if (adapter->pool.status_read_data)
+		mempool_destroy(adapter->pool.status_read_data);
+	if (adapter->pool.gid_pn_data)
+		mempool_destroy(adapter->pool.gid_pn_data);
 }
 
 /**
@@ -418,7 +434,7 @@
 int zfcp_status_read_refill(struct zfcp_adapter *adapter)
 {
 	while (atomic_read(&adapter->stat_miss) > 0)
-		if (zfcp_fsf_status_read(adapter)) {
+		if (zfcp_fsf_status_read(adapter->qdio)) {
 			if (atomic_read(&adapter->stat_miss) >= 16) {
 				zfcp_erp_adapter_reopen(adapter, 0, "axsref1",
 							NULL);
@@ -446,6 +462,27 @@
 		   adapter->fsf_lic_version);
 }
 
+static int zfcp_setup_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	char name[TASK_COMM_LEN];
+
+	snprintf(name, sizeof(name), "zfcp_q_%s",
+		 dev_name(&adapter->ccw_device->dev));
+	adapter->work_queue = create_singlethread_workqueue(name);
+
+	if (adapter->work_queue)
+		return 0;
+	return -ENOMEM;
+}
+
+static void zfcp_destroy_adapter_work_queue(struct zfcp_adapter *adapter)
+{
+	if (adapter->work_queue)
+		destroy_workqueue(adapter->work_queue);
+	adapter->work_queue = NULL;
+
+}
+
 /**
  * zfcp_adapter_enqueue - enqueue a new adapter to the list
  * @ccw_device: pointer to the struct cc_device
@@ -455,7 +492,7 @@
  * Enqueues an adapter at the end of the adapter list in the driver data.
  * All adapter internal structures are set up.
  * Proc-fs entries are also created.
- * locks:	config_sema must be held to serialise changes to the adapter list
+ * locks: config_mutex must be held to serialize changes to the adapter list
  */
 int zfcp_adapter_enqueue(struct ccw_device *ccw_device)
 {
@@ -463,37 +500,37 @@
 
 	/*
 	 * Note: It is safe to release the list_lock, as any list changes
-	 * are protected by the config_sema, which must be held to get here
+	 * are protected by the config_mutex, which must be held to get here
 	 */
 
 	adapter = kzalloc(sizeof(struct zfcp_adapter), GFP_KERNEL);
 	if (!adapter)
 		return -ENOMEM;
 
-	adapter->gs = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
-	if (!adapter->gs) {
-		kfree(adapter);
-		return -ENOMEM;
-	}
-
 	ccw_device->handler = NULL;
 	adapter->ccw_device = ccw_device;
 	atomic_set(&adapter->refcount, 0);
 
-	if (zfcp_qdio_allocate(adapter))
-		goto qdio_allocate_failed;
+	if (zfcp_qdio_setup(adapter))
+		goto qdio_failed;
 
 	if (zfcp_allocate_low_mem_buffers(adapter))
-		goto failed_low_mem_buffers;
+		goto low_mem_buffers_failed;
 
 	if (zfcp_reqlist_alloc(adapter))
-		goto failed_low_mem_buffers;
+		goto low_mem_buffers_failed;
 
-	if (zfcp_adapter_debug_register(adapter))
+	if (zfcp_dbf_adapter_register(adapter))
 		goto debug_register_failed;
 
+	if (zfcp_setup_adapter_work_queue(adapter))
+		goto work_queue_failed;
+
+	if (zfcp_fc_gs_setup(adapter))
+		goto generic_services_failed;
+
 	init_waitqueue_head(&adapter->remove_wq);
-	init_waitqueue_head(&adapter->erp_thread_wqh);
+	init_waitqueue_head(&adapter->erp_ready_wq);
 	init_waitqueue_head(&adapter->erp_done_wqh);
 
 	INIT_LIST_HEAD(&adapter->port_list_head);
@@ -502,20 +539,14 @@
 
 	spin_lock_init(&adapter->req_list_lock);
 
-	spin_lock_init(&adapter->hba_dbf_lock);
-	spin_lock_init(&adapter->san_dbf_lock);
-	spin_lock_init(&adapter->scsi_dbf_lock);
-	spin_lock_init(&adapter->rec_dbf_lock);
-	spin_lock_init(&adapter->req_q_lock);
-	spin_lock_init(&adapter->qdio_stat_lock);
-
 	rwlock_init(&adapter->erp_lock);
 	rwlock_init(&adapter->abort_lock);
 
-	sema_init(&adapter->erp_ready_sem, 0);
+	if (zfcp_erp_thread_setup(adapter))
+		goto erp_thread_failed;
 
 	INIT_WORK(&adapter->stat_work, _zfcp_status_read_scheduler);
-	INIT_WORK(&adapter->scan_work, _zfcp_scan_ports_later);
+	INIT_WORK(&adapter->scan_work, _zfcp_fc_scan_ports_later);
 
 	adapter->service_level.seq_print = zfcp_print_sl;
 
@@ -529,20 +560,25 @@
 		goto sysfs_failed;
 
 	atomic_clear_mask(ZFCP_STATUS_COMMON_REMOVE, &adapter->status);
-	zfcp_fc_wka_ports_init(adapter);
 
 	if (!zfcp_adapter_scsi_register(adapter))
 		return 0;
 
 sysfs_failed:
-	zfcp_adapter_debug_unregister(adapter);
+	zfcp_erp_thread_kill(adapter);
+erp_thread_failed:
+	zfcp_fc_gs_destroy(adapter);
+generic_services_failed:
+	zfcp_destroy_adapter_work_queue(adapter);
+work_queue_failed:
+	zfcp_dbf_adapter_unregister(adapter->dbf);
 debug_register_failed:
 	dev_set_drvdata(&ccw_device->dev, NULL);
 	kfree(adapter->req_list);
-failed_low_mem_buffers:
+low_mem_buffers_failed:
 	zfcp_free_low_mem_buffers(adapter);
-qdio_allocate_failed:
-	zfcp_qdio_free(adapter);
+qdio_failed:
+	zfcp_qdio_destroy(adapter->qdio);
 	kfree(adapter);
 	return -ENOMEM;
 }
@@ -559,6 +595,7 @@
 
 	cancel_work_sync(&adapter->scan_work);
 	cancel_work_sync(&adapter->stat_work);
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	zfcp_adapter_scsi_unregister(adapter);
 	sysfs_remove_group(&adapter->ccw_device->dev.kobj,
 			   &zfcp_sysfs_adapter_attrs);
@@ -570,13 +607,15 @@
 	if (!retval)
 		return;
 
-	zfcp_adapter_debug_unregister(adapter);
-	zfcp_qdio_free(adapter);
+	zfcp_fc_gs_destroy(adapter);
+	zfcp_erp_thread_kill(adapter);
+	zfcp_destroy_adapter_work_queue(adapter);
+	zfcp_dbf_adapter_unregister(adapter->dbf);
 	zfcp_free_low_mem_buffers(adapter);
+	zfcp_qdio_destroy(adapter->qdio);
 	kfree(adapter->req_list);
 	kfree(adapter->fc_stats);
 	kfree(adapter->stats_reset_data);
-	kfree(adapter->gs);
 	kfree(adapter);
 }
 
@@ -592,7 +631,7 @@
  * @status: initial status for the port
  * @d_id: destination id of the remote port to be enqueued
  * Returns: pointer to enqueued port on success, ERR_PTR on error
- * Locks: config_sema must be held to serialize changes to the port list
+ * Locks: config_mutex must be held to serialize changes to the port list
  *
  * All port internal structures are set up and the sysfs entry is generated.
  * d_id is used to enqueue ports with a well known address like the Directory
@@ -602,7 +641,13 @@
 				     u32 status, u32 d_id)
 {
 	struct zfcp_port *port;
-	int retval;
+
+	read_lock_irq(&zfcp_data.config_lock);
+	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
+		read_unlock_irq(&zfcp_data.config_lock);
+		return ERR_PTR(-EINVAL);
+	}
+	read_unlock_irq(&zfcp_data.config_lock);
 
 	port = kzalloc(sizeof(struct zfcp_port), GFP_KERNEL);
 	if (!port)
@@ -610,7 +655,7 @@
 
 	init_waitqueue_head(&port->remove_wq);
 	INIT_LIST_HEAD(&port->unit_list_head);
-	INIT_WORK(&port->gid_pn_work, zfcp_erp_port_strategy_open_lookup);
+	INIT_WORK(&port->gid_pn_work, zfcp_fc_port_did_lookup);
 	INIT_WORK(&port->test_link_work, zfcp_fc_link_test_work);
 	INIT_WORK(&port->rport_work, zfcp_scsi_rport_work);
 
@@ -623,29 +668,24 @@
 	atomic_set_mask(status | ZFCP_STATUS_COMMON_REMOVE, &port->status);
 	atomic_set(&port->refcount, 0);
 
-	dev_set_name(&port->sysfs_device, "0x%016llx",
-		     (unsigned long long)wwpn);
+	if (dev_set_name(&port->sysfs_device, "0x%016llx",
+			 (unsigned long long)wwpn)) {
+		kfree(port);
+		return ERR_PTR(-ENOMEM);
+	}
 	port->sysfs_device.parent = &adapter->ccw_device->dev;
-
 	port->sysfs_device.release = zfcp_sysfs_port_release;
 	dev_set_drvdata(&port->sysfs_device, port);
 
-	read_lock_irq(&zfcp_data.config_lock);
-	if (zfcp_get_port_by_wwpn(adapter, wwpn)) {
-		read_unlock_irq(&zfcp_data.config_lock);
-		goto err_out_free;
+	if (device_register(&port->sysfs_device)) {
+		put_device(&port->sysfs_device);
+		return ERR_PTR(-EINVAL);
 	}
-	read_unlock_irq(&zfcp_data.config_lock);
 
-	if (device_register(&port->sysfs_device))
-		goto err_out_free;
-
-	retval = sysfs_create_group(&port->sysfs_device.kobj,
-				    &zfcp_sysfs_port_attrs);
-
-	if (retval) {
+	if (sysfs_create_group(&port->sysfs_device.kobj,
+			       &zfcp_sysfs_port_attrs)) {
 		device_unregister(&port->sysfs_device);
-		goto err_out;
+		return ERR_PTR(-EINVAL);
 	}
 
 	zfcp_port_get(port);
@@ -659,11 +699,6 @@
 
 	zfcp_adapter_get(adapter);
 	return port;
-
-err_out_free:
-	kfree(port);
-err_out:
-	return ERR_PTR(-EINVAL);
 }
 
 /**
@@ -672,12 +707,11 @@
  */
 void zfcp_port_dequeue(struct zfcp_port *port)
 {
-	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
 	write_lock_irq(&zfcp_data.config_lock);
 	list_del(&port->list);
 	write_unlock_irq(&zfcp_data.config_lock);
-	if (port->rport)
-		port->rport->dd_data = NULL;
+	wait_event(port->remove_wq, atomic_read(&port->refcount) == 0);
+	cancel_work_sync(&port->rport_work); /* usually not necessary */
 	zfcp_adapter_put(port->adapter);
 	sysfs_remove_group(&port->sysfs_device.kobj, &zfcp_sysfs_port_attrs);
 	device_unregister(&port->sysfs_device);
diff --git a/drivers/s390/scsi/zfcp_ccw.c b/drivers/s390/scsi/zfcp_ccw.c
index d9da5c4..0c90f8e 100644
--- a/drivers/s390/scsi/zfcp_ccw.c
+++ b/drivers/s390/scsi/zfcp_ccw.c
@@ -18,12 +18,15 @@
 {
 	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
 
-	down(&zfcp_data.config_sema);
+	if (!adapter)
+		return 0;
+
+	mutex_lock(&zfcp_data.config_mutex);
 
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccsusp1", NULL);
 	zfcp_erp_wait(adapter);
 
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 
 	return 0;
 }
@@ -33,6 +36,9 @@
 {
 	struct zfcp_adapter *adapter = dev_get_drvdata(&cdev->dev);
 
+	if (!adapter)
+		return 0;
+
 	zfcp_erp_modify_adapter_status(adapter, "ccresu1", NULL,
 				       ZFCP_STATUS_COMMON_RUNNING, ZFCP_SET);
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -63,25 +69,14 @@
  * zfcp_ccw_probe - probe function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
  *
- * This function gets called by the common i/o layer and sets up the initial
- * data structures for each fcp adapter, which was detected by the system.
- * Also the sysfs files for this adapter will be created by this function.
- * In addition the nameserver port will be added to the ports of the adapter
- * and its sysfs representation will be created too.
+ * This function gets called by the common i/o layer for each FCP
+ * device found on the current system. This is only a stub to make cio
+ * work: To only allocate adapter resources for devices actually used,
+ * the allocation is deferred to the first call to ccw_set_online.
  */
 static int zfcp_ccw_probe(struct ccw_device *ccw_device)
 {
-	int retval = 0;
-
-	down(&zfcp_data.config_sema);
-	if (zfcp_adapter_enqueue(ccw_device)) {
-		dev_err(&ccw_device->dev,
-			"Setting up data structures for the "
-			"FCP adapter failed\n");
-		retval = -EINVAL;
-	}
-	up(&zfcp_data.config_sema);
-	return retval;
+	return 0;
 }
 
 /**
@@ -102,8 +97,11 @@
 	LIST_HEAD(port_remove_lh);
 
 	ccw_device_set_offline(ccw_device);
-	down(&zfcp_data.config_sema);
+
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
+	if (!adapter)
+		goto out;
 
 	write_lock_irq(&zfcp_data.config_lock);
 	list_for_each_entry_safe(port, p, &adapter->port_list_head, list) {
@@ -129,29 +127,41 @@
 	wait_event(adapter->remove_wq, atomic_read(&adapter->refcount) == 0);
 	zfcp_adapter_dequeue(adapter);
 
-	up(&zfcp_data.config_sema);
+out:
+	mutex_unlock(&zfcp_data.config_mutex);
 }
 
 /**
  * zfcp_ccw_set_online - set_online function of zfcp driver
  * @ccw_device: pointer to belonging ccw device
  *
- * This function gets called by the common i/o layer and sets an adapter
- * into state online. Setting an fcp device online means that it will be
- * registered with the SCSI stack, that the QDIO queues will be set up
- * and that the adapter will be opened (asynchronously).
+ * This function gets called by the common i/o layer and sets an
+ * adapter into state online.  The first call will allocate all
+ * adapter resources that will be retained until the device is removed
+ * via zfcp_ccw_remove.
+ *
+ * Setting an fcp device online means that it will be registered with
+ * the SCSI stack, that the QDIO queues will be set up and that the
+ * adapter will be opened.
  */
 static int zfcp_ccw_set_online(struct ccw_device *ccw_device)
 {
 	struct zfcp_adapter *adapter;
-	int retval;
+	int ret = 0;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
 
-	retval = zfcp_erp_thread_setup(adapter);
-	if (retval)
-		goto out;
+	if (!adapter) {
+		ret = zfcp_adapter_enqueue(ccw_device);
+		if (ret) {
+			dev_err(&ccw_device->dev,
+				"Setting up data structures for the "
+				"FCP adapter failed\n");
+			goto out;
+		}
+		adapter = dev_get_drvdata(&ccw_device->dev);
+	}
 
 	/* initialize request counter */
 	BUG_ON(!zfcp_reqlist_isempty(adapter));
@@ -162,13 +172,11 @@
 	zfcp_erp_adapter_reopen(adapter, ZFCP_STATUS_COMMON_ERP_FAILED,
 				"ccsonl2", NULL);
 	zfcp_erp_wait(adapter);
-	up(&zfcp_data.config_sema);
-	flush_work(&adapter->scan_work);
-	return 0;
-
- out:
-	up(&zfcp_data.config_sema);
-	return retval;
+out:
+	mutex_unlock(&zfcp_data.config_mutex);
+	if (!ret)
+		flush_work(&adapter->scan_work);
+	return ret;
 }
 
 /**
@@ -182,12 +190,15 @@
 {
 	struct zfcp_adapter *adapter;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&ccw_device->dev);
+	if (!adapter)
+		goto out;
+
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccsoff1", NULL);
 	zfcp_erp_wait(adapter);
-	zfcp_erp_thread_kill(adapter);
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
+out:
 	return 0;
 }
 
@@ -240,11 +251,12 @@
 {
 	struct zfcp_adapter *adapter;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	adapter = dev_get_drvdata(&cdev->dev);
 	zfcp_erp_adapter_shutdown(adapter, 0, "ccshut1", NULL);
 	zfcp_erp_wait(adapter);
-	up(&zfcp_data.config_sema);
+	zfcp_erp_thread_kill(adapter);
+	mutex_unlock(&zfcp_data.config_mutex);
 }
 
 static struct ccw_driver zfcp_ccw_driver = {
diff --git a/drivers/s390/scsi/zfcp_dbf.c b/drivers/s390/scsi/zfcp_dbf.c
index b99b87c..215b707 100644
--- a/drivers/s390/scsi/zfcp_dbf.c
+++ b/drivers/s390/scsi/zfcp_dbf.c
@@ -3,7 +3,7 @@
  *
  * Debug traces for zfcp.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -11,6 +11,7 @@
 
 #include <linux/ctype.h>
 #include <asm/debug.h>
+#include "zfcp_dbf.h"
 #include "zfcp_ext.h"
 
 static u32 dbfsize = 4;
@@ -37,19 +38,6 @@
 	}
 }
 
-/* FIXME: this duplicate this code in s390 debug feature */
-static void zfcp_dbf_timestamp(unsigned long long stck, struct timespec *time)
-{
-	unsigned long long sec;
-
-	stck -= 0x8126d60e46000000LL - (0x3c26700LL * 1000000 * 4096);
-	sec = stck >> 12;
-	do_div(sec, 1000000);
-	time->tv_sec = sec;
-	stck -= (sec * 1000000) << 12;
-	time->tv_nsec = ((stck * 1000) >> 12);
-}
-
 static void zfcp_dbf_tag(char **p, const char *label, const char *tag)
 {
 	int i;
@@ -106,7 +94,7 @@
 	char *p = out_buf;
 
 	if (strncmp(dump->tag, "dump", ZFCP_DBF_TAG_SIZE) != 0) {
-		zfcp_dbf_timestamp(entry->id.stck, &t);
+		stck_to_timespec(entry->id.stck, &t);
 		zfcp_dbf_out(&p, "timestamp", "%011lu:%06lu",
 			     t.tv_sec, t.tv_nsec);
 		zfcp_dbf_out(&p, "cpu", "%02i", entry->id.fields.cpuid);
@@ -119,13 +107,10 @@
 	return p - out_buf;
 }
 
-/**
- * zfcp_hba_dbf_event_fsf_response - trace event for request completion
- * @fsf_req: request that has been completed
- */
-void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *fsf_req)
+void _zfcp_dbf_hba_fsf_response(const char *tag2, int level,
+				struct zfcp_fsf_req *fsf_req,
+				struct zfcp_dbf *dbf)
 {
-	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_qtcb *qtcb = fsf_req->qtcb;
 	union fsf_prot_status_qual *prot_status_qual =
 					&qtcb->prefix.prot_status_qual;
@@ -134,33 +119,14 @@
 	struct zfcp_port *port;
 	struct zfcp_unit *unit;
 	struct zfcp_send_els *send_els;
-	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
-	struct zfcp_hba_dbf_record_response *response = &rec->u.response;
-	int level;
+	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
+	struct zfcp_dbf_hba_record_response *response = &rec->u.response;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "resp", ZFCP_DBF_TAG_SIZE);
-
-	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
-	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
-		strncpy(rec->tag2, "perr", ZFCP_DBF_TAG_SIZE);
-		level = 1;
-	} else if (qtcb->header.fsf_status != FSF_GOOD) {
-		strncpy(rec->tag2, "ferr", ZFCP_DBF_TAG_SIZE);
-		level = 1;
-	} else if ((fsf_req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
-		   (fsf_req->fsf_command == FSF_QTCB_OPEN_LUN)) {
-		strncpy(rec->tag2, "open", ZFCP_DBF_TAG_SIZE);
-		level = 4;
-	} else if (qtcb->header.log_length) {
-		strncpy(rec->tag2, "qtcb", ZFCP_DBF_TAG_SIZE);
-		level = 5;
-	} else {
-		strncpy(rec->tag2, "norm", ZFCP_DBF_TAG_SIZE);
-		level = 6;
-	}
+	strncpy(rec->tag2, tag2, ZFCP_DBF_TAG_SIZE);
 
 	response->fsf_command = fsf_req->fsf_command;
 	response->fsf_reqid = fsf_req->req_id;
@@ -173,9 +139,9 @@
 	memcpy(response->fsf_status_qual,
 	       fsf_status_qual, FSF_STATUS_QUALIFIER_SIZE);
 	response->fsf_req_status = fsf_req->status;
-	response->sbal_first = fsf_req->sbal_first;
-	response->sbal_last = fsf_req->sbal_last;
-	response->sbal_response = fsf_req->sbal_response;
+	response->sbal_first = fsf_req->queue_req.sbal_first;
+	response->sbal_last = fsf_req->queue_req.sbal_last;
+	response->sbal_response = fsf_req->queue_req.sbal_response;
 	response->pool = fsf_req->pool != NULL;
 	response->erp_action = (unsigned long)fsf_req->erp_action;
 
@@ -224,7 +190,7 @@
 		break;
 	}
 
-	debug_event(adapter->hba_dbf, level, rec, sizeof(*rec));
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
 
 	/* have fcp channel microcode fixed to use as little as possible */
 	if (fsf_req->fsf_command != FSF_QTCB_FCP_CMND) {
@@ -232,31 +198,25 @@
 		char *buf = (char *)qtcb + qtcb->header.log_start;
 		int len = qtcb->header.log_length;
 		for (; len && !buf[len - 1]; len--);
-		zfcp_dbf_hexdump(adapter->hba_dbf, rec, sizeof(*rec), level,
-				 buf, len);
+		zfcp_dbf_hexdump(dbf->hba, rec, sizeof(*rec), level, buf,
+				 len);
 	}
 
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
-/**
- * zfcp_hba_dbf_event_fsf_unsol - trace event for an unsolicited status buffer
- * @tag: tag indicating which kind of unsolicited status has been received
- * @adapter: adapter that has issued the unsolicited status buffer
- * @status_buffer: buffer containing payload of unsolicited status
- */
-void zfcp_hba_dbf_event_fsf_unsol(const char *tag, struct zfcp_adapter *adapter,
-				  struct fsf_status_read_buffer *status_buffer)
+void _zfcp_dbf_hba_fsf_unsol(const char *tag, int level, struct zfcp_dbf *dbf,
+			     struct fsf_status_read_buffer *status_buffer)
 {
-	struct zfcp_hba_dbf_record *rec = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *rec = &dbf->hba_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, "stat", ZFCP_DBF_TAG_SIZE);
 	strncpy(rec->tag2, tag, ZFCP_DBF_TAG_SIZE);
 
-	rec->u.status.failed = atomic_read(&adapter->stat_miss);
+	rec->u.status.failed = atomic_read(&dbf->adapter->stat_miss);
 	if (status_buffer != NULL) {
 		rec->u.status.status_type = status_buffer->status_type;
 		rec->u.status.status_subtype = status_buffer->status_subtype;
@@ -293,63 +253,61 @@
 		       &status_buffer->payload, rec->u.status.payload_size);
 	}
 
-	debug_event(adapter->hba_dbf, 2, rec, sizeof(*rec));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, level, rec, sizeof(*rec));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
 /**
- * zfcp_hba_dbf_event_qdio - trace event for QDIO related failure
- * @adapter: adapter affected by this QDIO related event
+ * zfcp_dbf_hba_qdio - trace event for QDIO related failure
+ * @qdio: qdio structure affected by this QDIO related event
  * @qdio_error: as passed by qdio module
  * @sbal_index: first buffer with error condition, as passed by qdio module
  * @sbal_count: number of buffers affected, as passed by qdio module
  */
-void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *adapter,
-			     unsigned int qdio_error, int sbal_index,
-			     int sbal_count)
+void zfcp_dbf_hba_qdio(struct zfcp_dbf *dbf, unsigned int qdio_error,
+		       int sbal_index, int sbal_count)
 {
-	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "qdio", ZFCP_DBF_TAG_SIZE);
 	r->u.qdio.qdio_error = qdio_error;
 	r->u.qdio.sbal_index = sbal_index;
 	r->u.qdio.sbal_count = sbal_count;
-	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, 0, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
 
 /**
- * zfcp_hba_dbf_event_berr - trace event for bit error threshold
- * @adapter: adapter affected by this QDIO related event
+ * zfcp_dbf_hba_berr - trace event for bit error threshold
+ * @dbf: dbf structure affected by this QDIO related event
  * @req: fsf request
  */
-void zfcp_hba_dbf_event_berr(struct zfcp_adapter *adapter,
-			     struct zfcp_fsf_req *req)
+void zfcp_dbf_hba_berr(struct zfcp_dbf *dbf, struct zfcp_fsf_req *req)
 {
-	struct zfcp_hba_dbf_record *r = &adapter->hba_dbf_buf;
+	struct zfcp_dbf_hba_record *r = &dbf->hba_buf;
 	struct fsf_status_read_buffer *sr_buf = req->data;
 	struct fsf_bit_error_payload *err = &sr_buf->payload.bit_error;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->hba_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->hba_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "berr", ZFCP_DBF_TAG_SIZE);
 	memcpy(&r->u.berr, err, sizeof(struct fsf_bit_error_payload));
-	debug_event(adapter->hba_dbf, 0, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->hba_dbf_lock, flags);
+	debug_event(dbf->hba, 0, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->hba_lock, flags);
 }
-static void zfcp_hba_dbf_view_response(char **p,
-				       struct zfcp_hba_dbf_record_response *r)
+static void zfcp_dbf_hba_view_response(char **p,
+				       struct zfcp_dbf_hba_record_response *r)
 {
 	struct timespec t;
 
 	zfcp_dbf_out(p, "fsf_command", "0x%08x", r->fsf_command);
 	zfcp_dbf_out(p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
 	zfcp_dbf_out(p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	zfcp_dbf_timestamp(r->fsf_issued, &t);
+	stck_to_timespec(r->fsf_issued, &t);
 	zfcp_dbf_out(p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
 	zfcp_dbf_out(p, "fsf_prot_status", "0x%08x", r->fsf_prot_status);
 	zfcp_dbf_out(p, "fsf_status", "0x%08x", r->fsf_status);
@@ -403,8 +361,8 @@
 	}
 }
 
-static void zfcp_hba_dbf_view_status(char **p,
-				     struct zfcp_hba_dbf_record_status *r)
+static void zfcp_dbf_hba_view_status(char **p,
+				     struct zfcp_dbf_hba_record_status *r)
 {
 	zfcp_dbf_out(p, "failed", "0x%02x", r->failed);
 	zfcp_dbf_out(p, "status_type", "0x%08x", r->status_type);
@@ -416,14 +374,14 @@
 		      r->payload_size);
 }
 
-static void zfcp_hba_dbf_view_qdio(char **p, struct zfcp_hba_dbf_record_qdio *r)
+static void zfcp_dbf_hba_view_qdio(char **p, struct zfcp_dbf_hba_record_qdio *r)
 {
 	zfcp_dbf_out(p, "qdio_error", "0x%08x", r->qdio_error);
 	zfcp_dbf_out(p, "sbal_index", "0x%02x", r->sbal_index);
 	zfcp_dbf_out(p, "sbal_count", "0x%02x", r->sbal_count);
 }
 
-static void zfcp_hba_dbf_view_berr(char **p, struct fsf_bit_error_payload *r)
+static void zfcp_dbf_hba_view_berr(char **p, struct fsf_bit_error_payload *r)
 {
 	zfcp_dbf_out(p, "link_failures", "%d", r->link_failure_error_count);
 	zfcp_dbf_out(p, "loss_of_sync_err", "%d", r->loss_of_sync_error_count);
@@ -447,10 +405,10 @@
 		     r->current_transmit_b2b_credit);
 }
 
-static int zfcp_hba_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_hba_view_format(debug_info_t *id, struct debug_view *view,
 				    char *out_buf, const char *in_buf)
 {
-	struct zfcp_hba_dbf_record *r = (struct zfcp_hba_dbf_record *)in_buf;
+	struct zfcp_dbf_hba_record *r = (struct zfcp_dbf_hba_record *)in_buf;
 	char *p = out_buf;
 
 	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
@@ -461,45 +419,42 @@
 		zfcp_dbf_tag(&p, "tag2", r->tag2);
 
 	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_response(&p, &r->u.response);
+		zfcp_dbf_hba_view_response(&p, &r->u.response);
 	else if (strncmp(r->tag, "stat", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_status(&p, &r->u.status);
+		zfcp_dbf_hba_view_status(&p, &r->u.status);
 	else if (strncmp(r->tag, "qdio", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_qdio(&p, &r->u.qdio);
+		zfcp_dbf_hba_view_qdio(&p, &r->u.qdio);
 	else if (strncmp(r->tag, "berr", ZFCP_DBF_TAG_SIZE) == 0)
-		zfcp_hba_dbf_view_berr(&p, &r->u.berr);
+		zfcp_dbf_hba_view_berr(&p, &r->u.berr);
 
 	if (strncmp(r->tag, "resp", ZFCP_DBF_TAG_SIZE) != 0)
 		p += sprintf(p, "\n");
 	return p - out_buf;
 }
 
-static struct debug_view zfcp_hba_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_hba_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_hba_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_hba_view_format,
 };
 
-static const char *zfcp_rec_dbf_tags[] = {
+static const char *zfcp_dbf_rec_tags[] = {
 	[ZFCP_REC_DBF_ID_THREAD] = "thread",
 	[ZFCP_REC_DBF_ID_TARGET] = "target",
 	[ZFCP_REC_DBF_ID_TRIGGER] = "trigger",
 	[ZFCP_REC_DBF_ID_ACTION] = "action",
 };
 
-static int zfcp_rec_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_rec_view_format(debug_info_t *id, struct debug_view *view,
 				    char *buf, const char *_rec)
 {
-	struct zfcp_rec_dbf_record *r = (struct zfcp_rec_dbf_record *)_rec;
+	struct zfcp_dbf_rec_record *r = (struct zfcp_dbf_rec_record *)_rec;
 	char *p = buf;
 	char hint[ZFCP_DBF_ID_SIZE + 1];
 
 	memcpy(hint, r->id2, ZFCP_DBF_ID_SIZE);
 	hint[ZFCP_DBF_ID_SIZE] = 0;
-	zfcp_dbf_outs(&p, "tag", zfcp_rec_dbf_tags[r->id]);
+	zfcp_dbf_outs(&p, "tag", zfcp_dbf_rec_tags[r->id]);
 	zfcp_dbf_outs(&p, "hint", hint);
 	switch (r->id) {
 	case ZFCP_REC_DBF_ID_THREAD:
@@ -537,24 +492,22 @@
 	return p - buf;
 }
 
-static struct debug_view zfcp_rec_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_rec_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_rec_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_rec_view_format,
 };
 
 /**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
+ * zfcp_dbf_rec_thread - trace event related to recovery thread operation
  * @id2: identifier for event
- * @adapter: adapter
+ * @dbf: reference to dbf structure
  * This function assumes that the caller is holding erp_lock.
  */
-void zfcp_rec_dbf_event_thread(char *id2, struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_thread(char *id2, struct zfcp_dbf *dbf)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_adapter *adapter = dbf->adapter;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags = 0;
 	struct list_head *entry;
 	unsigned ready = 0, running = 0, total;
@@ -565,41 +518,41 @@
 		running++;
 	total = adapter->erp_total_count;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_THREAD;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
 	r->u.thread.total = total;
 	r->u.thread.ready = ready;
 	r->u.thread.running = running;
-	debug_event(adapter->rec_dbf, 6, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 6, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_thread - trace event related to recovery thread operation
+ * zfcp_dbf_rec_thread_lock - trace event related to recovery thread operation
  * @id2: identifier for event
  * @adapter: adapter
  * This function assumes that the caller does not hold erp_lock.
  */
-void zfcp_rec_dbf_event_thread_lock(char *id2, struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_thread_lock(char *id2, struct zfcp_dbf *dbf)
 {
+	struct zfcp_adapter *adapter = dbf->adapter;
 	unsigned long flags;
 
 	read_lock_irqsave(&adapter->erp_lock, flags);
-	zfcp_rec_dbf_event_thread(id2, adapter);
+	zfcp_dbf_rec_thread(id2, dbf);
 	read_unlock_irqrestore(&adapter->erp_lock, flags);
 }
 
-static void zfcp_rec_dbf_event_target(char *id2, void *ref,
-				      struct zfcp_adapter *adapter,
-				      atomic_t *status, atomic_t *erp_count,
-				      u64 wwpn, u32 d_id, u64 fcp_lun)
+static void zfcp_dbf_rec_target(char *id2, void *ref, struct zfcp_dbf *dbf,
+				atomic_t *status, atomic_t *erp_count, u64 wwpn,
+				u32 d_id, u64 fcp_lun)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_TARGET;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -609,56 +562,57 @@
 	r->u.target.d_id = d_id;
 	r->u.target.fcp_lun = fcp_lun;
 	r->u.target.erp_count = atomic_read(erp_count);
-	debug_event(adapter->rec_dbf, 3, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 3, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_adapter - trace event for adapter state change
+ * zfcp_dbf_rec_adapter - trace event for adapter state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
- * @adapter: adapter
+ * @dbf: reference to dbf structure
  */
-void zfcp_rec_dbf_event_adapter(char *id, void *ref,
-				struct zfcp_adapter *adapter)
+void zfcp_dbf_rec_adapter(char *id, void *ref, struct zfcp_dbf *dbf)
 {
-	zfcp_rec_dbf_event_target(id, ref, adapter, &adapter->status,
+	struct zfcp_adapter *adapter = dbf->adapter;
+
+	zfcp_dbf_rec_target(id, ref, dbf, &adapter->status,
 				  &adapter->erp_counter, 0, 0, 0);
 }
 
 /**
- * zfcp_rec_dbf_event_port - trace event for port state change
+ * zfcp_dbf_rec_port - trace event for port state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
  * @port: port
  */
-void zfcp_rec_dbf_event_port(char *id, void *ref, struct zfcp_port *port)
+void zfcp_dbf_rec_port(char *id, void *ref, struct zfcp_port *port)
 {
-	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_dbf *dbf = port->adapter->dbf;
 
-	zfcp_rec_dbf_event_target(id, ref, adapter, &port->status,
+	zfcp_dbf_rec_target(id, ref, dbf, &port->status,
 				  &port->erp_counter, port->wwpn, port->d_id,
 				  0);
 }
 
 /**
- * zfcp_rec_dbf_event_unit - trace event for unit state change
+ * zfcp_dbf_rec_unit - trace event for unit state change
  * @id: identifier for trigger of state change
  * @ref: additional reference (e.g. request)
  * @unit: unit
  */
-void zfcp_rec_dbf_event_unit(char *id, void *ref, struct zfcp_unit *unit)
+void zfcp_dbf_rec_unit(char *id, void *ref, struct zfcp_unit *unit)
 {
 	struct zfcp_port *port = unit->port;
-	struct zfcp_adapter *adapter = port->adapter;
+	struct zfcp_dbf *dbf = port->adapter->dbf;
 
-	zfcp_rec_dbf_event_target(id, ref, adapter, &unit->status,
+	zfcp_dbf_rec_target(id, ref, dbf, &unit->status,
 				  &unit->erp_counter, port->wwpn, port->d_id,
 				  unit->fcp_lun);
 }
 
 /**
- * zfcp_rec_dbf_event_trigger - trace event for triggered error recovery
+ * zfcp_dbf_rec_trigger - trace event for triggered error recovery
  * @id2: identifier for error recovery trigger
  * @ref: additional reference (e.g. request)
  * @want: originally requested error recovery action
@@ -668,14 +622,15 @@
  * @port: port
  * @unit: unit
  */
-void zfcp_rec_dbf_event_trigger(char *id2, void *ref, u8 want, u8 need,
-				void *action, struct zfcp_adapter *adapter,
-				struct zfcp_port *port, struct zfcp_unit *unit)
+void zfcp_dbf_rec_trigger(char *id2, void *ref, u8 want, u8 need, void *action,
+			  struct zfcp_adapter *adapter, struct zfcp_port *port,
+			  struct zfcp_unit *unit)
 {
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_TRIGGER;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -692,22 +647,22 @@
 		r->u.trigger.us = atomic_read(&unit->status);
 		r->u.trigger.fcp_lun = unit->fcp_lun;
 	}
-	debug_event(adapter->rec_dbf, action ? 1 : 4, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, action ? 1 : 4, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_rec_dbf_event_action - trace event showing progress of recovery action
+ * zfcp_dbf_rec_action - trace event showing progress of recovery action
  * @id2: identifier
  * @erp_action: error recovery action struct pointer
  */
-void zfcp_rec_dbf_event_action(char *id2, struct zfcp_erp_action *erp_action)
+void zfcp_dbf_rec_action(char *id2, struct zfcp_erp_action *erp_action)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_rec_dbf_record *r = &adapter->rec_dbf_buf;
+	struct zfcp_dbf *dbf = erp_action->adapter->dbf;
+	struct zfcp_dbf_rec_record *r = &dbf->rec_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->rec_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->rec_lock, flags);
 	memset(r, 0, sizeof(*r));
 	r->id = ZFCP_REC_DBF_ID_ACTION;
 	memcpy(r->id2, id2, ZFCP_DBF_ID_SIZE);
@@ -715,26 +670,27 @@
 	r->u.action.status = erp_action->status;
 	r->u.action.step = erp_action->step;
 	r->u.action.fsf_req = (unsigned long)erp_action->fsf_req;
-	debug_event(adapter->rec_dbf, 5, r, sizeof(*r));
-	spin_unlock_irqrestore(&adapter->rec_dbf_lock, flags);
+	debug_event(dbf->rec, 5, r, sizeof(*r));
+	spin_unlock_irqrestore(&dbf->rec_lock, flags);
 }
 
 /**
- * zfcp_san_dbf_event_ct_request - trace event for issued CT request
+ * zfcp_dbf_san_ct_request - trace event for issued CT request
  * @fsf_req: request containing issued CT data
  */
-void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_dbf *dbf = adapter->dbf;
 	struct ct_hdr *hdr = sg_virt(ct->req);
-	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
-	struct zfcp_san_dbf_record_ct_request *oct = &r->u.ct_req;
+	struct zfcp_dbf_san_record *r = &dbf->san_buf;
+	struct zfcp_dbf_san_record_ct_request *oct = &r->u.ct_req;
 	int level = 3;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "octc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
@@ -749,28 +705,29 @@
 	oct->max_res_size = hdr->max_res_size;
 	oct->len = min((int)ct->req->length - (int)sizeof(struct ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(adapter->san_dbf, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+	debug_event(dbf->san, level, r, sizeof(*r));
+	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
 			 (void *)hdr + sizeof(struct ct_hdr), oct->len);
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 /**
- * zfcp_san_dbf_event_ct_response - trace event for completion of CT request
+ * zfcp_dbf_san_ct_response - trace event for completion of CT request
  * @fsf_req: request containing CT response
  */
-void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_ct *ct = (struct zfcp_send_ct *)fsf_req->data;
 	struct zfcp_wka_port *wka_port = ct->wka_port;
 	struct zfcp_adapter *adapter = wka_port->adapter;
 	struct ct_hdr *hdr = sg_virt(ct->resp);
-	struct zfcp_san_dbf_record *r = &adapter->san_dbf_buf;
-	struct zfcp_san_dbf_record_ct_response *rct = &r->u.ct_resp;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_san_record *r = &dbf->san_buf;
+	struct zfcp_dbf_san_record_ct_response *rct = &r->u.ct_resp;
 	int level = 3;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(r, 0, sizeof(*r));
 	strncpy(r->tag, "rctc", ZFCP_DBF_TAG_SIZE);
 	r->fsf_reqid = fsf_req->req_id;
@@ -785,22 +742,22 @@
 	rct->max_res_size = hdr->max_res_size;
 	rct->len = min((int)ct->resp->length - (int)sizeof(struct ct_hdr),
 		       ZFCP_DBF_SAN_MAX_PAYLOAD);
-	debug_event(adapter->san_dbf, level, r, sizeof(*r));
-	zfcp_dbf_hexdump(adapter->san_dbf, r, sizeof(*r), level,
+	debug_event(dbf->san, level, r, sizeof(*r));
+	zfcp_dbf_hexdump(dbf->san, r, sizeof(*r), level,
 			 (void *)hdr + sizeof(struct ct_hdr), rct->len);
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
-static void zfcp_san_dbf_event_els(const char *tag, int level,
-				   struct zfcp_fsf_req *fsf_req, u32 s_id,
-				   u32 d_id, u8 ls_code, void *buffer,
-				   int buflen)
+static void zfcp_dbf_san_els(const char *tag, int level,
+			     struct zfcp_fsf_req *fsf_req, u32 s_id, u32 d_id,
+			     u8 ls_code, void *buffer, int buflen)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_san_dbf_record *rec = &adapter->san_dbf_buf;
+	struct zfcp_dbf *dbf = adapter->dbf;
+	struct zfcp_dbf_san_record *rec = &dbf->san_buf;
 	unsigned long flags;
 
-	spin_lock_irqsave(&adapter->san_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->san_lock, flags);
 	memset(rec, 0, sizeof(*rec));
 	strncpy(rec->tag, tag, ZFCP_DBF_TAG_SIZE);
 	rec->fsf_reqid = fsf_req->req_id;
@@ -808,45 +765,45 @@
 	rec->s_id = s_id;
 	rec->d_id = d_id;
 	rec->u.els.ls_code = ls_code;
-	debug_event(adapter->san_dbf, level, rec, sizeof(*rec));
-	zfcp_dbf_hexdump(adapter->san_dbf, rec, sizeof(*rec), level,
+	debug_event(dbf->san, level, rec, sizeof(*rec));
+	zfcp_dbf_hexdump(dbf->san, rec, sizeof(*rec), level,
 			 buffer, min(buflen, ZFCP_DBF_SAN_MAX_PAYLOAD));
-	spin_unlock_irqrestore(&adapter->san_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->san_lock, flags);
 }
 
 /**
- * zfcp_san_dbf_event_els_request - trace event for issued ELS
+ * zfcp_dbf_san_els_request - trace event for issued ELS
  * @fsf_req: request containing issued ELS
  */
-void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_els_request(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
 
-	zfcp_san_dbf_event_els("oels", 2, fsf_req,
+	zfcp_dbf_san_els("oels", 2, fsf_req,
 			       fc_host_port_id(els->adapter->scsi_host),
 			       els->d_id, *(u8 *) sg_virt(els->req),
 			       sg_virt(els->req), els->req->length);
 }
 
 /**
- * zfcp_san_dbf_event_els_response - trace event for completed ELS
+ * zfcp_dbf_san_els_response - trace event for completed ELS
  * @fsf_req: request containing ELS response
  */
-void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_els_response(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_send_els *els = (struct zfcp_send_els *)fsf_req->data;
 
-	zfcp_san_dbf_event_els("rels", 2, fsf_req, els->d_id,
+	zfcp_dbf_san_els("rels", 2, fsf_req, els->d_id,
 			       fc_host_port_id(els->adapter->scsi_host),
 			       *(u8 *)sg_virt(els->req), sg_virt(els->resp),
 			       els->resp->length);
 }
 
 /**
- * zfcp_san_dbf_event_incoming_els - trace event for incomig ELS
+ * zfcp_dbf_san_incoming_els - trace event for incoming ELS
  * @fsf_req: request containing unsolicited status buffer with incoming ELS
  */
-void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *fsf_req)
+void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *fsf_req)
 {
 	struct zfcp_adapter *adapter = fsf_req->adapter;
 	struct fsf_status_read_buffer *buf =
@@ -854,16 +811,16 @@
 	int length = (int)buf->length -
 		     (int)((void *)&buf->payload - (void *)buf);
 
-	zfcp_san_dbf_event_els("iels", 1, fsf_req, buf->d_id,
+	zfcp_dbf_san_els("iels", 1, fsf_req, buf->d_id,
 			       fc_host_port_id(adapter->scsi_host),
 			       buf->payload.data[0], (void *)buf->payload.data,
 			       length);
 }
 
-static int zfcp_san_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_san_view_format(debug_info_t *id, struct debug_view *view,
 				    char *out_buf, const char *in_buf)
 {
-	struct zfcp_san_dbf_record *r = (struct zfcp_san_dbf_record *)in_buf;
+	struct zfcp_dbf_san_record *r = (struct zfcp_dbf_san_record *)in_buf;
 	char *p = out_buf;
 
 	if (strncmp(r->tag, "dump", ZFCP_DBF_TAG_SIZE) == 0)
@@ -876,7 +833,7 @@
 	zfcp_dbf_out(&p, "d_id", "0x%06x", r->d_id);
 
 	if (strncmp(r->tag, "octc", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_san_dbf_record_ct_request *ct = &r->u.ct_req;
+		struct zfcp_dbf_san_record_ct_request *ct = &r->u.ct_req;
 		zfcp_dbf_out(&p, "cmd_req_code", "0x%04x", ct->cmd_req_code);
 		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
 		zfcp_dbf_out(&p, "gs_type", "0x%02x", ct->gs_type);
@@ -884,7 +841,7 @@
 		zfcp_dbf_out(&p, "options", "0x%02x", ct->options);
 		zfcp_dbf_out(&p, "max_res_size", "0x%04x", ct->max_res_size);
 	} else if (strncmp(r->tag, "rctc", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_san_dbf_record_ct_response *ct = &r->u.ct_resp;
+		struct zfcp_dbf_san_record_ct_response *ct = &r->u.ct_resp;
 		zfcp_dbf_out(&p, "cmd_rsp_code", "0x%04x", ct->cmd_rsp_code);
 		zfcp_dbf_out(&p, "revision", "0x%02x", ct->revision);
 		zfcp_dbf_out(&p, "reason_code", "0x%02x", ct->reason_code);
@@ -894,35 +851,30 @@
 	} else if (strncmp(r->tag, "oels", ZFCP_DBF_TAG_SIZE) == 0 ||
 		   strncmp(r->tag, "rels", ZFCP_DBF_TAG_SIZE) == 0 ||
 		   strncmp(r->tag, "iels", ZFCP_DBF_TAG_SIZE) == 0) {
-		struct zfcp_san_dbf_record_els *els = &r->u.els;
+		struct zfcp_dbf_san_record_els *els = &r->u.els;
 		zfcp_dbf_out(&p, "ls_code", "0x%02x", els->ls_code);
 	}
 	return p - out_buf;
 }
 
-static struct debug_view zfcp_san_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_san_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_san_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_san_view_format,
 };
 
-static void zfcp_scsi_dbf_event(const char *tag, const char *tag2, int level,
-				struct zfcp_adapter *adapter,
-				struct scsi_cmnd *scsi_cmnd,
-				struct zfcp_fsf_req *fsf_req,
-				unsigned long old_req_id)
+void _zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
+		    struct zfcp_dbf *dbf, struct scsi_cmnd *scsi_cmnd,
+		    struct zfcp_fsf_req *fsf_req, unsigned long old_req_id)
 {
-	struct zfcp_scsi_dbf_record *rec = &adapter->scsi_dbf_buf;
+	struct zfcp_dbf_scsi_record *rec = &dbf->scsi_buf;
 	struct zfcp_dbf_dump *dump = (struct zfcp_dbf_dump *)rec;
 	unsigned long flags;
 	struct fcp_rsp_iu *fcp_rsp;
 	char *fcp_rsp_info = NULL, *fcp_sns_info = NULL;
 	int offset = 0, buflen = 0;
 
-	spin_lock_irqsave(&adapter->scsi_dbf_lock, flags);
+	spin_lock_irqsave(&dbf->scsi_lock, flags);
 	do {
 		memset(rec, 0, sizeof(*rec));
 		if (offset == 0) {
@@ -976,68 +928,20 @@
 			dump->offset = offset;
 			dump->size = min(buflen - offset,
 					 (int)sizeof(struct
-						     zfcp_scsi_dbf_record) -
+						     zfcp_dbf_scsi_record) -
 					 (int)sizeof(struct zfcp_dbf_dump));
 			memcpy(dump->data, fcp_sns_info + offset, dump->size);
 			offset += dump->size;
 		}
-		debug_event(adapter->scsi_dbf, level, rec, sizeof(*rec));
+		debug_event(dbf->scsi, level, rec, sizeof(*rec));
 	} while (offset < buflen);
-	spin_unlock_irqrestore(&adapter->scsi_dbf_lock, flags);
+	spin_unlock_irqrestore(&dbf->scsi_lock, flags);
 }
 
-/**
- * zfcp_scsi_dbf_event_result - trace event for SCSI command completion
- * @tag: tag indicating success or failure of SCSI command
- * @level: trace level applicable for this event
- * @adapter: adapter that has been used to issue the SCSI command
- * @scsi_cmnd: SCSI command pointer
- * @fsf_req: request used to issue SCSI command (might be NULL)
- */
-void zfcp_scsi_dbf_event_result(const char *tag, int level,
-				struct zfcp_adapter *adapter,
-				struct scsi_cmnd *scsi_cmnd,
-				struct zfcp_fsf_req *fsf_req)
-{
-	zfcp_scsi_dbf_event("rslt", tag, level, adapter, scsi_cmnd, fsf_req, 0);
-}
-
-/**
- * zfcp_scsi_dbf_event_abort - trace event for SCSI command abort
- * @tag: tag indicating success or failure of abort operation
- * @adapter: adapter thas has been used to issue SCSI command to be aborted
- * @scsi_cmnd: SCSI command to be aborted
- * @new_fsf_req: request containing abort (might be NULL)
- * @old_req_id: identifier of request containg SCSI command to be aborted
- */
-void zfcp_scsi_dbf_event_abort(const char *tag, struct zfcp_adapter *adapter,
-			       struct scsi_cmnd *scsi_cmnd,
-			       struct zfcp_fsf_req *new_fsf_req,
-			       unsigned long old_req_id)
-{
-	zfcp_scsi_dbf_event("abrt", tag, 1, adapter, scsi_cmnd, new_fsf_req,
-			    old_req_id);
-}
-
-/**
- * zfcp_scsi_dbf_event_devreset - trace event for Logical Unit or Target Reset
- * @tag: tag indicating success or failure of reset operation
- * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
- * @unit: unit that needs reset
- * @scsi_cmnd: SCSI command which caused this error recovery
- */
-void zfcp_scsi_dbf_event_devreset(const char *tag, u8 flag,
-				  struct zfcp_unit *unit,
-				  struct scsi_cmnd *scsi_cmnd)
-{
-	zfcp_scsi_dbf_event(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
-			    unit->port->adapter, scsi_cmnd, NULL, 0);
-}
-
-static int zfcp_scsi_dbf_view_format(debug_info_t *id, struct debug_view *view,
+static int zfcp_dbf_scsi_view_format(debug_info_t *id, struct debug_view *view,
 				     char *out_buf, const char *in_buf)
 {
-	struct zfcp_scsi_dbf_record *r = (struct zfcp_scsi_dbf_record *)in_buf;
+	struct zfcp_dbf_scsi_record *r = (struct zfcp_dbf_scsi_record *)in_buf;
 	struct timespec t;
 	char *p = out_buf;
 
@@ -1059,7 +963,7 @@
 		zfcp_dbf_out(&p, "old_fsf_reqid", "0x%0Lx", r->old_fsf_reqid);
 	zfcp_dbf_out(&p, "fsf_reqid", "0x%0Lx", r->fsf_reqid);
 	zfcp_dbf_out(&p, "fsf_seqno", "0x%08x", r->fsf_seqno);
-	zfcp_dbf_timestamp(r->fsf_issued, &t);
+	stck_to_timespec(r->fsf_issued, &t);
 	zfcp_dbf_out(&p, "fsf_issued", "%011lu:%06lu", t.tv_sec, t.tv_nsec);
 
 	if (strncmp(r->tag, "rslt", ZFCP_DBF_TAG_SIZE) == 0) {
@@ -1078,84 +982,96 @@
 	return p - out_buf;
 }
 
-static struct debug_view zfcp_scsi_dbf_view = {
-	"structured",
-	NULL,
-	&zfcp_dbf_view_header,
-	&zfcp_scsi_dbf_view_format,
-	NULL,
-	NULL
+static struct debug_view zfcp_dbf_scsi_view = {
+	.name = "structured",
+	.header_proc = zfcp_dbf_view_header,
+	.format_proc = zfcp_dbf_scsi_view_format,
 };
 
+static debug_info_t *zfcp_dbf_reg(const char *name, int level,
+				  struct debug_view *view, int size)
+{
+	struct debug_info *d;
+
+	d = debug_register(name, dbfsize, level, size);
+	if (!d)
+		return NULL;
+
+	debug_register_view(d, &debug_hex_ascii_view);
+	debug_register_view(d, view);
+	debug_set_level(d, level);
+
+	return d;
+}
+
 /**
  * zfcp_adapter_debug_register - registers debug feature for an adapter
  * @adapter: pointer to adapter for which debug features should be registered
  * return: -ENOMEM on error, 0 otherwise
  */
-int zfcp_adapter_debug_register(struct zfcp_adapter *adapter)
+int zfcp_dbf_adapter_register(struct zfcp_adapter *adapter)
 {
 	char dbf_name[DEBUG_MAX_NAME_LEN];
+	struct zfcp_dbf *dbf;
+
+	dbf = kmalloc(sizeof(struct zfcp_dbf), GFP_KERNEL);
+	if (!dbf)
+		return -ENOMEM;
+
+	dbf->adapter = adapter;
+
+	spin_lock_init(&dbf->hba_lock);
+	spin_lock_init(&dbf->san_lock);
+	spin_lock_init(&dbf->scsi_lock);
+	spin_lock_init(&dbf->rec_lock);
 
 	/* debug feature area which records recovery activity */
 	sprintf(dbf_name, "zfcp_%s_rec", dev_name(&adapter->ccw_device->dev));
-	adapter->rec_dbf = debug_register(dbf_name, dbfsize, 1,
-					  sizeof(struct zfcp_rec_dbf_record));
-	if (!adapter->rec_dbf)
-		goto failed;
-	debug_register_view(adapter->rec_dbf, &debug_hex_ascii_view);
-	debug_register_view(adapter->rec_dbf, &zfcp_rec_dbf_view);
-	debug_set_level(adapter->rec_dbf, 3);
+	dbf->rec = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_rec_view,
+				sizeof(struct zfcp_dbf_rec_record));
+	if (!dbf->rec)
+		goto err_out;
 
 	/* debug feature area which records HBA (FSF and QDIO) conditions */
 	sprintf(dbf_name, "zfcp_%s_hba", dev_name(&adapter->ccw_device->dev));
-	adapter->hba_dbf = debug_register(dbf_name, dbfsize, 1,
-					  sizeof(struct zfcp_hba_dbf_record));
-	if (!adapter->hba_dbf)
-		goto failed;
-	debug_register_view(adapter->hba_dbf, &debug_hex_ascii_view);
-	debug_register_view(adapter->hba_dbf, &zfcp_hba_dbf_view);
-	debug_set_level(adapter->hba_dbf, 3);
+	dbf->hba = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_hba_view,
+				sizeof(struct zfcp_dbf_hba_record));
+	if (!dbf->hba)
+		goto err_out;
 
 	/* debug feature area which records SAN command failures and recovery */
 	sprintf(dbf_name, "zfcp_%s_san", dev_name(&adapter->ccw_device->dev));
-	adapter->san_dbf = debug_register(dbf_name, dbfsize, 1,
-					  sizeof(struct zfcp_san_dbf_record));
-	if (!adapter->san_dbf)
-		goto failed;
-	debug_register_view(adapter->san_dbf, &debug_hex_ascii_view);
-	debug_register_view(adapter->san_dbf, &zfcp_san_dbf_view);
-	debug_set_level(adapter->san_dbf, 6);
+	dbf->san = zfcp_dbf_reg(dbf_name, 6, &zfcp_dbf_san_view,
+				sizeof(struct zfcp_dbf_san_record));
+	if (!dbf->san)
+		goto err_out;
 
 	/* debug feature area which records SCSI command failures and recovery */
 	sprintf(dbf_name, "zfcp_%s_scsi", dev_name(&adapter->ccw_device->dev));
-	adapter->scsi_dbf = debug_register(dbf_name, dbfsize, 1,
-					   sizeof(struct zfcp_scsi_dbf_record));
-	if (!adapter->scsi_dbf)
-		goto failed;
-	debug_register_view(adapter->scsi_dbf, &debug_hex_ascii_view);
-	debug_register_view(adapter->scsi_dbf, &zfcp_scsi_dbf_view);
-	debug_set_level(adapter->scsi_dbf, 3);
+	dbf->scsi = zfcp_dbf_reg(dbf_name, 3, &zfcp_dbf_scsi_view,
+				 sizeof(struct zfcp_dbf_scsi_record));
+	if (!dbf->scsi)
+		goto err_out;
 
+	adapter->dbf = dbf;
 	return 0;
 
- failed:
-	zfcp_adapter_debug_unregister(adapter);
-
+err_out:
+	zfcp_dbf_adapter_unregister(dbf);
 	return -ENOMEM;
 }
 
 /**
  * zfcp_adapter_debug_unregister - unregisters debug feature for an adapter
- * @adapter: pointer to adapter for which debug features should be unregistered
+ * @dbf: pointer to dbf for which debug features should be unregistered
  */
-void zfcp_adapter_debug_unregister(struct zfcp_adapter *adapter)
+void zfcp_dbf_adapter_unregister(struct zfcp_dbf *dbf)
 {
-	debug_unregister(adapter->scsi_dbf);
-	debug_unregister(adapter->san_dbf);
-	debug_unregister(adapter->hba_dbf);
-	debug_unregister(adapter->rec_dbf);
-	adapter->scsi_dbf = NULL;
-	adapter->san_dbf = NULL;
-	adapter->hba_dbf = NULL;
-	adapter->rec_dbf = NULL;
+	debug_unregister(dbf->scsi);
+	debug_unregister(dbf->san);
+	debug_unregister(dbf->hba);
+	debug_unregister(dbf->rec);
+	dbf->adapter->dbf = NULL;
+	kfree(dbf);
 }
+
diff --git a/drivers/s390/scsi/zfcp_dbf.h b/drivers/s390/scsi/zfcp_dbf.h
index a573f73..6b1461e 100644
--- a/drivers/s390/scsi/zfcp_dbf.h
+++ b/drivers/s390/scsi/zfcp_dbf.h
@@ -2,7 +2,7 @@
  * This file is part of the zfcp device driver for
  * FCP adapters for IBM System z9 and zSeries.
  *
- * Copyright IBM Corp. 2008, 2008
+ * Copyright IBM Corp. 2008, 2009
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
@@ -22,7 +22,9 @@
 #ifndef ZFCP_DBF_H
 #define ZFCP_DBF_H
 
+#include "zfcp_ext.h"
 #include "zfcp_fsf.h"
+#include "zfcp_def.h"
 
 #define ZFCP_DBF_TAG_SIZE      4
 #define ZFCP_DBF_ID_SIZE       7
@@ -35,13 +37,13 @@
 	u8 data[];		/* dump data */
 } __attribute__ ((packed));
 
-struct zfcp_rec_dbf_record_thread {
+struct zfcp_dbf_rec_record_thread {
 	u32 total;
 	u32 ready;
 	u32 running;
 };
 
-struct zfcp_rec_dbf_record_target {
+struct zfcp_dbf_rec_record_target {
 	u64 ref;
 	u32 status;
 	u32 d_id;
@@ -50,7 +52,7 @@
 	u32 erp_count;
 };
 
-struct zfcp_rec_dbf_record_trigger {
+struct zfcp_dbf_rec_record_trigger {
 	u8 want;
 	u8 need;
 	u32 as;
@@ -62,21 +64,21 @@
 	u64 fcp_lun;
 };
 
-struct zfcp_rec_dbf_record_action {
+struct zfcp_dbf_rec_record_action {
 	u32 status;
 	u32 step;
 	u64 action;
 	u64 fsf_req;
 };
 
-struct zfcp_rec_dbf_record {
+struct zfcp_dbf_rec_record {
 	u8 id;
 	char id2[7];
 	union {
-		struct zfcp_rec_dbf_record_action action;
-		struct zfcp_rec_dbf_record_thread thread;
-		struct zfcp_rec_dbf_record_target target;
-		struct zfcp_rec_dbf_record_trigger trigger;
+		struct zfcp_dbf_rec_record_action action;
+		struct zfcp_dbf_rec_record_thread thread;
+		struct zfcp_dbf_rec_record_target target;
+		struct zfcp_dbf_rec_record_trigger trigger;
 	} u;
 };
 
@@ -87,7 +89,7 @@
 	ZFCP_REC_DBF_ID_TRIGGER,
 };
 
-struct zfcp_hba_dbf_record_response {
+struct zfcp_dbf_hba_record_response {
 	u32 fsf_command;
 	u64 fsf_reqid;
 	u32 fsf_seqno;
@@ -125,7 +127,7 @@
 	} u;
 } __attribute__ ((packed));
 
-struct zfcp_hba_dbf_record_status {
+struct zfcp_dbf_hba_record_status {
 	u8 failed;
 	u32 status_type;
 	u32 status_subtype;
@@ -139,24 +141,24 @@
 	u8 payload[ZFCP_DBF_UNSOL_PAYLOAD];
 } __attribute__ ((packed));
 
-struct zfcp_hba_dbf_record_qdio {
+struct zfcp_dbf_hba_record_qdio {
 	u32 qdio_error;
 	u8 sbal_index;
 	u8 sbal_count;
 } __attribute__ ((packed));
 
-struct zfcp_hba_dbf_record {
+struct zfcp_dbf_hba_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u8 tag2[ZFCP_DBF_TAG_SIZE];
 	union {
-		struct zfcp_hba_dbf_record_response response;
-		struct zfcp_hba_dbf_record_status status;
-		struct zfcp_hba_dbf_record_qdio qdio;
+		struct zfcp_dbf_hba_record_response response;
+		struct zfcp_dbf_hba_record_status status;
+		struct zfcp_dbf_hba_record_qdio qdio;
 		struct fsf_bit_error_payload berr;
 	} u;
 } __attribute__ ((packed));
 
-struct zfcp_san_dbf_record_ct_request {
+struct zfcp_dbf_san_record_ct_request {
 	u16 cmd_req_code;
 	u8 revision;
 	u8 gs_type;
@@ -166,7 +168,7 @@
 	u32 len;
 } __attribute__ ((packed));
 
-struct zfcp_san_dbf_record_ct_response {
+struct zfcp_dbf_san_record_ct_response {
 	u16 cmd_rsp_code;
 	u8 revision;
 	u8 reason_code;
@@ -176,27 +178,27 @@
 	u32 len;
 } __attribute__ ((packed));
 
-struct zfcp_san_dbf_record_els {
+struct zfcp_dbf_san_record_els {
 	u8 ls_code;
 	u32 len;
 } __attribute__ ((packed));
 
-struct zfcp_san_dbf_record {
+struct zfcp_dbf_san_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u64 fsf_reqid;
 	u32 fsf_seqno;
 	u32 s_id;
 	u32 d_id;
 	union {
-		struct zfcp_san_dbf_record_ct_request ct_req;
-		struct zfcp_san_dbf_record_ct_response ct_resp;
-		struct zfcp_san_dbf_record_els els;
+		struct zfcp_dbf_san_record_ct_request ct_req;
+		struct zfcp_dbf_san_record_ct_response ct_resp;
+		struct zfcp_dbf_san_record_els els;
 	} u;
 #define ZFCP_DBF_SAN_MAX_PAYLOAD 1024
 	u8 payload[32];
 } __attribute__ ((packed));
 
-struct zfcp_scsi_dbf_record {
+struct zfcp_dbf_scsi_record {
 	u8 tag[ZFCP_DBF_TAG_SIZE];
 	u8 tag2[ZFCP_DBF_TAG_SIZE];
 	u32 scsi_id;
@@ -222,4 +224,127 @@
 	u8 sns_info[ZFCP_DBF_SCSI_FCP_SNS_INFO];
 } __attribute__ ((packed));
 
+struct zfcp_dbf {
+	debug_info_t			*rec;
+	debug_info_t			*hba;
+	debug_info_t			*san;
+	debug_info_t			*scsi;
+	spinlock_t			rec_lock;
+	spinlock_t			hba_lock;
+	spinlock_t			san_lock;
+	spinlock_t			scsi_lock;
+	struct zfcp_dbf_rec_record	rec_buf;
+	struct zfcp_dbf_hba_record	hba_buf;
+	struct zfcp_dbf_san_record	san_buf;
+	struct zfcp_dbf_scsi_record	scsi_buf;
+	struct zfcp_adapter		*adapter;
+};
+
+static inline
+void zfcp_dbf_hba_fsf_resp(const char *tag2, int level,
+			   struct zfcp_fsf_req *req, struct zfcp_dbf *dbf)
+{
+	if (level <= dbf->hba->level)
+		_zfcp_dbf_hba_fsf_response(tag2, level, req, dbf);
+}
+
+/**
+ * zfcp_dbf_hba_fsf_response - trace event for request completion
+ * @req: request that has been completed
+ */
+static inline void zfcp_dbf_hba_fsf_response(struct zfcp_fsf_req *req)
+{
+	struct zfcp_dbf *dbf = req->adapter->dbf;
+	struct fsf_qtcb *qtcb = req->qtcb;
+
+	if ((qtcb->prefix.prot_status != FSF_PROT_GOOD) &&
+	    (qtcb->prefix.prot_status != FSF_PROT_FSF_STATUS_PRESENTED)) {
+		zfcp_dbf_hba_fsf_resp("perr", 1, req, dbf);
+
+	} else if (qtcb->header.fsf_status != FSF_GOOD) {
+		zfcp_dbf_hba_fsf_resp("ferr", 1, req, dbf);
+
+	} else if ((req->fsf_command == FSF_QTCB_OPEN_PORT_WITH_DID) ||
+		   (req->fsf_command == FSF_QTCB_OPEN_LUN)) {
+		zfcp_dbf_hba_fsf_resp("open", 4, req, dbf);
+
+	} else if (qtcb->header.log_length) {
+		zfcp_dbf_hba_fsf_resp("qtcb", 5, req, dbf);
+
+	} else {
+		zfcp_dbf_hba_fsf_resp("norm", 6, req, dbf);
+	}
+}
+
+/**
+ * zfcp_dbf_hba_fsf_unsol - trace event for an unsolicited status buffer
+ * @tag: tag indicating which kind of unsolicited status has been received
+ * @dbf: reference to dbf structure
+ * @buf: buffer containing payload of unsolicited status
+ */
+static inline
+void zfcp_dbf_hba_fsf_unsol(const char *tag, struct zfcp_dbf *dbf,
+			    struct fsf_status_read_buffer *buf)
+{
+	int level = 2;
+
+	if (level <= dbf->hba->level)
+		_zfcp_dbf_hba_fsf_unsol(tag, level, dbf, buf);
+}
+
+static inline
+void zfcp_dbf_scsi(const char *tag, const char *tag2, int level,
+		   struct zfcp_dbf *dbf, struct scsi_cmnd *scmd,
+		   struct zfcp_fsf_req *req, unsigned long old_id)
+{
+	if (level <= dbf->scsi->level)
+		_zfcp_dbf_scsi(tag, tag2, level, dbf, scmd, req, old_id);
+}
+
+/**
+ * zfcp_dbf_scsi_result - trace event for SCSI command completion
+ * @tag: tag indicating success or failure of SCSI command
+ * @level: trace level applicable for this event
+ * @dbf: dbf of the adapter that has been used to issue the SCSI command
+ * @scmd: SCSI command pointer
+ * @fsf_req: request used to issue SCSI command (might be NULL)
+ */
+static inline
+void zfcp_dbf_scsi_result(const char *tag, int level, struct zfcp_dbf *dbf,
+			  struct scsi_cmnd *scmd, struct zfcp_fsf_req *fsf_req)
+{
+	zfcp_dbf_scsi("rslt", tag, level, dbf, scmd, fsf_req, 0);
+}
+
+/**
+ * zfcp_dbf_scsi_abort - trace event for SCSI command abort
+ * @tag: tag indicating success or failure of abort operation
+ * @dbf: dbf of the adapter that has been used to issue the SCSI command to be aborted
+ * @scmd: SCSI command to be aborted
+ * @new_req: request containing abort (might be NULL)
+ * @old_id: identifier of request containing SCSI command to be aborted
+ */
+static inline
+void zfcp_dbf_scsi_abort(const char *tag, struct zfcp_dbf *dbf,
+			 struct scsi_cmnd *scmd, struct zfcp_fsf_req *new_req,
+			 unsigned long old_id)
+{
+	zfcp_dbf_scsi("abrt", tag, 1, dbf, scmd, new_req, old_id);
+}
+
+/**
+ * zfcp_dbf_scsi_devreset - trace event for Logical Unit or Target Reset
+ * @tag: tag indicating success or failure of reset operation
+ * @flag: indicates type of reset (Target Reset, Logical Unit Reset)
+ * @unit: unit that needs reset
+ * @scsi_cmnd: SCSI command which caused this error recovery
+ */
+static inline
+void zfcp_dbf_scsi_devreset(const char *tag, u8 flag, struct zfcp_unit *unit,
+			    struct scsi_cmnd *scsi_cmnd)
+{
+	zfcp_dbf_scsi(flag == FCP_TARGET_RESET ? "trst" : "lrst", tag, 1,
+			    unit->port->adapter->dbf, scsi_cmnd, NULL, 0);
+}
+
 #endif /* ZFCP_DBF_H */
diff --git a/drivers/s390/scsi/zfcp_def.h b/drivers/s390/scsi/zfcp_def.h
index 49d0532..7da2fad 100644
--- a/drivers/s390/scsi/zfcp_def.h
+++ b/drivers/s390/scsi/zfcp_def.h
@@ -37,10 +37,8 @@
 #include <asm/debug.h>
 #include <asm/ebcdic.h>
 #include <asm/sysinfo.h>
-#include "zfcp_dbf.h"
 #include "zfcp_fsf.h"
 
-
 /********************* GENERAL DEFINES *********************************/
 
 #define REQUEST_LIST_SIZE 128
@@ -75,9 +73,6 @@
 
 /*************** FIBRE CHANNEL PROTOCOL SPECIFIC DEFINES ********************/
 
-/* timeout for name-server lookup (in seconds) */
-#define ZFCP_NS_GID_PN_TIMEOUT		10
-
 /* task attribute values in FCP-2 FCP_CMND IU */
 #define SIMPLE_Q	0
 #define HEAD_OF_Q	1
@@ -224,8 +219,6 @@
 #define ZFCP_STATUS_ADAPTER_QDIOUP		0x00000002
 #define ZFCP_STATUS_ADAPTER_XCONFIG_OK		0x00000008
 #define ZFCP_STATUS_ADAPTER_HOST_CON_INIT	0x00000010
-#define ZFCP_STATUS_ADAPTER_ERP_THREAD_UP	0x00000020
-#define ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL	0x00000080
 #define ZFCP_STATUS_ADAPTER_ERP_PENDING		0x00000100
 #define ZFCP_STATUS_ADAPTER_LINK_UNPLUGGED	0x00000200
 
@@ -234,6 +227,7 @@
 
 /* remote port status */
 #define ZFCP_STATUS_PORT_PHYS_OPEN		0x00000001
+#define ZFCP_STATUS_PORT_LINK_TEST		0x00000002
 
 /* well known address (WKA) port status*/
 enum zfcp_wka_status {
@@ -249,7 +243,6 @@
 
 /* FSF request status (this does not have a common part) */
 #define ZFCP_STATUS_FSFREQ_TASK_MANAGEMENT	0x00000002
-#define ZFCP_STATUS_FSFREQ_COMPLETED		0x00000004
 #define ZFCP_STATUS_FSFREQ_ERROR		0x00000008
 #define ZFCP_STATUS_FSFREQ_CLEANUP		0x00000010
 #define ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED	0x00000040
@@ -266,12 +259,14 @@
 
 /* holds various memory pools of an adapter */
 struct zfcp_adapter_mempool {
-	mempool_t *fsf_req_erp;
-	mempool_t *fsf_req_scsi;
-	mempool_t *fsf_req_abort;
-	mempool_t *fsf_req_status_read;
-	mempool_t *data_status_read;
-	mempool_t *data_gid_pn;
+	mempool_t *erp_req;
+	mempool_t *gid_pn_req;
+	mempool_t *scsi_req;
+	mempool_t *scsi_abort;
+	mempool_t *status_read_req;
+	mempool_t *status_read_data;
+	mempool_t *gid_pn_data;
+	mempool_t *qtcb_pool;
 };
 
 /*
@@ -305,6 +300,15 @@
 	u32 d_id;
 } __attribute__ ((packed));
 
+struct ct_iu_gpn_ft_req {
+	struct ct_hdr header;
+	u8 flags;
+	u8 domain_id_scope;
+	u8 area_id_scope;
+	u8 fc4_type;
+} __attribute__ ((packed));
+
+
 /**
  * struct zfcp_send_ct - used to pass parameters to function zfcp_fsf_send_ct
  * @wka_port: port where the request is sent to
@@ -312,7 +316,6 @@
  * @resp: scatter-gather list for response
  * @handler: handler function (called for response to the request)
  * @handler_data: data passed to handler function
- * @timeout: FSF timeout for this request
  * @completion: completion for synchronization purposes
  * @status: used to pass error status to calling function
  */
@@ -322,7 +325,6 @@
 	struct scatterlist *resp;
 	void (*handler)(unsigned long);
 	unsigned long handler_data;
-	int timeout;
 	struct completion *completion;
 	int status;
 };
@@ -420,6 +422,29 @@
 	spinlock_t lock;
 };
 
+/** struct zfcp_qdio - basic QDIO data structure
+ * @resp_q: response queue
+ * @req_q: request queue
+ * @stat_lock: lock to protect req_q_util and req_q_time
+ * @req_q_lock: lock to serialize access to request queue
+ * @req_q_time: time of last fill level change
+ * @req_q_util: used for accounting
+ * @req_q_full: queue full incidents
+ * @req_q_wq: used to wait for SBAL availability
+ * @adapter: adapter used in conjunction with this QDIO structure
+ */
+struct zfcp_qdio {
+	struct zfcp_qdio_queue	resp_q;
+	struct zfcp_qdio_queue	req_q;
+	spinlock_t		stat_lock;
+	spinlock_t		req_q_lock;
+	unsigned long long	req_q_time;
+	u64			req_q_util;
+	atomic_t		req_q_full;
+	wait_queue_head_t	req_q_wq;
+	struct zfcp_adapter	*adapter;
+};
+
 struct zfcp_adapter {
 	atomic_t                refcount;          /* reference count */
 	wait_queue_head_t	remove_wq;         /* can be used to wait for
@@ -428,6 +453,7 @@
 	u64			peer_wwpn;	   /* P2P peer WWPN */
 	u32			peer_d_id;	   /* P2P peer D_ID */
 	struct ccw_device       *ccw_device;	   /* S/390 ccw device */
+	struct zfcp_qdio	*qdio;
 	u32			hydra_version;	   /* Hydra version */
 	u32			fsf_lic_version;
 	u32			adapter_features;  /* FCP channel features */
@@ -439,15 +465,7 @@
 	unsigned long		req_no;		   /* unique FSF req number */
 	struct list_head	*req_list;	   /* list of pending reqs */
 	spinlock_t		req_list_lock;	   /* request list lock */
-	struct zfcp_qdio_queue	req_q;		   /* request queue */
-	spinlock_t		req_q_lock;	   /* for operations on queue */
-	ktime_t			req_q_time; /* time of last fill level change */
-	u64			req_q_util; /* for accounting */
-	spinlock_t		qdio_stat_lock;
 	u32			fsf_req_seq_no;	   /* FSF cmnd seq number */
-	wait_queue_head_t	request_wq;	   /* can be used to wait for
-						      more avaliable SBALs */
-	struct zfcp_qdio_queue	resp_q;	   /* response queue */
 	rwlock_t		abort_lock;        /* Protects against SCSI
 						      stack abort/command
 						      completion races */
@@ -456,10 +474,9 @@
 	atomic_t		status;	           /* status of this adapter */
 	struct list_head	erp_ready_head;	   /* error recovery for this
 						      adapter/devices */
+	wait_queue_head_t	erp_ready_wq;
 	struct list_head	erp_running_head;
 	rwlock_t		erp_lock;
-	struct semaphore	erp_ready_sem;
-	wait_queue_head_t	erp_thread_wqh;
 	wait_queue_head_t	erp_done_wqh;
 	struct zfcp_erp_action	erp_action;	   /* pending error recovery */
         atomic_t                erp_counter;
@@ -467,27 +484,16 @@
 						      actions */
 	u32			erp_low_mem_count; /* nr of erp actions waiting
 						      for memory */
+	struct task_struct	*erp_thread;
 	struct zfcp_wka_ports	*gs;		   /* generic services */
-	debug_info_t		*rec_dbf;
-	debug_info_t		*hba_dbf;
-	debug_info_t		*san_dbf;          /* debug feature areas */
-	debug_info_t		*scsi_dbf;
-	spinlock_t		rec_dbf_lock;
-	spinlock_t		hba_dbf_lock;
-	spinlock_t		san_dbf_lock;
-	spinlock_t		scsi_dbf_lock;
-	struct zfcp_rec_dbf_record	rec_dbf_buf;
-	struct zfcp_hba_dbf_record	hba_dbf_buf;
-	struct zfcp_san_dbf_record	san_dbf_buf;
-	struct zfcp_scsi_dbf_record	scsi_dbf_buf;
+	struct zfcp_dbf		*dbf;		   /* debug traces */
 	struct zfcp_adapter_mempool	pool;      /* Adapter memory pools */
-	struct qdio_initialize  qdio_init_data;    /* for qdio_establish */
 	struct fc_host_statistics *fc_stats;
 	struct fsf_qtcb_bottom_port *stats_reset_data;
 	unsigned long		stats_reset;
 	struct work_struct	scan_work;
 	struct service_level	service_level;
-	atomic_t		qdio_outb_full;	   /* queue full incidents */
+	struct workqueue_struct	*work_queue;
 };
 
 struct zfcp_port {
@@ -531,36 +537,64 @@
 	struct work_struct	scsi_work;
 };
 
-/* FSF request */
+/**
+ * struct zfcp_queue_req - queue related values for a request
+ * @sbal_number: number of free SBALs
+ * @sbal_first: first SBAL for this request
+ * @sbal_last: last SBAL for this request
+ * @sbal_limit: last possible SBAL for this request
+ * @sbale_curr: current SBALE at creation of this request
+ * @sbal_response: SBAL used in interrupt
+ * @qdio_outb_usage: usage of outbound queue
+ * @qdio_inb_usage: usage of inbound queue
+ */
+struct zfcp_queue_req {
+	u8		       sbal_number;
+	u8		       sbal_first;
+	u8		       sbal_last;
+	u8		       sbal_limit;
+	u8		       sbale_curr;
+	u8		       sbal_response;
+	u16		       qdio_outb_usage;
+	u16		       qdio_inb_usage;
+};
+
+/**
+ * struct zfcp_fsf_req - basic FSF request structure
+ * @list: list of FSF requests
+ * @req_id: unique request ID
+ * @adapter: adapter this request belongs to
+ * @queue_req: queue related values
+ * @completion: used to signal the completion of the request
+ * @status: status of the request
+ * @fsf_command: FSF command issued
+ * @qtcb: associated QTCB
+ * @seq_no: sequence number of this request
+ * @data: private data
+ * @timer: timer data of this request
+ * @erp_action: reference to erp action if request issued on behalf of ERP
+ * @pool: reference to memory pool if used for this request
+ * @issued: time when request was send (STCK)
+ * @unit: reference to unit if this request is a SCSI request
+ * @handler: handler which should be called to process response
+ */
 struct zfcp_fsf_req {
-	struct list_head       list;	       /* list of FSF requests */
-	unsigned long	       req_id;	       /* unique request ID */
-	struct zfcp_adapter    *adapter;       /* adapter request belongs to */
-	u8		       sbal_number;    /* nr of SBALs free for use */
-	u8		       sbal_first;     /* first SBAL for this request */
-	u8		       sbal_last;      /* last SBAL for this request */
-	u8		       sbal_limit;      /* last possible SBAL for
-						  this reuest */
-	u8		       sbale_curr;     /* current SBALE during creation
-						  of request */
-	u8			sbal_response;	/* SBAL used in interrupt */
-	wait_queue_head_t      completion_wq;  /* can be used by a routine
-						  to wait for completion */
-	u32			status;	       /* status of this request */
-	u32		       fsf_command;    /* FSF Command copy */
-	struct fsf_qtcb	       *qtcb;	       /* address of associated QTCB */
-	u32		       seq_no;         /* Sequence number of request */
-	void			*data;           /* private data of request */
-	struct timer_list     timer;	       /* used for erp or scsi er */
-	struct zfcp_erp_action *erp_action;    /* used if this request is
-						  issued on behalf of erp */
-	mempool_t	       *pool;	       /* used if request was alloacted
-						  from emergency pool */
-	unsigned long long     issued;         /* request sent time (STCK) */
-	struct zfcp_unit       *unit;
+	struct list_head	list;
+	unsigned long		req_id;
+	struct zfcp_adapter	*adapter;
+	struct zfcp_queue_req	queue_req;
+	struct completion	completion;
+	u32			status;
+	u32			fsf_command;
+	struct fsf_qtcb		*qtcb;
+	u32			seq_no;
+	void			*data;
+	struct timer_list	timer;
+	struct zfcp_erp_action	*erp_action;
+	mempool_t		*pool;
+	unsigned long long	issued;
+	struct zfcp_unit	*unit;
 	void			(*handler)(struct zfcp_fsf_req *);
-	u16			qdio_outb_usage;/* usage of outbound queue */
-	u16			qdio_inb_usage;	/* usage of inbound queue */
 };
 
 /* driver data */
@@ -570,18 +604,11 @@
 	rwlock_t                config_lock;        /* serialises changes
 						       to adapter/port/unit
 						       lists */
-	struct semaphore        config_sema;        /* serialises configuration
-						       changes */
-	struct kmem_cache	*fsf_req_qtcb_cache;
+	struct mutex		config_mutex;
+	struct kmem_cache	*gpn_ft_cache;
+	struct kmem_cache	*qtcb_cache;
 	struct kmem_cache	*sr_buffer_cache;
 	struct kmem_cache	*gid_pn_cache;
-	struct workqueue_struct	*work_queue;
-};
-
-/* struct used by memory pools for fsf_requests */
-struct zfcp_fsf_req_qtcb {
-	struct zfcp_fsf_req fsf_req;
-	struct fsf_qtcb qtcb;
 };
 
 /********************** ZFCP SPECIFIC DEFINES ********************************/
diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c
index c75d6f3..73d366b 100644
--- a/drivers/s390/scsi/zfcp_erp.c
+++ b/drivers/s390/scsi/zfcp_erp.c
@@ -9,6 +9,7 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
+#include <linux/kthread.h>
 #include "zfcp_ext.h"
 
 #define ZFCP_MAX_ERPS                   3
@@ -26,7 +27,6 @@
 	ZFCP_ERP_STEP_FSF_XCONFIG	= 0x0001,
 	ZFCP_ERP_STEP_PHYS_PORT_CLOSING	= 0x0010,
 	ZFCP_ERP_STEP_PORT_CLOSING	= 0x0100,
-	ZFCP_ERP_STEP_NAMESERVER_LOOKUP	= 0x0400,
 	ZFCP_ERP_STEP_PORT_OPENING	= 0x0800,
 	ZFCP_ERP_STEP_UNIT_CLOSING	= 0x1000,
 	ZFCP_ERP_STEP_UNIT_OPENING	= 0x2000,
@@ -75,9 +75,9 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	list_move(&act->list, &act->adapter->erp_ready_head);
-	zfcp_rec_dbf_event_action("erardy1", act);
-	up(&adapter->erp_ready_sem);
-	zfcp_rec_dbf_event_thread("erardy2", adapter);
+	zfcp_dbf_rec_action("erardy1", act);
+	wake_up(&adapter->erp_ready_wq);
+	zfcp_dbf_rec_thread("erardy2", adapter->dbf);
 }
 
 static void zfcp_erp_action_dismiss(struct zfcp_erp_action *act)
@@ -150,6 +150,9 @@
 		a_status = atomic_read(&adapter->status);
 		if (a_status & ZFCP_STATUS_COMMON_ERP_INUSE)
 			return 0;
+		if (!(a_status & ZFCP_STATUS_COMMON_RUNNING) &&
+		    !(a_status & ZFCP_STATUS_COMMON_OPEN))
+			return 0; /* shutdown requested for closed adapter */
 	}
 
 	return need;
@@ -213,8 +216,7 @@
 	int retval = 1, need;
 	struct zfcp_erp_action *act = NULL;
 
-	if (!(atomic_read(&adapter->status) &
-	      ZFCP_STATUS_ADAPTER_ERP_THREAD_UP))
+	if (!adapter->erp_thread)
 		return -EIO;
 
 	need = zfcp_erp_required_act(want, adapter, port, unit);
@@ -227,12 +229,11 @@
 		goto out;
 	++adapter->erp_total_count;
 	list_add_tail(&act->list, &adapter->erp_ready_head);
-	up(&adapter->erp_ready_sem);
-	zfcp_rec_dbf_event_thread("eracte1", adapter);
+	wake_up(&adapter->erp_ready_wq);
+	zfcp_dbf_rec_thread("eracte1", adapter->dbf);
 	retval = 0;
  out:
-	zfcp_rec_dbf_event_trigger(id, ref, want, need, act,
-				   adapter, port, unit);
+	zfcp_dbf_rec_trigger(id, ref, want, need, act, adapter, port, unit);
 	return retval;
 }
 
@@ -443,28 +444,28 @@
 static void zfcp_erp_adapter_unblock(struct zfcp_adapter *adapter)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status))
-		zfcp_rec_dbf_event_adapter("eraubl1", NULL, adapter);
+		zfcp_dbf_rec_adapter("eraubl1", NULL, adapter->dbf);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &adapter->status);
 }
 
 static void zfcp_erp_port_unblock(struct zfcp_port *port)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status))
-		zfcp_rec_dbf_event_port("erpubl1", NULL, port);
+		zfcp_dbf_rec_port("erpubl1", NULL, port);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &port->status);
 }
 
 static void zfcp_erp_unit_unblock(struct zfcp_unit *unit)
 {
 	if (status_change_set(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status))
-		zfcp_rec_dbf_event_unit("eruubl1", NULL, unit);
+		zfcp_dbf_rec_unit("eruubl1", NULL, unit);
 	atomic_set_mask(ZFCP_STATUS_COMMON_UNBLOCKED, &unit->status);
 }
 
 static void zfcp_erp_action_to_running(struct zfcp_erp_action *erp_action)
 {
 	list_move(&erp_action->list, &erp_action->adapter->erp_running_head);
-	zfcp_rec_dbf_event_action("erator1", erp_action);
+	zfcp_dbf_rec_action("erator1", erp_action);
 }
 
 static void zfcp_erp_strategy_check_fsfreq(struct zfcp_erp_action *act)
@@ -480,13 +481,12 @@
 		if (act->status & (ZFCP_STATUS_ERP_DISMISSED |
 				   ZFCP_STATUS_ERP_TIMEDOUT)) {
 			act->fsf_req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-			zfcp_rec_dbf_event_action("erscf_1", act);
+			zfcp_dbf_rec_action("erscf_1", act);
 			act->fsf_req->erp_action = NULL;
 		}
 		if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
-			zfcp_rec_dbf_event_action("erscf_2", act);
-		if (act->fsf_req->status & (ZFCP_STATUS_FSFREQ_COMPLETED |
-					    ZFCP_STATUS_FSFREQ_DISMISSED))
+			zfcp_dbf_rec_action("erscf_2", act);
+		if (act->fsf_req->status & ZFCP_STATUS_FSFREQ_DISMISSED)
 			act->fsf_req = NULL;
 	} else
 		act->fsf_req = NULL;
@@ -604,9 +604,11 @@
 
 static int zfcp_erp_adapter_strategy_open_qdio(struct zfcp_erp_action *act)
 {
-	if (zfcp_qdio_open(act->adapter))
+	struct zfcp_qdio *qdio = act->adapter->qdio;
+
+	if (zfcp_qdio_open(qdio))
 		return ZFCP_ERP_FAILED;
-	init_waitqueue_head(&act->adapter->request_wq);
+	init_waitqueue_head(&qdio->req_q_wq);
 	atomic_set_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &act->adapter->status);
 	return ZFCP_ERP_SUCCEEDED;
 }
@@ -641,9 +643,10 @@
 			return ZFCP_ERP_FAILED;
 		}
 
-		zfcp_rec_dbf_event_thread_lock("erasfx1", adapter);
-		down(&adapter->erp_ready_sem);
-		zfcp_rec_dbf_event_thread_lock("erasfx2", adapter);
+		zfcp_dbf_rec_thread_lock("erasfx1", adapter->dbf);
+		wait_event(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head));
+		zfcp_dbf_rec_thread_lock("erasfx2", adapter->dbf);
 		if (erp_action->status & ZFCP_STATUS_ERP_TIMEDOUT)
 			break;
 
@@ -682,9 +685,10 @@
 	if (ret)
 		return ZFCP_ERP_FAILED;
 
-	zfcp_rec_dbf_event_thread_lock("erasox1", adapter);
-	down(&adapter->erp_ready_sem);
-	zfcp_rec_dbf_event_thread_lock("erasox2", adapter);
+	zfcp_dbf_rec_thread_lock("erasox1", adapter->dbf);
+	wait_event(adapter->erp_ready_wq,
+		   !list_empty(&adapter->erp_ready_head));
+	zfcp_dbf_rec_thread_lock("erasox2", adapter->dbf);
 	if (act->status & ZFCP_STATUS_ERP_TIMEDOUT)
 		return ZFCP_ERP_FAILED;
 
@@ -711,10 +715,10 @@
 	struct zfcp_adapter *adapter = act->adapter;
 
 	/* close queues to ensure that buffers are not accessed by adapter */
-	zfcp_qdio_close(adapter);
+	zfcp_qdio_close(adapter->qdio);
 	zfcp_fsf_req_dismiss_all(adapter);
 	adapter->fsf_req_seq_no = 0;
-	zfcp_fc_wka_port_force_offline(&adapter->gs->ds);
+	zfcp_fc_wka_ports_force_offline(adapter->gs);
 	/* all ports and units are closed */
 	zfcp_erp_modify_adapter_status(adapter, "erascl1", NULL,
 				       ZFCP_STATUS_COMMON_OPEN, ZFCP_CLEAR);
@@ -841,27 +845,6 @@
 	return zfcp_erp_port_strategy_open_port(act);
 }
 
-void zfcp_erp_port_strategy_open_lookup(struct work_struct *work)
-{
-	int retval;
-	struct zfcp_port *port = container_of(work, struct zfcp_port,
-					      gid_pn_work);
-
-	retval = zfcp_fc_ns_gid_pn(&port->erp_action);
-	if (!retval) {
-		port->erp_action.step = ZFCP_ERP_STEP_NAMESERVER_LOOKUP;
-		goto out;
-	}
-	if (retval == -ENOMEM) {
-		zfcp_erp_notify(&port->erp_action, ZFCP_STATUS_ERP_LOWMEM);
-		goto out;
-	}
-	/* all other error condtions */
-	zfcp_erp_notify(&port->erp_action, 0);
-out:
-	zfcp_port_put(port);
-}
-
 static int zfcp_erp_port_strategy_open_common(struct zfcp_erp_action *act)
 {
 	struct zfcp_adapter *adapter = act->adapter;
@@ -876,15 +859,11 @@
 			return zfcp_erp_open_ptp_port(act);
 		if (!port->d_id) {
 			zfcp_port_get(port);
-			if (!queue_work(zfcp_data.work_queue,
+			if (!queue_work(adapter->work_queue,
 					&port->gid_pn_work))
 				zfcp_port_put(port);
-			return ZFCP_ERP_CONTINUES;
+			return ZFCP_ERP_EXIT;
 		}
-		/* fall through */
-	case ZFCP_ERP_STEP_NAMESERVER_LOOKUP:
-		if (!port->d_id)
-			return ZFCP_ERP_FAILED;
 		return zfcp_erp_port_strategy_open_port(act);
 
 	case ZFCP_ERP_STEP_PORT_OPENING:
@@ -1163,7 +1142,7 @@
 	}
 
 	list_del(&erp_action->list);
-	zfcp_rec_dbf_event_action("eractd1", erp_action);
+	zfcp_dbf_rec_action("eractd1", erp_action);
 
 	switch (erp_action->action) {
 	case ZFCP_ERP_ACTION_REOPEN_UNIT:
@@ -1311,20 +1290,16 @@
 	struct list_head *next;
 	struct zfcp_erp_action *act;
 	unsigned long flags;
-	int ignore;
 
-	daemonize("zfcperp%s", dev_name(&adapter->ccw_device->dev));
-	/* Block all signals */
-	siginitsetinv(&current->blocked, 0);
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-	wake_up(&adapter->erp_thread_wqh);
+	for (;;) {
+		zfcp_dbf_rec_thread_lock("erthrd1", adapter->dbf);
+		wait_event_interruptible(adapter->erp_ready_wq,
+			   !list_empty(&adapter->erp_ready_head) ||
+			   kthread_should_stop());
+		zfcp_dbf_rec_thread_lock("erthrd2", adapter->dbf);
 
-	while (!(atomic_read(&adapter->status) &
-		 ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL)) {
-
-		zfcp_rec_dbf_event_thread_lock("erthrd1", adapter);
-		ignore = down_interruptible(&adapter->erp_ready_sem);
-		zfcp_rec_dbf_event_thread_lock("erthrd2", adapter);
+		if (kthread_should_stop())
+			break;
 
 		write_lock_irqsave(&adapter->erp_lock, flags);
 		next = adapter->erp_ready_head.next;
@@ -1339,9 +1314,6 @@
 		}
 	}
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-	wake_up(&adapter->erp_thread_wqh);
-
 	return 0;
 }
 
@@ -1353,18 +1325,17 @@
  */
 int zfcp_erp_thread_setup(struct zfcp_adapter *adapter)
 {
-	int retval;
+	struct task_struct *thread;
 
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_UP, &adapter->status);
-	retval = kernel_thread(zfcp_erp_thread, adapter, SIGCHLD);
-	if (retval < 0) {
+	thread = kthread_run(zfcp_erp_thread, adapter, "zfcperp%s",
+			     dev_name(&adapter->ccw_device->dev));
+	if (IS_ERR(thread)) {
 		dev_err(&adapter->ccw_device->dev,
 			"Creating an ERP thread for the FCP device failed.\n");
-		return retval;
+		return PTR_ERR(thread);
 	}
-	wait_event(adapter->erp_thread_wqh,
-		   atomic_read(&adapter->status) &
-			ZFCP_STATUS_ADAPTER_ERP_THREAD_UP);
+
+	adapter->erp_thread = thread;
 	return 0;
 }
 
@@ -1379,16 +1350,10 @@
  */
 void zfcp_erp_thread_kill(struct zfcp_adapter *adapter)
 {
-	atomic_set_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL, &adapter->status);
-	up(&adapter->erp_ready_sem);
-	zfcp_rec_dbf_event_thread_lock("erthrk1", adapter);
-
-	wait_event(adapter->erp_thread_wqh,
-		   !(atomic_read(&adapter->status) &
-				ZFCP_STATUS_ADAPTER_ERP_THREAD_UP));
-
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_ERP_THREAD_KILL,
-			  &adapter->status);
+	kthread_stop(adapter->erp_thread);
+	adapter->erp_thread = NULL;
+	WARN_ON(!list_empty(&adapter->erp_ready_head));
+	WARN_ON(!list_empty(&adapter->erp_running_head));
 }
 
 /**
@@ -1456,11 +1421,11 @@
 
 	if (set_or_clear == ZFCP_SET) {
 		if (status_change_set(mask, &adapter->status))
-			zfcp_rec_dbf_event_adapter(id, ref, adapter);
+			zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
 		atomic_set_mask(mask, &adapter->status);
 	} else {
 		if (status_change_clear(mask, &adapter->status))
-			zfcp_rec_dbf_event_adapter(id, ref, adapter);
+			zfcp_dbf_rec_adapter(id, ref, adapter->dbf);
 		atomic_clear_mask(mask, &adapter->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 			atomic_set(&adapter->erp_counter, 0);
@@ -1490,11 +1455,11 @@
 
 	if (set_or_clear == ZFCP_SET) {
 		if (status_change_set(mask, &port->status))
-			zfcp_rec_dbf_event_port(id, ref, port);
+			zfcp_dbf_rec_port(id, ref, port);
 		atomic_set_mask(mask, &port->status);
 	} else {
 		if (status_change_clear(mask, &port->status))
-			zfcp_rec_dbf_event_port(id, ref, port);
+			zfcp_dbf_rec_port(id, ref, port);
 		atomic_clear_mask(mask, &port->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED)
 			atomic_set(&port->erp_counter, 0);
@@ -1519,11 +1484,11 @@
 {
 	if (set_or_clear == ZFCP_SET) {
 		if (status_change_set(mask, &unit->status))
-			zfcp_rec_dbf_event_unit(id, ref, unit);
+			zfcp_dbf_rec_unit(id, ref, unit);
 		atomic_set_mask(mask, &unit->status);
 	} else {
 		if (status_change_clear(mask, &unit->status))
-			zfcp_rec_dbf_event_unit(id, ref, unit);
+			zfcp_dbf_rec_unit(id, ref, unit);
 		atomic_clear_mask(mask, &unit->status);
 		if (mask & ZFCP_STATUS_COMMON_ERP_FAILED) {
 			atomic_set(&unit->erp_counter, 0);
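
The zfcp_erp.c hunks above replace the old kernel_thread()/semaphore
handshake (the THREAD_UP/THREAD_KILL adapter status bits, erp_ready_sem and
erp_thread_wqh) with the kthread API and a wait queue.  A minimal sketch of
that pattern as a stand-alone module, using generic names that are not part
of this patch:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/err.h>

static LIST_HEAD(work_list);
static DECLARE_WAIT_QUEUE_HEAD(work_wq);
static struct task_struct *worker;

static int worker_fn(void *data)
{
	for (;;) {
		/* sleep until there is work or someone calls kthread_stop() */
		wait_event_interruptible(work_wq,
					 !list_empty(&work_list) ||
					 kthread_should_stop());
		if (kthread_should_stop())
			break;
		/* ... take one entry off work_list and process it ... */
	}
	return 0;
}

static int __init worker_init(void)
{
	worker = kthread_run(worker_fn, NULL, "my_worker");
	return IS_ERR(worker) ? PTR_ERR(worker) : 0;
}

static void __exit worker_exit(void)
{
	/* sets the stop flag, wakes the thread and waits for worker_fn() */
	kthread_stop(worker);
}

module_init(worker_init);
module_exit(worker_exit);
MODULE_LICENSE("GPL");

Stopping is a single kthread_stop() call, so the explicit thread-up/kill
status bits and the extra wake_up()/wait_event() handshake of the old code
are no longer needed.
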
diff --git a/drivers/s390/scsi/zfcp_ext.h b/drivers/s390/scsi/zfcp_ext.h
index 3044c60..36935bc 100644
--- a/drivers/s390/scsi/zfcp_ext.h
+++ b/drivers/s390/scsi/zfcp_ext.h
@@ -34,37 +34,31 @@
 extern struct miscdevice zfcp_cfdc_misc;
 
 /* zfcp_dbf.c */
-extern int zfcp_adapter_debug_register(struct zfcp_adapter *);
-extern void zfcp_adapter_debug_unregister(struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread(char *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_thread_lock(char *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_adapter(char *, void *, struct zfcp_adapter *);
-extern void zfcp_rec_dbf_event_port(char *, void *, struct zfcp_port *);
-extern void zfcp_rec_dbf_event_unit(char *, void *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_trigger(char *, void *, u8, u8, void *,
-				       struct zfcp_adapter *,
-				       struct zfcp_port *, struct zfcp_unit *);
-extern void zfcp_rec_dbf_event_action(char *, struct zfcp_erp_action *);
-extern void zfcp_hba_dbf_event_fsf_response(struct zfcp_fsf_req *);
-extern void zfcp_hba_dbf_event_fsf_unsol(const char *, struct zfcp_adapter *,
-					 struct fsf_status_read_buffer *);
-extern void zfcp_hba_dbf_event_qdio(struct zfcp_adapter *, unsigned int, int,
-				    int);
-extern void zfcp_hba_dbf_event_berr(struct zfcp_adapter *,
-				    struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_ct_request(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_ct_response(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_els_request(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_els_response(struct zfcp_fsf_req *);
-extern void zfcp_san_dbf_event_incoming_els(struct zfcp_fsf_req *);
-extern void zfcp_scsi_dbf_event_result(const char *, int, struct zfcp_adapter *,
-				       struct scsi_cmnd *,
-				       struct zfcp_fsf_req *);
-extern void zfcp_scsi_dbf_event_abort(const char *, struct zfcp_adapter *,
-				      struct scsi_cmnd *, struct zfcp_fsf_req *,
-				      unsigned long);
-extern void zfcp_scsi_dbf_event_devreset(const char *, u8, struct zfcp_unit *,
-					 struct scsi_cmnd *);
+extern int zfcp_dbf_adapter_register(struct zfcp_adapter *);
+extern void zfcp_dbf_adapter_unregister(struct zfcp_dbf *);
+extern void zfcp_dbf_rec_thread(char *, struct zfcp_dbf *);
+extern void zfcp_dbf_rec_thread_lock(char *, struct zfcp_dbf *);
+extern void zfcp_dbf_rec_adapter(char *, void *, struct zfcp_dbf *);
+extern void zfcp_dbf_rec_port(char *, void *, struct zfcp_port *);
+extern void zfcp_dbf_rec_unit(char *, void *, struct zfcp_unit *);
+extern void zfcp_dbf_rec_trigger(char *, void *, u8, u8, void *,
+				 struct zfcp_adapter *, struct zfcp_port *,
+				 struct zfcp_unit *);
+extern void zfcp_dbf_rec_action(char *, struct zfcp_erp_action *);
+extern void _zfcp_dbf_hba_fsf_response(const char *, int, struct zfcp_fsf_req *,
+				       struct zfcp_dbf *);
+extern void _zfcp_dbf_hba_fsf_unsol(const char *, int level, struct zfcp_dbf *,
+					  struct fsf_status_read_buffer *);
+extern void zfcp_dbf_hba_qdio(struct zfcp_dbf *, unsigned int, int, int);
+extern void zfcp_dbf_hba_berr(struct zfcp_dbf *, struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_ct_request(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_ct_response(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_els_request(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_els_response(struct zfcp_fsf_req *);
+extern void zfcp_dbf_san_incoming_els(struct zfcp_fsf_req *);
+extern void _zfcp_dbf_scsi(const char *, const char *, int, struct zfcp_dbf *,
+			   struct scsi_cmnd *, struct zfcp_fsf_req *,
+			   unsigned long);
 
 /* zfcp_erp.c */
 extern void zfcp_erp_modify_adapter_status(struct zfcp_adapter *, char *,
@@ -96,22 +90,20 @@
 extern void zfcp_erp_adapter_access_changed(struct zfcp_adapter *, char *,
 					    void *);
 extern void zfcp_erp_timeout_handler(unsigned long);
-extern void zfcp_erp_port_strategy_open_lookup(struct work_struct *);
 
 /* zfcp_fc.c */
-extern int zfcp_scan_ports(struct zfcp_adapter *);
-extern void _zfcp_scan_ports_later(struct work_struct *);
+extern int zfcp_fc_scan_ports(struct zfcp_adapter *);
+extern void _zfcp_fc_scan_ports_later(struct work_struct *);
 extern void zfcp_fc_incoming_els(struct zfcp_fsf_req *);
-extern int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *);
+extern void zfcp_fc_port_did_lookup(struct work_struct *);
 extern void zfcp_fc_plogi_evaluate(struct zfcp_port *, struct fsf_plogi *);
-extern void zfcp_test_link(struct zfcp_port *);
+extern void zfcp_fc_test_link(struct zfcp_port *);
 extern void zfcp_fc_link_test_work(struct work_struct *);
-extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
-extern void zfcp_fc_wka_ports_init(struct zfcp_adapter *);
+extern void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *);
+extern int zfcp_fc_gs_setup(struct zfcp_adapter *);
+extern void zfcp_fc_gs_destroy(struct zfcp_adapter *);
 extern int zfcp_fc_execute_els_fc_job(struct fc_bsg_job *);
 extern int zfcp_fc_execute_ct_fc_job(struct fc_bsg_job *);
-extern void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *);
-
 
 /* zfcp_fsf.c */
 extern int zfcp_fsf_open_port(struct zfcp_erp_action *);
@@ -122,37 +114,39 @@
 extern int zfcp_fsf_open_unit(struct zfcp_erp_action *);
 extern int zfcp_fsf_close_unit(struct zfcp_erp_action *);
 extern int zfcp_fsf_exchange_config_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *,
 					      struct fsf_qtcb_bottom_config *);
 extern int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *);
-extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *,
+extern int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *,
 					    struct fsf_qtcb_bottom_port *);
 extern struct zfcp_fsf_req *zfcp_fsf_control_file(struct zfcp_adapter *,
 						  struct zfcp_fsf_cfdc *);
 extern void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *);
-extern int zfcp_fsf_status_read(struct zfcp_adapter *);
+extern int zfcp_fsf_status_read(struct zfcp_qdio *);
 extern int zfcp_status_read_refill(struct zfcp_adapter *adapter);
-extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *,
-			    struct zfcp_erp_action *);
+extern int zfcp_fsf_send_ct(struct zfcp_send_ct *, mempool_t *);
 extern int zfcp_fsf_send_els(struct zfcp_send_els *);
 extern int zfcp_fsf_send_fcp_command_task(struct zfcp_unit *,
 					  struct scsi_cmnd *);
-extern void zfcp_fsf_req_complete(struct zfcp_fsf_req *);
 extern void zfcp_fsf_req_free(struct zfcp_fsf_req *);
 extern struct zfcp_fsf_req *zfcp_fsf_send_fcp_ctm(struct zfcp_unit *, u8);
 extern struct zfcp_fsf_req *zfcp_fsf_abort_fcp_command(unsigned long,
 						       struct zfcp_unit *);
+extern void zfcp_fsf_reqid_check(struct zfcp_qdio *, int);
 
 /* zfcp_qdio.c */
-extern int zfcp_qdio_allocate(struct zfcp_adapter *);
-extern void zfcp_qdio_free(struct zfcp_adapter *);
-extern int zfcp_qdio_send(struct zfcp_fsf_req *);
-extern struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *);
-extern struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *);
-extern int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *, unsigned long,
+extern int zfcp_qdio_setup(struct zfcp_adapter *);
+extern void zfcp_qdio_destroy(struct zfcp_qdio *);
+extern int zfcp_qdio_send(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern struct qdio_buffer_element
+	*zfcp_qdio_sbale_req(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern struct qdio_buffer_element
+	*zfcp_qdio_sbale_curr(struct zfcp_qdio *, struct zfcp_queue_req *);
+extern int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *,
+				   struct zfcp_queue_req *, unsigned long,
 				   struct scatterlist *, int);
-extern int zfcp_qdio_open(struct zfcp_adapter *);
-extern void zfcp_qdio_close(struct zfcp_adapter *);
+extern int zfcp_qdio_open(struct zfcp_qdio *);
+extern void zfcp_qdio_close(struct zfcp_qdio *);
 
 /* zfcp_scsi.c */
 extern struct zfcp_data zfcp_data;
diff --git a/drivers/s390/scsi/zfcp_fc.c b/drivers/s390/scsi/zfcp_fc.c
index 47daebf..722f22d 100644
--- a/drivers/s390/scsi/zfcp_fc.c
+++ b/drivers/s390/scsi/zfcp_fc.c
@@ -25,14 +25,6 @@
 	[RSCN_FABRIC_ADDRESS]		= 0x000000,
 };
 
-struct ct_iu_gpn_ft_req {
-	struct ct_hdr header;
-	u8 flags;
-	u8 domain_id_scope;
-	u8 area_id_scope;
-	u8 fc4_type;
-} __attribute__ ((packed));
-
 struct gpn_ft_resp_acc {
 	u8 control;
 	u8 port_id[3];
@@ -65,7 +57,7 @@
 	unsigned long handler_data;
 };
 
-static int zfcp_wka_port_get(struct zfcp_wka_port *wka_port)
+static int zfcp_fc_wka_port_get(struct zfcp_wka_port *wka_port)
 {
 	if (mutex_lock_interruptible(&wka_port->mutex))
 		return -ERESTARTSYS;
@@ -90,7 +82,7 @@
 	return -EIO;
 }
 
-static void zfcp_wka_port_offline(struct work_struct *work)
+static void zfcp_fc_wka_port_offline(struct work_struct *work)
 {
 	struct delayed_work *dw = to_delayed_work(work);
 	struct zfcp_wka_port *wka_port =
@@ -110,7 +102,7 @@
 	mutex_unlock(&wka_port->mutex);
 }
 
-static void zfcp_wka_port_put(struct zfcp_wka_port *wka_port)
+static void zfcp_fc_wka_port_put(struct zfcp_wka_port *wka_port)
 {
 	if (atomic_dec_return(&wka_port->refcount) != 0)
 		return;
@@ -129,10 +121,10 @@
 	wka_port->status = ZFCP_WKA_PORT_OFFLINE;
 	atomic_set(&wka_port->refcount, 0);
 	mutex_init(&wka_port->mutex);
-	INIT_DELAYED_WORK(&wka_port->work, zfcp_wka_port_offline);
+	INIT_DELAYED_WORK(&wka_port->work, zfcp_fc_wka_port_offline);
 }
 
-void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
+static void zfcp_fc_wka_port_force_offline(struct zfcp_wka_port *wka)
 {
 	cancel_delayed_work_sync(&wka->work);
 	mutex_lock(&wka->mutex);
@@ -140,15 +132,13 @@
 	mutex_unlock(&wka->mutex);
 }
 
-void zfcp_fc_wka_ports_init(struct zfcp_adapter *adapter)
+void zfcp_fc_wka_ports_force_offline(struct zfcp_wka_ports *gs)
 {
-	struct zfcp_wka_ports *gs = adapter->gs;
-
-	zfcp_fc_wka_port_init(&gs->ms, FC_FID_MGMT_SERV, adapter);
-	zfcp_fc_wka_port_init(&gs->ts, FC_FID_TIME_SERV, adapter);
-	zfcp_fc_wka_port_init(&gs->ds, FC_FID_DIR_SERV, adapter);
-	zfcp_fc_wka_port_init(&gs->as, FC_FID_ALIASES, adapter);
-	zfcp_fc_wka_port_init(&gs->ks, FC_FID_SEC_KEY, adapter);
+	zfcp_fc_wka_port_force_offline(&gs->ms);
+	zfcp_fc_wka_port_force_offline(&gs->ts);
+	zfcp_fc_wka_port_force_offline(&gs->ds);
+	zfcp_fc_wka_port_force_offline(&gs->as);
+	zfcp_fc_wka_port_force_offline(&gs->ks);
 }
 
 static void _zfcp_fc_incoming_rscn(struct zfcp_fsf_req *fsf_req, u32 range,
@@ -160,7 +150,7 @@
 	read_lock_irqsave(&zfcp_data.config_lock, flags);
 	list_for_each_entry(port, &fsf_req->adapter->port_list_head, list) {
 		if ((port->d_id & range) == (elem->nport_did & range))
-			zfcp_test_link(port);
+			zfcp_fc_test_link(port);
 		if (!port->d_id)
 			zfcp_erp_port_reopen(port,
 					     ZFCP_STATUS_COMMON_ERP_FAILED,
@@ -241,7 +231,7 @@
 		(struct fsf_status_read_buffer *) fsf_req->data;
 	unsigned int els_type = status_buffer->payload.data[0];
 
-	zfcp_san_dbf_event_incoming_els(fsf_req);
+	zfcp_dbf_san_incoming_els(fsf_req);
 	if (els_type == LS_PLOGI)
 		zfcp_fc_incoming_plogi(fsf_req);
 	else if (els_type == LS_LOGO)
@@ -281,19 +271,18 @@
 	port->d_id = ct_iu_resp->d_id & ZFCP_DID_MASK;
 }
 
-int static zfcp_fc_ns_gid_pn_request(struct zfcp_erp_action *erp_action,
+static int zfcp_fc_ns_gid_pn_request(struct zfcp_port *port,
 				     struct zfcp_gid_pn_data *gid_pn)
 {
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_adapter *adapter = port->adapter;
 	struct zfcp_fc_ns_handler_data compl_rec;
 	int ret;
 
 	/* setup parameters for send generic command */
-	gid_pn->port = erp_action->port;
+	gid_pn->port = port;
 	gid_pn->ct.wka_port = &adapter->gs->ds;
 	gid_pn->ct.handler = zfcp_fc_ns_handler;
 	gid_pn->ct.handler_data = (unsigned long) &compl_rec;
-	gid_pn->ct.timeout = ZFCP_NS_GID_PN_TIMEOUT;
 	gid_pn->ct.req = &gid_pn->req;
 	gid_pn->ct.resp = &gid_pn->resp;
 	sg_init_one(&gid_pn->req, &gid_pn->ct_iu_req,
@@ -308,13 +297,12 @@
 	gid_pn->ct_iu_req.header.options = ZFCP_CT_SYNCHRONOUS;
 	gid_pn->ct_iu_req.header.cmd_rsp_code = ZFCP_CT_GID_PN;
 	gid_pn->ct_iu_req.header.max_res_size = ZFCP_CT_SIZE_ONE_PAGE / 4;
-	gid_pn->ct_iu_req.wwpn = erp_action->port->wwpn;
+	gid_pn->ct_iu_req.wwpn = port->wwpn;
 
 	init_completion(&compl_rec.done);
 	compl_rec.handler = zfcp_fc_ns_gid_pn_eval;
 	compl_rec.handler_data = (unsigned long) gid_pn;
-	ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.fsf_req_erp,
-			       erp_action);
+	ret = zfcp_fsf_send_ct(&gid_pn->ct, adapter->pool.gid_pn_req);
 	if (!ret)
 		wait_for_completion(&compl_rec.done);
 	return ret;
@@ -322,33 +310,56 @@
 
 /**
  * zfcp_fc_ns_gid_pn_request - initiate GID_PN nameserver request
- * @erp_action: pointer to zfcp_erp_action where GID_PN request is needed
+ * @port: port where GID_PN request is needed
  * return: -ENOMEM on error, 0 otherwise
  */
-int zfcp_fc_ns_gid_pn(struct zfcp_erp_action *erp_action)
+static int zfcp_fc_ns_gid_pn(struct zfcp_port *port)
 {
 	int ret;
 	struct zfcp_gid_pn_data *gid_pn;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_adapter *adapter = port->adapter;
 
-	gid_pn = mempool_alloc(adapter->pool.data_gid_pn, GFP_ATOMIC);
+	gid_pn = mempool_alloc(adapter->pool.gid_pn_data, GFP_ATOMIC);
 	if (!gid_pn)
 		return -ENOMEM;
 
 	memset(gid_pn, 0, sizeof(*gid_pn));
 
-	ret = zfcp_wka_port_get(&adapter->gs->ds);
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
 	if (ret)
 		goto out;
 
-	ret = zfcp_fc_ns_gid_pn_request(erp_action, gid_pn);
+	ret = zfcp_fc_ns_gid_pn_request(port, gid_pn);
 
-	zfcp_wka_port_put(&adapter->gs->ds);
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
 out:
-	mempool_free(gid_pn, adapter->pool.data_gid_pn);
+	mempool_free(gid_pn, adapter->pool.gid_pn_data);
 	return ret;
 }
 
+void zfcp_fc_port_did_lookup(struct work_struct *work)
+{
+	int ret;
+	struct zfcp_port *port = container_of(work, struct zfcp_port,
+					      gid_pn_work);
+
+	ret = zfcp_fc_ns_gid_pn(port);
+	if (ret) {
+		/* could not issue gid_pn for some reason */
+		zfcp_erp_adapter_reopen(port->adapter, 0, "fcgpn_1", NULL);
+		goto out;
+	}
+
+	if (!port->d_id) {
+		zfcp_erp_port_failed(port, "fcgpn_2", NULL);
+		goto out;
+	}
+
+	zfcp_erp_port_reopen(port, 0, "fcgpn_3", NULL);
+out:
+	zfcp_port_put(port);
+}
+
 /**
  * zfcp_fc_plogi_evaluate - evaluate PLOGI playload
  * @port: zfcp_port structure
@@ -404,6 +415,7 @@
 	/* port is good, unblock rport without going through erp */
 	zfcp_scsi_schedule_rport_register(port);
  out:
+	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_port_put(port);
 	kfree(adisc);
 }
@@ -450,28 +462,36 @@
 	port->rport_task = RPORT_DEL;
 	zfcp_scsi_rport_work(&port->rport_work);
 
+	/* only issue one test command at one time per port */
+	if (atomic_read(&port->status) & ZFCP_STATUS_PORT_LINK_TEST)
+		goto out;
+
+	atomic_set_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
+
 	retval = zfcp_fc_adisc(port);
 	if (retval == 0)
 		return;
 
 	/* send of ADISC was not possible */
+	atomic_clear_mask(ZFCP_STATUS_PORT_LINK_TEST, &port->status);
 	zfcp_erp_port_forced_reopen(port, 0, "fcltwk1", NULL);
 
+out:
 	zfcp_port_put(port);
 }
 
 /**
- * zfcp_test_link - lightweight link test procedure
+ * zfcp_fc_test_link - lightweight link test procedure
  * @port: port to be tested
  *
  * Test status of a link to a remote port using the ELS command ADISC.
  * If there is a problem with the remote port, error recovery steps
  * will be triggered.
  */
-void zfcp_test_link(struct zfcp_port *port)
+void zfcp_fc_test_link(struct zfcp_port *port)
 {
 	zfcp_port_get(port);
-	if (!queue_work(zfcp_data.work_queue, &port->test_link_work))
+	if (!queue_work(port->adapter->work_queue, &port->test_link_work))
 		zfcp_port_put(port);
 }
 
@@ -479,7 +499,7 @@
 {
 	struct scatterlist *sg = &gpn_ft->sg_req;
 
-	kfree(sg_virt(sg)); /* free request buffer */
+	kmem_cache_free(zfcp_data.gpn_ft_cache, sg_virt(sg));
 	zfcp_sg_free_table(gpn_ft->sg_resp, buf_num);
 
 	kfree(gpn_ft);
@@ -494,7 +514,7 @@
 	if (!gpn_ft)
 		return NULL;
 
-	req = kzalloc(sizeof(struct ct_iu_gpn_ft_req), GFP_KERNEL);
+	req = kmem_cache_alloc(zfcp_data.gpn_ft_cache, GFP_KERNEL);
 	if (!req) {
 		kfree(gpn_ft);
 		gpn_ft = NULL;
@@ -511,9 +531,8 @@
 }
 
 
-static int zfcp_scan_issue_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
-				  struct zfcp_adapter *adapter,
-				  int max_bytes)
+static int zfcp_fc_send_gpn_ft(struct zfcp_gpn_ft *gpn_ft,
+			       struct zfcp_adapter *adapter, int max_bytes)
 {
 	struct zfcp_send_ct *ct = &gpn_ft->ct;
 	struct ct_iu_gpn_ft_req *req = sg_virt(&gpn_ft->sg_req);
@@ -536,19 +555,18 @@
 	ct->wka_port = &adapter->gs->ds;
 	ct->handler = zfcp_fc_ns_handler;
 	ct->handler_data = (unsigned long)&compl_rec;
-	ct->timeout = 10;
 	ct->req = &gpn_ft->sg_req;
 	ct->resp = gpn_ft->sg_resp;
 
 	init_completion(&compl_rec.done);
 	compl_rec.handler = NULL;
-	ret = zfcp_fsf_send_ct(ct, NULL, NULL);
+	ret = zfcp_fsf_send_ct(ct, NULL);
 	if (!ret)
 		wait_for_completion(&compl_rec.done);
 	return ret;
 }
 
-static void zfcp_validate_port(struct zfcp_port *port)
+static void zfcp_fc_validate_port(struct zfcp_port *port)
 {
 	struct zfcp_adapter *adapter = port->adapter;
 
@@ -568,7 +586,7 @@
 	zfcp_port_dequeue(port);
 }
 
-static int zfcp_scan_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
+static int zfcp_fc_eval_gpn_ft(struct zfcp_gpn_ft *gpn_ft, int max_entries)
 {
 	struct zfcp_send_ct *ct = &gpn_ft->ct;
 	struct scatterlist *sg = gpn_ft->sg_resp;
@@ -595,7 +613,7 @@
 		return -E2BIG;
 	}
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 
 	/* first entry is the header */
 	for (x = 1; x < max_entries && !last; x++) {
@@ -628,16 +646,16 @@
 
 	zfcp_erp_wait(adapter);
 	list_for_each_entry_safe(port, tmp, &adapter->port_list_head, list)
-		zfcp_validate_port(port);
-	up(&zfcp_data.config_sema);
+		zfcp_fc_validate_port(port);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return ret;
 }
 
 /**
- * zfcp_scan_ports - scan remote ports and attach new ports
+ * zfcp_fc_scan_ports - scan remote ports and attach new ports
  * @adapter: pointer to struct zfcp_adapter
  */
-int zfcp_scan_ports(struct zfcp_adapter *adapter)
+int zfcp_fc_scan_ports(struct zfcp_adapter *adapter)
 {
 	int ret, i;
 	struct zfcp_gpn_ft *gpn_ft;
@@ -652,7 +670,7 @@
 	    fc_host_port_type(adapter->scsi_host) != FC_PORTTYPE_NPIV)
 		return 0;
 
-	ret = zfcp_wka_port_get(&adapter->gs->ds);
+	ret = zfcp_fc_wka_port_get(&adapter->gs->ds);
 	if (ret)
 		return ret;
 
@@ -663,9 +681,9 @@
 	}
 
 	for (i = 0; i < 3; i++) {
-		ret = zfcp_scan_issue_gpn_ft(gpn_ft, adapter, max_bytes);
+		ret = zfcp_fc_send_gpn_ft(gpn_ft, adapter, max_bytes);
 		if (!ret) {
-			ret = zfcp_scan_eval_gpn_ft(gpn_ft, max_entries);
+			ret = zfcp_fc_eval_gpn_ft(gpn_ft, max_entries);
 			if (ret == -EAGAIN)
 				ssleep(1);
 			else
@@ -674,14 +692,14 @@
 	}
 	zfcp_free_sg_env(gpn_ft, buf_num);
 out:
-	zfcp_wka_port_put(&adapter->gs->ds);
+	zfcp_fc_wka_port_put(&adapter->gs->ds);
 	return ret;
 }
 
 
-void _zfcp_scan_ports_later(struct work_struct *work)
+void _zfcp_fc_scan_ports_later(struct work_struct *work)
 {
-	zfcp_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
+	zfcp_fc_scan_ports(container_of(work, struct zfcp_adapter, scan_work));
 }
 
 struct zfcp_els_fc_job {
@@ -732,7 +750,7 @@
 	els_fc_job->els.adapter = adapter;
 	if (rport) {
 		read_lock_irq(&zfcp_data.config_lock);
-		port = rport->dd_data;
+		port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 		if (port)
 			els_fc_job->els.d_id = port->d_id;
 		read_unlock_irq(&zfcp_data.config_lock);
@@ -771,7 +789,7 @@
 	job->state_flags = FC_RQST_STATE_DONE;
 	job->job_done(job);
 
-	zfcp_wka_port_put(ct_fc_job->ct.wka_port);
+	zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
 
 	kfree(ct_fc_job);
 }
@@ -817,7 +835,7 @@
 		return -EINVAL; /* no such service */
 	}
 
-	ret = zfcp_wka_port_get(ct_fc_job->ct.wka_port);
+	ret = zfcp_fc_wka_port_get(ct_fc_job->ct.wka_port);
 	if (ret) {
 		kfree(ct_fc_job);
 		return ret;
@@ -825,16 +843,40 @@
 
 	ct_fc_job->ct.req = job->request_payload.sg_list;
 	ct_fc_job->ct.resp = job->reply_payload.sg_list;
-	ct_fc_job->ct.timeout = ZFCP_FSF_REQUEST_TIMEOUT;
 	ct_fc_job->ct.handler = zfcp_fc_generic_ct_handler;
 	ct_fc_job->ct.handler_data = (unsigned long) ct_fc_job;
 	ct_fc_job->ct.completion = NULL;
 	ct_fc_job->job = job;
 
-	ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL, NULL);
+	ret = zfcp_fsf_send_ct(&ct_fc_job->ct, NULL);
 	if (ret) {
 		kfree(ct_fc_job);
-		zfcp_wka_port_put(ct_fc_job->ct.wka_port);
+		zfcp_fc_wka_port_put(ct_fc_job->ct.wka_port);
 	}
 	return ret;
 }
+
+int zfcp_fc_gs_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_wka_ports *wka_ports;
+
+	wka_ports = kzalloc(sizeof(struct zfcp_wka_ports), GFP_KERNEL);
+	if (!wka_ports)
+		return -ENOMEM;
+
+	adapter->gs = wka_ports;
+	zfcp_fc_wka_port_init(&wka_ports->ms, FC_FID_MGMT_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ts, FC_FID_TIME_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ds, FC_FID_DIR_SERV, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->as, FC_FID_ALIASES, adapter);
+	zfcp_fc_wka_port_init(&wka_ports->ks, FC_FID_SEC_KEY, adapter);
+
+	return 0;
+}
+
+void zfcp_fc_gs_destroy(struct zfcp_adapter *adapter)
+{
+	kfree(adapter->gs);
+	adapter->gs = NULL;
+}
+
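
In the zfcp_fc.c changes above, the GID_PN nameserver lookup is now driven
by a work item (zfcp_fc_port_did_lookup) on the adapter's workqueue: the
caller takes a port reference before queue_work() and the reference is
dropped when the work finishes, or immediately if the work was already
queued.  A generic sketch of that get/queue_work/put idiom follows; the
struct, kref and helper names are illustrative only (zfcp uses its own
zfcp_port_get/zfcp_port_put counters rather than a kref):

#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct my_port {
	struct kref ref;
	struct work_struct lookup_work;
	/* ... lookup input and result ... */
};

static void my_port_release(struct kref *ref)
{
	kfree(container_of(ref, struct my_port, ref));
}

static void my_port_lookup(struct work_struct *work)
{
	struct my_port *port = container_of(work, struct my_port,
					    lookup_work);

	/* ... perform the (possibly sleeping) lookup for this port ... */

	kref_put(&port->ref, my_port_release);	/* drop the queueing ref */
}

static struct my_port *my_port_alloc(void)
{
	struct my_port *port = kzalloc(sizeof(*port), GFP_KERNEL);

	if (!port)
		return NULL;
	kref_init(&port->ref);
	INIT_WORK(&port->lookup_work, my_port_lookup);
	return port;
}

static void my_port_schedule_lookup(struct my_port *port,
				    struct workqueue_struct *wq)
{
	kref_get(&port->ref);		/* reference held by the work item */
	if (!queue_work(wq, &port->lookup_work))
		/* work was already pending, give the reference back */
		kref_put(&port->ref, my_port_release);
}
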
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index 47795fb..f09c863 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -11,9 +11,7 @@
 
 #include <linux/blktrace_api.h>
 #include "zfcp_ext.h"
-
-#define ZFCP_REQ_AUTO_CLEANUP	0x00000002
-#define ZFCP_REQ_NO_QTCB	0x00000008
+#include "zfcp_dbf.h"
 
 static void zfcp_fsf_request_timeout_handler(unsigned long data)
 {
@@ -111,43 +109,15 @@
 void zfcp_fsf_req_free(struct zfcp_fsf_req *req)
 {
 	if (likely(req->pool)) {
+		if (likely(req->qtcb))
+			mempool_free(req->qtcb, req->adapter->pool.qtcb_pool);
 		mempool_free(req, req->pool);
 		return;
 	}
 
-	if (req->qtcb) {
-		kmem_cache_free(zfcp_data.fsf_req_qtcb_cache, req);
-		return;
-	}
-}
-
-/**
- * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
- * @adapter: pointer to struct zfcp_adapter
- *
- * Never ever call this without shutting down the adapter first.
- * Otherwise the adapter would continue using and corrupting s390 storage.
- * Included BUG_ON() call to ensure this is done.
- * ERP is supposed to be the only user of this function.
- */
-void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
-{
-	struct zfcp_fsf_req *req, *tmp;
-	unsigned long flags;
-	LIST_HEAD(remove_queue);
-	unsigned int i;
-
-	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	for (i = 0; i < REQUEST_LIST_SIZE; i++)
-		list_splice_init(&adapter->req_list[i], &remove_queue);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
-	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
-		list_del(&req->list);
-		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
-		zfcp_fsf_req_complete(req);
-	}
+	if (likely(req->qtcb))
+		kmem_cache_free(zfcp_data.qtcb_cache, req->qtcb);
+	kfree(req);
 }
 
 static void zfcp_fsf_status_read_port_closed(struct zfcp_fsf_req *req)
@@ -278,13 +248,13 @@
 	struct fsf_status_read_buffer *sr_buf = req->data;
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
-		zfcp_hba_dbf_event_fsf_unsol("dism", adapter, sr_buf);
-		mempool_free(sr_buf, adapter->pool.data_status_read);
+		zfcp_dbf_hba_fsf_unsol("dism", adapter->dbf, sr_buf);
+		mempool_free(sr_buf, adapter->pool.status_read_data);
 		zfcp_fsf_req_free(req);
 		return;
 	}
 
-	zfcp_hba_dbf_event_fsf_unsol("read", adapter, sr_buf);
+	zfcp_dbf_hba_fsf_unsol("read", adapter->dbf, sr_buf);
 
 	switch (sr_buf->status_type) {
 	case FSF_STATUS_READ_PORT_CLOSED:
@@ -299,7 +269,7 @@
 		dev_warn(&adapter->ccw_device->dev,
 			 "The error threshold for checksum statistics "
 			 "has been exceeded\n");
-		zfcp_hba_dbf_event_berr(adapter, req);
+		zfcp_dbf_hba_berr(adapter->dbf, req);
 		break;
 	case FSF_STATUS_READ_LINK_DOWN:
 		zfcp_fsf_status_read_link_down(req);
@@ -331,11 +301,11 @@
 		break;
 	}
 
-	mempool_free(sr_buf, adapter->pool.data_status_read);
+	mempool_free(sr_buf, adapter->pool.status_read_data);
 	zfcp_fsf_req_free(req);
 
 	atomic_inc(&adapter->stat_miss);
-	queue_work(zfcp_data.work_queue, &adapter->stat_work);
+	queue_work(adapter->work_queue, &adapter->stat_work);
 }
 
 static void zfcp_fsf_fsfstatus_qual_eval(struct zfcp_fsf_req *req)
@@ -385,7 +355,7 @@
 	struct fsf_qtcb *qtcb = req->qtcb;
 	union fsf_prot_status_qual *psq = &qtcb->prefix.prot_status_qual;
 
-	zfcp_hba_dbf_event_fsf_response(req);
+	zfcp_dbf_hba_fsf_response(req);
 
 	if (req->status & ZFCP_STATUS_FSFREQ_DISMISSED) {
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR |
@@ -458,7 +428,7 @@
  * is called to process the completion status and trigger further
  * events related to the FSF request.
  */
-void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
+static void zfcp_fsf_req_complete(struct zfcp_fsf_req *req)
 {
 	if (unlikely(req->fsf_command == FSF_QTCB_UNSOLICITED_STATUS)) {
 		zfcp_fsf_status_read_handler(req);
@@ -472,23 +442,40 @@
 
 	if (req->erp_action)
 		zfcp_erp_notify(req->erp_action, 0);
-	req->status |= ZFCP_STATUS_FSFREQ_COMPLETED;
 
 	if (likely(req->status & ZFCP_STATUS_FSFREQ_CLEANUP))
 		zfcp_fsf_req_free(req);
 	else
-	/* notify initiator waiting for the requests completion */
-	/*
-	 * FIXME: Race! We must not access fsf_req here as it might have been
-	 * cleaned up already due to the set ZFCP_STATUS_FSFREQ_COMPLETED
-	 * flag. It's an improbable case. But, we have the same paranoia for
-	 * the cleanup flag already.
-	 * Might better be handled using complete()?
-	 * (setting the flag and doing wakeup ought to be atomic
-	 *  with regard to checking the flag as long as waitqueue is
-	 *  part of the to be released structure)
-	 */
-		wake_up(&req->completion_wq);
+		complete(&req->completion);
+}
+
+/**
+ * zfcp_fsf_req_dismiss_all - dismiss all fsf requests
+ * @adapter: pointer to struct zfcp_adapter
+ *
+ * Never ever call this without shutting down the adapter first.
+ * Otherwise the adapter would continue using and corrupting s390 storage.
+ * Included BUG_ON() call to ensure this is done.
+ * ERP is supposed to be the only user of this function.
+ */
+void zfcp_fsf_req_dismiss_all(struct zfcp_adapter *adapter)
+{
+	struct zfcp_fsf_req *req, *tmp;
+	unsigned long flags;
+	LIST_HEAD(remove_queue);
+	unsigned int i;
+
+	BUG_ON(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP);
+	spin_lock_irqsave(&adapter->req_list_lock, flags);
+	for (i = 0; i < REQUEST_LIST_SIZE; i++)
+		list_splice_init(&adapter->req_list[i], &remove_queue);
+	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+
+	list_for_each_entry_safe(req, tmp, &remove_queue, list) {
+		list_del(&req->list);
+		req->status |= ZFCP_STATUS_FSFREQ_DISMISSED;
+		zfcp_fsf_req_complete(req);
+	}
 }
 
 static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req)
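
The hunk above also converts synchronous waiters from wait_event() on the
ZFCP_STATUS_FSFREQ_COMPLETED flag to a struct completion signalled once by
the response path, which is what makes the deleted FIXME about the wake-up
race obsolete.  The bare pattern, sketched with hypothetical names:

#include <linux/completion.h>

struct my_req {
	struct completion done;
	int result;
	/* ... request payload ... */
};

/* response path: record the outcome and wake the waiter */
static void my_req_done(struct my_req *req, int result)
{
	req->result = result;
	complete(&req->done);
}

/* process context: submit the request and block until my_req_done() ran */
static int my_req_send_sync(struct my_req *req)
{
	init_completion(&req->done);

	/* ... hand req to the lower layer / hardware here ... */

	wait_for_completion(&req->done);
	return req->result;
}

Because the "done" state lives inside the completion itself, there is no
window between setting a status flag and waking a separate wait queue in
which the waiter could run ahead and free the request.
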
@@ -650,79 +637,77 @@
 	}
 }
 
-static int zfcp_fsf_sbal_check(struct zfcp_adapter *adapter)
+static int zfcp_fsf_sbal_check(struct zfcp_qdio *qdio)
 {
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
 
-	spin_lock_bh(&adapter->req_q_lock);
+	spin_lock_bh(&qdio->req_q_lock);
 	if (atomic_read(&req_q->count))
 		return 1;
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return 0;
 }
 
-static int zfcp_fsf_req_sbal_get(struct zfcp_adapter *adapter)
+static int zfcp_fsf_req_sbal_get(struct zfcp_qdio *qdio)
 {
+	struct zfcp_adapter *adapter = qdio->adapter;
 	long ret;
 
-	spin_unlock_bh(&adapter->req_q_lock);
-	ret = wait_event_interruptible_timeout(adapter->request_wq,
-			       zfcp_fsf_sbal_check(adapter), 5 * HZ);
+	spin_unlock_bh(&qdio->req_q_lock);
+	ret = wait_event_interruptible_timeout(qdio->req_q_wq,
+			       zfcp_fsf_sbal_check(qdio), 5 * HZ);
 	if (ret > 0)
 		return 0;
 	if (!ret) {
-		atomic_inc(&adapter->qdio_outb_full);
+		atomic_inc(&qdio->req_q_full);
 		/* assume hanging outbound queue, try queue recovery */
 		zfcp_erp_adapter_reopen(adapter, 0, "fsrsg_1", NULL);
 	}
 
-	spin_lock_bh(&adapter->req_q_lock);
+	spin_lock_bh(&qdio->req_q_lock);
 	return -EIO;
 }
 
-static struct zfcp_fsf_req *zfcp_fsf_alloc_noqtcb(mempool_t *pool)
+static struct zfcp_fsf_req *zfcp_fsf_alloc(mempool_t *pool)
 {
 	struct zfcp_fsf_req *req;
-	req = mempool_alloc(pool, GFP_ATOMIC);
-	if (!req)
+
+	if (likely(pool))
+		req = mempool_alloc(pool, GFP_ATOMIC);
+	else
+		req = kmalloc(sizeof(*req), GFP_ATOMIC);
+
+	if (unlikely(!req))
 		return NULL;
+
 	memset(req, 0, sizeof(*req));
 	req->pool = pool;
 	return req;
 }
 
-static struct zfcp_fsf_req *zfcp_fsf_alloc_qtcb(mempool_t *pool)
+static struct fsf_qtcb *zfcp_qtcb_alloc(mempool_t *pool)
 {
-	struct zfcp_fsf_req_qtcb *qtcb;
+	struct fsf_qtcb *qtcb;
 
 	if (likely(pool))
 		qtcb = mempool_alloc(pool, GFP_ATOMIC);
 	else
-		qtcb = kmem_cache_alloc(zfcp_data.fsf_req_qtcb_cache,
-					GFP_ATOMIC);
+		qtcb = kmem_cache_alloc(zfcp_data.qtcb_cache, GFP_ATOMIC);
+
 	if (unlikely(!qtcb))
 		return NULL;
 
 	memset(qtcb, 0, sizeof(*qtcb));
-	qtcb->fsf_req.qtcb = &qtcb->qtcb;
-	qtcb->fsf_req.pool = pool;
-
-	return &qtcb->fsf_req;
+	return qtcb;
 }
 
-static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_adapter *adapter,
-						u32 fsf_cmd, int req_flags,
-						mempool_t *pool)
+static struct zfcp_fsf_req *zfcp_fsf_req_create(struct zfcp_qdio *qdio,
+						u32 fsf_cmd, mempool_t *pool)
 {
 	struct qdio_buffer_element *sbale;
-
-	struct zfcp_fsf_req *req;
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
-
-	if (req_flags & ZFCP_REQ_NO_QTCB)
-		req = zfcp_fsf_alloc_noqtcb(pool);
-	else
-		req = zfcp_fsf_alloc_qtcb(pool);
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct zfcp_fsf_req *req = zfcp_fsf_alloc(pool);
 
 	if (unlikely(!req))
 		return ERR_PTR(-ENOMEM);
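
zfcp_fsf_alloc() and zfcp_qtcb_alloc() above follow the same allocation
idiom: take from a mempool when the caller supplies one (so that recovery
paths keep making progress under memory pressure), otherwise fall back to a
plain allocation, and remember which path was taken so the free routine can
undo it.  A compact sketch of that idiom with hypothetical names:

#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/string.h>

struct my_req {
	mempool_t *pool;	/* remembered for the matching free */
	/* ... */
};

static struct my_req *my_req_alloc(mempool_t *pool)
{
	struct my_req *req;

	if (pool)		/* preallocated pool, e.g. recovery path */
		req = mempool_alloc(pool, GFP_ATOMIC);
	else			/* regular path, may fail under pressure */
		req = kmalloc(sizeof(*req), GFP_ATOMIC);
	if (!req)
		return NULL;

	memset(req, 0, sizeof(*req));
	req->pool = pool;
	return req;
}

static void my_req_free(struct my_req *req)
{
	if (req->pool)
		mempool_free(req, req->pool);
	else
		kfree(req);
}
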
@@ -732,22 +717,32 @@
 
 	INIT_LIST_HEAD(&req->list);
 	init_timer(&req->timer);
-	init_waitqueue_head(&req->completion_wq);
+	init_completion(&req->completion);
 
 	req->adapter = adapter;
 	req->fsf_command = fsf_cmd;
 	req->req_id = adapter->req_no;
-	req->sbal_number = 1;
-	req->sbal_first = req_q->first;
-	req->sbal_last = req_q->first;
-	req->sbale_curr = 1;
+	req->queue_req.sbal_number = 1;
+	req->queue_req.sbal_first = req_q->first;
+	req->queue_req.sbal_last = req_q->first;
+	req->queue_req.sbale_curr = 1;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].addr = (void *) req->req_id;
 	sbale[0].flags |= SBAL_FLAGS0_COMMAND;
 
-	if (likely(req->qtcb)) {
-		req->qtcb->prefix.req_seq_no = req->adapter->fsf_req_seq_no;
+	if (likely(fsf_cmd != FSF_QTCB_UNSOLICITED_STATUS)) {
+		if (likely(pool))
+			req->qtcb = zfcp_qtcb_alloc(adapter->pool.qtcb_pool);
+		else
+			req->qtcb = zfcp_qtcb_alloc(NULL);
+
+		if (unlikely(!req->qtcb)) {
+			zfcp_fsf_req_free(req);
+			return ERR_PTR(-ENOMEM);
+		}
+
+		req->qtcb->prefix.req_seq_no = adapter->fsf_req_seq_no;
 		req->qtcb->prefix.req_id = req->req_id;
 		req->qtcb->prefix.ulp_info = 26;
 		req->qtcb->prefix.qtcb_type = fsf_qtcb_type[req->fsf_command];
@@ -765,15 +760,13 @@
 		return ERR_PTR(-EIO);
 	}
 
-	if (likely(req_flags & ZFCP_REQ_AUTO_CLEANUP))
-		req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
-
 	return req;
 }
 
 static int zfcp_fsf_req_send(struct zfcp_fsf_req *req)
 {
 	struct zfcp_adapter *adapter = req->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	unsigned long	     flags;
 	int		     idx;
 	int		     with_qtcb = (req->qtcb != NULL);
@@ -784,9 +777,9 @@
 	list_add_tail(&req->list, &adapter->req_list[idx]);
 	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
 
-	req->qdio_outb_usage = atomic_read(&adapter->req_q.count);
+	req->queue_req.qdio_outb_usage = atomic_read(&qdio->req_q.count);
 	req->issued = get_clock();
-	if (zfcp_qdio_send(req)) {
+	if (zfcp_qdio_send(qdio, &req->queue_req)) {
 		del_timer(&req->timer);
 		spin_lock_irqsave(&adapter->req_list_lock, flags);
 		/* lookup request again, list might have changed */
@@ -811,38 +804,37 @@
  * @req_flags: request flags
  * Returns: 0 on success, ERROR otherwise
  */
-int zfcp_fsf_status_read(struct zfcp_adapter *adapter)
+int zfcp_fsf_status_read(struct zfcp_qdio *qdio)
 {
+	struct zfcp_adapter *adapter = qdio->adapter;
 	struct zfcp_fsf_req *req;
 	struct fsf_status_read_buffer *sr_buf;
 	struct qdio_buffer_element *sbale;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_UNSOLICITED_STATUS,
-				  ZFCP_REQ_NO_QTCB,
-				  adapter->pool.fsf_req_status_read);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_UNSOLICITED_STATUS,
+				  adapter->pool.status_read_req);
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
-	sbale[0].flags |= SBAL_FLAGS0_TYPE_STATUS;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[2].flags |= SBAL_FLAGS_LAST_ENTRY;
-	req->sbale_curr = 2;
+	req->queue_req.sbale_curr = 2;
 
-	sr_buf = mempool_alloc(adapter->pool.data_status_read, GFP_ATOMIC);
+	sr_buf = mempool_alloc(adapter->pool.status_read_data, GFP_ATOMIC);
 	if (!sr_buf) {
 		retval = -ENOMEM;
 		goto failed_buf;
 	}
 	memset(sr_buf, 0, sizeof(*sr_buf));
 	req->data = sr_buf;
-	sbale = zfcp_qdio_sbale_curr(req);
+	sbale = zfcp_qdio_sbale_curr(qdio, &req->queue_req);
 	sbale->addr = (void *) sr_buf;
 	sbale->length = sizeof(*sr_buf);
 
@@ -853,12 +845,12 @@
 	goto out;
 
 failed_req_send:
-	mempool_free(sr_buf, adapter->pool.data_status_read);
+	mempool_free(sr_buf, adapter->pool.status_read_data);
 failed_buf:
 	zfcp_fsf_req_free(req);
-	zfcp_hba_dbf_event_fsf_unsol("fail", adapter, NULL);
+	zfcp_dbf_hba_fsf_unsol("fail", adapter->dbf, NULL);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -900,7 +892,7 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (fsq->word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -925,13 +917,13 @@
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_ABORT_FCP_CMND,
-				  0, adapter->pool.fsf_req_abort);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_ABORT_FCP_CMND,
+				  qdio->adapter->pool.scsi_abort);
 	if (IS_ERR(req)) {
 		req = NULL;
 		goto out;
@@ -941,7 +933,7 @@
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		goto out_error_free;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -959,7 +951,7 @@
 	zfcp_fsf_req_free(req);
 	req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return req;
 }
 
@@ -976,7 +968,7 @@
 
 	switch (header->fsf_status) {
         case FSF_GOOD:
-		zfcp_san_dbf_event_ct_response(req);
+		zfcp_dbf_san_ct_response(req);
 		send_ct->status = 0;
 		break;
         case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1035,8 +1027,10 @@
 				       struct scatterlist *sg_resp,
 				       int max_sbals)
 {
-	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(req);
-	u32 feat = req->adapter->adapter_features;
+	struct zfcp_adapter *adapter = req->adapter;
+	struct qdio_buffer_element *sbale = zfcp_qdio_sbale_req(adapter->qdio,
+							       &req->queue_req);
+	u32 feat = adapter->adapter_features;
 	int bytes;
 
 	if (!(feat & FSF_FEATURE_ELS_CT_CHAINED_SBALS)) {
@@ -1053,18 +1047,25 @@
 		return 0;
 	}
 
-	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_req, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
 	req->qtcb->bottom.support.req_buf_length = bytes;
-	req->sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
+	req->queue_req.sbale_curr = ZFCP_LAST_SBALE_PER_SBAL;
 
-	bytes = zfcp_qdio_sbals_from_sg(req, SBAL_FLAGS0_TYPE_WRITE_READ,
+	bytes = zfcp_qdio_sbals_from_sg(adapter->qdio, &req->queue_req,
+					SBAL_FLAGS0_TYPE_WRITE_READ,
 					sg_resp, max_sbals);
 	if (bytes <= 0)
 		return -EIO;
+
+	/* common settings for ct/gs and els requests */
 	req->qtcb->bottom.support.resp_buf_length = bytes;
+	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
+	req->qtcb->bottom.support.timeout = 2 * R_A_TOV;
+	zfcp_fsf_start_timer(req, 2 * R_A_TOV + 10);
 
 	return 0;
 }
@@ -1073,27 +1074,26 @@
  * zfcp_fsf_send_ct - initiate a Generic Service request (FC-GS)
  * @ct: pointer to struct zfcp_send_ct with data for request
  * @pool: if non-null this mempool is used to allocate struct zfcp_fsf_req
- * @erp_action: if non-null the Generic Service request sent within ERP
  */
-int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool,
-		     struct zfcp_erp_action *erp_action)
+int zfcp_fsf_send_ct(struct zfcp_send_ct *ct, mempool_t *pool)
 {
 	struct zfcp_wka_port *wka_port = ct->wka_port;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_GENERIC,
-				  ZFCP_REQ_AUTO_CLEANUP, pool);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_GENERIC, pool);
+
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els_sbals(req, ct->req, ct->resp,
 					  FSF_MAX_SBALS_PER_REQ);
 	if (ret)
@@ -1101,18 +1101,9 @@
 
 	req->handler = zfcp_fsf_send_ct_handler;
 	req->qtcb->header.port_handle = wka_port->handle;
-	req->qtcb->bottom.support.service_class = FSF_CLASS_3;
-	req->qtcb->bottom.support.timeout = ct->timeout;
 	req->data = ct;
 
-	zfcp_san_dbf_event_ct_request(req);
-
-	if (erp_action) {
-		erp_action->fsf_req = req;
-		req->erp_action = erp_action;
-		zfcp_fsf_start_erp_timer(req);
-	} else
-		zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
+	zfcp_dbf_san_ct_request(req);
 
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
@@ -1122,10 +1113,8 @@
 
 failed_send:
 	zfcp_fsf_req_free(req);
-	if (erp_action)
-		erp_action->fsf_req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1142,7 +1131,7 @@
 
 	switch (header->fsf_status) {
 	case FSF_GOOD:
-		zfcp_san_dbf_event_els_response(req);
+		zfcp_dbf_san_els_response(req);
 		send_els->status = 0;
 		break;
 	case FSF_SERVICE_CLASS_NOT_SUPPORTED:
@@ -1152,7 +1141,7 @@
 		switch (header->fsf_status_qual.word[0]){
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
 			if (port && (send_els->ls_code != ZFCP_LS_ADISC))
-				zfcp_test_link(port);
+				zfcp_fc_test_link(port);
 			/*fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 		case FSF_SQ_RETRY_IF_POSSIBLE:
@@ -1188,35 +1177,32 @@
 int zfcp_fsf_send_els(struct zfcp_send_els *els)
 {
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = els->adapter;
-	struct fsf_qtcb_bottom_support *bottom;
+	struct zfcp_qdio *qdio = els->adapter->qdio;
 	int ret = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_SEND_ELS,
-				  ZFCP_REQ_AUTO_CLEANUP, NULL);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_SEND_ELS, NULL);
+
 	if (IS_ERR(req)) {
 		ret = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	ret = zfcp_fsf_setup_ct_els_sbals(req, els->req, els->resp, 2);
 
 	if (ret)
 		goto failed_send;
 
-	bottom = &req->qtcb->bottom.support;
+	req->qtcb->bottom.support.d_id = els->d_id;
 	req->handler = zfcp_fsf_send_els_handler;
-	bottom->d_id = els->d_id;
-	bottom->service_class = FSF_CLASS_3;
-	bottom->timeout = 2 * R_A_TOV;
 	req->data = els;
 
-	zfcp_san_dbf_event_els_request(req);
+	zfcp_dbf_san_els_request(req);
 
-	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	ret = zfcp_fsf_req_send(req);
 	if (ret)
 		goto failed_send;
@@ -1226,7 +1212,7 @@
 failed_send:
 	zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return ret;
 }
 
@@ -1234,22 +1220,23 @@
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1269,29 +1256,29 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
-int zfcp_fsf_exchange_config_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_config_data_sync(struct zfcp_qdio *qdio,
 				       struct fsf_qtcb_bottom_config *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_CONFIG_DATA,
-				  0, NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_CONFIG_DATA, NULL);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 	req->handler = zfcp_fsf_exchange_config_data_handler;
@@ -1307,16 +1294,15 @@
 
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	if (!retval)
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
 
 	zfcp_fsf_req_free(req);
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1327,26 +1313,28 @@
  */
 int zfcp_fsf_exchange_port_data(struct zfcp_erp_action *erp_action)
 {
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req;
-	struct zfcp_adapter *adapter = erp_action->adapter;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1361,32 +1349,32 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
 /**
  * zfcp_fsf_exchange_port_data_sync - request information about local port
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * @data: pointer to struct fsf_qtcb_bottom_port
  * Returns: 0 on success, error otherwise
  */
-int zfcp_fsf_exchange_port_data_sync(struct zfcp_adapter *adapter,
+int zfcp_fsf_exchange_port_data_sync(struct zfcp_qdio *qdio,
 				     struct fsf_qtcb_bottom_port *data)
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	int retval = -EIO;
 
-	if (!(adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
+	if (!(qdio->adapter->adapter_features & FSF_FEATURE_HBAAPI_MANAGEMENT))
 		return -EOPNOTSUPP;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out_unlock;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_EXCHANGE_PORT_DATA, 0,
-				  NULL);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_EXCHANGE_PORT_DATA, NULL);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out_unlock;
@@ -1395,24 +1383,24 @@
 	if (data)
 		req->data = data;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	req->handler = zfcp_fsf_exchange_port_data_handler;
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 
 	if (!retval)
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
+
 	zfcp_fsf_req_free(req);
 
 	return retval;
 
 out_unlock:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1498,25 +1486,25 @@
 int zfcp_fsf_open_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
-	struct zfcp_fsf_req *req;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_port *port = erp_action->port;
+	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_OPEN_PORT_WITH_DID,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1535,7 +1523,7 @@
 		zfcp_port_put(port);
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1569,23 +1557,24 @@
 int zfcp_fsf_close_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1602,7 +1591,7 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1645,24 +1634,24 @@
 int zfcp_fsf_open_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter,
-				  FSF_QTCB_OPEN_PORT_WITH_DID,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_PORT_WITH_DID,
+				  qdio->adapter->pool.erp_req);
+
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1675,7 +1664,7 @@
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1700,23 +1689,24 @@
 int zfcp_fsf_close_wka_port(struct zfcp_wka_port *wka_port)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = wka_port->adapter;
+	struct zfcp_qdio *qdio = wka_port->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (unlikely(IS_ERR(req))) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1729,7 +1719,7 @@
 	if (retval)
 		zfcp_fsf_req_free(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1791,23 +1781,24 @@
 int zfcp_fsf_close_physical_port(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_PHYSICAL_PORT,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_PHYSICAL_PORT,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -1824,7 +1815,7 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -1895,7 +1886,7 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (header->fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -1964,22 +1955,24 @@
 {
 	struct qdio_buffer_element *sbale;
 	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_OPEN_LUN,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_OPEN_LUN,
+				  adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
         sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
         sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2000,7 +1993,7 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2028,7 +2021,7 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		switch (req->qtcb->header.fsf_status_qual.word[0]) {
 		case FSF_SQ_INVOKE_LINK_TEST_PROCEDURE:
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 			/* fall through */
 		case FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED:
 			req->status |= ZFCP_STATUS_FSFREQ_ERROR;
@@ -2049,22 +2042,24 @@
 int zfcp_fsf_close_unit(struct zfcp_erp_action *erp_action)
 {
 	struct qdio_buffer_element *sbale;
-	struct zfcp_adapter *adapter = erp_action->adapter;
+	struct zfcp_qdio *qdio = erp_action->adapter->qdio;
 	struct zfcp_fsf_req *req;
 	int retval = -EIO;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_CLOSE_LUN,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_erp);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_CLOSE_LUN,
+				  qdio->adapter->pool.erp_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
-	sbale = zfcp_qdio_sbale_req(req);
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_READ;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2082,7 +2077,7 @@
 		erp_action->fsf_req = NULL;
 	}
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2141,8 +2136,8 @@
 	}
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_ERROR)
 		trace.flags |= ZFCP_BLK_REQ_ERROR;
-	trace.inb_usage = fsf_req->qdio_inb_usage;
-	trace.outb_usage = fsf_req->qdio_outb_usage;
+	trace.inb_usage = fsf_req->queue_req.qdio_inb_usage;
+	trace.outb_usage = fsf_req->queue_req.qdio_outb_usage;
 
 	blk_add_driver_data(req->q, req, &trace, sizeof(trace));
 }
@@ -2215,11 +2210,11 @@
 	}
 skip_fsfstatus:
 	if (scpnt->result != 0)
-		zfcp_scsi_dbf_event_result("erro", 3, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("erro", 3, req->adapter->dbf, scpnt, req);
 	else if (scpnt->retries > 0)
-		zfcp_scsi_dbf_event_result("retr", 4, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("retr", 4, req->adapter->dbf, scpnt, req);
 	else
-		zfcp_scsi_dbf_event_result("norm", 6, req->adapter, scpnt, req);
+		zfcp_dbf_scsi_result("norm", 6, req->adapter->dbf, scpnt, req);
 
 	scpnt->host_scribble = NULL;
 	(scpnt->scsi_done) (scpnt);
@@ -2309,7 +2304,7 @@
 	case FSF_ADAPTER_STATUS_AVAILABLE:
 		if (header->fsf_status_qual.word[0] ==
 		    FSF_SQ_INVOKE_LINK_TEST_PROCEDURE)
-			zfcp_test_link(unit->port);
+			zfcp_fc_test_link(unit->port);
 		req->status |= ZFCP_STATUS_FSFREQ_ERROR;
 		break;
 	}
@@ -2350,24 +2345,27 @@
 	unsigned int sbtype = SBAL_FLAGS0_TYPE_READ;
 	int real_bytes, retval = -EIO;
 	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = adapter->qdio;
 
 	if (unlikely(!(atomic_read(&unit->status) &
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		return -EBUSY;
 
-	spin_lock(&adapter->req_q_lock);
-	if (atomic_read(&adapter->req_q.count) <= 0) {
-		atomic_inc(&adapter->qdio_outb_full);
+	spin_lock(&qdio->req_q_lock);
+	if (atomic_read(&qdio->req_q.count) <= 0) {
+		atomic_inc(&qdio->req_q_full);
 		goto out;
 	}
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND,
-				  ZFCP_REQ_AUTO_CLEANUP,
-				  adapter->pool.fsf_req_scsi);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  adapter->pool.scsi_req);
+
 	if (IS_ERR(req)) {
 		retval = PTR_ERR(req);
 		goto out;
 	}
 
+	req->status |= ZFCP_STATUS_FSFREQ_CLEANUP;
 	zfcp_unit_get(unit);
 	req->unit = unit;
 	req->data = scsi_cmnd;
@@ -2419,11 +2417,11 @@
 	req->qtcb->bottom.io.fcp_cmnd_length = sizeof(struct fcp_cmnd_iu) +
 		fcp_cmnd_iu->add_fcp_cdb_length + sizeof(u32);
 
-	real_bytes = zfcp_qdio_sbals_from_sg(req, sbtype,
+	real_bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req, sbtype,
 					     scsi_sglist(scsi_cmnd),
 					     FSF_MAX_SBALS_PER_REQ);
 	if (unlikely(real_bytes < 0)) {
-		if (req->sbal_number >= FSF_MAX_SBALS_PER_REQ) {
+		if (req->queue_req.sbal_number >= FSF_MAX_SBALS_PER_REQ) {
 			dev_err(&adapter->ccw_device->dev,
 				"Oversize data package, unit 0x%016Lx "
 				"on port 0x%016Lx closed\n",
@@ -2448,7 +2446,7 @@
 	zfcp_fsf_req_free(req);
 	scsi_cmnd->host_scribble = NULL;
 out:
-	spin_unlock(&adapter->req_q_lock);
+	spin_unlock(&qdio->req_q_lock);
 	return retval;
 }
 
@@ -2463,17 +2461,19 @@
 	struct qdio_buffer_element *sbale;
 	struct zfcp_fsf_req *req = NULL;
 	struct fcp_cmnd_iu *fcp_cmnd_iu;
-	struct zfcp_adapter *adapter = unit->port->adapter;
+	struct zfcp_qdio *qdio = unit->port->adapter->qdio;
 
 	if (unlikely(!(atomic_read(&unit->status) &
 		       ZFCP_STATUS_COMMON_UNBLOCKED)))
 		return NULL;
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
-	req = zfcp_fsf_req_create(adapter, FSF_QTCB_FCP_CMND, 0,
-				  adapter->pool.fsf_req_scsi);
+
+	req = zfcp_fsf_req_create(qdio, FSF_QTCB_FCP_CMND,
+				  qdio->adapter->pool.scsi_req);
+
 	if (IS_ERR(req)) {
 		req = NULL;
 		goto out;
@@ -2489,7 +2489,7 @@
 	req->qtcb->bottom.io.fcp_cmnd_length = 	sizeof(struct fcp_cmnd_iu) +
 						sizeof(u32);
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= SBAL_FLAGS0_TYPE_WRITE;
 	sbale[1].flags |= SBAL_FLAGS_LAST_ENTRY;
 
@@ -2504,7 +2504,7 @@
 	zfcp_fsf_req_free(req);
 	req = NULL;
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 	return req;
 }
 
@@ -2522,6 +2522,7 @@
 					   struct zfcp_fsf_cfdc *fsf_cfdc)
 {
 	struct qdio_buffer_element *sbale;
+	struct zfcp_qdio *qdio = adapter->qdio;
 	struct zfcp_fsf_req *req = NULL;
 	struct fsf_qtcb_bottom_support *bottom;
 	int direction, retval = -EIO, bytes;
@@ -2540,11 +2541,11 @@
 		return ERR_PTR(-EINVAL);
 	}
 
-	spin_lock_bh(&adapter->req_q_lock);
-	if (zfcp_fsf_req_sbal_get(adapter))
+	spin_lock_bh(&qdio->req_q_lock);
+	if (zfcp_fsf_req_sbal_get(qdio))
 		goto out;
 
-	req = zfcp_fsf_req_create(adapter, fsf_cfdc->command, 0, NULL);
+	req = zfcp_fsf_req_create(qdio, fsf_cfdc->command, NULL);
 	if (IS_ERR(req)) {
 		retval = -EPERM;
 		goto out;
@@ -2552,14 +2553,15 @@
 
 	req->handler = zfcp_fsf_control_file_handler;
 
-	sbale = zfcp_qdio_sbale_req(req);
+	sbale = zfcp_qdio_sbale_req(qdio, &req->queue_req);
 	sbale[0].flags |= direction;
 
 	bottom = &req->qtcb->bottom.support;
 	bottom->operation_subtype = FSF_CFDC_OPERATION_SUBTYPE;
 	bottom->option = fsf_cfdc->option;
 
-	bytes = zfcp_qdio_sbals_from_sg(req, direction, fsf_cfdc->sg,
+	bytes = zfcp_qdio_sbals_from_sg(qdio, &req->queue_req,
+					direction, fsf_cfdc->sg,
 					FSF_MAX_SBALS_PER_REQ);
 	if (bytes != ZFCP_CFDC_MAX_SIZE) {
 		zfcp_fsf_req_free(req);
@@ -2569,12 +2571,53 @@
 	zfcp_fsf_start_timer(req, ZFCP_FSF_REQUEST_TIMEOUT);
 	retval = zfcp_fsf_req_send(req);
 out:
-	spin_unlock_bh(&adapter->req_q_lock);
+	spin_unlock_bh(&qdio->req_q_lock);
 
 	if (!retval) {
-		wait_event(req->completion_wq,
-			   req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+		wait_for_completion(&req->completion);
 		return req;
 	}
 	return ERR_PTR(retval);
 }
+
+/**
+ * zfcp_fsf_reqid_check - validate req_id contained in SBAL returned by QDIO
+ * @qdio: pointer to struct zfcp_qdio
+ * @sbal_idx: response queue index of SBAL to be processed
+ */
+void zfcp_fsf_reqid_check(struct zfcp_qdio *qdio, int sbal_idx)
+{
+	struct zfcp_adapter *adapter = qdio->adapter;
+	struct qdio_buffer *sbal = qdio->resp_q.sbal[sbal_idx];
+	struct qdio_buffer_element *sbale;
+	struct zfcp_fsf_req *fsf_req;
+	unsigned long flags, req_id;
+	int idx;
+
+	for (idx = 0; idx < QDIO_MAX_ELEMENTS_PER_BUFFER; idx++) {
+
+		sbale = &sbal->element[idx];
+		req_id = (unsigned long) sbale->addr;
+		spin_lock_irqsave(&adapter->req_list_lock, flags);
+		fsf_req = zfcp_reqlist_find(adapter, req_id);
+
+		if (!fsf_req)
+			/*
+			 * Unknown request means that we have potential memory
+			 * corruption and must stop the machine immediately.
+			 */
+			panic("error: unknown req_id (%lx) on adapter %s.\n",
+			      req_id, dev_name(&adapter->ccw_device->dev));
+
+		list_del(&fsf_req->list);
+		spin_unlock_irqrestore(&adapter->req_list_lock, flags);
+
+		fsf_req->queue_req.sbal_response = sbal_idx;
+		fsf_req->queue_req.qdio_inb_usage =
+			atomic_read(&qdio->resp_q.count);
+		zfcp_fsf_req_complete(fsf_req);
+
+		if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
+			break;
+	}
+}
diff --git a/drivers/s390/scsi/zfcp_fsf.h b/drivers/s390/scsi/zfcp_fsf.h
index df7f232..dcc7c1d 100644
--- a/drivers/s390/scsi/zfcp_fsf.h
+++ b/drivers/s390/scsi/zfcp_fsf.h
@@ -3,13 +3,14 @@
  *
  * Interface to the FSF support functions.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #ifndef FSF_H
 #define FSF_H
 
 #include <linux/pfn.h>
+#include <linux/scatterlist.h>
 
 #define FSF_QTCB_CURRENT_VERSION		0x00000001
 
diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c
index e0a2153..6c5228b 100644
--- a/drivers/s390/scsi/zfcp_qdio.c
+++ b/drivers/s390/scsi/zfcp_qdio.c
@@ -3,7 +3,7 @@
  *
  * Setup and helper functions to access QDIO.
  *
- * Copyright IBM Corporation 2002, 2008
+ * Copyright IBM Corporation 2002, 2009
  */
 
 #define KMSG_COMPONENT "zfcp"
@@ -34,29 +34,10 @@
 	return &q->sbal[sbal_idx]->element[sbale_idx];
 }
 
-/**
- * zfcp_qdio_free - free memory used by request- and resposne queue
- * @adapter: pointer to the zfcp_adapter structure
- */
-void zfcp_qdio_free(struct zfcp_adapter *adapter)
+static void zfcp_qdio_handler_error(struct zfcp_qdio *qdio, char *id)
 {
-	struct qdio_buffer **sbal_req, **sbal_resp;
-	int p;
+	struct zfcp_adapter *adapter = qdio->adapter;
 
-	if (adapter->ccw_device)
-		qdio_free(adapter->ccw_device);
-
-	sbal_req = adapter->req_q.sbal;
-	sbal_resp = adapter->resp_q.sbal;
-
-	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
-		free_page((unsigned long) sbal_req[p]);
-		free_page((unsigned long) sbal_resp[p]);
-	}
-}
-
-static void zfcp_qdio_handler_error(struct zfcp_adapter *adapter, char *id)
-{
 	dev_warn(&adapter->ccw_device->dev, "A QDIO problem occurred\n");
 
 	zfcp_erp_adapter_reopen(adapter,
@@ -75,72 +56,47 @@
 }
 
 /* this needs to be called prior to updating the queue fill level */
-static void zfcp_qdio_account(struct zfcp_adapter *adapter)
+static inline void zfcp_qdio_account(struct zfcp_qdio *qdio)
 {
-	ktime_t now;
-	s64 span;
+	unsigned long long now, span;
 	int free, used;
 
-	spin_lock(&adapter->qdio_stat_lock);
-	now = ktime_get();
-	span = ktime_us_delta(now, adapter->req_q_time);
-	free = max(0, atomic_read(&adapter->req_q.count));
+	spin_lock(&qdio->stat_lock);
+	now = get_clock_monotonic();
+	span = (now - qdio->req_q_time) >> 12;
+	free = atomic_read(&qdio->req_q.count);
 	used = QDIO_MAX_BUFFERS_PER_Q - free;
-	adapter->req_q_util += used * span;
-	adapter->req_q_time = now;
-	spin_unlock(&adapter->qdio_stat_lock);
+	qdio->req_q_util += used * span;
+	qdio->req_q_time = now;
+	spin_unlock(&qdio->stat_lock);
 }
 
 static void zfcp_qdio_int_req(struct ccw_device *cdev, unsigned int qdio_err,
 			      int queue_no, int first, int count,
 			      unsigned long parm)
 {
-	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
-	struct zfcp_qdio_queue *queue = &adapter->req_q;
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	struct zfcp_qdio_queue *queue = &qdio->req_q;
 
 	if (unlikely(qdio_err)) {
-		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
-		zfcp_qdio_handler_error(adapter, "qdireq1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
+					count);
+		zfcp_qdio_handler_error(qdio, "qdireq1");
 		return;
 	}
 
 	/* cleanup all SBALs being program-owned now */
 	zfcp_qdio_zero_sbals(queue->sbal, first, count);
 
-	zfcp_qdio_account(adapter);
+	zfcp_qdio_account(qdio);
 	atomic_add(count, &queue->count);
-	wake_up(&adapter->request_wq);
+	wake_up(&qdio->req_q_wq);
 }
 
-static void zfcp_qdio_reqid_check(struct zfcp_adapter *adapter,
-				  unsigned long req_id, int sbal_idx)
+static void zfcp_qdio_resp_put_back(struct zfcp_qdio *qdio, int processed)
 {
-	struct zfcp_fsf_req *fsf_req;
-	unsigned long flags;
-
-	spin_lock_irqsave(&adapter->req_list_lock, flags);
-	fsf_req = zfcp_reqlist_find(adapter, req_id);
-
-	if (!fsf_req)
-		/*
-		 * Unknown request means that we have potentially memory
-		 * corruption and must stop the machine immediatly.
-		 */
-		panic("error: unknown request id (%lx) on adapter %s.\n",
-		      req_id, dev_name(&adapter->ccw_device->dev));
-
-	zfcp_reqlist_remove(adapter, fsf_req);
-	spin_unlock_irqrestore(&adapter->req_list_lock, flags);
-
-	fsf_req->sbal_response = sbal_idx;
-	fsf_req->qdio_inb_usage = atomic_read(&adapter->resp_q.count);
-	zfcp_fsf_req_complete(fsf_req);
-}
-
-static void zfcp_qdio_resp_put_back(struct zfcp_adapter *adapter, int processed)
-{
-	struct zfcp_qdio_queue *queue = &adapter->resp_q;
-	struct ccw_device *cdev = adapter->ccw_device;
+	struct zfcp_qdio_queue *queue = &qdio->resp_q;
+	struct ccw_device *cdev = qdio->adapter->ccw_device;
 	u8 count, start = queue->first;
 	unsigned int retval;
 
@@ -162,14 +118,13 @@
 			       int queue_no, int first, int count,
 			       unsigned long parm)
 {
-	struct zfcp_adapter *adapter = (struct zfcp_adapter *) parm;
-	struct zfcp_qdio_queue *queue = &adapter->resp_q;
-	struct qdio_buffer_element *sbale;
-	int sbal_idx, sbale_idx, sbal_no;
+	struct zfcp_qdio *qdio = (struct zfcp_qdio *) parm;
+	int sbal_idx, sbal_no;
 
 	if (unlikely(qdio_err)) {
-		zfcp_hba_dbf_event_qdio(adapter, qdio_err, first, count);
-		zfcp_qdio_handler_error(adapter, "qdires1");
+		zfcp_dbf_hba_qdio(qdio->adapter->dbf, qdio_err, first,
+					count);
+		zfcp_qdio_handler_error(qdio, "qdires1");
 		return;
 	}
 
@@ -179,39 +134,27 @@
 	 */
 	for (sbal_no = 0; sbal_no < count; sbal_no++) {
 		sbal_idx = (first + sbal_no) % QDIO_MAX_BUFFERS_PER_Q;
-
 		/* go through all SBALEs of SBAL */
-		for (sbale_idx = 0; sbale_idx < QDIO_MAX_ELEMENTS_PER_BUFFER;
-		     sbale_idx++) {
-			sbale = zfcp_qdio_sbale(queue, sbal_idx, sbale_idx);
-			zfcp_qdio_reqid_check(adapter,
-					      (unsigned long) sbale->addr,
-					      sbal_idx);
-			if (likely(sbale->flags & SBAL_FLAGS_LAST_ENTRY))
-				break;
-		};
-
-		if (unlikely(!(sbale->flags & SBAL_FLAGS_LAST_ENTRY)))
-			dev_warn(&adapter->ccw_device->dev,
-				 "A QDIO protocol error occurred, "
-				 "operations continue\n");
+		zfcp_fsf_reqid_check(qdio, sbal_idx);
 	}
 
 	/*
 	 * put range of SBALs back to response queue
 	 * (including SBALs which have already been free before)
 	 */
-	zfcp_qdio_resp_put_back(adapter, count);
+	zfcp_qdio_resp_put_back(qdio, count);
 }
 
 /**
  * zfcp_qdio_sbale_req - return ptr to SBALE of req_q for a struct zfcp_fsf_req
- * @fsf_req: pointer to struct fsf_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_queue_req
  * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
-struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_fsf_req *req)
+struct qdio_buffer_element *zfcp_qdio_sbale_req(struct zfcp_qdio *qdio,
+						struct zfcp_queue_req *q_req)
 {
-	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last, 0);
+	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last, 0);
 }
 
 /**
@@ -219,74 +162,80 @@
  * @fsf_req: pointer to struct fsf_req
  * Returns: pointer to qdio_buffer_element (SBALE) structure
  */
-struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_fsf_req *req)
+struct qdio_buffer_element *zfcp_qdio_sbale_curr(struct zfcp_qdio *qdio,
+						 struct zfcp_queue_req *q_req)
 {
-	return zfcp_qdio_sbale(&req->adapter->req_q, req->sbal_last,
-			       req->sbale_curr);
+	return zfcp_qdio_sbale(&qdio->req_q, q_req->sbal_last,
+			       q_req->sbale_curr);
 }
 
-static void zfcp_qdio_sbal_limit(struct zfcp_fsf_req *fsf_req, int max_sbals)
+static void zfcp_qdio_sbal_limit(struct zfcp_qdio *qdio,
+				 struct zfcp_queue_req *q_req, int max_sbals)
 {
-	int count = atomic_read(&fsf_req->adapter->req_q.count);
+	int count = atomic_read(&qdio->req_q.count);
 	count = min(count, max_sbals);
-	fsf_req->sbal_limit = (fsf_req->sbal_first + count - 1)
+	q_req->sbal_limit = (q_req->sbal_first + count - 1)
 					% QDIO_MAX_BUFFERS_PER_Q;
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbal_chain(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
+zfcp_qdio_sbal_chain(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+		     unsigned long sbtype)
 {
 	struct qdio_buffer_element *sbale;
 
 	/* set last entry flag in current SBALE of current SBAL */
-	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
 	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	/* don't exceed last allowed SBAL */
-	if (fsf_req->sbal_last == fsf_req->sbal_limit)
+	if (q_req->sbal_last == q_req->sbal_limit)
 		return NULL;
 
 	/* set chaining flag in first SBALE of current SBAL */
-	sbale = zfcp_qdio_sbale_req(fsf_req);
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
 	sbale->flags |= SBAL_FLAGS0_MORE_SBALS;
 
 	/* calculate index of next SBAL */
-	fsf_req->sbal_last++;
-	fsf_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
+	q_req->sbal_last++;
+	q_req->sbal_last %= QDIO_MAX_BUFFERS_PER_Q;
 
 	/* keep this requests number of SBALs up-to-date */
-	fsf_req->sbal_number++;
+	q_req->sbal_number++;
 
 	/* start at first SBALE of new SBAL */
-	fsf_req->sbale_curr = 0;
+	q_req->sbale_curr = 0;
 
 	/* set storage-block type for new SBAL */
-	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
 	sbale->flags |= sbtype;
 
 	return sbale;
 }
 
 static struct qdio_buffer_element *
-zfcp_qdio_sbale_next(struct zfcp_fsf_req *fsf_req, unsigned long sbtype)
+zfcp_qdio_sbale_next(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req,
+		     unsigned int sbtype)
 {
-	if (fsf_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
-		return zfcp_qdio_sbal_chain(fsf_req, sbtype);
-	fsf_req->sbale_curr++;
-	return zfcp_qdio_sbale_curr(fsf_req);
+	if (q_req->sbale_curr == ZFCP_LAST_SBALE_PER_SBAL)
+		return zfcp_qdio_sbal_chain(qdio, q_req, sbtype);
+	q_req->sbale_curr++;
+	return zfcp_qdio_sbale_curr(qdio, q_req);
 }
 
-static void zfcp_qdio_undo_sbals(struct zfcp_fsf_req *fsf_req)
+static void zfcp_qdio_undo_sbals(struct zfcp_qdio *qdio,
+				 struct zfcp_queue_req *q_req)
 {
-	struct qdio_buffer **sbal = fsf_req->adapter->req_q.sbal;
-	int first = fsf_req->sbal_first;
-	int last = fsf_req->sbal_last;
+	struct qdio_buffer **sbal = qdio->req_q.sbal;
+	int first = q_req->sbal_first;
+	int last = q_req->sbal_last;
 	int count = (last - first + QDIO_MAX_BUFFERS_PER_Q) %
 		QDIO_MAX_BUFFERS_PER_Q + 1;
 	zfcp_qdio_zero_sbals(sbal, first, count);
 }
 
-static int zfcp_qdio_fill_sbals(struct zfcp_fsf_req *fsf_req,
+static int zfcp_qdio_fill_sbals(struct zfcp_qdio *qdio,
+				struct zfcp_queue_req *q_req,
 				unsigned int sbtype, void *start_addr,
 				unsigned int total_length)
 {
@@ -297,10 +246,10 @@
 	/* split segment up */
 	for (addr = start_addr, remaining = total_length; remaining > 0;
 	     addr += length, remaining -= length) {
-		sbale = zfcp_qdio_sbale_next(fsf_req, sbtype);
+		sbale = zfcp_qdio_sbale_next(qdio, q_req, sbtype);
 		if (!sbale) {
-			atomic_inc(&fsf_req->adapter->qdio_outb_full);
-			zfcp_qdio_undo_sbals(fsf_req);
+			atomic_inc(&qdio->req_q_full);
+			zfcp_qdio_undo_sbals(qdio, q_req);
 			return -EINVAL;
 		}
 
@@ -322,29 +271,31 @@
  * @max_sbals: upper bound for number of SBALs to be used
  * Returns: number of bytes, or error (negativ)
  */
-int zfcp_qdio_sbals_from_sg(struct zfcp_fsf_req *fsf_req, unsigned long sbtype,
-			    struct scatterlist *sg, int max_sbals)
+int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
+			    struct zfcp_queue_req *q_req,
+			    unsigned long sbtype, struct scatterlist *sg,
+			    int max_sbals)
 {
 	struct qdio_buffer_element *sbale;
 	int retval, bytes = 0;
 
 	/* figure out last allowed SBAL */
-	zfcp_qdio_sbal_limit(fsf_req, max_sbals);
+	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);
 
 	/* set storage-block type for this request */
-	sbale = zfcp_qdio_sbale_req(fsf_req);
+	sbale = zfcp_qdio_sbale_req(qdio, q_req);
 	sbale->flags |= sbtype;
 
 	for (; sg; sg = sg_next(sg)) {
-		retval = zfcp_qdio_fill_sbals(fsf_req, sbtype, sg_virt(sg),
-					      sg->length);
+		retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
+					      sg_virt(sg), sg->length);
 		if (retval < 0)
 			return retval;
 		bytes += sg->length;
 	}
 
 	/* assume that no other SBALEs are to follow in the same SBAL */
-	sbale = zfcp_qdio_sbale_curr(fsf_req);
+	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
 	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;
 
 	return bytes;
@@ -352,21 +303,22 @@
 
 /**
  * zfcp_qdio_send - set PCI flag in first SBALE and send req to QDIO
- * @fsf_req: pointer to struct zfcp_fsf_req
+ * @qdio: pointer to struct zfcp_qdio
+ * @q_req: pointer to struct zfcp_queue_req
  * Returns: 0 on success, error otherwise
  */
-int zfcp_qdio_send(struct zfcp_fsf_req *fsf_req)
+int zfcp_qdio_send(struct zfcp_qdio *qdio, struct zfcp_queue_req *q_req)
 {
-	struct zfcp_adapter *adapter = fsf_req->adapter;
-	struct zfcp_qdio_queue *req_q = &adapter->req_q;
-	int first = fsf_req->sbal_first;
-	int count = fsf_req->sbal_number;
+	struct zfcp_qdio_queue *req_q = &qdio->req_q;
+	int first = q_req->sbal_first;
+	int count = q_req->sbal_number;
 	int retval;
 	unsigned int qdio_flags = QDIO_FLAG_SYNC_OUTPUT;
 
-	zfcp_qdio_account(adapter);
+	zfcp_qdio_account(qdio);
 
-	retval = do_QDIO(adapter->ccw_device, qdio_flags, 0, first, count);
+	retval = do_QDIO(qdio->adapter->ccw_device, qdio_flags, 0, first,
+			 count);
 	if (unlikely(retval)) {
 		zfcp_qdio_zero_sbals(req_q->sbal, first, count);
 		return retval;
@@ -379,63 +331,69 @@
 	return 0;
 }
 
+
+static void zfcp_qdio_setup_init_data(struct qdio_initialize *id,
+				      struct zfcp_qdio *qdio)
+{
+
+	id->cdev = qdio->adapter->ccw_device;
+	id->q_format = QDIO_ZFCP_QFMT;
+	memcpy(id->adapter_name, dev_name(&id->cdev->dev), 8);
+	ASCEBC(id->adapter_name, 8);
+	id->qib_param_field_format = 0;
+	id->qib_param_field = NULL;
+	id->input_slib_elements = NULL;
+	id->output_slib_elements = NULL;
+	id->no_input_qs = 1;
+	id->no_output_qs = 1;
+	id->input_handler = zfcp_qdio_int_resp;
+	id->output_handler = zfcp_qdio_int_req;
+	id->int_parm = (unsigned long) qdio;
+	id->flags = QDIO_INBOUND_0COPY_SBALS |
+		    QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
+	id->input_sbal_addr_array = (void **) (qdio->resp_q.sbal);
+	id->output_sbal_addr_array = (void **) (qdio->req_q.sbal);
+}
+
 /**
  * zfcp_qdio_allocate - allocate queue memory and initialize QDIO data
  * @adapter: pointer to struct zfcp_adapter
  * Returns: -ENOMEM on memory allocation error or return value from
  *          qdio_allocate
  */
-int zfcp_qdio_allocate(struct zfcp_adapter *adapter)
+static int zfcp_qdio_allocate(struct zfcp_qdio *qdio)
 {
-	struct qdio_initialize *init_data;
+	struct qdio_initialize init_data;
 
-	if (zfcp_qdio_buffers_enqueue(adapter->req_q.sbal) ||
-		   zfcp_qdio_buffers_enqueue(adapter->resp_q.sbal))
+	if (zfcp_qdio_buffers_enqueue(qdio->req_q.sbal) ||
+	    zfcp_qdio_buffers_enqueue(qdio->resp_q.sbal))
 		return -ENOMEM;
 
-	init_data = &adapter->qdio_init_data;
+	zfcp_qdio_setup_init_data(&init_data, qdio);
 
-	init_data->cdev = adapter->ccw_device;
-	init_data->q_format = QDIO_ZFCP_QFMT;
-	memcpy(init_data->adapter_name, dev_name(&adapter->ccw_device->dev), 8);
-	ASCEBC(init_data->adapter_name, 8);
-	init_data->qib_param_field_format = 0;
-	init_data->qib_param_field = NULL;
-	init_data->input_slib_elements = NULL;
-	init_data->output_slib_elements = NULL;
-	init_data->no_input_qs = 1;
-	init_data->no_output_qs = 1;
-	init_data->input_handler = zfcp_qdio_int_resp;
-	init_data->output_handler = zfcp_qdio_int_req;
-	init_data->int_parm = (unsigned long) adapter;
-	init_data->flags = QDIO_INBOUND_0COPY_SBALS |
-			QDIO_OUTBOUND_0COPY_SBALS | QDIO_USE_OUTBOUND_PCIS;
-	init_data->input_sbal_addr_array =
-			(void **) (adapter->resp_q.sbal);
-	init_data->output_sbal_addr_array =
-			(void **) (adapter->req_q.sbal);
-
-	return qdio_allocate(init_data);
+	return qdio_allocate(&init_data);
 }
 
 /**
  * zfcp_close_qdio - close qdio queues for an adapter
+ * @qdio: pointer to struct zfcp_qdio
  */
-void zfcp_qdio_close(struct zfcp_adapter *adapter)
+void zfcp_qdio_close(struct zfcp_qdio *qdio)
 {
 	struct zfcp_qdio_queue *req_q;
 	int first, count;
 
-	if (!(atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
+	if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP))
 		return;
 
 	/* clear QDIOUP flag, thus do_QDIO is not called during qdio_shutdown */
-	req_q = &adapter->req_q;
-	spin_lock_bh(&adapter->req_q_lock);
-	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &adapter->status);
-	spin_unlock_bh(&adapter->req_q_lock);
+	req_q = &qdio->req_q;
+	spin_lock_bh(&qdio->req_q_lock);
+	atomic_clear_mask(ZFCP_STATUS_ADAPTER_QDIOUP, &qdio->adapter->status);
+	spin_unlock_bh(&qdio->req_q_lock);
 
-	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+	qdio_shutdown(qdio->adapter->ccw_device,
+		      QDIO_FLAG_CLEANUP_USING_CLEAR);
 
 	/* cleanup used outbound sbals */
 	count = atomic_read(&req_q->count);
@@ -446,50 +404,99 @@
 	}
 	req_q->first = 0;
 	atomic_set(&req_q->count, 0);
-	adapter->resp_q.first = 0;
-	atomic_set(&adapter->resp_q.count, 0);
+	qdio->resp_q.first = 0;
+	atomic_set(&qdio->resp_q.count, 0);
 }
 
 /**
  * zfcp_qdio_open - prepare and initialize response queue
- * @adapter: pointer to struct zfcp_adapter
+ * @qdio: pointer to struct zfcp_qdio
  * Returns: 0 on success, otherwise -EIO
  */
-int zfcp_qdio_open(struct zfcp_adapter *adapter)
+int zfcp_qdio_open(struct zfcp_qdio *qdio)
 {
 	struct qdio_buffer_element *sbale;
+	struct qdio_initialize init_data;
+	struct ccw_device *cdev = qdio->adapter->ccw_device;
 	int cc;
 
-	if (atomic_read(&adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
+	if (atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)
 		return -EIO;
 
-	if (qdio_establish(&adapter->qdio_init_data))
+	zfcp_qdio_setup_init_data(&init_data, qdio);
+
+	if (qdio_establish(&init_data))
 		goto failed_establish;
 
-	if (qdio_activate(adapter->ccw_device))
+	if (qdio_activate(cdev))
 		goto failed_qdio;
 
 	for (cc = 0; cc < QDIO_MAX_BUFFERS_PER_Q; cc++) {
-		sbale = &(adapter->resp_q.sbal[cc]->element[0]);
+		sbale = &(qdio->resp_q.sbal[cc]->element[0]);
 		sbale->length = 0;
 		sbale->flags = SBAL_FLAGS_LAST_ENTRY;
 		sbale->addr = NULL;
 	}
 
-	if (do_QDIO(adapter->ccw_device, QDIO_FLAG_SYNC_INPUT, 0, 0,
+	if (do_QDIO(cdev, QDIO_FLAG_SYNC_INPUT, 0, 0,
 		     QDIO_MAX_BUFFERS_PER_Q))
 		goto failed_qdio;
 
 	/* set index of first avalable SBALS / number of available SBALS */
-	adapter->req_q.first = 0;
-	atomic_set(&adapter->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
+	qdio->req_q.first = 0;
+	atomic_set(&qdio->req_q.count, QDIO_MAX_BUFFERS_PER_Q);
 
 	return 0;
 
 failed_qdio:
-	qdio_shutdown(adapter->ccw_device, QDIO_FLAG_CLEANUP_USING_CLEAR);
+	qdio_shutdown(cdev, QDIO_FLAG_CLEANUP_USING_CLEAR);
 failed_establish:
-	dev_err(&adapter->ccw_device->dev,
+	dev_err(&cdev->dev,
 		"Setting up the QDIO connection to the FCP adapter failed\n");
 	return -EIO;
 }
+
+void zfcp_qdio_destroy(struct zfcp_qdio *qdio)
+{
+	struct qdio_buffer **sbal_req, **sbal_resp;
+	int p;
+
+	if (!qdio)
+		return;
+
+	if (qdio->adapter->ccw_device)
+		qdio_free(qdio->adapter->ccw_device);
+
+	sbal_req = qdio->req_q.sbal;
+	sbal_resp = qdio->resp_q.sbal;
+
+	for (p = 0; p < QDIO_MAX_BUFFERS_PER_Q; p += QBUFF_PER_PAGE) {
+		free_page((unsigned long) sbal_req[p]);
+		free_page((unsigned long) sbal_resp[p]);
+	}
+
+	kfree(qdio);
+}
+
+int zfcp_qdio_setup(struct zfcp_adapter *adapter)
+{
+	struct zfcp_qdio *qdio;
+
+	qdio = kzalloc(sizeof(struct zfcp_qdio), GFP_KERNEL);
+	if (!qdio)
+		return -ENOMEM;
+
+	qdio->adapter = adapter;
+
+	if (zfcp_qdio_allocate(qdio)) {
+		zfcp_qdio_destroy(qdio);
+		return -ENOMEM;
+	}
+
+	spin_lock_init(&qdio->req_q_lock);
+	spin_lock_init(&qdio->stat_lock);
+
+	adapter->qdio = qdio;
+	return 0;
+}
+
diff --git a/drivers/s390/scsi/zfcp_scsi.c b/drivers/s390/scsi/zfcp_scsi.c
index 6925a17..3ff726a 100644
--- a/drivers/s390/scsi/zfcp_scsi.c
+++ b/drivers/s390/scsi/zfcp_scsi.c
@@ -9,8 +9,9 @@
 #define KMSG_COMPONENT "zfcp"
 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
 
-#include "zfcp_ext.h"
 #include <asm/atomic.h>
+#include "zfcp_ext.h"
+#include "zfcp_dbf.h"
 
 static unsigned int default_depth = 32;
 module_param_named(queue_depth, default_depth, uint, 0600);
@@ -52,11 +53,11 @@
 
 static void zfcp_scsi_command_fail(struct scsi_cmnd *scpnt, int result)
 {
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *) scpnt->device->host->hostdata[0];
 	set_host_byte(scpnt, result);
 	if ((scpnt->device != NULL) && (scpnt->device->host != NULL))
-		zfcp_scsi_dbf_event_result("fail", 4,
-			(struct zfcp_adapter*) scpnt->device->host->hostdata[0],
-			scpnt, NULL);
+		zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
 	/* return directly */
 	scpnt->scsi_done(scpnt);
 }
@@ -92,7 +93,7 @@
 	scsi_result = fc_remote_port_chkready(rport);
 	if (unlikely(scsi_result)) {
 		scpnt->result = scsi_result;
-		zfcp_scsi_dbf_event_result("fail", 4, adapter, scpnt, NULL);
+		zfcp_dbf_scsi_result("fail", 4, adapter->dbf, scpnt, NULL);
 		scpnt->scsi_done(scpnt);
 		return 0;
 	}
@@ -180,8 +181,8 @@
 	spin_unlock(&adapter->req_list_lock);
 	if (!old_req) {
 		write_unlock_irqrestore(&adapter->abort_lock, flags);
-		zfcp_scsi_dbf_event_abort("lte1", adapter, scpnt, NULL,
-					  old_reqid);
+		zfcp_dbf_scsi_abort("lte1", adapter->dbf, scpnt, NULL,
+				    old_reqid);
 		return FAILED; /* completion could be in progress */
 	}
 	old_req->data = NULL;
@@ -197,16 +198,15 @@
 		zfcp_erp_wait(adapter);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_scsi_dbf_event_abort("nres", adapter, scpnt, NULL,
-						  old_reqid);
+			zfcp_dbf_scsi_abort("nres", adapter->dbf, scpnt, NULL,
+					    old_reqid);
 			return SUCCESS;
 		}
 	}
 	if (!abrt_req)
 		return FAILED;
 
-	wait_event(abrt_req->completion_wq,
-		   abrt_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	wait_for_completion(&abrt_req->completion);
 
 	if (abrt_req->status & ZFCP_STATUS_FSFREQ_ABORTSUCCEEDED)
 		dbf_tag = "okay";
@@ -216,7 +216,7 @@
 		dbf_tag = "fail";
 		retval = FAILED;
 	}
-	zfcp_scsi_dbf_event_abort(dbf_tag, adapter, scpnt, abrt_req, old_reqid);
+	zfcp_dbf_scsi_abort(dbf_tag, adapter->dbf, scpnt, abrt_req, old_reqid);
 	zfcp_fsf_req_free(abrt_req);
 	return retval;
 }
@@ -225,7 +225,7 @@
 {
 	struct zfcp_unit *unit = scpnt->device->hostdata;
 	struct zfcp_adapter *adapter = unit->port->adapter;
-	struct zfcp_fsf_req *fsf_req;
+	struct zfcp_fsf_req *fsf_req = NULL;
 	int retval = SUCCESS;
 	int retry = 3;
 
@@ -237,25 +237,23 @@
 		zfcp_erp_wait(adapter);
 		if (!(atomic_read(&adapter->status) &
 		      ZFCP_STATUS_COMMON_RUNNING)) {
-			zfcp_scsi_dbf_event_devreset("nres", tm_flags, unit,
-						     scpnt);
+			zfcp_dbf_scsi_devreset("nres", tm_flags, unit, scpnt);
 			return SUCCESS;
 		}
 	}
 	if (!fsf_req)
 		return FAILED;
 
-	wait_event(fsf_req->completion_wq,
-		   fsf_req->status & ZFCP_STATUS_FSFREQ_COMPLETED);
+	wait_for_completion(&fsf_req->completion);
 
 	if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCFAILED) {
-		zfcp_scsi_dbf_event_devreset("fail", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("fail", tm_flags, unit, scpnt);
 		retval = FAILED;
 	} else if (fsf_req->status & ZFCP_STATUS_FSFREQ_TMFUNCNOTSUPP) {
-		zfcp_scsi_dbf_event_devreset("nsup", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("nsup", tm_flags, unit, scpnt);
 		retval = FAILED;
 	} else
-		zfcp_scsi_dbf_event_devreset("okay", tm_flags, unit, scpnt);
+		zfcp_dbf_scsi_devreset("okay", tm_flags, unit, scpnt);
 
 	zfcp_fsf_req_free(fsf_req);
 	return retval;
@@ -430,7 +428,7 @@
 	if (!data)
 		return NULL;
 
-	ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
 	if (ret) {
 		kfree(data);
 		return NULL;
@@ -459,7 +457,7 @@
 	if (!data)
 		return;
 
-	ret = zfcp_fsf_exchange_port_data_sync(adapter, data);
+	ret = zfcp_fsf_exchange_port_data_sync(adapter->qdio, data);
 	if (ret)
 		kfree(data);
 	else {
@@ -493,21 +491,6 @@
 }
 
 /**
- * zfcp_scsi_dev_loss_tmo_callbk - Free any reference to rport
- * @rport: The rport that is about to be deleted.
- */
-static void zfcp_scsi_dev_loss_tmo_callbk(struct fc_rport *rport)
-{
-	struct zfcp_port *port;
-
-	write_lock_irq(&zfcp_data.config_lock);
-	port = rport->dd_data;
-	if (port)
-		port->rport = NULL;
-	write_unlock_irq(&zfcp_data.config_lock);
-}
-
-/**
  * zfcp_scsi_terminate_rport_io - Terminate all I/O on a rport
  * @rport: The FC rport where to teminate I/O
  *
@@ -518,9 +501,12 @@
 static void zfcp_scsi_terminate_rport_io(struct fc_rport *rport)
 {
 	struct zfcp_port *port;
+	struct Scsi_Host *shost = rport_to_shost(rport);
+	struct zfcp_adapter *adapter =
+		(struct zfcp_adapter *)shost->hostdata[0];
 
 	write_lock_irq(&zfcp_data.config_lock);
-	port = rport->dd_data;
+	port = zfcp_get_port_by_wwpn(adapter, rport->port_name);
 	if (port)
 		zfcp_port_get(port);
 	write_unlock_irq(&zfcp_data.config_lock);
@@ -552,7 +538,6 @@
 		return;
 	}
 
-	rport->dd_data = port;
 	rport->maxframe_size = port->maxframe_size;
 	rport->supported_classes = port->supported_classes;
 	port->rport = rport;
@@ -573,7 +558,7 @@
 	zfcp_port_get(port);
 	port->rport_task = RPORT_ADD;
 
-	if (!queue_work(zfcp_data.work_queue, &port->rport_work))
+	if (!queue_work(port->adapter->work_queue, &port->rport_work))
 		zfcp_port_put(port);
 }
 
@@ -582,8 +567,11 @@
 	zfcp_port_get(port);
 	port->rport_task = RPORT_DEL;
 
-	if (!queue_work(zfcp_data.work_queue, &port->rport_work))
-		zfcp_port_put(port);
+	if (port->rport && queue_work(port->adapter->work_queue,
+				      &port->rport_work))
+		return;
+
+	zfcp_port_put(port);
 }
 
 void zfcp_scsi_schedule_rports_block(struct zfcp_adapter *adapter)
@@ -662,7 +650,6 @@
 	.reset_fc_host_stats = zfcp_reset_fc_host_stats,
 	.set_rport_dev_loss_tmo = zfcp_set_rport_dev_loss_tmo,
 	.get_host_port_state = zfcp_get_host_port_state,
-	.dev_loss_tmo_callbk = zfcp_scsi_dev_loss_tmo_callbk,
 	.terminate_rport_io = zfcp_scsi_terminate_rport_io,
 	.show_host_port_state = 1,
 	.bsg_request = zfcp_execute_fc_job,
diff --git a/drivers/s390/scsi/zfcp_sysfs.c b/drivers/s390/scsi/zfcp_sysfs.c
index 0fe5cce..079a8cf 100644
--- a/drivers/s390/scsi/zfcp_sysfs.c
+++ b/drivers/s390/scsi/zfcp_sysfs.c
@@ -88,7 +88,7 @@
 	unsigned long val;						       \
 	int retval = 0;							       \
 									       \
-	down(&zfcp_data.config_sema);					       \
+	mutex_lock(&zfcp_data.config_mutex);				       \
 	if (atomic_read(&_feat->status) & ZFCP_STATUS_COMMON_REMOVE) {	       \
 		retval = -EBUSY;					       \
 		goto out;						       \
@@ -105,7 +105,7 @@
 				  _reopen_id, NULL);			       \
 	zfcp_erp_wait(_adapter);					       \
 out:									       \
-	up(&zfcp_data.config_sema);					       \
+	mutex_unlock(&zfcp_data.config_mutex);				       \
 	return retval ? retval : (ssize_t) count;			       \
 }									       \
 static ZFCP_DEV_ATTR(_feat, failed, S_IWUSR | S_IRUGO,			       \
@@ -126,7 +126,7 @@
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE)
 		return -EBUSY;
 
-	ret = zfcp_scan_ports(adapter);
+	ret = zfcp_fc_scan_ports(adapter);
 	return ret ? ret : (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_rescan, S_IWUSR, NULL,
@@ -142,7 +142,7 @@
 	int retval = 0;
 	LIST_HEAD(port_remove_lh);
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	if (atomic_read(&adapter->status) & ZFCP_STATUS_COMMON_REMOVE) {
 		retval = -EBUSY;
 		goto out;
@@ -173,7 +173,7 @@
 	zfcp_port_put(port);
 	zfcp_port_dequeue(port);
  out:
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return retval ? retval : (ssize_t) count;
 }
 static ZFCP_DEV_ATTR(adapter, port_remove, S_IWUSR, NULL,
@@ -207,7 +207,7 @@
 	u64 fcp_lun;
 	int retval = -EINVAL;
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
 		retval = -EBUSY;
 		goto out;
@@ -226,7 +226,7 @@
 	zfcp_erp_wait(unit->port->adapter);
 	zfcp_unit_put(unit);
 out:
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_add, S_IWUSR, NULL, zfcp_sysfs_unit_add_store);
@@ -241,7 +241,7 @@
 	int retval = 0;
 	LIST_HEAD(unit_remove_lh);
 
-	down(&zfcp_data.config_sema);
+	mutex_lock(&zfcp_data.config_mutex);
 	if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_REMOVE) {
 		retval = -EBUSY;
 		goto out;
@@ -282,7 +282,7 @@
 	zfcp_unit_put(unit);
 	zfcp_unit_dequeue(unit);
 out:
-	up(&zfcp_data.config_sema);
+	mutex_unlock(&zfcp_data.config_mutex);
 	return retval ? retval : (ssize_t) count;
 }
 static DEVICE_ATTR(unit_remove, S_IWUSR, NULL, zfcp_sysfs_unit_remove_store);
@@ -425,7 +425,7 @@
 	if (!qtcb_port)
 		return -ENOMEM;
 
-	retval = zfcp_fsf_exchange_port_data_sync(adapter, qtcb_port);
+	retval = zfcp_fsf_exchange_port_data_sync(adapter->qdio, qtcb_port);
 	if (!retval)
 		retval = sprintf(buf, "%u %u %u\n", qtcb_port->cp_util,
 				 qtcb_port->cb_util, qtcb_port->a_util);
@@ -451,7 +451,7 @@
 	if (!qtcb_config)
 		return -ENOMEM;
 
-	retval = zfcp_fsf_exchange_config_data_sync(adapter, qtcb_config);
+	retval = zfcp_fsf_exchange_config_data_sync(adapter->qdio, qtcb_config);
 	if (!retval)
 		*stat_inf = qtcb_config->stat_info;
 
@@ -492,15 +492,15 @@
 					      char *buf)
 {
 	struct Scsi_Host *scsi_host = class_to_shost(dev);
-	struct zfcp_adapter *adapter =
-		(struct zfcp_adapter *) scsi_host->hostdata[0];
+	struct zfcp_qdio *qdio =
+		((struct zfcp_adapter *) scsi_host->hostdata[0])->qdio;
 	u64 util;
 
-	spin_lock_bh(&adapter->qdio_stat_lock);
-	util = adapter->req_q_util;
-	spin_unlock_bh(&adapter->qdio_stat_lock);
+	spin_lock_bh(&qdio->stat_lock);
+	util = qdio->req_q_util;
+	spin_unlock_bh(&qdio->stat_lock);
 
-	return sprintf(buf, "%d %llu\n", atomic_read(&adapter->qdio_outb_full),
+	return sprintf(buf, "%d %llu\n", atomic_read(&qdio->req_q_full),
 		       (unsigned long long)util);
 }
 static DEVICE_ATTR(queue_full, S_IRUGO, zfcp_sysfs_adapter_q_full_show, NULL);
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9c23122..82bb3b2 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1811,6 +1811,12 @@
           called zfcp. If you want to compile it as a module, say M here
           and read <file:Documentation/kbuild/modules.txt>.
 
+config SCSI_PMCRAID
+	tristate "PMC SIERRA Linux MaxRAID adapter support"
+	depends on PCI && SCSI
+	---help---
+	  This driver supports the PMC SIERRA MaxRAID adapters.
+
 config SCSI_SRP
 	tristate "SCSI RDMA Protocol helper library"
 	depends on SCSI && PCI
diff --git a/drivers/scsi/Makefile b/drivers/scsi/Makefile
index 25429ea..61a94af 100644
--- a/drivers/scsi/Makefile
+++ b/drivers/scsi/Makefile
@@ -130,6 +130,7 @@
 obj-$(CONFIG_PS3_ROM)		+= ps3rom.o
 obj-$(CONFIG_SCSI_CXGB3_ISCSI)	+= libiscsi.o libiscsi_tcp.o cxgb3i/
 obj-$(CONFIG_SCSI_BNX2_ISCSI)	+= libiscsi.o bnx2i/
+obj-$(CONFIG_SCSI_PMCRAID)	+= pmcraid.o
 
 obj-$(CONFIG_ARM)		+= arm/
 
diff --git a/drivers/scsi/bnx2i/bnx2i_init.c b/drivers/scsi/bnx2i/bnx2i_init.c
index ae4b2d5..0c4210d 100644
--- a/drivers/scsi/bnx2i/bnx2i_init.c
+++ b/drivers/scsi/bnx2i/bnx2i_init.c
@@ -15,11 +15,10 @@
 
 static struct list_head adapter_list = LIST_HEAD_INIT(adapter_list);
 static u32 adapter_count;
-static int bnx2i_reg_device;
 
 #define DRV_MODULE_NAME		"bnx2i"
-#define DRV_MODULE_VERSION	"2.0.1d"
-#define DRV_MODULE_RELDATE	"Mar 25, 2009"
+#define DRV_MODULE_VERSION	"2.0.1e"
+#define DRV_MODULE_RELDATE	"June 22, 2009"
 
 static char version[] __devinitdata =
 		"Broadcom NetXtreme II iSCSI Driver " DRV_MODULE_NAME \
@@ -31,7 +30,7 @@
 MODULE_LICENSE("GPL");
 MODULE_VERSION(DRV_MODULE_VERSION);
 
-static DEFINE_RWLOCK(bnx2i_dev_lock);
+static DEFINE_MUTEX(bnx2i_dev_lock);
 
 unsigned int event_coal_div = 1;
 module_param(event_coal_div, int, 0664);
@@ -100,14 +99,14 @@
 	if (!adapter_count)
 		goto hba_not_found;
 
-	read_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	list_for_each_entry(tmp_hba, &adapter_list, link) {
 		if (tmp_hba->cnic && tmp_hba->cnic->cm_select_dev) {
 			hba = tmp_hba;
 			break;
 		}
 	}
-	read_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 hba_not_found:
 	return hba;
 }
@@ -122,14 +121,14 @@
 {
 	struct bnx2i_hba *hba, *temp;
 
-	read_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	list_for_each_entry_safe(hba, temp, &adapter_list, link) {
 		if (hba->cnic == cnic) {
-			read_unlock(&bnx2i_dev_lock);
+			mutex_unlock(&bnx2i_dev_lock);
 			return hba;
 		}
 	}
-	read_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 	return NULL;
 }
 
@@ -186,18 +185,17 @@
  */
 void bnx2i_register_device(struct bnx2i_hba *hba)
 {
+	int rc;
+
 	if (test_bit(ADAPTER_STATE_GOING_DOWN, &hba->adapter_state) ||
 	    test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
 		return;
 	}
 
-	hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
+	rc = hba->cnic->register_device(hba->cnic, CNIC_ULP_ISCSI, hba);
 
-	spin_lock(&hba->lock);
-	bnx2i_reg_device++;
-	spin_unlock(&hba->lock);
-
-	set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
+	if (!rc)
+		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
 }
 
 
@@ -211,10 +209,10 @@
 {
 	struct bnx2i_hba *hba, *temp;
 
-	read_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	list_for_each_entry_safe(hba, temp, &adapter_list, link)
 		bnx2i_register_device(hba);
-	read_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 }
 
 
@@ -234,10 +232,6 @@
 
 	hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
 
-	spin_lock(&hba->lock);
-	bnx2i_reg_device--;
-	spin_unlock(&hba->lock);
-
 	/* ep_disconnect could come before NETDEV_DOWN, driver won't
 	 * see NETDEV_DOWN as it already unregistered itself.
 	 */
@@ -255,10 +249,10 @@
 {
 	struct bnx2i_hba *hba, *temp;
 
-	read_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	list_for_each_entry_safe(hba, temp, &adapter_list, link)
 		bnx2i_unreg_one_device(hba);
-	read_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 }
 
 
@@ -267,35 +261,34 @@
  * @hba:	bnx2i adapter instance
  * @cnic:	cnic device handle
  *
- * Global resource lock and host adapter lock is held during critical sections
- *	below. This routine is called from cnic_register_driver() context and
- *	work horse thread which does majority of device specific initialization
+ * Global resource lock is held during critical sections below. This routine is
+ *	called from either cnic_register_driver() or device hot plug context and
+ *	does the majority of device-specific initialization
  */
 static int bnx2i_init_one(struct bnx2i_hba *hba, struct cnic_dev *cnic)
 {
 	int rc;
 
-	read_lock(&bnx2i_dev_lock);
-	if (bnx2i_reg_device &&
-	    !test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
-		rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
-		if (rc)		/* duplicate registration */
-			printk(KERN_ERR "bnx2i- dev reg failed\n");
-
-		spin_lock(&hba->lock);
-		bnx2i_reg_device++;
+	mutex_lock(&bnx2i_dev_lock);
+	rc = cnic->register_device(cnic, CNIC_ULP_ISCSI, hba);
+	if (!rc) {
 		hba->age++;
-		spin_unlock(&hba->lock);
-
 		set_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-	}
-	read_unlock(&bnx2i_dev_lock);
+		list_add_tail(&hba->link, &adapter_list);
+		adapter_count++;
+	} else if (rc == -EBUSY) 	/* duplicate registration */
+		printk(KERN_ALERT "bnx2i, duplicate registration "
+				  "hba=%p, cnic=%p\n", hba, cnic);
+	else if (rc == -EAGAIN)
+		printk(KERN_ERR "bnx2i, driver not registered\n");
+	else if (rc == -EINVAL)
+		printk(KERN_ERR "bnx2i, invalid type %d\n", CNIC_ULP_ISCSI);
+	else
+		printk(KERN_ERR "bnx2i dev reg, unknown error, %d\n", rc);
 
-	write_lock(&bnx2i_dev_lock);
-	list_add_tail(&hba->link, &adapter_list);
-	adapter_count++;
-	write_unlock(&bnx2i_dev_lock);
-	return 0;
+	mutex_unlock(&bnx2i_dev_lock);
+
+	return rc;
 }
 
 
@@ -343,19 +336,15 @@
 				 "found, dev 0x%p\n", dev);
 		return;
 	}
-	write_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	list_del_init(&hba->link);
 	adapter_count--;
 
 	if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
 		hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
 		clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-
-		spin_lock(&hba->lock);
-		bnx2i_reg_device--;
-		spin_unlock(&hba->lock);
 	}
-	write_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 
 	bnx2i_free_hba(hba);
 }
@@ -377,6 +366,8 @@
 	if (!is_power_of_2(sq_size))
 		sq_size = roundup_pow_of_two(sq_size);
 
+	mutex_init(&bnx2i_dev_lock);
+
 	bnx2i_scsi_xport_template =
 			iscsi_register_transport(&bnx2i_iscsi_transport);
 	if (!bnx2i_scsi_xport_template) {
@@ -412,7 +403,7 @@
 {
 	struct bnx2i_hba *hba;
 
-	write_lock(&bnx2i_dev_lock);
+	mutex_lock(&bnx2i_dev_lock);
 	while (!list_empty(&adapter_list)) {
 		hba = list_entry(adapter_list.next, struct bnx2i_hba, link);
 		list_del(&hba->link);
@@ -421,14 +412,11 @@
 		if (test_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic)) {
 			hba->cnic->unregister_device(hba->cnic, CNIC_ULP_ISCSI);
 			clear_bit(BNX2I_CNIC_REGISTERED, &hba->reg_with_cnic);
-			bnx2i_reg_device--;
 		}
 
-		write_unlock(&bnx2i_dev_lock);
 		bnx2i_free_hba(hba);
-		write_lock(&bnx2i_dev_lock);
 	}
-	write_unlock(&bnx2i_dev_lock);
+	mutex_unlock(&bnx2i_dev_lock);
 
 	iscsi_unregister_transport(&bnx2i_iscsi_transport);
 	cnic_unregister_driver(CNIC_ULP_ISCSI);
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index f741219..9a7ba71 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -387,6 +387,7 @@
 	bnx2i_ep = ep->dd_data;
 	INIT_LIST_HEAD(&bnx2i_ep->link);
 	bnx2i_ep->state = EP_STATE_IDLE;
+	bnx2i_ep->ep_iscsi_cid = (u16) -1;
 	bnx2i_ep->hba = hba;
 	bnx2i_ep->hba_age = hba->age;
 	hba->ofld_conns_active++;
@@ -1160,9 +1161,6 @@
 	struct bnx2i_cmd *cmd = task->dd_data;
 	struct iscsi_cmd *hdr = (struct iscsi_cmd *) task->hdr;
 
-	if (test_bit(ADAPTER_STATE_LINK_DOWN, &hba->adapter_state))
-		return -ENOTCONN;
-
 	if (!bnx2i_conn->is_bound)
 		return -ENOTCONN;
 
@@ -1653,15 +1651,18 @@
 	struct iscsi_endpoint *ep;
 	int rc = 0;
 
-	if (shost)
+	if (shost) {
 		/* driver is given scsi host to work with */
 		hba = iscsi_host_priv(shost);
-	else
+		/* Register the device with cnic if not already done so */
+		bnx2i_register_device(hba);
+	} else
 		/*
 		 * check if the given destination can be reached through
 		 * a iscsi capable NetXtreme2 device
 		 */
 		hba = bnx2i_check_route(dst_addr);
+
 	if (!hba) {
 		rc = -ENOMEM;
 		goto check_busy;
@@ -1681,8 +1682,6 @@
 		goto net_if_down;
 	}
 
-	bnx2i_ep->state = EP_STATE_IDLE;
-	bnx2i_ep->ep_iscsi_cid = (u16) -1;
 	bnx2i_ep->num_active_cmds = 0;
 	iscsi_cid = bnx2i_alloc_iscsi_cid(hba);
 	if (iscsi_cid == -1) {
diff --git a/drivers/scsi/ch.c b/drivers/scsi/ch.c
index 7b1633a..fe11c1d 100644
--- a/drivers/scsi/ch.c
+++ b/drivers/scsi/ch.c
@@ -353,6 +353,12 @@
 	/* look up the devices of the data transfer elements */
 	ch->dt = kmalloc(ch->counts[CHET_DT]*sizeof(struct scsi_device),
 			 GFP_KERNEL);
+
+	if (!ch->dt) {
+		kfree(buffer);
+		return -ENOMEM;
+	}
+
 	for (elem = 0; elem < ch->counts[CHET_DT]; elem++) {
 		id  = -1;
 		lun = 0;
diff --git a/drivers/scsi/constants.c b/drivers/scsi/constants.c
index e79e181..63abb06 100644
--- a/drivers/scsi/constants.c
+++ b/drivers/scsi/constants.c
@@ -4,8 +4,7 @@
  * Additions for SCSI 2 and Linux 2.2.x by D. Gilbert (990422)
  * Additions for SCSI 3+ (SPC-3 T10/1416-D Rev 07 3 May 2002)
  *   by D. Gilbert and aeb (20020609)
- * Additions for SPC-3 T10/1416-D Rev 21 22 Sept 2004, D. Gilbert 20041025
- * Update to SPC-4 T10/1713-D Rev 5a, 14 June 2006, D. Gilbert 20060702
+ * Update to SPC-4 T10/1713-D Rev 20, 22 May 2009, D. Gilbert 20090624
  */
 
 #include <linux/blkdev.h>
@@ -56,9 +55,9 @@
             "Read Buffer", 
 /* 3d-3f */ "Update Block", "Read Long(10)",  "Write Long(10)",
 /* 40-41 */ "Change Definition", "Write Same(10)",
-/* 42-48 */ "Read sub-channel", "Read TOC/PMA/ATIP", "Read density support",
-            "Play audio(10)", "Get configuration", "Play audio msf",
-            "Play audio track/index",
+/* 42-48 */ "Unmap/Read sub-channel", "Read TOC/PMA/ATIP",
+	    "Read density support", "Play audio(10)", "Get configuration",
+	    "Play audio msf", "Play audio track/index",
 /* 49-4f */ "Play track relative(10)", "Get event status notification",
             "Pause/resume", "Log Select", "Log Sense", "Stop play/scan",
             NULL,
@@ -71,12 +70,13 @@
 /* 60-67 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 /* 68-6f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
 /* 70-77 */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
-/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, NULL, "Variable length",
+/* 78-7f */ NULL, NULL, NULL, NULL, NULL, NULL, "Extended CDB",
+	    "Variable length",
 /* 80-84 */ "Xdwrite(16)", "Rebuild(16)", "Regenerate(16)", "Extended copy",
             "Receive copy results",
 /* 85-89 */ "ATA command pass through(16)", "Access control in",
 	    "Access control out", "Read(16)", "Memory Export Out(16)",
-/* 8a-8f */ "Write(16)", NULL, "Read attributes", "Write attributes",
+/* 8a-8f */ "Write(16)", "ORWrite", "Read attributes", "Write attributes",
             "Write and verify(16)", "Verify(16)",
 /* 90-94 */ "Pre-fetch(16)", "Synchronize cache(16)",
             "Lock/unlock cache(16)", "Write same(16)", NULL,
@@ -107,22 +107,24 @@
 };
 
 static const struct value_name_pair maint_in_arr[] = {
-	{0x5, "Report device identifier"},
+	{0x5, "Report identifying information"},
 	{0xa, "Report target port groups"},
 	{0xb, "Report aliases"},
 	{0xc, "Report supported operation codes"},
 	{0xd, "Report supported task management functions"},
 	{0xe, "Report priority"},
 	{0xf, "Report timestamp"},
+	{0x10, "Management protocol in"},
 };
 #define MAINT_IN_SZ ARRAY_SIZE(maint_in_arr)
 
 static const struct value_name_pair maint_out_arr[] = {
-	{0x6, "Set device identifier"},
+	{0x6, "Set identifying information"},
 	{0xa, "Set target port groups"},
 	{0xb, "Change aliases"},
 	{0xe, "Set priority"},
-	{0xe, "Set timestamp"},
+	{0xf, "Set timestamp"},
+	{0x10, "Management protocol out"},
 };
 #define MAINT_OUT_SZ ARRAY_SIZE(maint_out_arr)
 
@@ -412,6 +414,7 @@
 	{0x0004, "Beginning-of-partition/medium detected"},
 	{0x0005, "End-of-data detected"},
 	{0x0006, "I/O process terminated"},
+	{0x0007, "Programmable early warning detected"},
 	{0x0011, "Audio play operation in progress"},
 	{0x0012, "Audio play operation paused"},
 	{0x0013, "Audio play operation successfully completed"},
@@ -425,6 +428,7 @@
 	{0x001B, "Set capacity operation in progress"},
 	{0x001C, "Verify operation in progress"},
 	{0x001D, "ATA pass through information available"},
+	{0x001E, "Conflicting SA creation request"},
 
 	{0x0100, "No index/sector signal"},
 
@@ -449,9 +453,12 @@
 	{0x040B, "Logical unit not accessible, target port in standby state"},
 	{0x040C, "Logical unit not accessible, target port in unavailable "
 	 "state"},
+	{0x040D, "Logical unit not ready, structure check required"},
 	{0x0410, "Logical unit not ready, auxiliary memory not accessible"},
 	{0x0411, "Logical unit not ready, notify (enable spinup) required"},
 	{0x0412, "Logical unit not ready, offline"},
+	{0x0413, "Logical unit not ready, SA creation in progress"},
+	{0x0414, "Logical unit not ready, space allocation in progress"},
 
 	{0x0500, "Logical unit does not respond to selection"},
 
@@ -479,6 +486,9 @@
 	{0x0B03, "Warning - background self-test failed"},
 	{0x0B04, "Warning - background pre-scan detected medium error"},
 	{0x0B05, "Warning - background medium scan detected medium error"},
+	{0x0B06, "Warning - non-volatile cache now volatile"},
+	{0x0B07, "Warning - degraded power to non-volatile cache"},
+	{0x0B08, "Warning - power loss expected"},
 
 	{0x0C00, "Write error"},
 	{0x0C01, "Write error - recovered with auto reallocation"},
@@ -593,6 +603,7 @@
 	{0x1C02, "Grown defect list not found"},
 
 	{0x1D00, "Miscompare during verify operation"},
+	{0x1D01, "Miscompare verify of unmapped LBA"},
 
 	{0x1E00, "Recovered id with ECC correction"},
 
@@ -626,6 +637,7 @@
 	{0x2405, "Security working key frozen"},
 	{0x2406, "Nonce not unique"},
 	{0x2407, "Nonce timestamp out of range"},
+	{0x2408, "Invalid XCDB"},
 
 	{0x2500, "Logical unit not supported"},
 
@@ -656,10 +668,12 @@
 	{0x2704, "Persistent write protect"},
 	{0x2705, "Permanent write protect"},
 	{0x2706, "Conditional write protect"},
+	{0x2707, "Space allocation failed write protect"},
 
 	{0x2800, "Not ready to ready change, medium may have changed"},
 	{0x2801, "Import or export element accessed"},
 	{0x2802, "Format-layer may have changed"},
+	{0x2803, "Import/export element accessed, medium changed"},
 
 	{0x2900, "Power on, reset, or bus device reset occurred"},
 	{0x2901, "Power on occurred"},
@@ -680,11 +694,16 @@
 	{0x2A07, "Implicit asymmetric access state transition failed"},
 	{0x2A08, "Priority changed"},
 	{0x2A09, "Capacity data has changed"},
+	{0x2A0A, "Error history I_T nexus cleared"},
+	{0x2A0B, "Error history snapshot released"},
+	{0x2A0C, "Error recovery attributes have changed"},
+	{0x2A0D, "Data encryption capabilities changed"},
 	{0x2A10, "Timestamp changed"},
 	{0x2A11, "Data encryption parameters changed by another i_t nexus"},
 	{0x2A12, "Data encryption parameters changed by vendor specific "
 		 "event"},
 	{0x2A13, "Data encryption key instance counter has changed"},
+	{0x2A14, "SA creation capabilities data has changed"},
 
 	{0x2B00, "Copy cannot execute since host cannot disconnect"},
 
@@ -723,6 +742,8 @@
 	{0x300C, "WORM medium - overwrite attempted"},
 	{0x300D, "WORM medium - integrity check"},
 	{0x3010, "Medium not formatted"},
+	{0x3011, "Incompatible volume type"},
+	{0x3012, "Incompatible volume qualifier"},
 
 	{0x3100, "Medium format corrupted"},
 	{0x3101, "Format command failed"},
@@ -782,6 +803,10 @@
 	{0x3B15, "Medium magazine unlocked"},
 	{0x3B16, "Mechanical positioning or changer error"},
 	{0x3B17, "Read past end of user object"},
+	{0x3B18, "Element disabled"},
+	{0x3B19, "Element enabled"},
+	{0x3B1A, "Data transfer device removed"},
+	{0x3B1B, "Data transfer device inserted"},
 
 	{0x3D00, "Invalid bits in identify message"},
 
@@ -882,6 +907,8 @@
 	{0x5506, "Auxiliary memory out of space"},
 	{0x5507, "Quota error"},
 	{0x5508, "Maximum number of supplemental decryption keys exceeded"},
+	{0x5509, "Medium auxiliary memory not accessible"},
+	{0x550A, "Data currently unavailable"},
 
 	{0x5700, "Unable to recover table-of-contents"},
 
@@ -993,6 +1020,12 @@
 	{0x5E02, "Standby condition activated by timer"},
 	{0x5E03, "Idle condition activated by command"},
 	{0x5E04, "Standby condition activated by command"},
+	{0x5E05, "Idle_b condition activated by timer"},
+	{0x5E06, "Idle_b condition activated by command"},
+	{0x5E07, "Idle_c condition activated by timer"},
+	{0x5E08, "Idle_c condition activated by command"},
+	{0x5E09, "Standby_y condition activated by timer"},
+	{0x5E0A, "Standby_y condition activated by command"},
 	{0x5E41, "Power state change to active"},
 	{0x5E42, "Power state change to idle"},
 	{0x5E43, "Power state change to standby"},
@@ -1091,7 +1124,28 @@
 	{0x7403, "Incorrect data encryption key"},
 	{0x7404, "Cryptographic integrity validation failed"},
 	{0x7405, "Error decrypting data"},
+	{0x7406, "Unknown signature verification key"},
+	{0x7407, "Encryption parameters not useable"},
+	{0x7408, "Digital signature validation failure"},
+	{0x7409, "Encryption mode mismatch on read"},
+	{0x740A, "Encrypted block not raw read enabled"},
+	{0x740B, "Incorrect Encryption parameters"},
+	{0x740C, "Unable to decrypt parameter list"},
+	{0x740D, "Encryption algorithm disabled"},
+	{0x7410, "SA creation parameter value invalid"},
+	{0x7411, "SA creation parameter value rejected"},
+	{0x7412, "Invalid SA usage"},
+	{0x7421, "Data Encryption configuration prevented"},
+	{0x7430, "SA creation parameter not supported"},
+	{0x7440, "Authentication failed"},
+	{0x7461, "External data encryption key manager access error"},
+	{0x7462, "External data encryption key manager error"},
+	{0x7463, "External data encryption key not found"},
+	{0x7464, "External data encryption request not authorized"},
+	{0x746E, "External data encryption control timeout"},
+	{0x746F, "External data encryption control error"},
 	{0x7471, "Logical unit access not authorized"},
+	{0x7479, "Security conflict in translated device"},
 
 	{0, NULL}
 };
@@ -1103,12 +1157,12 @@
 
 static const struct error_info2 additional2[] =
 {
-	{0x40,0x00,0x7f,"Ram failure (%x)"},
-	{0x40,0x80,0xff,"Diagnostic failure on component (%x)"},
-	{0x41,0x00,0xff,"Data path failure (%x)"},
-	{0x42,0x00,0xff,"Power-on or self-test failure (%x)"},
-	{0x4D,0x00,0xff,"Tagged overlapped commands (queue tag %x)"},
-	{0x70,0x00,0xff,"Decompression exception short algorithm id of %x"},
+	{0x40, 0x00, 0x7f, "Ram failure (%x)"},
+	{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
+	{0x41, 0x00, 0xff, "Data path failure (%x)"},
+	{0x42, 0x00, 0xff, "Power-on or self-test failure (%x)"},
+	{0x4D, 0x00, 0xff, "Tagged overlapped commands (task tag %x)"},
+	{0x70, 0x00, 0xff, "Decompression exception short algorithm id of %x"},
 	{0, 0, 0, NULL}
 };
 
@@ -1157,14 +1211,15 @@
 	int i;
 	unsigned short code = ((asc << 8) | ascq);
 
-	for (i=0; additional[i].text; i++)
+	for (i = 0; additional[i].text; i++)
 		if (additional[i].code12 == code)
 			return additional[i].text;
-	for (i=0; additional2[i].fmt; i++)
+	for (i = 0; additional2[i].fmt; i++) {
 		if (additional2[i].code1 == asc &&
-		    additional2[i].code2_min >= ascq &&
-		    additional2[i].code2_max <= ascq)
+		    ascq >= additional2[i].code2_min &&
+		    ascq <= additional2[i].code2_max)
 			return additional2[i].fmt;
+	}
 #endif
 	return NULL;
 }
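
For reference, a minimal userspace sketch of the corrected range lookup above: the old test compared code2_min/code2_max against ascq backwards, so an entry describing a real ASCQ range could never match. The table entry and helper below are illustrative only, not the kernel's scsi_extd_sense_format().

#include <stdio.h>

struct error_info2 {
	unsigned char code1, code2_min, code2_max;
	const char *fmt;
};

static const struct error_info2 additional2[] = {
	{0x40, 0x80, 0xff, "Diagnostic failure on component (%x)"},
	{0, 0, 0, NULL}
};

static const char *extd_sense_format(unsigned char asc, unsigned char ascq)
{
	int i;

	for (i = 0; additional2[i].fmt; i++) {
		if (additional2[i].code1 == asc &&
		    ascq >= additional2[i].code2_min &&	/* corrected direction */
		    ascq <= additional2[i].code2_max)
			return additional2[i].fmt;
	}
	return NULL;
}

int main(void)
{
	const char *fmt = extd_sense_format(0x40, 0x85);

	if (fmt) {
		printf(fmt, 0x85);	/* Diagnostic failure on component (85) */
		putchar('\n');
	}
	return 0;
}

With the pre-fix comparison this lookup returns NULL for 0x40/0x85 even though the entry covers ASCQ 0x80-0xFF.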
diff --git a/drivers/scsi/device_handler/scsi_dh.c b/drivers/scsi/device_handler/scsi_dh.c
index a518f2e..3ee1cbc 100644
--- a/drivers/scsi/device_handler/scsi_dh.c
+++ b/drivers/scsi/device_handler/scsi_dh.c
@@ -153,12 +153,24 @@
 	if (sdev->scsi_dh_data) {
 		if (sdev->scsi_dh_data->scsi_dh != scsi_dh)
 			err = -EBUSY;
-	} else if (scsi_dh->attach)
+		else
+			kref_get(&sdev->scsi_dh_data->kref);
+	} else if (scsi_dh->attach) {
 		err = scsi_dh->attach(sdev);
-
+		if (!err) {
+			kref_init(&sdev->scsi_dh_data->kref);
+			sdev->scsi_dh_data->sdev = sdev;
+		}
+	}
 	return err;
 }
 
+static void __detach_handler(struct kref *kref)
+{
+	struct scsi_dh_data *scsi_dh_data = container_of(kref, struct scsi_dh_data, kref);
+	scsi_dh_data->scsi_dh->detach(scsi_dh_data->sdev);
+}
+
 /*
  * scsi_dh_handler_detach - Detach a device handler from a device
  * @sdev - SCSI device the device handler should be detached from
@@ -180,7 +192,7 @@
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
 
 	if (scsi_dh && scsi_dh->detach)
-		scsi_dh->detach(sdev);
+		kref_put(&sdev->scsi_dh_data->kref, __detach_handler);
 }
 
 /*
@@ -440,6 +452,39 @@
 EXPORT_SYMBOL_GPL(scsi_dh_activate);
 
 /*
+ * scsi_dh_set_params - set the parameters for the device as per the
+ *      string specified in params.
+ * @q - Request queue that is associated with the scsi_device for
+ *      which the parameters are to be set.
+ * @params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ *      for example, string for 2 parameters with value 10 and 21
+ *      is specified as "2\010\021\0".
+ */
+int scsi_dh_set_params(struct request_queue *q, const char *params)
+{
+	int err = -SCSI_DH_NOSYS;
+	unsigned long flags;
+	struct scsi_device *sdev;
+	struct scsi_device_handler *scsi_dh = NULL;
+
+	spin_lock_irqsave(q->queue_lock, flags);
+	sdev = q->queuedata;
+	if (sdev && sdev->scsi_dh_data)
+		scsi_dh = sdev->scsi_dh_data->scsi_dh;
+	if (scsi_dh && scsi_dh->set_params && get_device(&sdev->sdev_gendev))
+		err = 0;
+	spin_unlock_irqrestore(q->queue_lock, flags);
+
+	if (err)
+		return err;
+	err = scsi_dh->set_params(sdev, params);
+	put_device(&sdev->sdev_gendev);
+	return err;
+}
+EXPORT_SYMBOL_GPL(scsi_dh_set_params);
+
+/*
  * scsi_dh_handler_exist - Return TRUE(1) if a device handler exists for
  *	the given name. FALSE(0) otherwise.
  * @name - name of the device handler.
@@ -474,7 +519,6 @@
 
 	if (!err) {
 		err = scsi_dh_handler_attach(sdev, scsi_dh);
-
 		put_device(&sdev->sdev_gendev);
 	}
 	return err;
@@ -505,10 +549,8 @@
 		return;
 
 	if (sdev->scsi_dh_data) {
-		/* if sdev is not on internal list, detach */
 		scsi_dh = sdev->scsi_dh_data->scsi_dh;
-		if (!device_handler_match(scsi_dh, sdev))
-			scsi_dh_handler_detach(sdev, scsi_dh);
+		scsi_dh_handler_detach(sdev, scsi_dh);
 	}
 	put_device(&sdev->sdev_gendev);
 }
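
The scsi_dh_set_params() interface added above takes a NUL-separated parameter string; the "2\010\021\0" in its comment is meant to be read as the fields "2", "10" and "21", each NUL-terminated. A small userspace sketch of how a hypothetical caller could build and walk such a buffer (illustrative only; this is not dm-multipath or kernel code):

#include <stdio.h>

/*
 * "2" params, values 10 and 21, each field NUL-terminated.  Adjacent C
 * string literals are concatenated after escape processing, so this is
 * the byte sequence '2',0,'1','0',0,'2','1',0 (plus the trailing NUL).
 */
static const char params[] = "2\0" "10\0" "21\0";

int main(void)
{
	const char *p = params;
	unsigned int argc, v, i;

	if (sscanf(p, "%u", &argc) != 1)
		return 1;
	for (i = 0; i < argc; i++) {
		while (*p++)		/* skip to the byte after the next NUL */
			;
		if (sscanf(p, "%u", &v) != 1)
			return 1;
		printf("param%u = %u\n", i + 1, v);
	}
	return 0;
}

The kernel-side parsers (for example clariion_set_params() in the EMC handler below) walk the buffer the same way, skipping past each NUL with while (*p++); before the next sscanf().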
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index dba154c..b5cdefa 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -663,7 +663,7 @@
 			goto out;
 	}
 
-	if (h->tpgs == TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
+	if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED)
 		err = alua_stpg(sdev, TPGS_STATE_OPTIMIZED, h);
 
 out:
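
The one-character change above matters because h->tpgs is a bit mask: a target supporting both implicit and explicit ALUA sets both mode bits, and the old equality test would then skip the explicit STPG. A standalone sketch of the difference, with the usual 0x1/0x2 bit values assumed purely for illustration:

#include <stdio.h>

#define TPGS_MODE_IMPLICIT	0x1
#define TPGS_MODE_EXPLICIT	0x2

int main(void)
{
	int tpgs = TPGS_MODE_IMPLICIT | TPGS_MODE_EXPLICIT;	/* "both" mode */

	/* old check: only pure-explicit targets would issue the STPG */
	printf("== test: %s\n",
	       (tpgs == TPGS_MODE_EXPLICIT) ? "would send STPG" : "skipped");

	/* new check: any target advertising explicit ALUA issues the STPG */
	printf("&  test: %s\n",
	       (tpgs & TPGS_MODE_EXPLICIT) ? "would send STPG" : "skipped");
	return 0;
}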
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 0e572d2..0cffe84 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -561,6 +561,61 @@
 
 	return result;
 }
+/*
+ * params - parameters in the following format
+ *      "no_of_params\0param1\0param2\0param3\0...\0"
+ *      for example, string for 2 parameters with value 10 and 21
+ *      is specified as "2\010\021\0".
+ */
+static int clariion_set_params(struct scsi_device *sdev, const char *params)
+{
+	struct clariion_dh_data *csdev = get_clariion_data(sdev);
+	unsigned int hr = 0, st = 0, argc;
+	const char *p = params;
+	int result = SCSI_DH_OK;
+
+	if ((sscanf(params, "%u", &argc) != 1) || (argc != 2))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &st) != 1) || (st > 1))
+		return -EINVAL;
+
+	while (*p++)
+		;
+	if ((sscanf(p, "%u", &hr) != 1) || (hr > 1))
+		return -EINVAL;
+
+	if (st)
+		csdev->flags |= CLARIION_SHORT_TRESPASS;
+	else
+		csdev->flags &= ~CLARIION_SHORT_TRESPASS;
+
+	if (hr)
+		csdev->flags |= CLARIION_HONOR_RESERVATIONS;
+	else
+		csdev->flags &= ~CLARIION_HONOR_RESERVATIONS;
+
+	/*
+	 * If this path is owned, we have to send a trespass command
+	 * with the new parameters. If not, simply return. Next trespass
+	 * command would use the parameters.
+	 */
+	if (csdev->lun_state != CLARIION_LUN_OWNED)
+		goto done;
+
+	csdev->lun_state = CLARIION_LUN_UNINITIALIZED;
+	result = send_trespass_cmd(sdev, csdev);
+	if (result != SCSI_DH_OK)
+		goto done;
+
+	/* Update status */
+	result = clariion_send_inquiry(sdev, csdev);
+
+done:
+	return result;
+}
 
 static const struct scsi_dh_devlist clariion_dev_list[] = {
 	{"DGC", "RAID"},
@@ -581,11 +636,9 @@
 	.check_sense	= clariion_check_sense,
 	.activate	= clariion_activate,
 	.prep_fn	= clariion_prep_fn,
+	.set_params	= clariion_set_params,
 };
 
-/*
- * TODO: need some interface so we can set trespass values
- */
 static int clariion_bus_attach(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data;
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index fd0544f..11c8931 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -112,6 +112,7 @@
 
 #define SUBSYS_ID_LEN	16
 #define SLOT_ID_LEN	2
+#define ARRAY_LABEL_LEN	31
 
 struct c4_inquiry {
 	u8	peripheral_info;
@@ -135,6 +136,8 @@
 		struct rdac_pg_legacy legacy;
 		struct rdac_pg_expanded expanded;
 	} mode_select;
+	u8	index;
+	u8	array_name[ARRAY_LABEL_LEN];
 };
 struct c8_inquiry {
 	u8	peripheral_info;
@@ -198,6 +201,31 @@
 static LIST_HEAD(ctlr_list);
 static DEFINE_SPINLOCK(list_lock);
 
+/*
+ * module parameter to enable rdac debug logging.
+ * 2 bits for each type of logging, only two types defined for now
+ * Can be enhanced if required at later point
+ */
+static int rdac_logging = 1;
+module_param(rdac_logging, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(rdac_logging, "A bit mask of rdac logging levels, "
+		"Default is 1 - failover logging enabled, "
+		"set it to 0xF to enable all the logs");
+
+#define RDAC_LOG_FAILOVER	0
+#define RDAC_LOG_SENSE		2
+
+#define RDAC_LOG_BITS		2
+
+#define RDAC_LOG_LEVEL(SHIFT)  \
+	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))
+
+#define RDAC_LOG(SHIFT, sdev, f, arg...) \
+do { \
+	if (unlikely(RDAC_LOG_LEVEL(SHIFT))) \
+		sdev_printk(KERN_INFO, sdev, RDAC_NAME ": " f "\n", ## arg); \
+} while (0)
+
 static inline struct rdac_dh_data *get_rdac_data(struct scsi_device *sdev)
 {
 	struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data;
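
The rdac_logging module parameter above packs one two-bit level per message type (failover logging at shift 0, sense logging at shift 2), so extracting a level is a shift and mask. A userspace sketch of the same arithmetic, with values chosen only for illustration:

#include <stdio.h>

#define RDAC_LOG_FAILOVER	0
#define RDAC_LOG_SENSE		2
#define RDAC_LOG_BITS		2

static int rdac_logging = 0xF;	/* e.g. "enable all the logs" */

#define RDAC_LOG_LEVEL(SHIFT) \
	((rdac_logging >> (SHIFT)) & ((1 << (RDAC_LOG_BITS)) - 1))

int main(void)
{
	printf("failover level = %d\n", RDAC_LOG_LEVEL(RDAC_LOG_FAILOVER));
	printf("sense    level = %d\n", RDAC_LOG_LEVEL(RDAC_LOG_SENSE));

	rdac_logging = 1;	/* the default: only failover logging enabled */
	printf("default: failover=%d sense=%d\n",
	       RDAC_LOG_LEVEL(RDAC_LOG_FAILOVER),
	       RDAC_LOG_LEVEL(RDAC_LOG_SENSE));
	return 0;
}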
@@ -303,7 +331,8 @@
 	kfree(ctlr);
 }
 
-static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id)
+static struct rdac_controller *get_controller(u8 *subsys_id, u8 *slot_id,
+						char *array_name)
 {
 	struct rdac_controller *ctlr, *tmp;
 
@@ -324,6 +353,14 @@
 	/* initialize fields of controller */
 	memcpy(ctlr->subsys_id, subsys_id, SUBSYS_ID_LEN);
 	memcpy(ctlr->slot_id, slot_id, SLOT_ID_LEN);
+	memcpy(ctlr->array_name, array_name, ARRAY_LABEL_LEN);
+
+	/* update the controller index */
+	if (slot_id[1] == 0x31)
+		ctlr->index = 0;
+	else
+		ctlr->index = 1;
+
 	kref_init(&ctlr->kref);
 	ctlr->use_ms10 = -1;
 	list_add(&ctlr->node, &ctlr_list);
@@ -363,9 +400,10 @@
 	return err;
 }
 
-static int get_lun(struct scsi_device *sdev, struct rdac_dh_data *h)
+static int get_lun_info(struct scsi_device *sdev, struct rdac_dh_data *h,
+			char *array_name)
 {
-	int err;
+	int err, i;
 	struct c8_inquiry *inqp;
 
 	err = submit_inquiry(sdev, 0xC8, sizeof(struct c8_inquiry), h);
@@ -377,6 +415,11 @@
 		    inqp->page_id[2] != 'i' || inqp->page_id[3] != 'd')
 			return SCSI_DH_NOSYS;
 		h->lun = inqp->lun[7]; /* Uses only the last byte */
+
+		for (i = 0; i < ARRAY_LABEL_LEN - 1; ++i)
+			array_name[i] = inqp->array_user_label[(2 * i) + 1];
+
+		array_name[ARRAY_LABEL_LEN - 1] = '\0';
 	}
 	return err;
 }
@@ -410,7 +453,7 @@
 }
 
 static int initialize_controller(struct scsi_device *sdev,
-				 struct rdac_dh_data *h)
+				 struct rdac_dh_data *h, char *array_name)
 {
 	int err;
 	struct c4_inquiry *inqp;
@@ -418,7 +461,8 @@
 	err = submit_inquiry(sdev, 0xC4, sizeof(struct c4_inquiry), h);
 	if (err == SCSI_DH_OK) {
 		inqp = &h->inq.c4;
-		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id);
+		h->ctlr = get_controller(inqp->subsys_id, inqp->slot_id,
+					array_name);
 		if (!h->ctlr)
 			err = SCSI_DH_RES_TEMP_UNAVAIL;
 	}
@@ -450,6 +494,7 @@
 {
 	struct scsi_sense_hdr sense_hdr;
 	int err = SCSI_DH_IO, ret;
+	struct rdac_dh_data *h = get_rdac_data(sdev);
 
 	ret = scsi_normalize_sense(sensebuf, SCSI_SENSE_BUFFERSIZE, &sense_hdr);
 	if (!ret)
@@ -478,11 +523,14 @@
 			err = SCSI_DH_RETRY;
 		break;
 	default:
-		sdev_printk(KERN_INFO, sdev,
-			    "MODE_SELECT failed with sense %02x/%02x/%02x.\n",
-			    sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+		break;
 	}
 
+	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+		"MODE_SELECT returned with sense %02x/%02x/%02x",
+		(char *) h->ctlr->array_name, h->ctlr->index,
+		sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq);
+
 done:
 	return err;
 }
@@ -499,7 +547,9 @@
 	if (!rq)
 		goto done;
 
-	sdev_printk(KERN_INFO, sdev, "%s MODE_SELECT command.\n",
+	RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+		"%s MODE_SELECT command",
+		(char *) h->ctlr->array_name, h->ctlr->index,
 		(retry_cnt == RDAC_RETRY_COUNT) ? "queueing" : "retrying");
 
 	err = blk_execute_rq(q, NULL, rq, 1);
@@ -509,8 +559,12 @@
 		if (err == SCSI_DH_RETRY && retry_cnt--)
 			goto retry;
 	}
-	if (err == SCSI_DH_OK)
+	if (err == SCSI_DH_OK) {
 		h->state = RDAC_STATE_ACTIVE;
+		RDAC_LOG(RDAC_LOG_FAILOVER, sdev, "array %s, ctlr %d, "
+				"MODE_SELECT completed",
+				(char *) h->ctlr->array_name, h->ctlr->index);
+	}
 
 done:
 	return err;
@@ -525,17 +579,6 @@
 	if (err != SCSI_DH_OK)
 		goto done;
 
-	if (!h->ctlr) {
-		err = initialize_controller(sdev, h);
-		if (err != SCSI_DH_OK)
-			goto done;
-	}
-
-	if (h->ctlr->use_ms10 == -1) {
-		err = set_mode_select(sdev, h);
-		if (err != SCSI_DH_OK)
-			goto done;
-	}
 	if (h->lun_state == RDAC_LUN_UNOWNED)
 		err = send_mode_select(sdev, h);
 done:
@@ -559,6 +602,12 @@
 				struct scsi_sense_hdr *sense_hdr)
 {
 	struct rdac_dh_data *h = get_rdac_data(sdev);
+
+	RDAC_LOG(RDAC_LOG_SENSE, sdev, "array %s, ctlr %d, "
+			"I/O returned with sense %02x/%02x/%02x",
+			(char *) h->ctlr->array_name, h->ctlr->index,
+			sense_hdr->sense_key, sense_hdr->asc, sense_hdr->ascq);
+
 	switch (sense_hdr->sense_key) {
 	case NOT_READY:
 		if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x01)
@@ -628,11 +677,18 @@
 	{"SGI", "IS"},
 	{"STK", "OPENstorage D280"},
 	{"SUN", "CSM200_R"},
+	{"SUN", "LCSM100_I"},
+	{"SUN", "LCSM100_S"},
+	{"SUN", "LCSM100_E"},
 	{"SUN", "LCSM100_F"},
 	{"DELL", "MD3000"},
 	{"DELL", "MD3000i"},
+	{"DELL", "MD32xx"},
+	{"DELL", "MD32xxi"},
 	{"LSI", "INF-01-00"},
 	{"ENGENIO", "INF-01-00"},
+	{"STK", "FLEXLINE 380"},
+	{"SUN", "CSM100_R_FC"},
 	{NULL, NULL},
 };
 
@@ -656,6 +712,7 @@
 	struct rdac_dh_data *h;
 	unsigned long flags;
 	int err;
+	char array_name[ARRAY_LABEL_LEN];
 
 	scsi_dh_data = kzalloc(sizeof(struct scsi_device_handler *)
 			       + sizeof(*h) , GFP_KERNEL);
@@ -670,16 +727,24 @@
 	h->lun = UNINITIALIZED_LUN;
 	h->state = RDAC_STATE_ACTIVE;
 
-	err = get_lun(sdev, h);
+	err = get_lun_info(sdev, h, array_name);
+	if (err != SCSI_DH_OK)
+		goto failed;
+
+	err = initialize_controller(sdev, h, array_name);
 	if (err != SCSI_DH_OK)
 		goto failed;
 
 	err = check_ownership(sdev, h);
 	if (err != SCSI_DH_OK)
-		goto failed;
+		goto clean_ctlr;
+
+	err = set_mode_select(sdev, h);
+	if (err != SCSI_DH_OK)
+		goto clean_ctlr;
 
 	if (!try_module_get(THIS_MODULE))
-		goto failed;
+		goto clean_ctlr;
 
 	spin_lock_irqsave(sdev->request_queue->queue_lock, flags);
 	sdev->scsi_dh_data = scsi_dh_data;
@@ -691,6 +756,9 @@
 
 	return 0;
 
+clean_ctlr:
+	kref_put(&h->ctlr->kref, release_controller);
+
 failed:
 	kfree(scsi_dh_data);
 	sdev_printk(KERN_ERR, sdev, "%s: not attached\n",
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 0a5609b..704b8e0 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -1,5 +1,5 @@
 /*
- * Copyright(c) 2007 - 2008 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify it
  * under the terms and conditions of the GNU General Public License,
@@ -49,9 +49,20 @@
 MODULE_DESCRIPTION("FCoE");
 MODULE_LICENSE("GPL v2");
 
+/* Performance tuning parameters for fcoe */
+static unsigned int fcoe_ddp_min;
+module_param_named(ddp_min, fcoe_ddp_min, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(ddp_min, "Minimum I/O size in bytes for "	\
+		 "Direct Data Placement (DDP).");
+
+DEFINE_MUTEX(fcoe_config_mutex);
+
+/* fcoe_percpu_clean completion.  Waiter protected by fcoe_config_mutex */
+static DECLARE_COMPLETION(fcoe_flush_completion);
+
 /* fcoe host list */
+/* must only be accessed under the RTNL mutex */
 LIST_HEAD(fcoe_hostlist);
-DEFINE_RWLOCK(fcoe_hostlist_lock);
 DEFINE_PER_CPU(struct fcoe_percpu_s, fcoe_percpu);
 
 /* Function Prototypes */
@@ -66,12 +77,13 @@
 
 static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *);
 static int fcoe_hostlist_add(const struct fc_lport *);
-static int fcoe_hostlist_remove(const struct fc_lport *);
 
 static void fcoe_check_wait_queue(struct fc_lport *, struct sk_buff *);
 static int fcoe_device_notification(struct notifier_block *, ulong, void *);
 static void fcoe_dev_setup(void);
 static void fcoe_dev_cleanup(void);
+static struct fcoe_interface *
+	fcoe_hostlist_lookup_port(const struct net_device *dev);
 
 /* notification function from net device */
 static struct notifier_block fcoe_notifier = {
@@ -132,6 +144,180 @@
 	.max_sectors = 0xffff,
 };
 
+static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *dev,
+			 struct packet_type *ptype,
+			 struct net_device *orig_dev);
+/**
+ * fcoe_interface_setup()
+ * @fcoe: new fcoe_interface
+ * @netdev : ptr to the associated netdevice struct
+ *
+ * Returns : 0 for success
+ * Locking: must be called with the RTNL mutex held
+ */
+static int fcoe_interface_setup(struct fcoe_interface *fcoe,
+				struct net_device *netdev)
+{
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	struct netdev_hw_addr *ha;
+	u8 flogi_maddr[ETH_ALEN];
+
+	fcoe->netdev = netdev;
+
+	/* bonding interfaces are not supported */
+	if ((netdev->priv_flags & IFF_MASTER_ALB) ||
+	    (netdev->priv_flags & IFF_SLAVE_INACTIVE) ||
+	    (netdev->priv_flags & IFF_MASTER_8023AD)) {
+		return -EOPNOTSUPP;
+	}
+
+	/* look for SAN MAC address, if multiple SAN MACs exist, only
+	 * use the first one for SPMA */
+	rcu_read_lock();
+	for_each_dev_addr(netdev, ha) {
+		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
+		    (is_valid_ether_addr(fip->ctl_src_addr))) {
+			memcpy(fip->ctl_src_addr, ha->addr, ETH_ALEN);
+			fip->spma = 1;
+			break;
+		}
+	}
+	rcu_read_unlock();
+
+	/* setup Source Mac Address */
+	if (!fip->spma)
+		memcpy(fip->ctl_src_addr, netdev->dev_addr, netdev->addr_len);
+
+	/*
+	 * Add FCoE MAC address as second unicast MAC address
+	 * or enter promiscuous mode if not capable of listening
+	 * for multiple unicast MACs.
+	 */
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_add(netdev, flogi_maddr);
+	if (fip->spma)
+		dev_unicast_add(netdev, fip->ctl_src_addr);
+	dev_mc_add(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+
+	/*
+	 * setup the receive function from ethernet driver
+	 * on the ethertype for the given device
+	 */
+	fcoe->fcoe_packet_type.func = fcoe_rcv;
+	fcoe->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
+	fcoe->fcoe_packet_type.dev = netdev;
+	dev_add_pack(&fcoe->fcoe_packet_type);
+
+	fcoe->fip_packet_type.func = fcoe_fip_recv;
+	fcoe->fip_packet_type.type = htons(ETH_P_FIP);
+	fcoe->fip_packet_type.dev = netdev;
+	dev_add_pack(&fcoe->fip_packet_type);
+
+	return 0;
+}
+
+static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb);
+static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new);
+static void fcoe_destroy_work(struct work_struct *work);
+
+/**
+ * fcoe_interface_create()
+ * @netdev: network interface
+ *
+ * Returns: pointer to a struct fcoe_interface or NULL on error
+ */
+static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev)
+{
+	struct fcoe_interface *fcoe;
+
+	fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL);
+	if (!fcoe) {
+		FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n");
+		return NULL;
+	}
+
+	dev_hold(netdev);
+	kref_init(&fcoe->kref);
+
+	/*
+	 * Initialize FIP.
+	 */
+	fcoe_ctlr_init(&fcoe->ctlr);
+	fcoe->ctlr.send = fcoe_fip_send;
+	fcoe->ctlr.update_mac = fcoe_update_src_mac;
+
+	fcoe_interface_setup(fcoe, netdev);
+
+	return fcoe;
+}
+
+/**
+ * fcoe_interface_cleanup() - clean up netdev configurations
+ * @fcoe: the fcoe_interface to clean up
+ *
+ * Caller must be holding the RTNL mutex
+ */
+void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
+{
+	struct net_device *netdev = fcoe->netdev;
+	struct fcoe_ctlr *fip = &fcoe->ctlr;
+	u8 flogi_maddr[ETH_ALEN];
+
+	/*
+	 * Don't listen for Ethernet packets anymore.
+	 * synchronize_net() ensures that the packet handlers are not running
+	 * on another CPU. dev_remove_pack() would do that, this calls the
+ * unsynchronized version __dev_remove_pack() to avoid multiple delays.
+	 */
+	__dev_remove_pack(&fcoe->fcoe_packet_type);
+	__dev_remove_pack(&fcoe->fip_packet_type);
+	synchronize_net();
+
+	/* Delete secondary MAC addresses */
+	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
+	dev_unicast_delete(netdev, flogi_maddr);
+	if (!is_zero_ether_addr(fip->data_src_addr))
+		dev_unicast_delete(netdev, fip->data_src_addr);
+	if (fip->spma)
+		dev_unicast_delete(netdev, fip->ctl_src_addr);
+	dev_mc_delete(netdev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
+}
+
+/**
+ * fcoe_interface_release() - fcoe_port kref release function
+ * @kref: embedded reference count in an fcoe_interface struct
+ */
+static void fcoe_interface_release(struct kref *kref)
+{
+	struct fcoe_interface *fcoe;
+	struct net_device *netdev;
+
+	fcoe = container_of(kref, struct fcoe_interface, kref);
+	netdev = fcoe->netdev;
+	/* tear-down the FCoE controller */
+	fcoe_ctlr_destroy(&fcoe->ctlr);
+	kfree(fcoe);
+	dev_put(netdev);
+}
+
+/**
+ * fcoe_interface_get() - take a reference on an fcoe_interface
+ * @fcoe: the fcoe_interface to reference
+ */
+static inline void fcoe_interface_get(struct fcoe_interface *fcoe)
+{
+	kref_get(&fcoe->kref);
+}
+
+/**
+ * fcoe_interface_put() - drop a reference on an fcoe_interface
+ * @fcoe: the fcoe_interface to release
+ */
+static inline void fcoe_interface_put(struct fcoe_interface *fcoe)
+{
+	kref_put(&fcoe->kref, fcoe_interface_release);
+}
+
 /**
  * fcoe_fip_recv - handle a received FIP frame.
  * @skb: the receive skb
@@ -145,10 +331,10 @@
 			 struct packet_type *ptype,
 			 struct net_device *orig_dev)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	fc = container_of(ptype, struct fcoe_softc, fip_packet_type);
-	fcoe_ctlr_recv(&fc->ctlr, skb);
+	fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
+	fcoe_ctlr_recv(&fcoe->ctlr, skb);
 	return 0;
 }
 
@@ -159,7 +345,7 @@
  */
 static void fcoe_fip_send(struct fcoe_ctlr *fip, struct sk_buff *skb)
 {
-	skb->dev = fcoe_from_ctlr(fip)->real_dev;
+	skb->dev = fcoe_from_ctlr(fip)->netdev;
 	dev_queue_xmit(skb);
 }
 
@@ -174,13 +360,13 @@
  */
 static void fcoe_update_src_mac(struct fcoe_ctlr *fip, u8 *old, u8 *new)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	fc = fcoe_from_ctlr(fip);
+	fcoe = fcoe_from_ctlr(fip);
 	rtnl_lock();
 	if (!is_zero_ether_addr(old))
-		dev_unicast_delete(fc->real_dev, old);
-	dev_unicast_add(fc->real_dev, new);
+		dev_unicast_delete(fcoe->netdev, old);
+	dev_unicast_add(fcoe->netdev, new);
 	rtnl_unlock();
 }
 
@@ -217,30 +403,6 @@
 }
 
 /**
- * fcoe_netdev_cleanup() - clean up netdev configurations
- * @fc: ptr to the fcoe_softc
- */
-void fcoe_netdev_cleanup(struct fcoe_softc *fc)
-{
-	u8 flogi_maddr[ETH_ALEN];
-
-	/* Don't listen for Ethernet packets anymore */
-	dev_remove_pack(&fc->fcoe_packet_type);
-	dev_remove_pack(&fc->fip_packet_type);
-
-	/* Delete secondary MAC addresses */
-	rtnl_lock();
-	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
-	dev_unicast_delete(fc->real_dev, flogi_maddr);
-	if (!is_zero_ether_addr(fc->ctlr.data_src_addr))
-		dev_unicast_delete(fc->real_dev, fc->ctlr.data_src_addr);
-	if (fc->ctlr.spma)
-		dev_unicast_delete(fc->real_dev, fc->ctlr.ctl_src_addr);
-	dev_mc_delete(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
-	rtnl_unlock();
-}
-
-/**
  * fcoe_queue_timer() - fcoe queue timer
  * @lp: the fc_lport pointer
  *
@@ -265,116 +427,53 @@
 {
 	u32 mfs;
 	u64 wwnn, wwpn;
-	struct fcoe_softc *fc;
-	u8 flogi_maddr[ETH_ALEN];
-	struct netdev_hw_addr *ha;
+	struct fcoe_interface *fcoe;
+	struct fcoe_port *port;
 
 	/* Setup lport private data to point to fcoe softc */
-	fc = lport_priv(lp);
-	fc->ctlr.lp = lp;
-	fc->real_dev = netdev;
-	fc->phys_dev = netdev;
-
-	/* Require support for get_pauseparam ethtool op. */
-	if (netdev->priv_flags & IFF_802_1Q_VLAN)
-		fc->phys_dev = vlan_dev_real_dev(netdev);
-
-	/* Do not support for bonding device */
-	if ((fc->real_dev->priv_flags & IFF_MASTER_ALB) ||
-	    (fc->real_dev->priv_flags & IFF_SLAVE_INACTIVE) ||
-	    (fc->real_dev->priv_flags & IFF_MASTER_8023AD)) {
-		return -EOPNOTSUPP;
-	}
+	port = lport_priv(lp);
+	fcoe = port->fcoe;
 
 	/*
 	 * Determine max frame size based on underlying device and optional
 	 * user-configured limit.  If the MFS is too low, fcoe_link_ok()
 	 * will return 0, so do this first.
 	 */
-	mfs = fc->real_dev->mtu - (sizeof(struct fcoe_hdr) +
-				   sizeof(struct fcoe_crc_eof));
+	mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+			     sizeof(struct fcoe_crc_eof));
 	if (fc_set_mfs(lp, mfs))
 		return -EINVAL;
 
 	/* offload features support */
-	if (fc->real_dev->features & NETIF_F_SG)
+	if (netdev->features & NETIF_F_SG)
 		lp->sg_supp = 1;
 
-#ifdef NETIF_F_FCOE_CRC
 	if (netdev->features & NETIF_F_FCOE_CRC) {
 		lp->crc_offload = 1;
 		FCOE_NETDEV_DBG(netdev, "Supports FCCRC offload\n");
 	}
-#endif
-#ifdef NETIF_F_FSO
 	if (netdev->features & NETIF_F_FSO) {
 		lp->seq_offload = 1;
 		lp->lso_max = netdev->gso_max_size;
 		FCOE_NETDEV_DBG(netdev, "Supports LSO for max len 0x%x\n",
 				lp->lso_max);
 	}
-#endif
 	if (netdev->fcoe_ddp_xid) {
 		lp->lro_enabled = 1;
 		lp->lro_xid = netdev->fcoe_ddp_xid;
 		FCOE_NETDEV_DBG(netdev, "Supports LRO for max xid 0x%x\n",
 				lp->lro_xid);
 	}
-	skb_queue_head_init(&fc->fcoe_pending_queue);
-	fc->fcoe_pending_queue_active = 0;
-	setup_timer(&fc->timer, fcoe_queue_timer, (unsigned long)lp);
+	skb_queue_head_init(&port->fcoe_pending_queue);
+	port->fcoe_pending_queue_active = 0;
+	setup_timer(&port->timer, fcoe_queue_timer, (unsigned long)lp);
 
-	/* look for SAN MAC address, if multiple SAN MACs exist, only
-	 * use the first one for SPMA */
-	rcu_read_lock();
-	for_each_dev_addr(netdev, ha) {
-		if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
-		    (is_valid_ether_addr(fc->ctlr.ctl_src_addr))) {
-			memcpy(fc->ctlr.ctl_src_addr, ha->addr, ETH_ALEN);
-			fc->ctlr.spma = 1;
-			break;
-		}
-	}
-	rcu_read_unlock();
-
-	/* setup Source Mac Address */
-	if (!fc->ctlr.spma)
-		memcpy(fc->ctlr.ctl_src_addr, fc->real_dev->dev_addr,
-		       fc->real_dev->addr_len);
-
-	wwnn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 1, 0);
+	wwnn = fcoe_wwn_from_mac(netdev->dev_addr, 1, 0);
 	fc_set_wwnn(lp, wwnn);
 	/* XXX - 3rd arg needs to be vlan id */
-	wwpn = fcoe_wwn_from_mac(fc->real_dev->dev_addr, 2, 0);
+	wwpn = fcoe_wwn_from_mac(netdev->dev_addr, 2, 0);
 	fc_set_wwpn(lp, wwpn);
 
-	/*
-	 * Add FCoE MAC address as second unicast MAC address
-	 * or enter promiscuous mode if not capable of listening
-	 * for multiple unicast MACs.
-	 */
-	rtnl_lock();
-	memcpy(flogi_maddr, (u8[6]) FC_FCOE_FLOGI_MAC, ETH_ALEN);
-	dev_unicast_add(fc->real_dev, flogi_maddr);
-	if (fc->ctlr.spma)
-		dev_unicast_add(fc->real_dev, fc->ctlr.ctl_src_addr);
-	dev_mc_add(fc->real_dev, FIP_ALL_ENODE_MACS, ETH_ALEN, 0);
-	rtnl_unlock();
-
-	/*
-	 * setup the receive function from ethernet driver
-	 * on the ethertype for the given device
-	 */
-	fc->fcoe_packet_type.func = fcoe_rcv;
-	fc->fcoe_packet_type.type = __constant_htons(ETH_P_FCOE);
-	fc->fcoe_packet_type.dev = fc->real_dev;
-	dev_add_pack(&fc->fcoe_packet_type);
-
-	fc->fip_packet_type.func = fcoe_fip_recv;
-	fc->fip_packet_type.type = htons(ETH_P_FIP);
-	fc->fip_packet_type.dev = fc->real_dev;
-	dev_add_pack(&fc->fip_packet_type);
-
 	return 0;
 }
 
@@ -415,86 +514,140 @@
 	return 0;
 }
 
+/*
+ * fcoe_oem_match() - match read-type I/O for the offload exchange manager
+ * @fp: the fc_frame for the new I/O
+ *
+ * Returns : true for read-type I/O larger than ddp_min, otherwise false.
+ */
+bool fcoe_oem_match(struct fc_frame *fp)
+{
+	return fc_fcp_is_read(fr_fsp(fp)) &&
+		(fr_fsp(fp)->data_len > fcoe_ddp_min);
+}
+
 /**
  * fcoe_em_config() - allocates em for this lport
- * @lp: the port that em is to allocated for
+ * @lp: the fc_lport that an EM is to be allocated for
  *
  * Returns : 0 on success
  */
 static inline int fcoe_em_config(struct fc_lport *lp)
 {
-	BUG_ON(lp->emp);
+	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct fcoe_interface *oldfcoe = NULL;
+	struct net_device *old_real_dev, *cur_real_dev;
+	u16 min_xid = FCOE_MIN_XID;
+	u16 max_xid = FCOE_MAX_XID;
 
-	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
-				    FCOE_MIN_XID, FCOE_MAX_XID);
-	if (!lp->emp)
+	/*
+	 * Check if need to allocate an em instance for
+	 * offload exchange ids to be shared across all VN_PORTs/lport.
+	 */
+	if (!lp->lro_enabled || !lp->lro_xid || (lp->lro_xid >= max_xid)) {
+		lp->lro_xid = 0;
+		goto skip_oem;
+	}
+
+	/*
+	 * Reuse existing offload em instance in case
+	 * it is already allocated on real eth device
+	 */
+	if (fcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+		cur_real_dev = vlan_dev_real_dev(fcoe->netdev);
+	else
+		cur_real_dev = fcoe->netdev;
+
+	list_for_each_entry(oldfcoe, &fcoe_hostlist, list) {
+		if (oldfcoe->netdev->priv_flags & IFF_802_1Q_VLAN)
+			old_real_dev = vlan_dev_real_dev(oldfcoe->netdev);
+		else
+			old_real_dev = oldfcoe->netdev;
+
+		if (cur_real_dev == old_real_dev) {
+			fcoe->oem = oldfcoe->oem;
+			break;
+		}
+	}
+
+	if (fcoe->oem) {
+		if (!fc_exch_mgr_add(lp, fcoe->oem, fcoe_oem_match)) {
+			printk(KERN_ERR "fcoe_em_config: failed to add "
+			       "offload em:%p on interface:%s\n",
+			       fcoe->oem, fcoe->netdev->name);
+			return -ENOMEM;
+		}
+	} else {
+		fcoe->oem = fc_exch_mgr_alloc(lp, FC_CLASS_3,
+					    FCOE_MIN_XID, lp->lro_xid,
+					    fcoe_oem_match);
+		if (!fcoe->oem) {
+			printk(KERN_ERR "fcoe_em_config: failed to allocate "
+			       "em for offload exches on interface:%s\n",
+			       fcoe->netdev->name);
+			return -ENOMEM;
+		}
+	}
+
+	/*
+	 * Exclude offload EM xid range from next EM xid range.
+	 */
+	min_xid += lp->lro_xid + 1;
+
+skip_oem:
+	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, min_xid, max_xid, NULL)) {
+		printk(KERN_ERR "fcoe_em_config: failed to "
+		       "allocate em on interface %s\n", fcoe->netdev->name);
 		return -ENOMEM;
+	}
 
 	return 0;
 }
 
 /**
  * fcoe_if_destroy() - FCoE software HBA tear-down function
- * @netdev: ptr to the associated net_device
- *
- * Returns: 0 if link is OK for use by FCoE.
+ * @lport: fc_lport to destroy
  */
-static int fcoe_if_destroy(struct net_device *netdev)
+static void fcoe_if_destroy(struct fc_lport *lport)
 {
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
-
-	BUG_ON(!netdev);
+	struct fcoe_port *port = lport_priv(lport);
+	struct fcoe_interface *fcoe = port->fcoe;
+	struct net_device *netdev = fcoe->netdev;
 
 	FCOE_NETDEV_DBG(netdev, "Destroying interface\n");
 
-	lp = fcoe_hostlist_lookup(netdev);
-	if (!lp)
-		return -ENODEV;
-
-	fc = lport_priv(lp);
-
 	/* Logout of the fabric */
-	fc_fabric_logoff(lp);
-
-	/* Remove the instance from fcoe's list */
-	fcoe_hostlist_remove(lp);
-
-	/* clean up netdev configurations */
-	fcoe_netdev_cleanup(fc);
-
-	/* tear-down the FCoE controller */
-	fcoe_ctlr_destroy(&fc->ctlr);
+	fc_fabric_logoff(lport);
 
 	/* Cleanup the fc_lport */
-	fc_lport_destroy(lp);
-	fc_fcp_destroy(lp);
+	fc_lport_destroy(lport);
+	fc_fcp_destroy(lport);
+
+	/* Stop the transmit retry timer */
+	del_timer_sync(&port->timer);
+
+	/* Free existing transmit skbs */
+	fcoe_clean_pending_queue(lport);
+
+	/* receives may not be stopped until after this */
+	fcoe_interface_put(fcoe);
+
+	/* Free queued packets for the per-CPU receive threads */
+	fcoe_percpu_clean(lport);
 
 	/* Detach from the scsi-ml */
-	fc_remove_host(lp->host);
-	scsi_remove_host(lp->host);
+	fc_remove_host(lport->host);
+	scsi_remove_host(lport->host);
 
 	/* There are no more rports or I/O, free the EM */
-	if (lp->emp)
-		fc_exch_mgr_free(lp->emp);
-
-	/* Free the per-CPU receive threads */
-	fcoe_percpu_clean(lp);
-
-	/* Free existing skbs */
-	fcoe_clean_pending_queue(lp);
-
-	/* Stop the timer */
-	del_timer_sync(&fc->timer);
+	fc_exch_mgr_free(lport);
 
 	/* Free memory used by statistical counters */
-	fc_lport_free_stats(lp);
+	fc_lport_free_stats(lport);
 
-	/* Release the net_device and Scsi_Host */
-	dev_put(fc->real_dev);
-	scsi_host_put(lp->host);
-
-	return 0;
+	/* Release the Scsi_Host */
+	scsi_host_put(lport->host);
 }
 
 /*
@@ -540,106 +693,96 @@
 };
 
 /**
- * fcoe_if_create() - this function creates the fcoe interface
- * @netdev: pointer the associated netdevice
+ * fcoe_if_create() - this function creates the fcoe port
+ * @fcoe: fcoe_interface structure to create an fc_lport instance on
+ * @parent: device pointer to be the parent in sysfs for the SCSI host
  *
- * Creates fc_lport struct and scsi_host for lport, configures lport
- * and starts fabric login.
+ * Creates fc_lport struct and scsi_host for lport, configures lport.
  *
- * Returns : 0 on success
+ * Returns : The allocated fc_lport or an error pointer
  */
-static int fcoe_if_create(struct net_device *netdev)
+static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
+				       struct device *parent)
 {
 	int rc;
-	struct fc_lport *lp = NULL;
-	struct fcoe_softc *fc;
+	struct fc_lport *lport = NULL;
+	struct fcoe_port *port;
 	struct Scsi_Host *shost;
-
-	BUG_ON(!netdev);
+	struct net_device *netdev = fcoe->netdev;
 
 	FCOE_NETDEV_DBG(netdev, "Create Interface\n");
 
-	lp = fcoe_hostlist_lookup(netdev);
-	if (lp)
-		return -EEXIST;
-
 	shost = libfc_host_alloc(&fcoe_shost_template,
-				 sizeof(struct fcoe_softc));
+				 sizeof(struct fcoe_port));
 	if (!shost) {
 		FCOE_NETDEV_DBG(netdev, "Could not allocate host structure\n");
-		return -ENOMEM;
+		rc = -ENOMEM;
+		goto out;
 	}
-	lp = shost_priv(shost);
-	fc = lport_priv(lp);
+	lport = shost_priv(shost);
+	port = lport_priv(lport);
+	port->lport = lport;
+	port->fcoe = fcoe;
+	INIT_WORK(&port->destroy_work, fcoe_destroy_work);
 
 	/* configure fc_lport, e.g., em */
-	rc = fcoe_lport_config(lp);
+	rc = fcoe_lport_config(lport);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure lport for the "
 				"interface\n");
 		goto out_host_put;
 	}
 
-	/*
-	 * Initialize FIP.
-	 */
-	fcoe_ctlr_init(&fc->ctlr);
-	fc->ctlr.send = fcoe_fip_send;
-	fc->ctlr.update_mac = fcoe_update_src_mac;
-
 	/* configure lport network properties */
-	rc = fcoe_netdev_config(lp, netdev);
+	rc = fcoe_netdev_config(lport, netdev);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure netdev for the "
 				"interface\n");
-		goto out_netdev_cleanup;
+		goto out_lp_destroy;
 	}
 
 	/* configure lport scsi host properties */
-	rc = fcoe_shost_config(lp, shost, &netdev->dev);
+	rc = fcoe_shost_config(lport, shost, parent);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure shost for the "
 				"interface\n");
-		goto out_netdev_cleanup;
-	}
-
-	/* lport exch manager allocation */
-	rc = fcoe_em_config(lp);
-	if (rc) {
-		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
-				"interface\n");
-		goto out_netdev_cleanup;
+		goto out_lp_destroy;
 	}
 
 	/* Initialize the library */
-	rc = fcoe_libfc_config(lp, &fcoe_libfc_fcn_templ);
+	rc = fcoe_libfc_config(lport, &fcoe_libfc_fcn_templ);
 	if (rc) {
 		FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
 				"interface\n");
 		goto out_lp_destroy;
 	}
 
-	/* add to lports list */
-	fcoe_hostlist_add(lp);
+	/*
+	 * fcoe_em_config() and fcoe_hostlist_add() both
+	 * need to be atomic with respect to other changes to the hostlist
+	 * since fcoe_em_config() looks for an existing EM
+	 * instance on the host list updated by fcoe_hostlist_add().
+	 *
+	 * This is currently handled through the fcoe_config_mutex being held.
+	 */
 
-	lp->boot_time = jiffies;
+	/* lport exch manager allocation */
+	rc = fcoe_em_config(lport);
+	if (rc) {
+		FCOE_NETDEV_DBG(netdev, "Could not configure the EM for the "
+				"interface\n");
+		goto out_lp_destroy;
+	}
 
-	fc_fabric_login(lp);
-
-	if (!fcoe_link_ok(lp))
-		fcoe_ctlr_link_up(&fc->ctlr);
-
-	dev_hold(netdev);
-
-	return rc;
+	fcoe_interface_get(fcoe);
+	return lport;
 
 out_lp_destroy:
-	fc_exch_mgr_free(lp->emp); /* Free the EM */
-out_netdev_cleanup:
-	fcoe_netdev_cleanup(fc);
+	fc_exch_mgr_free(lport);
 out_host_put:
-	scsi_host_put(lp->host);
-	return rc;
+	scsi_host_put(lport->host);
+out:
+	return ERR_PTR(rc);
 }
 
 /**
@@ -669,6 +812,7 @@
 int __exit fcoe_if_exit(void)
 {
 	fc_release_transport(scsi_transport_fcoe_sw);
+	scsi_transport_fcoe_sw = NULL;
 	return 0;
 }
 
@@ -686,7 +830,7 @@
 	thread = kthread_create(fcoe_percpu_receive_thread,
 				(void *)p, "fcoethread/%d", cpu);
 
-	if (likely(!IS_ERR(p->thread))) {
+	if (likely(!IS_ERR(thread))) {
 		kthread_bind(thread, cpu);
 		wake_up_process(thread);
 
@@ -838,14 +982,13 @@
 {
 	struct fc_lport *lp;
 	struct fcoe_rcv_info *fr;
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
-	unsigned short oxid;
-	unsigned int cpu = 0;
+	unsigned int cpu;
 
-	fc = container_of(ptype, struct fcoe_softc, fcoe_packet_type);
-	lp = fc->ctlr.lp;
+	fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
+	lp = fcoe->ctlr.lp;
 	if (unlikely(lp == NULL)) {
 		FCOE_NETDEV_DBG(dev, "Cannot find hba structure");
 		goto err2;
@@ -876,20 +1019,20 @@
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
 	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
-	oxid = ntohs(fh->fh_ox_id);
-
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lp;
 	fr->ptype = ptype;
 
-#ifdef CONFIG_SMP
 	/*
-	 * The incoming frame exchange id(oxid) is ANDed with num of online
-	 * cpu bits to get cpu and then this cpu is used for selecting
-	 * a per cpu kernel thread from fcoe_percpu.
+	 * In case the incoming frame's exchange is originated from
+	 * the initiator, then received frame's exchange id is ANDed
+	 * with fc_cpu_mask bits to get the same cpu on which exchange
+	 * was originated, otherwise just use the current cpu.
 	 */
-	cpu = oxid & (num_online_cpus() - 1);
-#endif
+	if (ntoh24(fh->fh_f_ctl) & FC_FC_EX_CTX)
+		cpu = ntohs(fh->fh_ox_id) & fc_cpu_mask;
+	else
+		cpu = smp_processor_id();
 
 	fps = &per_cpu(fcoe_percpu, cpu);
 	spin_lock_bh(&fps->fcoe_rx_list.lock);
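
The receive-path change above replaces the old oxid & (num_online_cpus() - 1) hashing: when FC_FC_EX_CTX is set the frame belongs to an exchange we originated, so masking its OX_ID with fc_cpu_mask steers it back to the CPU that allocated the exchange, while everything else stays on the local CPU. A sketch of the selection with assumed values (fc_cpu_mask really comes from libfc; the numbers below are only illustrative):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t fc_cpu_mask = 0x7;	/* assume an 8-CPU mask for illustration */
	uint16_t ox_id = 0x1234;	/* exchange ID carried in the FC header */
	int ex_ctx = 1;			/* FC_FC_EX_CTX: we are the responder */
	unsigned int this_cpu = 5;	/* stand-in for smp_processor_id() */
	unsigned int cpu;

	if (ex_ctx)
		cpu = ox_id & fc_cpu_mask;	/* back to the originating CPU */
	else
		cpu = this_cpu;			/* keep the frame local */

	printf("frame queued to per-CPU rx thread %u\n", cpu);
	return 0;
}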
@@ -996,7 +1139,7 @@
  * fcoe_fc_crc() - calculates FC CRC in this fcoe skb
  * @fp: the fc_frame containing data to be checksummed
  *
- * This uses crc32() to calculate the crc for fc frame
+ * This uses crc32() to calculate the crc for the fc frame
  * Return   : 32 bit crc
  */
 u32 fcoe_fc_crc(struct fc_frame *fp)
@@ -1029,7 +1172,7 @@
 
 /**
  * fcoe_xmit() - FCoE frame transmit function
- * @lp:	the associated local port
+ * @lp:	the associated local port (fc_lport)
  * @fp: the fc_frame to be transmitted
  *
  * Return   : 0 for success
@@ -1046,13 +1189,13 @@
 	unsigned int hlen;		/* header length implies the version */
 	unsigned int tlen;		/* trailer length */
 	unsigned int elen;		/* eth header, may include vlan */
-	struct fcoe_softc *fc;
+	struct fcoe_port *port = lport_priv(lp);
+	struct fcoe_interface *fcoe = port->fcoe;
 	u8 sof, eof;
 	struct fcoe_hdr *hp;
 
 	WARN_ON((fr_len(fp) % sizeof(u32)) != 0);
 
-	fc = lport_priv(lp);
 	fh = fc_frame_header_get(fp);
 	skb = fp_skb(fp);
 	wlen = skb->len / FCOE_WORD_TO_BYTE;
@@ -1063,7 +1206,7 @@
 	}
 
 	if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ) &&
-	    fcoe_ctlr_els_send(&fc->ctlr, skb))
+	    fcoe_ctlr_els_send(&fcoe->ctlr, skb))
 		return 0;
 
 	sof = fr_sof(fp);
@@ -1085,7 +1228,7 @@
 		crc = fcoe_fc_crc(fp);
 	}
 
-	/* copy fc crc and eof to the skb buff */
+	/* copy the FC CRC and EOF to the skb buff */
 	if (skb_is_nonlinear(skb)) {
 		skb_frag_t *frag;
 		if (fcoe_get_paged_crc_eof(skb, tlen)) {
@@ -1108,27 +1251,27 @@
 		cp = NULL;
 	}
 
-	/* adjust skb network/transport offsets to match mac/fcoe/fc */
+	/* adjust skb network/transport offsets to match mac/fcoe/fc headers */
 	skb_push(skb, elen + hlen);
 	skb_reset_mac_header(skb);
 	skb_reset_network_header(skb);
 	skb->mac_len = elen;
 	skb->protocol = htons(ETH_P_FCOE);
-	skb->dev = fc->real_dev;
+	skb->dev = fcoe->netdev;
 
 	/* fill up mac and fcoe headers */
 	eh = eth_hdr(skb);
 	eh->h_proto = htons(ETH_P_FCOE);
-	if (fc->ctlr.map_dest)
+	if (fcoe->ctlr.map_dest)
 		fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
 	else
 		/* insert GW address */
-		memcpy(eh->h_dest, fc->ctlr.dest_addr, ETH_ALEN);
+		memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN);
 
-	if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN))
-		memcpy(eh->h_source, fc->ctlr.ctl_src_addr, ETH_ALEN);
+	if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN))
+		memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN);
 	else
-		memcpy(eh->h_source, fc->ctlr.data_src_addr, ETH_ALEN);
+		memcpy(eh->h_source, fcoe->ctlr.data_src_addr, ETH_ALEN);
 
 	hp = (struct fcoe_hdr *)(eh + 1);
 	memset(hp, 0, sizeof(*hp));
@@ -1136,7 +1279,6 @@
 		FC_FCOE_ENCAPS_VER(hp, FC_FCOE_VER);
 	hp->fcoe_sof = sof;
 
-#ifdef NETIF_F_FSO
 	/* fcoe lso, mss is in max_payload which is non-zero for FCP data */
 	if (lp->seq_offload && fr_max_payload(fp)) {
 		skb_shinfo(skb)->gso_type = SKB_GSO_FCOE;
@@ -1145,7 +1287,6 @@
 		skb_shinfo(skb)->gso_type = 0;
 		skb_shinfo(skb)->gso_size = 0;
 	}
-#endif
 	/* update tx stats: regardless if LLD fails */
 	stats = fc_lport_get_stats(lp);
 	stats->TxFrames++;
@@ -1153,7 +1294,7 @@
 
 	/* send down to lld */
 	fr_dev(fp) = lp;
-	if (fc->fcoe_pending_queue.qlen)
+	if (port->fcoe_pending_queue.qlen)
 		fcoe_check_wait_queue(lp, skb);
 	else if (fcoe_start_io(skb))
 		fcoe_check_wait_queue(lp, skb);
@@ -1162,6 +1303,15 @@
 }
 
 /**
+ * fcoe_percpu_flush_done() - Indicate percpu queue flush completion.
+ * @skb: the skb being completed.
+ */
+static void fcoe_percpu_flush_done(struct sk_buff *skb)
+{
+	complete(&fcoe_flush_completion);
+}
+
+/**
  * fcoe_percpu_receive_thread() - recv thread per cpu
  * @arg: ptr to the fcoe per cpu struct
  *
@@ -1179,7 +1329,7 @@
 	struct fcoe_crc_eof crc_eof;
 	struct fc_frame *fp;
 	u8 *mac = NULL;
-	struct fcoe_softc *fc;
+	struct fcoe_port *port;
 	struct fcoe_hdr *hp;
 
 	set_user_nice(current, -20);
@@ -1200,7 +1350,8 @@
 		fr = fcoe_dev_from_skb(skb);
 		lp = fr->fr_dev;
 		if (unlikely(lp == NULL)) {
-			FCOE_NETDEV_DBG(skb->dev, "Invalid HBA Structure");
+			if (skb->destructor != fcoe_percpu_flush_done)
+				FCOE_NETDEV_DBG(skb->dev, "NULL lport in skb");
 			kfree_skb(skb);
 			continue;
 		}
@@ -1215,7 +1366,7 @@
 		/*
 		 * Save source MAC address before discarding header.
 		 */
-		fc = lport_priv(lp);
+		port = lport_priv(lp);
 		if (skb_is_nonlinear(skb))
 			skb_linearize(skb);	/* not ideal */
 		mac = eth_hdr(skb)->h_source;
@@ -1277,7 +1428,7 @@
 		fh = fc_frame_header_get(fp);
 		if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
 		    fh->fh_type == FC_TYPE_FCP) {
-			fc_exch_recv(lp, lp->emp, fp);
+			fc_exch_recv(lp, fp);
 			continue;
 		}
 		if (fr_flags(fp) & FCPHF_CRC_UNCHECKED) {
@@ -1293,12 +1444,12 @@
 			}
 			fr_flags(fp) &= ~FCPHF_CRC_UNCHECKED;
 		}
-		if (unlikely(fc->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
-		    fcoe_ctlr_recv_flogi(&fc->ctlr, fp, mac)) {
+		if (unlikely(port->fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN) &&
+		    fcoe_ctlr_recv_flogi(&port->fcoe->ctlr, fp, mac)) {
 			fc_frame_free(fp);
 			continue;
 		}
-		fc_exch_recv(lp, lp->emp, fp);
+		fc_exch_recv(lp, fp);
 	}
 	return 0;
 }
@@ -1318,46 +1469,46 @@
  */
 static void fcoe_check_wait_queue(struct fc_lport *lp, struct sk_buff *skb)
 {
-	struct fcoe_softc *fc = lport_priv(lp);
+	struct fcoe_port *port = lport_priv(lp);
 	int rc;
 
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
 
 	if (skb)
-		__skb_queue_tail(&fc->fcoe_pending_queue, skb);
+		__skb_queue_tail(&port->fcoe_pending_queue, skb);
 
-	if (fc->fcoe_pending_queue_active)
+	if (port->fcoe_pending_queue_active)
 		goto out;
-	fc->fcoe_pending_queue_active = 1;
+	port->fcoe_pending_queue_active = 1;
 
-	while (fc->fcoe_pending_queue.qlen) {
+	while (port->fcoe_pending_queue.qlen) {
 		/* keep qlen > 0 until fcoe_start_io succeeds */
-		fc->fcoe_pending_queue.qlen++;
-		skb = __skb_dequeue(&fc->fcoe_pending_queue);
+		port->fcoe_pending_queue.qlen++;
+		skb = __skb_dequeue(&port->fcoe_pending_queue);
 
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
 		rc = fcoe_start_io(skb);
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
 
 		if (rc) {
-			__skb_queue_head(&fc->fcoe_pending_queue, skb);
+			__skb_queue_head(&port->fcoe_pending_queue, skb);
 			/* undo temporary increment above */
-			fc->fcoe_pending_queue.qlen--;
+			port->fcoe_pending_queue.qlen--;
 			break;
 		}
 		/* undo temporary increment above */
-		fc->fcoe_pending_queue.qlen--;
+		port->fcoe_pending_queue.qlen--;
 	}
 
-	if (fc->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
+	if (port->fcoe_pending_queue.qlen < FCOE_LOW_QUEUE_DEPTH)
 		lp->qfull = 0;
-	if (fc->fcoe_pending_queue.qlen && !timer_pending(&fc->timer))
-		mod_timer(&fc->timer, jiffies + 2);
-	fc->fcoe_pending_queue_active = 0;
+	if (port->fcoe_pending_queue.qlen && !timer_pending(&port->timer))
+		mod_timer(&port->timer, jiffies + 2);
+	port->fcoe_pending_queue_active = 0;
 out:
-	if (fc->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
+	if (port->fcoe_pending_queue.qlen > FCOE_MAX_QUEUE_DEPTH)
 		lp->qfull = 1;
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
 	return;
 }
 
@@ -1391,21 +1542,20 @@
 				    ulong event, void *ptr)
 {
 	struct fc_lport *lp = NULL;
-	struct net_device *real_dev = ptr;
-	struct fcoe_softc *fc;
+	struct net_device *netdev = ptr;
+	struct fcoe_interface *fcoe;
+	struct fcoe_port *port;
 	struct fcoe_dev_stats *stats;
 	u32 link_possible = 1;
 	u32 mfs;
 	int rc = NOTIFY_OK;
 
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->real_dev == real_dev) {
-			lp = fc->ctlr.lp;
+	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+		if (fcoe->netdev == netdev) {
+			lp = fcoe->ctlr.lp;
 			break;
 		}
 	}
-	read_unlock(&fcoe_hostlist_lock);
 	if (lp == NULL) {
 		rc = NOTIFY_DONE;
 		goto out;
@@ -1420,21 +1570,27 @@
 	case NETDEV_CHANGE:
 		break;
 	case NETDEV_CHANGEMTU:
-		mfs = fc->real_dev->mtu -
-			(sizeof(struct fcoe_hdr) +
-			 sizeof(struct fcoe_crc_eof));
+		mfs = netdev->mtu - (sizeof(struct fcoe_hdr) +
+				     sizeof(struct fcoe_crc_eof));
 		if (mfs >= FC_MIN_MAX_FRAME)
 			fc_set_mfs(lp, mfs);
 		break;
 	case NETDEV_REGISTER:
 		break;
+	case NETDEV_UNREGISTER:
+		list_del(&fcoe->list);
+		port = lport_priv(fcoe->ctlr.lp);
+		fcoe_interface_cleanup(fcoe);
+		schedule_work(&port->destroy_work);
+		goto out;
+		break;
 	default:
-		FCOE_NETDEV_DBG(real_dev, "Unknown event %ld "
+		FCOE_NETDEV_DBG(netdev, "Unknown event %ld "
 				"from netdev netlink\n", event);
 	}
 	if (link_possible && !fcoe_link_ok(lp))
-		fcoe_ctlr_link_up(&fc->ctlr);
-	else if (fcoe_ctlr_link_down(&fc->ctlr)) {
+		fcoe_ctlr_link_up(&fcoe->ctlr);
+	else if (fcoe_ctlr_link_down(&fcoe->ctlr)) {
 		stats = fc_lport_get_stats(lp);
 		stats->LinkFailureCount++;
 		fcoe_clean_pending_queue(lp);
@@ -1465,75 +1621,6 @@
 }
 
 /**
- * fcoe_netdev_to_module_owner() - finds out the driver module of the netdev
- * @netdev: the target netdev
- *
- * Returns: ptr to the struct module, NULL for failure
- */
-static struct module *
-fcoe_netdev_to_module_owner(const struct net_device *netdev)
-{
-	struct device *dev;
-
-	if (!netdev)
-		return NULL;
-
-	dev = netdev->dev.parent;
-	if (!dev)
-		return NULL;
-
-	if (!dev->driver)
-		return NULL;
-
-	return dev->driver->owner;
-}
-
-/**
- * fcoe_ethdrv_get() - Hold the Ethernet driver
- * @netdev: the target netdev
- *
- * Holds the Ethernet driver module by try_module_get() for
- * the corresponding netdev.
- *
- * Returns: 0 for success
- */
-static int fcoe_ethdrv_get(const struct net_device *netdev)
-{
-	struct module *owner;
-
-	owner = fcoe_netdev_to_module_owner(netdev);
-	if (owner) {
-		FCOE_NETDEV_DBG(netdev, "Hold driver module %s\n",
-				module_name(owner));
-		return  try_module_get(owner);
-	}
-	return -ENODEV;
-}
-
-/**
- * fcoe_ethdrv_put() - Release the Ethernet driver
- * @netdev: the target netdev
- *
- * Releases the Ethernet driver module by module_put for
- * the corresponding netdev.
- *
- * Returns: 0 for success
- */
-static int fcoe_ethdrv_put(const struct net_device *netdev)
-{
-	struct module *owner;
-
-	owner = fcoe_netdev_to_module_owner(netdev);
-	if (owner) {
-		FCOE_NETDEV_DBG(netdev, "Release driver module %s\n",
-				module_name(owner));
-		module_put(owner);
-		return 0;
-	}
-	return -ENODEV;
-}
-
-/**
  * fcoe_destroy() - handles the destroy from sysfs
  * @buffer: expected to be an eth if name
  * @kp: associated kernel param
@@ -1542,34 +1629,57 @@
  */
 static int fcoe_destroy(const char *buffer, struct kernel_param *kp)
 {
-	int rc;
+	struct fcoe_interface *fcoe;
 	struct net_device *netdev;
+	int rc;
+
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
 
 	netdev = fcoe_if_to_netdev(buffer);
 	if (!netdev) {
 		rc = -ENODEV;
 		goto out_nodev;
 	}
-	/* look for existing lport */
-	if (!fcoe_hostlist_lookup(netdev)) {
+
+	rtnl_lock();
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	if (!fcoe) {
+		rtnl_unlock();
 		rc = -ENODEV;
 		goto out_putdev;
 	}
-	rc = fcoe_if_destroy(netdev);
-	if (rc) {
-		printk(KERN_ERR "fcoe: Failed to destroy interface (%s)\n",
-		       netdev->name);
-		rc = -EIO;
-		goto out_putdev;
-	}
-	fcoe_ethdrv_put(netdev);
-	rc = 0;
+	list_del(&fcoe->list);
+	fcoe_interface_cleanup(fcoe);
+	rtnl_unlock();
+	fcoe_if_destroy(fcoe->ctlr.lp);
 out_putdev:
 	dev_put(netdev);
 out_nodev:
+	mutex_unlock(&fcoe_config_mutex);
 	return rc;
 }
 
+static void fcoe_destroy_work(struct work_struct *work)
+{
+	struct fcoe_port *port;
+
+	port = container_of(work, struct fcoe_port, destroy_work);
+	mutex_lock(&fcoe_config_mutex);
+	fcoe_if_destroy(port->lport);
+	mutex_unlock(&fcoe_config_mutex);
+}
+
 /**
  * fcoe_create() - Handles the create call from sysfs
  * @buffer: expected to be an eth if name
@@ -1580,41 +1690,84 @@
 static int fcoe_create(const char *buffer, struct kernel_param *kp)
 {
 	int rc;
+	struct fcoe_interface *fcoe;
+	struct fc_lport *lport;
 	struct net_device *netdev;
 
+	mutex_lock(&fcoe_config_mutex);
+#ifdef CONFIG_FCOE_MODULE
+	/*
+	 * Make sure the module has been initialized, and is not about to be
+	 * removed.  Module parameter sysfs files are writable before the
+	 * module_init function is called and after module_exit.
+	 */
+	if (THIS_MODULE->state != MODULE_STATE_LIVE) {
+		rc = -ENODEV;
+		goto out_nodev;
+	}
+#endif
+
+	rtnl_lock();
 	netdev = fcoe_if_to_netdev(buffer);
 	if (!netdev) {
 		rc = -ENODEV;
 		goto out_nodev;
 	}
+
 	/* look for existing lport */
 	if (fcoe_hostlist_lookup(netdev)) {
 		rc = -EEXIST;
 		goto out_putdev;
 	}
-	fcoe_ethdrv_get(netdev);
 
-	rc = fcoe_if_create(netdev);
-	if (rc) {
-		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
-		       netdev->name);
-		fcoe_ethdrv_put(netdev);
-		rc = -EIO;
+	fcoe = fcoe_interface_create(netdev);
+	if (!fcoe) {
+		rc = -ENOMEM;
 		goto out_putdev;
 	}
+
+	lport = fcoe_if_create(fcoe, &netdev->dev);
+	if (IS_ERR(lport)) {
+		printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
+		       netdev->name);
+		rc = -EIO;
+		fcoe_interface_cleanup(fcoe);
+		goto out_free;
+	}
+
+	/* Make this the "master" N_Port */
+	fcoe->ctlr.lp = lport;
+
+	/* add to lports list */
+	fcoe_hostlist_add(lport);
+
+	/* start FIP Discovery and FLOGI */
+	lport->boot_time = jiffies;
+	fc_fabric_login(lport);
+	if (!fcoe_link_ok(lport))
+		fcoe_ctlr_link_up(&fcoe->ctlr);
+
 	rc = 0;
+out_free:
+	/*
+	 * Release the init reference from fcoe_interface_create(); on success
+	 * the lport should still be holding a reference taken in fcoe_if_create().
+	 */
+	fcoe_interface_put(fcoe);
 out_putdev:
 	dev_put(netdev);
 out_nodev:
+	rtnl_unlock();
+	mutex_unlock(&fcoe_config_mutex);
 	return rc;
 }
 
 module_param_call(create, fcoe_create, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
-MODULE_PARM_DESC(create, "Create fcoe port using net device passed in.");
+MODULE_PARM_DESC(create, "Create an FCoE instance on the net device passed in.");
 module_param_call(destroy, fcoe_destroy, NULL, NULL, S_IWUSR);
 __MODULE_PARM_TYPE(destroy, "string");
-MODULE_PARM_DESC(destroy, "Destroy fcoe port");
+MODULE_PARM_DESC(destroy, "Destroy the FCoE instance on the net device passed in");
 
 /**
  * fcoe_link_ok() - Check if link is ok for the fc_lport
@@ -1632,37 +1785,40 @@
  */
 int fcoe_link_ok(struct fc_lport *lp)
 {
-	struct fcoe_softc *fc = lport_priv(lp);
-	struct net_device *dev = fc->real_dev;
+	struct fcoe_port *port = lport_priv(lp);
+	struct net_device *dev = port->fcoe->netdev;
 	struct ethtool_cmd ecmd = { ETHTOOL_GSET };
-	int rc = 0;
 
-	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev)) {
-		dev = fc->phys_dev;
-		if (dev->ethtool_ops->get_settings) {
-			dev->ethtool_ops->get_settings(dev, &ecmd);
-			lp->link_supported_speeds &=
-				~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
-			if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-					      SUPPORTED_1000baseT_Full))
-				lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-			if (ecmd.supported & SUPPORTED_10000baseT_Full)
-				lp->link_supported_speeds |=
-					FC_PORTSPEED_10GBIT;
-			if (ecmd.speed == SPEED_1000)
-				lp->link_speed = FC_PORTSPEED_1GBIT;
-			if (ecmd.speed == SPEED_10000)
-				lp->link_speed = FC_PORTSPEED_10GBIT;
-		}
-	} else
-		rc = -1;
+	if ((dev->flags & IFF_UP) && netif_carrier_ok(dev) &&
+	    (!dev_ethtool_get_settings(dev, &ecmd))) {
+		lp->link_supported_speeds &=
+			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
+				      SUPPORTED_1000baseT_Full))
+			lp->link_supported_speeds |= FC_PORTSPEED_1GBIT;
+		if (ecmd.supported & SUPPORTED_10000baseT_Full)
+			lp->link_supported_speeds |=
+				FC_PORTSPEED_10GBIT;
+		if (ecmd.speed == SPEED_1000)
+			lp->link_speed = FC_PORTSPEED_1GBIT;
+		if (ecmd.speed == SPEED_10000)
+			lp->link_speed = FC_PORTSPEED_10GBIT;
 
-	return rc;
+		return 0;
+	}
+	return -1;
 }
 
 /**
  * fcoe_percpu_clean() - Clear the pending skbs for an lport
  * @lp: the fc_lport
+ *
+ * Must be called with fcoe_config_mutex held to single-thread completion.
+ *
+ * This flushes the pending skbs by adding a new skb to each queue and
+ * waiting until they are all freed.  This assures us that not only are
+ * there no packets that will be handled by the lport, but also that any
+ * threads already handling a packet have returned.
  */
 void fcoe_percpu_clean(struct fc_lport *lp)
 {
@@ -1687,7 +1843,25 @@
 				kfree_skb(skb);
 			}
 		}
+
+		if (!pp->thread || !cpu_online(cpu)) {
+			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+			continue;
+		}
+
+		skb = dev_alloc_skb(0);
+		if (!skb) {
+			spin_unlock_bh(&pp->fcoe_rx_list.lock);
+			continue;
+		}
+		skb->destructor = fcoe_percpu_flush_done;
+
+		__skb_queue_tail(&pp->fcoe_rx_list, skb);
+		if (pp->fcoe_rx_list.qlen == 1)
+			wake_up_process(pp->thread);
 		spin_unlock_bh(&pp->fcoe_rx_list.lock);
+
+		wait_for_completion(&fcoe_flush_completion);
 	}
 }
 
@@ -1699,16 +1873,16 @@
  */
 void fcoe_clean_pending_queue(struct fc_lport *lp)
 {
-	struct fcoe_softc  *fc = lport_priv(lp);
+	struct fcoe_port  *port = lport_priv(lp);
 	struct sk_buff *skb;
 
-	spin_lock_bh(&fc->fcoe_pending_queue.lock);
-	while ((skb = __skb_dequeue(&fc->fcoe_pending_queue)) != NULL) {
-		spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	spin_lock_bh(&port->fcoe_pending_queue.lock);
+	while ((skb = __skb_dequeue(&port->fcoe_pending_queue)) != NULL) {
+		spin_unlock_bh(&port->fcoe_pending_queue.lock);
 		kfree_skb(skb);
-		spin_lock_bh(&fc->fcoe_pending_queue.lock);
+		spin_lock_bh(&port->fcoe_pending_queue.lock);
 	}
-	spin_unlock_bh(&fc->fcoe_pending_queue.lock);
+	spin_unlock_bh(&port->fcoe_pending_queue.lock);
 }
 
 /**
@@ -1725,24 +1899,21 @@
 }
 
 /**
- * fcoe_hostlist_lookup_softc() - find the corresponding lport by a given device
+ * fcoe_hostlist_lookup_port() - find the corresponding lport by a given device
  * @dev: this is currently ptr to net_device
  *
- * Returns: NULL or the located fcoe_softc
+ * Returns: NULL or the located fcoe_interface
+ * Locking: must be called with the RTNL mutex held
  */
-static struct fcoe_softc *
-fcoe_hostlist_lookup_softc(const struct net_device *dev)
+static struct fcoe_interface *
+fcoe_hostlist_lookup_port(const struct net_device *dev)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	read_lock(&fcoe_hostlist_lock);
-	list_for_each_entry(fc, &fcoe_hostlist, list) {
-		if (fc->real_dev == dev) {
-			read_unlock(&fcoe_hostlist_lock);
-			return fc;
-		}
+	list_for_each_entry(fcoe, &fcoe_hostlist, list) {
+		if (fcoe->netdev == dev)
+			return fcoe;
 	}
-	read_unlock(&fcoe_hostlist_lock);
 	return NULL;
 }
 
@@ -1751,14 +1922,14 @@
  * @netdev: ptr to net_device
  *
  * Returns: 0 for success
+ * Locking: must be called with the RTNL mutex held
  */
-struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
+static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
 
-	fc = fcoe_hostlist_lookup_softc(netdev);
-
-	return (fc) ? fc->ctlr.lp : NULL;
+	fcoe = fcoe_hostlist_lookup_port(netdev);
+	return (fcoe) ? fcoe->ctlr.lp : NULL;
 }
 
 /**
@@ -1766,41 +1937,23 @@
  * @lp: ptr to the fc_lport to be added
  *
  * Returns: 0 for success
+ * Locking: must be called with the RTNL mutex held
  */
-int fcoe_hostlist_add(const struct fc_lport *lp)
+static int fcoe_hostlist_add(const struct fc_lport *lport)
 {
-	struct fcoe_softc *fc;
+	struct fcoe_interface *fcoe;
+	struct fcoe_port *port;
 
-	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
-	if (!fc) {
-		fc = lport_priv(lp);
-		write_lock_bh(&fcoe_hostlist_lock);
-		list_add_tail(&fc->list, &fcoe_hostlist);
-		write_unlock_bh(&fcoe_hostlist_lock);
+	fcoe = fcoe_hostlist_lookup_port(fcoe_netdev(lport));
+	if (!fcoe) {
+		port = lport_priv(lport);
+		fcoe = port->fcoe;
+		list_add_tail(&fcoe->list, &fcoe_hostlist);
 	}
 	return 0;
 }
 
 /**
- * fcoe_hostlist_remove() - remove a lport from lports list
- * @lp: ptr to the fc_lport to be removed
- *
- * Returns: 0 for success
- */
-int fcoe_hostlist_remove(const struct fc_lport *lp)
-{
-	struct fcoe_softc *fc;
-
-	fc = fcoe_hostlist_lookup_softc(fcoe_netdev(lp));
-	BUG_ON(!fc);
-	write_lock_bh(&fcoe_hostlist_lock);
-	list_del(&fc->list);
-	write_unlock_bh(&fcoe_hostlist_lock);
-
-	return 0;
-}
-
-/**
  * fcoe_init() - fcoe module loading initialization
  *
  * Returns 0 on success, negative on failure
@@ -1811,8 +1964,7 @@
 	int rc = 0;
 	struct fcoe_percpu_s *p;
 
-	INIT_LIST_HEAD(&fcoe_hostlist);
-	rwlock_init(&fcoe_hostlist_lock);
+	mutex_lock(&fcoe_config_mutex);
 
 	for_each_possible_cpu(cpu) {
 		p = &per_cpu(fcoe_percpu, cpu);
@@ -1830,15 +1982,18 @@
 	/* Setup link change notification */
 	fcoe_dev_setup();
 
-	fcoe_if_init();
+	rc = fcoe_if_init();
+	if (rc)
+		goto out_free;
 
+	mutex_unlock(&fcoe_config_mutex);
 	return 0;
 
 out_free:
 	for_each_online_cpu(cpu) {
 		fcoe_percpu_thread_destroy(cpu);
 	}
-
+	mutex_unlock(&fcoe_config_mutex);
 	return rc;
 }
 module_init(fcoe_init);
@@ -1851,21 +2006,36 @@
 static void __exit fcoe_exit(void)
 {
 	unsigned int cpu;
-	struct fcoe_softc *fc, *tmp;
+	struct fcoe_interface *fcoe, *tmp;
+	struct fcoe_port *port;
+
+	mutex_lock(&fcoe_config_mutex);
 
 	fcoe_dev_cleanup();
 
 	/* releases the associated fcoe hosts */
-	list_for_each_entry_safe(fc, tmp, &fcoe_hostlist, list)
-		fcoe_if_destroy(fc->real_dev);
+	rtnl_lock();
+	list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
+		list_del(&fcoe->list);
+		port = lport_priv(fcoe->ctlr.lp);
+		fcoe_interface_cleanup(fcoe);
+		schedule_work(&port->destroy_work);
+	}
+	rtnl_unlock();
 
 	unregister_hotcpu_notifier(&fcoe_cpu_notifier);
 
-	for_each_online_cpu(cpu) {
+	for_each_online_cpu(cpu)
 		fcoe_percpu_thread_destroy(cpu);
-	}
 
-	/* detach from scsi transport */
+	mutex_unlock(&fcoe_config_mutex);
+
+	/* flush any asynchronous interface destroys,
+	 * this should happen after the netdev notifier is unregistered */
+	flush_scheduled_work();
+
+	/* detach from scsi transport
+	 * must happen after all destroys are done, therefore after the flush */
 	fcoe_if_exit();
 }
 module_exit(fcoe_exit);
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 0d724fa..ce7f60f 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -37,8 +37,8 @@
 
 #define FCOE_MAX_OUTSTANDING_COMMANDS	1024
 
-#define FCOE_MIN_XID		0x0001	/* the min xid supported by fcoe_sw */
-#define FCOE_MAX_XID		0x07ef	/* the max xid supported by fcoe_sw */
+#define FCOE_MIN_XID		0x0000	/* the min xid supported by fcoe_sw */
+#define FCOE_MAX_XID		0x0FFF	/* the max xid supported by fcoe_sw */
 
 unsigned int fcoe_debug_logging;
 module_param_named(debug_logging, fcoe_debug_logging, int, S_IRUGO|S_IWUSR);
@@ -53,7 +53,7 @@
 		do {							\
 			CMD;						\
 		} while (0);						\
-} while (0);
+} while (0)
 
 #define FCOE_DBG(fmt, args...)						\
 	FCOE_CHECK_LOGGING(FCOE_LOGGING,				\
@@ -61,7 +61,7 @@
 
 #define FCOE_NETDEV_DBG(netdev, fmt, args...)			\
 	FCOE_CHECK_LOGGING(FCOE_NETDEV_LOGGING,			\
-			   printk(KERN_INFO "fcoe: %s" fmt,	\
+			   printk(KERN_INFO "fcoe: %s: " fmt,	\
 				  netdev->name, ##args);)
 
 /*
@@ -75,26 +75,36 @@
 };
 
 /*
- * the fcoe sw transport private data
+ * an FCoE interface, 1:1 with netdev
  */
-struct fcoe_softc {
+struct fcoe_interface {
 	struct list_head list;
-	struct net_device *real_dev;
-	struct net_device *phys_dev;		/* device with ethtool_ops */
+	struct net_device *netdev;
 	struct packet_type  fcoe_packet_type;
 	struct packet_type  fip_packet_type;
+	struct fcoe_ctlr ctlr;
+	struct fc_exch_mgr *oem;		/* offload exchange manager */
+	struct kref kref;
+};
+
+/*
+ * the FCoE private structure that's allocated along with the
+ * Scsi_Host and libfc fc_lport structures
+ */
+struct fcoe_port {
+	struct fcoe_interface *fcoe;
+	struct fc_lport *lport;
 	struct sk_buff_head fcoe_pending_queue;
 	u8	fcoe_pending_queue_active;
 	struct timer_list timer;		/* queue timer */
-	struct fcoe_ctlr ctlr;
+	struct work_struct destroy_work;	/* to prevent rtnl deadlocks */
 };
 
-#define fcoe_from_ctlr(fc) container_of(fc, struct fcoe_softc, ctlr)
+#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr)
 
-static inline struct net_device *fcoe_netdev(
-	const struct fc_lport *lp)
+static inline struct net_device *fcoe_netdev(const struct fc_lport *lp)
 {
-	return ((struct fcoe_softc *)lport_priv(lp))->real_dev;
+	return ((struct fcoe_port *)lport_priv(lp))->fcoe->netdev;
 }
 
 #endif /* _FCOE_H_ */
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index f544340..62a4c20 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -69,7 +69,7 @@
 		do {							\
 			CMD;						\
 		} while (0);						\
-} while (0);
+} while (0)
 
 #define LIBFCOE_DBG(fmt, args...)					\
 	LIBFCOE_CHECK_LOGGING(LIBFCOE_LOGGING,				\
@@ -148,13 +148,17 @@
  */
 void fcoe_ctlr_destroy(struct fcoe_ctlr *fip)
 {
-	flush_work(&fip->recv_work);
+	cancel_work_sync(&fip->recv_work);
+	spin_lock_bh(&fip->fip_recv_list.lock);
+	__skb_queue_purge(&fip->fip_recv_list);
+	spin_unlock_bh(&fip->fip_recv_list.lock);
+
 	spin_lock_bh(&fip->lock);
 	fip->state = FIP_ST_DISABLED;
 	fcoe_ctlr_reset_fcfs(fip);
 	spin_unlock_bh(&fip->lock);
 	del_timer_sync(&fip->timer);
-	flush_work(&fip->link_work);
+	cancel_work_sync(&fip->link_work);
 }
 EXPORT_SYMBOL(fcoe_ctlr_destroy);
 
@@ -413,10 +417,18 @@
 	struct fip_mac_desc *mac;
 	struct fcoe_fcf *fcf;
 	size_t dlen;
+	u16 fip_flags;
 
 	fcf = fip->sel_fcf;
 	if (!fcf)
 		return -ENODEV;
+
+	/* set flags according to both FCF and lport's capability on SPMA */
+	fip_flags = fcf->flags;
+	fip_flags &= fip->spma ? FIP_FL_SPMA | FIP_FL_FPMA : FIP_FL_FPMA;
+	if (!fip_flags)
+		return -ENODEV;
+
 	dlen = sizeof(struct fip_encaps) + skb->len;	/* len before push */
 	cap = (struct fip_encaps_head *)skb_push(skb, sizeof(*cap));
 
@@ -429,9 +441,7 @@
 	cap->fip.fip_op = htons(FIP_OP_LS);
 	cap->fip.fip_subcode = FIP_SC_REQ;
 	cap->fip.fip_dl_len = htons((dlen + sizeof(*mac)) / FIP_BPW);
-	cap->fip.fip_flags = htons(FIP_FL_FPMA);
-	if (fip->spma)
-		cap->fip.fip_flags |= htons(FIP_FL_SPMA);
+	cap->fip.fip_flags = htons(fip_flags);
 
 	cap->encaps.fd_desc.fip_dtype = dtype;
 	cap->encaps.fd_desc.fip_dlen = dlen / FIP_BPW;
@@ -879,7 +889,7 @@
 	stats->RxFrames++;
 	stats->RxWords += skb->len / FIP_BPW;
 
-	fc_exch_recv(lp, lp->emp, fp);
+	fc_exch_recv(lp, fp);
 	return;
 
 len_err:
@@ -1104,7 +1114,6 @@
 	struct fcoe_fcf *sel;
 	struct fcoe_fcf *fcf;
 	unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
-	DECLARE_MAC_BUF(buf);
 	u8 send_ctlr_ka;
 	u8 send_port_ka;
 
@@ -1128,9 +1137,8 @@
 		fcf = sel;		/* the old FCF may have been freed */
 		if (sel) {
 			printk(KERN_INFO "libfcoe: host%d: FIP selected "
-			       "Fibre-Channel Forwarder MAC %s\n",
-			       fip->lp->host->host_no,
-			       print_mac(buf, sel->fcf_mac));
+			       "Fibre-Channel Forwarder MAC %pM\n",
+			       fip->lp->host->host_no, sel->fcf_mac);
 			memcpy(fip->dest_addr, sel->fcf_mac, ETH_ALEN);
 			fip->port_ka_time = jiffies +
 					    msecs_to_jiffies(FIP_VN_KA_PERIOD);
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 07e6eed..50db3e3 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -115,7 +115,7 @@
 		}
 		spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 
-		fc_exch_recv(lp, lp->emp, fp);
+		fc_exch_recv(lp, fp);
 	}
 
 }
diff --git a/drivers/scsi/fnic/fnic_main.c b/drivers/scsi/fnic/fnic_main.c
index 2c266c0..71c7bbe 100644
--- a/drivers/scsi/fnic/fnic_main.c
+++ b/drivers/scsi/fnic/fnic_main.c
@@ -671,14 +671,6 @@
 	lp->link_up = 0;
 	lp->tt = fnic_transport_template;
 
-	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
-				    FCPIO_HOST_EXCH_RANGE_START,
-				    FCPIO_HOST_EXCH_RANGE_END);
-	if (!lp->emp) {
-		err = -ENOMEM;
-		goto err_out_remove_scsi_host;
-	}
-
 	lp->max_retry_count = fnic->config.flogi_retries;
 	lp->max_rport_retry_count = fnic->config.plogi_retries;
 	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
@@ -693,12 +685,18 @@
 	fc_set_wwnn(lp, fnic->config.node_wwn);
 	fc_set_wwpn(lp, fnic->config.port_wwn);
 
-	fc_exch_init(lp);
 	fc_lport_init(lp);
+	fc_exch_init(lp);
 	fc_elsct_init(lp);
 	fc_rport_init(lp);
 	fc_disc_init(lp);
 
+	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
+			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
+		err = -ENOMEM;
+		goto err_out_remove_scsi_host;
+	}
+
 	fc_lport_config(lp);
 
 	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
@@ -738,7 +736,7 @@
 	return 0;
 
 err_out_free_exch_mgr:
-	fc_exch_mgr_free(lp->emp);
+	fc_exch_mgr_free(lp);
 err_out_remove_scsi_host:
 	fc_remove_host(fnic->lport->host);
 	scsi_remove_host(fnic->lport->host);
@@ -827,7 +825,7 @@
 
 	fc_remove_host(fnic->lport->host);
 	scsi_remove_host(fnic->lport->host);
-	fc_exch_mgr_free(fnic->lport->emp);
+	fc_exch_mgr_free(fnic->lport);
 	vnic_dev_notify_unset(fnic->vdev);
 	fnic_free_vnic_resources(fnic);
 	fnic_free_intr(fnic);
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 166d964..bb2c696 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -4217,7 +4217,7 @@
 	if (!vhost->trace)
 		goto free_disc_buffer;
 
-	vhost->tgt_pool = mempool_create_kzalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
+	vhost->tgt_pool = mempool_create_kmalloc_pool(IBMVFC_TGT_MEMPOOL_SZ,
 						      sizeof(struct ibmvfc_target));
 
 	if (!vhost->tgt_pool) {
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4b63dd6..163245a 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1199,7 +1199,7 @@
 
 	struct ata_host ata_host;
 	char ipr_cmd_label[8];
-#define IPR_CMD_LABEL		"ipr_cmnd"
+#define IPR_CMD_LABEL		"ipr_cmd"
 	struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
 	u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
 };
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 518dbd9..2b1b834 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -99,6 +99,27 @@
 	return total_consumed;
 }
 
+/**
+ * iscsi_sw_sk_state_check - check socket state
+ * @sk: socket
+ *
+ * If the socket is in CLOSE or CLOSE_WAIT we should
+ * not close the connection if there is still some
+ * data pending.
+ */
+static inline int iscsi_sw_sk_state_check(struct sock *sk)
+{
+	struct iscsi_conn *conn = (struct iscsi_conn*)sk->sk_user_data;
+
+	if ((sk->sk_state == TCP_CLOSE_WAIT || sk->sk_state == TCP_CLOSE) &&
+	    !atomic_read(&sk->sk_rmem_alloc)) {
+		ISCSI_SW_TCP_DBG(conn, "TCP_CLOSE|TCP_CLOSE_WAIT\n");
+		iscsi_conn_failure(conn, ISCSI_ERR_TCP_CONN_CLOSE);
+		return -ECONNRESET;
+	}
+	return 0;
+}
+
 static void iscsi_sw_tcp_data_ready(struct sock *sk, int flag)
 {
 	struct iscsi_conn *conn = sk->sk_user_data;
@@ -117,6 +138,8 @@
 	rd_desc.count = 1;
 	tcp_read_sock(sk, &rd_desc, iscsi_sw_tcp_recv);
 
+	iscsi_sw_sk_state_check(sk);
+
 	read_unlock(&sk->sk_callback_lock);
 
 	/* If we had to (atomically) map a highmem page,
@@ -137,13 +160,7 @@
 	conn = (struct iscsi_conn*)sk->sk_user_data;
 	session = conn->session;
 
-	if ((sk->sk_state == TCP_CLOSE_WAIT ||
-	     sk->sk_state == TCP_CLOSE) &&
-	    !atomic_read(&sk->sk_rmem_alloc)) {
-		ISCSI_SW_TCP_DBG(conn, "iscsi_tcp_state_change: "
-				 "TCP_CLOSE|TCP_CLOSE_WAIT\n");
-		iscsi_conn_failure(conn, ISCSI_ERR_CONN_FAILED);
-	}
+	iscsi_sw_sk_state_check(sk);
 
 	tcp_conn = conn->dd_data;
 	tcp_sw_conn = tcp_conn->dd_data;
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 6fabf66..c48799e 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -43,47 +43,14 @@
 #define FC_DISC_RETRY_LIMIT	3	/* max retries */
 #define FC_DISC_RETRY_DELAY	500UL	/* (msecs) delay */
 
-#define	FC_DISC_DELAY		3
-
 static void fc_disc_gpn_ft_req(struct fc_disc *);
 static void fc_disc_gpn_ft_resp(struct fc_seq *, struct fc_frame *, void *);
-static int fc_disc_new_target(struct fc_disc *, struct fc_rport *,
-			      struct fc_rport_identifiers *);
-static void fc_disc_del_target(struct fc_disc *, struct fc_rport *);
-static void fc_disc_done(struct fc_disc *);
+static void fc_disc_done(struct fc_disc *, enum fc_disc_event);
 static void fc_disc_timeout(struct work_struct *);
-static void fc_disc_single(struct fc_disc *, struct fc_disc_port *);
+static int fc_disc_single(struct fc_lport *, struct fc_disc_port *);
 static void fc_disc_restart(struct fc_disc *);
 
 /**
- * fc_disc_lookup_rport() - lookup a remote port by port_id
- * @lport: Fibre Channel host port instance
- * @port_id: remote port port_id to match
- */
-struct fc_rport *fc_disc_lookup_rport(const struct fc_lport *lport,
-				      u32 port_id)
-{
-	const struct fc_disc *disc = &lport->disc;
-	struct fc_rport *rport, *found = NULL;
-	struct fc_rport_libfc_priv *rdata;
-	int disc_found = 0;
-
-	list_for_each_entry(rdata, &disc->rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
-		if (rport->port_id == port_id) {
-			disc_found = 1;
-			found = rport;
-			break;
-		}
-	}
-
-	if (!disc_found)
-		found = NULL;
-
-	return found;
-}
-
-/**
  * fc_disc_stop_rports() - delete all the remote ports associated with the lport
  * @disc: The discovery job to stop rports on
  *
@@ -93,70 +60,17 @@
 void fc_disc_stop_rports(struct fc_disc *disc)
 {
 	struct fc_lport *lport;
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata, *next;
+	struct fc_rport_priv *rdata, *next;
 
 	lport = disc->lport;
 
 	mutex_lock(&disc->disc_mutex);
-	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
-		list_del(&rdata->peers);
-		lport->tt.rport_logoff(rport);
-	}
-
-	list_for_each_entry_safe(rdata, next, &disc->rogue_rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
-		lport->tt.rport_logoff(rport);
-	}
-
+	list_for_each_entry_safe(rdata, next, &disc->rports, peers)
+		lport->tt.rport_logoff(rdata);
 	mutex_unlock(&disc->disc_mutex);
 }
 
 /**
- * fc_disc_rport_callback() - Event handler for rport events
- * @lport: The lport which is receiving the event
- * @rport: The rport which the event has occured on
- * @event: The event that occured
- *
- * Locking Note: The rport lock should not be held when calling
- *		 this function.
- */
-static void fc_disc_rport_callback(struct fc_lport *lport,
-				   struct fc_rport *rport,
-				   enum fc_rport_event event)
-{
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_disc *disc = &lport->disc;
-
-	FC_DISC_DBG(disc, "Received a %d event for port (%6x)\n", event,
-		    rport->port_id);
-
-	switch (event) {
-	case RPORT_EV_CREATED:
-		if (disc) {
-			mutex_lock(&disc->disc_mutex);
-			list_add_tail(&rdata->peers, &disc->rports);
-			mutex_unlock(&disc->disc_mutex);
-		}
-		break;
-	case RPORT_EV_LOGO:
-	case RPORT_EV_FAILED:
-	case RPORT_EV_STOP:
-		mutex_lock(&disc->disc_mutex);
-		mutex_lock(&rdata->rp_mutex);
-		if (rdata->trans_state == FC_PORTSTATE_ROGUE)
-			list_del(&rdata->peers);
-		mutex_unlock(&rdata->rp_mutex);
-		mutex_unlock(&disc->disc_mutex);
-		break;
-	default:
-		break;
-	}
-
-}
-
-/**
  * fc_disc_recv_rscn_req() - Handle Registered State Change Notification (RSCN)
  * @sp: Current sequence of the RSCN exchange
  * @fp: RSCN Frame
@@ -169,8 +83,6 @@
 				  struct fc_disc *disc)
 {
 	struct fc_lport *lport;
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata;
 	struct fc_els_rscn *rp;
 	struct fc_els_rscn_page *pp;
 	struct fc_seq_els_data rjt_data;
@@ -224,10 +136,7 @@
 				break;
 			}
 			dp->lp = lport;
-			dp->ids.port_id = ntoh24(pp->rscn_fid);
-			dp->ids.port_name = -1;
-			dp->ids.node_name = -1;
-			dp->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+			dp->port_id = ntoh24(pp->rscn_fid);
 			list_add_tail(&dp->peers, &disc_ports);
 			break;
 		case ELS_ADDR_FMT_AREA:
@@ -240,6 +149,19 @@
 		}
 	}
 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+
+	/*
+	 * If not doing a complete rediscovery, do GPN_ID on
+	 * the individual ports mentioned in the list.
+	 * If any of these get an error, do a full rediscovery.
+	 * In any case, go through the list and free the entries.
+	 */
+	list_for_each_entry_safe(dp, next, &disc_ports, peers) {
+		list_del(&dp->peers);
+		if (!redisc)
+			redisc = fc_disc_single(lport, dp);
+		kfree(dp);
+	}
 	if (redisc) {
 		FC_DISC_DBG(disc, "RSCN received: rediscovering\n");
 		fc_disc_restart(disc);
@@ -247,16 +169,6 @@
 		FC_DISC_DBG(disc, "RSCN received: not rediscovering. "
 			    "redisc %d state %d in_prog %d\n",
 			    redisc, lport->state, disc->pending);
-		list_for_each_entry_safe(dp, next, &disc_ports, peers) {
-			list_del(&dp->peers);
-			rport = lport->tt.rport_lookup(lport, dp->ids.port_id);
-			if (rport) {
-				rdata = rport->dd_data;
-				list_del(&rdata->peers);
-				lport->tt.rport_logoff(rport);
-			}
-			fc_disc_single(disc, dp);
-		}
 	}
 	fc_frame_free(fp);
 	return;
@@ -308,35 +220,34 @@
  */
 static void fc_disc_restart(struct fc_disc *disc)
 {
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata, *next;
-	struct fc_lport *lport = disc->lport;
+	if (!disc->disc_callback)
+		return;
 
 	FC_DISC_DBG(disc, "Restarting discovery\n");
 
-	list_for_each_entry_safe(rdata, next, &disc->rports, peers) {
-		rport = PRIV_TO_RPORT(rdata);
-		list_del(&rdata->peers);
-		lport->tt.rport_logoff(rport);
-	}
-
 	disc->requested = 1;
-	if (!disc->pending)
-		fc_disc_gpn_ft_req(disc);
+	if (disc->pending)
+		return;
+
+	/*
+	 * Advance disc_id.  This is an arbitrary non-zero number that will
+	 * match the value in the fc_rport_priv after discovery for all
+	 * freshly-discovered remote ports.  Avoid wrapping to zero.
+	 */
+	disc->disc_id = (disc->disc_id + 2) | 1;
+	disc->retry_count = 0;
+	fc_disc_gpn_ft_req(disc);
 }
 
 /**
  * fc_disc_start() - Fibre Channel Target discovery
  * @lport: FC local port
- *
- * Returns non-zero if discovery cannot be started.
+ * @disc_callback: function to be called when discovery is complete
  */
 static void fc_disc_start(void (*disc_callback)(struct fc_lport *,
 						enum fc_disc_event),
 			  struct fc_lport *lport)
 {
-	struct fc_rport *rport;
-	struct fc_rport_identifiers ids;
 	struct fc_disc *disc = &lport->disc;
 
 	/*
@@ -345,145 +256,47 @@
 	 * and send the GPN_FT request.
 	 */
 	mutex_lock(&disc->disc_mutex);
-
 	disc->disc_callback = disc_callback;
-
-	/*
-	 * If not ready, or already running discovery, just set request flag.
-	 */
-	disc->requested = 1;
-
-	if (disc->pending) {
-		mutex_unlock(&disc->disc_mutex);
-		return;
-	}
-
-	/*
-	 * Handle point-to-point mode as a simple discovery
-	 * of the remote port. Yucky, yucky, yuck, yuck!
-	 */
-	rport = disc->lport->ptp_rp;
-	if (rport) {
-		ids.port_id = rport->port_id;
-		ids.port_name = rport->port_name;
-		ids.node_name = rport->node_name;
-		ids.roles = FC_RPORT_ROLE_UNKNOWN;
-		get_device(&rport->dev);
-
-		if (!fc_disc_new_target(disc, rport, &ids)) {
-			disc->event = DISC_EV_SUCCESS;
-			fc_disc_done(disc);
-		}
-		put_device(&rport->dev);
-	} else {
-		fc_disc_gpn_ft_req(disc);	/* get ports by FC-4 type */
-	}
-
+	fc_disc_restart(disc);
 	mutex_unlock(&disc->disc_mutex);
 }
 
-static struct fc_rport_operations fc_disc_rport_ops = {
-	.event_callback = fc_disc_rport_callback,
-};
-
-/**
- * fc_disc_new_target() - Handle new target found by discovery
- * @lport: FC local port
- * @rport: The previous FC remote port (NULL if new remote port)
- * @ids: Identifiers for the new FC remote port
- *
- * Locking Note: This function expects that the disc_mutex is locked
- *		 before it is called.
- */
-static int fc_disc_new_target(struct fc_disc *disc,
-			      struct fc_rport *rport,
-			      struct fc_rport_identifiers *ids)
-{
-	struct fc_lport *lport = disc->lport;
-	struct fc_rport_libfc_priv *rdata;
-	int error = 0;
-
-	if (rport && ids->port_name) {
-		if (rport->port_name == -1) {
-			/*
-			 * Set WWN and fall through to notify of create.
-			 */
-			fc_rport_set_name(rport, ids->port_name,
-					  rport->node_name);
-		} else if (rport->port_name != ids->port_name) {
-			/*
-			 * This is a new port with the same FCID as
-			 * a previously-discovered port.  Presumably the old
-			 * port logged out and a new port logged in and was
-			 * assigned the same FCID.  This should be rare.
-			 * Delete the old one and fall thru to re-create.
-			 */
-			fc_disc_del_target(disc, rport);
-			rport = NULL;
-		}
-	}
-	if (((ids->port_name != -1) || (ids->port_id != -1)) &&
-	    ids->port_id != fc_host_port_id(lport->host) &&
-	    ids->port_name != lport->wwpn) {
-		if (!rport) {
-			rport = lport->tt.rport_lookup(lport, ids->port_id);
-			if (!rport) {
-				struct fc_disc_port dp;
-				dp.lp = lport;
-				dp.ids.port_id = ids->port_id;
-				dp.ids.port_name = ids->port_name;
-				dp.ids.node_name = ids->node_name;
-				dp.ids.roles = ids->roles;
-				rport = lport->tt.rport_create(&dp);
-			}
-			if (!rport)
-				error = -ENOMEM;
-		}
-		if (rport) {
-			rdata = rport->dd_data;
-			rdata->ops = &fc_disc_rport_ops;
-			rdata->rp_state = RPORT_ST_INIT;
-			list_add_tail(&rdata->peers, &disc->rogue_rports);
-			lport->tt.rport_login(rport);
-		}
-	}
-	return error;
-}
-
-/**
- * fc_disc_del_target() - Delete a target
- * @disc: FC discovery context
- * @rport: The remote port to be removed
- */
-static void fc_disc_del_target(struct fc_disc *disc, struct fc_rport *rport)
-{
-	struct fc_lport *lport = disc->lport;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	list_del(&rdata->peers);
-	lport->tt.rport_logoff(rport);
-}
-
 /**
  * fc_disc_done() - Discovery has been completed
  * @disc: FC discovery context
+ * @event: discovery completion status
+ *
  * Locking Note: This function expects that the disc mutex is locked before
  * it is called. The discovery callback is then made with the lock released,
  * and the lock is re-taken before returning from this function
  */
-static void fc_disc_done(struct fc_disc *disc)
+static void fc_disc_done(struct fc_disc *disc, enum fc_disc_event event)
 {
 	struct fc_lport *lport = disc->lport;
-	enum fc_disc_event event;
+	struct fc_rport_priv *rdata;
 
 	FC_DISC_DBG(disc, "Discovery complete\n");
 
-	event = disc->event;
-	disc->event = DISC_EV_NONE;
+	disc->pending = 0;
+	if (disc->requested) {
+		fc_disc_restart(disc);
+		return;
+	}
 
-	if (disc->requested)
-		fc_disc_gpn_ft_req(disc);
-	else
-		disc->pending = 0;
+	/*
+	 * Go through all remote ports.  If they were found in the latest
+	 * discovery, reverify or log them in.  Otherwise, log them out.
+	 * Skip ports which were never discovered.  These are the dNS port
+	 * and ports which were created by PLOGI.
+	 */
+	list_for_each_entry(rdata, &disc->rports, peers) {
+		if (!rdata->disc_id)
+			continue;
+		if (rdata->disc_id == disc->disc_id)
+			lport->tt.rport_login(rdata);
+		else
+			lport->tt.rport_logoff(rdata);
+	}
 
 	mutex_unlock(&disc->disc_mutex);
 	disc->disc_callback(lport, event);
@@ -522,11 +335,8 @@
 			}
 			disc->retry_count++;
 			schedule_delayed_work(&disc->disc_work, delay);
-		} else {
-			/* exceeded retries */
-			disc->event = DISC_EV_FAILED;
-			fc_disc_done(disc);
-		}
+		} else
+			fc_disc_done(disc, DISC_EV_FAILED);
 	}
 }
 
@@ -555,7 +365,7 @@
 	if (!fp)
 		goto err;
 
-	if (lport->tt.elsct_send(lport, NULL, fp,
+	if (lport->tt.elsct_send(lport, 0, fp,
 				 FC_NS_GPN_FT,
 				 fc_disc_gpn_ft_resp,
 				 disc, lport->e_d_tov))
@@ -565,10 +375,12 @@
 }
 
 /**
- * fc_disc_gpn_ft_parse() - Parse the list of IDs and names resulting from a request
+ * fc_disc_gpn_ft_parse() - Parse the body of the dNS GPN_FT response.
  * @lport: Fibre Channel host port instance
  * @buf: GPN_FT response buffer
  * @len: size of response buffer
+ *
+ * Goes through the list of IDs and names resulting from a request.
  */
 static int fc_disc_gpn_ft_parse(struct fc_disc *disc, void *buf, size_t len)
 {
@@ -578,11 +390,11 @@
 	size_t plen;
 	size_t tlen;
 	int error = 0;
-	struct fc_disc_port dp;
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata;
+	struct fc_rport_identifiers ids;
+	struct fc_rport_priv *rdata;
 
 	lport = disc->lport;
+	disc->seq_count++;
 
 	/*
 	 * Handle partial name record left over from previous call.
@@ -591,6 +403,7 @@
 	plen = len;
 	np = (struct fc_gpn_ft_resp *)bp;
 	tlen = disc->buf_len;
+	disc->buf_len = 0;
 	if (tlen) {
 		WARN_ON(tlen >= sizeof(*np));
 		plen = sizeof(*np) - tlen;
@@ -621,31 +434,25 @@
 	 * After the first time through the loop, things return to "normal".
 	 */
 	while (plen >= sizeof(*np)) {
-		dp.lp = lport;
-		dp.ids.port_id = ntoh24(np->fp_fid);
-		dp.ids.port_name = ntohll(np->fp_wwpn);
-		dp.ids.node_name = -1;
-		dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
+		ids.port_id = ntoh24(np->fp_fid);
+		ids.port_name = ntohll(np->fp_wwpn);
 
-		if ((dp.ids.port_id != fc_host_port_id(lport->host)) &&
-		    (dp.ids.port_name != lport->wwpn)) {
-			rport = lport->tt.rport_create(&dp);
-			if (rport) {
-				rdata = rport->dd_data;
-				rdata->ops = &fc_disc_rport_ops;
-				rdata->local_port = lport;
-				list_add_tail(&rdata->peers,
-					      &disc->rogue_rports);
-				lport->tt.rport_login(rport);
-			} else
+		if (ids.port_id != fc_host_port_id(lport->host) &&
+		    ids.port_name != lport->wwpn) {
+			rdata = lport->tt.rport_create(lport, ids.port_id);
+			if (rdata) {
+				rdata->ids.port_name = ids.port_name;
+				rdata->disc_id = disc->disc_id;
+			} else {
 				printk(KERN_WARNING "libfc: Failed to allocate "
 				       "memory for the newly discovered port "
-				       "(%6x)\n", dp.ids.port_id);
+				       "(%6x)\n", ids.port_id);
+				error = -ENOMEM;
+			}
 		}
 
 		if (np->fp_flags & FC_NS_FID_LAST) {
-			disc->event = DISC_EV_SUCCESS;
-			fc_disc_done(disc);
+			fc_disc_done(disc, DISC_EV_SUCCESS);
 			len = 0;
 			break;
 		}
@@ -665,8 +472,6 @@
 			memcpy(&disc->partial_buf, np, len);
 		}
 		disc->buf_len = (unsigned char) len;
-	} else {
-		disc->buf_len = 0;
 	}
 	return error;
 }
@@ -683,8 +488,7 @@
 					    struct fc_disc,
 					    disc_work.work);
 	mutex_lock(&disc->disc_mutex);
-	if (disc->requested && !disc->pending)
-		fc_disc_gpn_ft_req(disc);
+	fc_disc_gpn_ft_req(disc);
 	mutex_unlock(&disc->disc_mutex);
 }
 
@@ -703,10 +507,10 @@
 	struct fc_disc *disc = disc_arg;
 	struct fc_ct_hdr *cp;
 	struct fc_frame_header *fh;
+	enum fc_disc_event event = DISC_EV_NONE;
 	unsigned int seq_cnt;
-	void *buf = NULL;
 	unsigned int len;
-	int error;
+	int error = 0;
 
 	mutex_lock(&disc->disc_mutex);
 	FC_DISC_DBG(disc, "Received a GPN_FT response\n");
@@ -721,77 +525,158 @@
 	fh = fc_frame_header_get(fp);
 	len = fr_len(fp) - sizeof(*fh);
 	seq_cnt = ntohs(fh->fh_seq_cnt);
-	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 &&
-	    disc->seq_count == 0) {
+	if (fr_sof(fp) == FC_SOF_I3 && seq_cnt == 0 && disc->seq_count == 0) {
 		cp = fc_frame_payload_get(fp, sizeof(*cp));
 		if (!cp) {
 			FC_DISC_DBG(disc, "GPN_FT response too short, len %d\n",
 				    fr_len(fp));
+			event = DISC_EV_FAILED;
 		} else if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
 
 			/* Accepted, parse the response. */
-			buf = cp + 1;
 			len -= sizeof(*cp);
+			error = fc_disc_gpn_ft_parse(disc, cp + 1, len);
 		} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
 			FC_DISC_DBG(disc, "GPN_FT rejected reason %x exp %x "
 				    "(check zoning)\n", cp->ct_reason,
 				    cp->ct_explan);
-			disc->event = DISC_EV_FAILED;
-			fc_disc_done(disc);
+			event = DISC_EV_FAILED;
+			if (cp->ct_reason == FC_FS_RJT_UNABL &&
+			    cp->ct_explan == FC_FS_EXP_FTNR)
+				event = DISC_EV_SUCCESS;
 		} else {
 			FC_DISC_DBG(disc, "GPN_FT unexpected response code "
 				    "%x\n", ntohs(cp->ct_cmd));
+			event = DISC_EV_FAILED;
 		}
-	} else if (fr_sof(fp) == FC_SOF_N3 &&
-		   seq_cnt == disc->seq_count) {
-		buf = fh + 1;
+	} else if (fr_sof(fp) == FC_SOF_N3 && seq_cnt == disc->seq_count) {
+		error = fc_disc_gpn_ft_parse(disc, fh + 1, len);
 	} else {
 		FC_DISC_DBG(disc, "GPN_FT unexpected frame - out of sequence? "
 			    "seq_cnt %x expected %x sof %x eof %x\n",
 			    seq_cnt, disc->seq_count, fr_sof(fp), fr_eof(fp));
+		event = DISC_EV_FAILED;
 	}
-	if (buf) {
-		error = fc_disc_gpn_ft_parse(disc, buf, len);
-		if (error)
-			fc_disc_error(disc, fp);
-		else
-			disc->seq_count++;
-	}
+	if (error)
+		fc_disc_error(disc, fp);
+	else if (event != DISC_EV_NONE)
+		fc_disc_done(disc, event);
 	fc_frame_free(fp);
-
 	mutex_unlock(&disc->disc_mutex);
 }
 
 /**
+ * fc_disc_gpn_id_resp() - Handle a response frame from Get Port Names (GPN_ID)
+ * @sp: exchange sequence
+ * @fp: response frame
+ * @rdata_arg: remote port private data
+ *
+ * Locking Note: This function is called without disc mutex held.
+ */
+static void fc_disc_gpn_id_resp(struct fc_seq *sp, struct fc_frame *fp,
+				void *rdata_arg)
+{
+	struct fc_rport_priv *rdata = rdata_arg;
+	struct fc_rport_priv *new_rdata;
+	struct fc_lport *lport;
+	struct fc_disc *disc;
+	struct fc_ct_hdr *cp;
+	struct fc_ns_gid_pn *pn;
+	u64 port_name;
+
+	lport = rdata->local_port;
+	disc = &lport->disc;
+
+	mutex_lock(&disc->disc_mutex);
+	if (PTR_ERR(fp) == -FC_EX_CLOSED)
+		goto out;
+	if (IS_ERR(fp))
+		goto redisc;
+
+	cp = fc_frame_payload_get(fp, sizeof(*cp));
+	if (!cp)
+		goto redisc;
+	if (ntohs(cp->ct_cmd) == FC_FS_ACC) {
+		if (fr_len(fp) < sizeof(struct fc_frame_header) +
+		    sizeof(*cp) + sizeof(*pn))
+			goto redisc;
+		pn = (struct fc_ns_gid_pn *)(cp + 1);
+		port_name = get_unaligned_be64(&pn->fn_wwpn);
+		if (rdata->ids.port_name == -1)
+			rdata->ids.port_name = port_name;
+		else if (rdata->ids.port_name != port_name) {
+			FC_DISC_DBG(disc, "GPN_ID accepted.  WWPN changed. "
+				    "Port-id %x wwpn %llx\n",
+				    rdata->ids.port_id, port_name);
+			lport->tt.rport_logoff(rdata);
+
+			new_rdata = lport->tt.rport_create(lport,
+							   rdata->ids.port_id);
+			if (new_rdata) {
+				new_rdata->disc_id = disc->disc_id;
+				lport->tt.rport_login(new_rdata);
+			}
+			goto out;
+		}
+		rdata->disc_id = disc->disc_id;
+		lport->tt.rport_login(rdata);
+	} else if (ntohs(cp->ct_cmd) == FC_FS_RJT) {
+		FC_DISC_DBG(disc, "GPN_ID rejected reason %x exp %x\n",
+			    cp->ct_reason, cp->ct_explan);
+		lport->tt.rport_logoff(rdata);
+	} else {
+		FC_DISC_DBG(disc, "GPN_ID unexpected response code %x\n",
+			    ntohs(cp->ct_cmd));
+redisc:
+		fc_disc_restart(disc);
+	}
+out:
+	mutex_unlock(&disc->disc_mutex);
+	kref_put(&rdata->kref, lport->tt.rport_destroy);
+}
+
+/**
+ * fc_disc_gpn_id_req() - Send Get Port Names by ID (GPN_ID) request
+ * @lport: local port
+ * @rdata: remote port private data
+ *
+ * Locking Note: This function expects that the disc_mutex is locked
+ *		 before it is called.
+ * On failure, an error code is returned.
+ */
+static int fc_disc_gpn_id_req(struct fc_lport *lport,
+			      struct fc_rport_priv *rdata)
+{
+	struct fc_frame *fp;
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_ct_hdr) +
+			    sizeof(struct fc_ns_fid));
+	if (!fp)
+		return -ENOMEM;
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, FC_NS_GPN_ID,
+				 fc_disc_gpn_id_resp, rdata, lport->e_d_tov))
+		return -ENOMEM;
+	kref_get(&rdata->kref);
+	return 0;
+}
+
+/**
  * fc_disc_single() - Discover the directory information for a single target
- * @lport: FC local port
+ * @lport: local port
  * @dp: The port to rediscover
  *
  * Locking Note: This function expects that the disc_mutex is locked
  *		 before it is called.
  */
-static void fc_disc_single(struct fc_disc *disc, struct fc_disc_port *dp)
+static int fc_disc_single(struct fc_lport *lport, struct fc_disc_port *dp)
 {
-	struct fc_lport *lport;
-	struct fc_rport *new_rport;
-	struct fc_rport_libfc_priv *rdata;
+	struct fc_rport_priv *rdata;
 
-	lport = disc->lport;
-
-	if (dp->ids.port_id == fc_host_port_id(lport->host))
-		goto out;
-
-	new_rport = lport->tt.rport_create(dp);
-	if (new_rport) {
-		rdata = new_rport->dd_data;
-		rdata->ops = &fc_disc_rport_ops;
-		kfree(dp);
-		list_add_tail(&rdata->peers, &disc->rogue_rports);
-		lport->tt.rport_login(new_rport);
-	}
-	return;
-out:
-	kfree(dp);
+	rdata = lport->tt.rport_create(lport, dp->port_id);
+	if (!rdata)
+		return -ENOMEM;
+	rdata->disc_id = 0;
+	return fc_disc_gpn_id_req(lport, rdata);
 }
 
 /**
@@ -841,18 +726,12 @@
 	if (!lport->tt.disc_recv_req)
 		lport->tt.disc_recv_req = fc_disc_recv_req;
 
-	if (!lport->tt.rport_lookup)
-		lport->tt.rport_lookup = fc_disc_lookup_rport;
-
 	disc = &lport->disc;
 	INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
 	mutex_init(&disc->disc_mutex);
 	INIT_LIST_HEAD(&disc->rports);
-	INIT_LIST_HEAD(&disc->rogue_rports);
 
 	disc->lport = lport;
-	disc->delay = FC_DISC_DELAY;
-	disc->event = DISC_EV_NONE;
 
 	return 0;
 }
diff --git a/drivers/scsi/libfc/fc_elsct.c b/drivers/scsi/libfc/fc_elsct.c
index 5878b34..5cfa687 100644
--- a/drivers/scsi/libfc/fc_elsct.c
+++ b/drivers/scsi/libfc/fc_elsct.c
@@ -32,7 +32,7 @@
  * fc_elsct_send - sends ELS/CT frame
  */
 static struct fc_seq *fc_elsct_send(struct fc_lport *lport,
-				    struct fc_rport *rport,
+				    u32 did,
 				    struct fc_frame *fp,
 				    unsigned int op,
 				    void (*resp)(struct fc_seq *,
@@ -41,16 +41,17 @@
 				    void *arg, u32 timer_msec)
 {
 	enum fc_rctl r_ctl;
-	u32 did = FC_FID_NONE;
 	enum fc_fh_type fh_type;
 	int rc;
 
 	/* ELS requests */
 	if ((op >= ELS_LS_RJT) && (op <= ELS_AUTH_ELS))
-		rc = fc_els_fill(lport, rport, fp, op, &r_ctl, &did, &fh_type);
-	else
+		rc = fc_els_fill(lport, did, fp, op, &r_ctl, &fh_type);
+	else {
 		/* CT requests */
-		rc = fc_ct_fill(lport, fp, op, &r_ctl, &did, &fh_type);
+		rc = fc_ct_fill(lport, did, fp, op, &r_ctl, &fh_type);
+		did = FC_FID_DIR_SERV;
+	}
 
 	if (rc)
 		return NULL;
@@ -69,3 +70,41 @@
 	return 0;
 }
 EXPORT_SYMBOL(fc_elsct_init);
+
+/**
+ * fc_els_resp_type() - return string describing ELS response for debug.
+ * @fp: frame pointer with possible error code.
+ */
+const char *fc_els_resp_type(struct fc_frame *fp)
+{
+	const char *msg;
+	if (IS_ERR(fp)) {
+		switch (-PTR_ERR(fp)) {
+		case FC_NO_ERR:
+			msg = "response no error";
+			break;
+		case FC_EX_TIMEOUT:
+			msg = "response timeout";
+			break;
+		case FC_EX_CLOSED:
+			msg = "response closed";
+			break;
+		default:
+			msg = "response unknown error";
+			break;
+		}
+	} else {
+		switch (fc_frame_payload_op(fp)) {
+		case ELS_LS_ACC:
+			msg = "accept";
+			break;
+		case ELS_LS_RJT:
+			msg = "reject";
+			break;
+		default:
+			msg = "response unknown ELS";
+			break;
+		}
+	}
+	return msg;
+}
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 145ab9b..c1c1574 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -32,6 +32,9 @@
 #include <scsi/libfc.h>
 #include <scsi/fc_encode.h>
 
+u16	fc_cpu_mask;		/* cpu mask for possible cpus */
+EXPORT_SYMBOL(fc_cpu_mask);
+static u16	fc_cpu_order;	/* 2's power to represent total possible cpus */
 static struct kmem_cache *fc_em_cachep;        /* cache for exchanges */
 
 /*
@@ -48,6 +51,20 @@
  */
 
 /*
+ * Per cpu exchange pool
+ *
+ * This structure manages per cpu exchanges in an array of exchange
+ * pointers. The array is allocated immediately following the struct
+ * fc_exch_pool memory, for the range of exchanges assigned to the pool.
+ */
+struct fc_exch_pool {
+	u16		next_index;	/* next possible free exchange index */
+	u16		total_exches;	/* total allocated exchanges */
+	spinlock_t	lock;		/* exch pool lock */
+	struct list_head	ex_list;	/* allocated exchanges list */
+};
+
+/*
  * Exchange manager.
  *
  * This structure is the center for creating exchanges and sequences.
@@ -55,17 +72,13 @@
  */
 struct fc_exch_mgr {
 	enum fc_class	class;		/* default class for sequences */
-	spinlock_t	em_lock;	/* exchange manager lock,
-					   must be taken before ex_lock */
-	u16		last_xid;	/* last allocated exchange ID */
+	struct kref	kref;		/* exchange mgr reference count */
 	u16		min_xid;	/* min exchange ID */
 	u16		max_xid;	/* max exchange ID */
-	u16		max_read;	/* max exchange ID for read */
-	u16		last_read;	/* last xid allocated for read */
-	u32	total_exches;		/* total allocated exchanges */
 	struct list_head	ex_list;	/* allocated exchanges list */
-	struct fc_lport	*lp;		/* fc device instance */
 	mempool_t	*ep_pool;	/* reserve ep's */
+	u16		pool_max_index;	/* max exch array index in exch pool */
+	struct fc_exch_pool *pool;	/* per cpu exch pool */
 
 	/*
 	 * currently exchange mgr stats are updated but not used.
@@ -80,10 +93,15 @@
 		atomic_t seq_not_found;
 		atomic_t non_bls_resp;
 	} stats;
-	struct fc_exch **exches;	/* for exch pointers indexed by xid */
 };
 #define	fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
 
+struct fc_exch_mgr_anchor {
+	struct list_head ema_list;
+	struct fc_exch_mgr *mp;
+	bool (*match)(struct fc_frame *);
+};
+
 static void fc_exch_rrq(struct fc_exch *);
 static void fc_seq_ls_acc(struct fc_seq *);
 static void fc_seq_ls_rjt(struct fc_seq *, enum fc_els_rjt_reason,
@@ -167,8 +185,8 @@
  * sequence allocation and deallocation must be locked.
  *  - exchange refcnt can be done atomicly without locks.
  *  - sequence allocation must be locked by exch lock.
- *  - If the em_lock and ex_lock must be taken at the same time, then the
- *    em_lock must be taken before the ex_lock.
+ *  - If the EM pool lock and ex_lock must be taken at the same time, then the
+ *    EM pool lock must be taken before the ex_lock.
  */
 
 /*
@@ -268,8 +286,6 @@
 		mp = ep->em;
 		if (ep->destructor)
 			ep->destructor(&ep->seq, ep->arg);
-		if (ep->lp->tt.exch_put)
-			ep->lp->tt.exch_put(ep->lp, mp, ep->xid);
 		WARN_ON(!(ep->esb_stat & ESB_ST_COMPLETE));
 		mempool_free(ep, mp->ep_pool);
 	}
@@ -299,17 +315,31 @@
 	return rc;
 }
 
-static void fc_exch_mgr_delete_ep(struct fc_exch *ep)
+static inline struct fc_exch *fc_exch_ptr_get(struct fc_exch_pool *pool,
+					      u16 index)
 {
-	struct fc_exch_mgr *mp;
+	struct fc_exch **exches = (struct fc_exch **)(pool + 1);
+	return exches[index];
+}
 
-	mp = ep->em;
-	spin_lock_bh(&mp->em_lock);
-	WARN_ON(mp->total_exches <= 0);
-	mp->total_exches--;
-	mp->exches[ep->xid - mp->min_xid] = NULL;
+static inline void fc_exch_ptr_set(struct fc_exch_pool *pool, u16 index,
+				   struct fc_exch *ep)
+{
+	((struct fc_exch **)(pool + 1))[index] = ep;
+}
+
+static void fc_exch_delete(struct fc_exch *ep)
+{
+	struct fc_exch_pool *pool;
+
+	pool = ep->pool;
+	spin_lock_bh(&pool->lock);
+	WARN_ON(pool->total_exches <= 0);
+	pool->total_exches--;
+	fc_exch_ptr_set(pool, (ep->xid - ep->em->min_xid) >> fc_cpu_order,
+			NULL);
 	list_del(&ep->ex_list);
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	fc_exch_release(ep);	/* drop hold for exch in mp */
 }
 
@@ -322,7 +352,7 @@
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
 		return;
 
-	FC_EXCH_DBG(ep, "Exchange timed out, notifying the upper layer\n");
+	FC_EXCH_DBG(ep, "Exchange timer armed\n");
 
 	if (schedule_delayed_work(&ep->timeout_work,
 				  msecs_to_jiffies(timer_msec)))
@@ -408,6 +438,8 @@
 	u32 e_stat;
 	int rc = 1;
 
+	FC_EXCH_DBG(ep, "Exchange timed out\n");
+
 	spin_lock_bh(&ep->ex_lock);
 	if (ep->state & (FC_EX_RST_CLEANUP | FC_EX_DONE))
 		goto unlock;
@@ -427,7 +459,7 @@
 			rc = fc_exch_done_locked(ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
-			fc_exch_mgr_delete_ep(ep);
+			fc_exch_delete(ep);
 		if (resp)
 			resp(sp, ERR_PTR(-FC_EX_TIMEOUT), arg);
 		fc_seq_exch_abort(sp, 2 * ep->r_a_tov);
@@ -460,65 +492,20 @@
 	return sp;
 }
 
-/*
- * fc_em_alloc_xid - returns an xid based on request type
- * @lp : ptr to associated lport
- * @fp : ptr to the assocated frame
+/**
+ * fc_exch_em_alloc() - allocate an exchange from a specified EM.
+ * @lport:	ptr to the local port
+ * @mp:		ptr to the exchange manager
  *
- * check the associated fc_fsp_pkt to get scsi command type and
- * command direction to decide from which range this exch id
- * will be allocated from.
- *
- * Returns : 0 or an valid xid
+ * Returns pointer to allocated fc_exch with exch lock held.
  */
-static u16 fc_em_alloc_xid(struct fc_exch_mgr *mp, const struct fc_frame *fp)
-{
-	u16 xid, min, max;
-	u16 *plast;
-	struct fc_exch *ep = NULL;
-
-	if (mp->max_read) {
-		if (fc_fcp_is_read(fr_fsp(fp))) {
-			min = mp->min_xid;
-			max = mp->max_read;
-			plast = &mp->last_read;
-		} else {
-			min = mp->max_read + 1;
-			max = mp->max_xid;
-			plast = &mp->last_xid;
-		}
-	} else {
-		min = mp->min_xid;
-		max = mp->max_xid;
-		plast = &mp->last_xid;
-	}
-	xid = *plast;
-	do {
-		xid = (xid == max) ? min : xid + 1;
-		ep = mp->exches[xid - mp->min_xid];
-	} while ((ep != NULL) && (xid != *plast));
-
-	if (unlikely(ep))
-		xid = 0;
-	else
-		*plast = xid;
-
-	return xid;
-}
-
-/*
- * fc_exch_alloc - allocate an exchange.
- * @mp : ptr to the exchange manager
- * @xid: input xid
- *
- * if xid is supplied zero then assign next free exchange ID
- * from exchange manager, otherwise use supplied xid.
- * Returns with exch lock held.
- */
-struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
-			      struct fc_frame *fp, u16 xid)
+static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
+					struct fc_exch_mgr *mp)
 {
 	struct fc_exch *ep;
+	unsigned int cpu;
+	u16 index;
+	struct fc_exch_pool *pool;
 
 	/* allocate memory for exchange */
 	ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
@@ -528,16 +515,17 @@
 	}
 	memset(ep, 0, sizeof(*ep));
 
-	spin_lock_bh(&mp->em_lock);
-	/* alloc xid if input xid 0 */
-	if (!xid) {
-		/* alloc a new xid */
-		xid = fc_em_alloc_xid(mp, fp);
-		if (!xid) {
-			printk(KERN_WARNING "libfc: Failed to allocate an exhange\n");
+	cpu = smp_processor_id();
+	pool = per_cpu_ptr(mp->pool, cpu);
+	spin_lock_bh(&pool->lock);
+	index = pool->next_index;
+	/* allocate new exch from pool */
+	while (fc_exch_ptr_get(pool, index)) {
+		index = index == mp->pool_max_index ? 0 : index + 1;
+		if (index == pool->next_index)
 			goto err;
-		}
 	}
+	pool->next_index = index == mp->pool_max_index ? 0 : index + 1;
 
 	fc_exch_hold(ep);	/* hold for exch in mp */
 	spin_lock_init(&ep->ex_lock);
@@ -548,18 +536,19 @@
 	 */
 	spin_lock_bh(&ep->ex_lock);
 
-	mp->exches[xid - mp->min_xid] = ep;
-	list_add_tail(&ep->ex_list, &mp->ex_list);
+	fc_exch_ptr_set(pool, index, ep);
+	list_add_tail(&ep->ex_list, &pool->ex_list);
 	fc_seq_alloc(ep, ep->seq_id++);
-	mp->total_exches++;
-	spin_unlock_bh(&mp->em_lock);
+	pool->total_exches++;
+	spin_unlock_bh(&pool->lock);
 
 	/*
 	 *  update exchange
 	 */
-	ep->oxid = ep->xid = xid;
+	ep->oxid = ep->xid = (index << fc_cpu_order | cpu) + mp->min_xid;
 	ep->em = mp;
-	ep->lp = mp->lp;
+	ep->pool = pool;
+	ep->lp = lport;
 	ep->f_ctl = FC_FC_FIRST_SEQ;	/* next seq is first seq */
 	ep->rxid = FC_XID_UNKNOWN;
 	ep->class = mp->class;
@@ -567,11 +556,36 @@
 out:
 	return ep;
 err:
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
 	atomic_inc(&mp->stats.no_free_exch_xid);
 	mempool_free(ep, mp->ep_pool);
 	return NULL;
 }
+
+/**
+ * fc_exch_alloc() - allocate an exchange.
+ * @lport:	ptr to the local port
+ * @fp:		ptr to the FC frame
+ *
+ * This function walks the list of exchange manager (EM)
+ * anchors to select an EM for the new exchange allocation. An
+ * EM is selected if it has a NULL match function pointer or
+ * its match function returns true.
+ */
+struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_exch_mgr_anchor *ema;
+	struct fc_exch *ep;
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		if (!ema->match || ema->match(fp)) {
+			ep = fc_exch_em_alloc(lport, ema->mp);
+			if (ep)
+				return ep;
+		}
+	}
+	return NULL;
+}
 EXPORT_SYMBOL(fc_exch_alloc);
 
 /*
@@ -579,16 +593,18 @@
  */
 static struct fc_exch *fc_exch_find(struct fc_exch_mgr *mp, u16 xid)
 {
+	struct fc_exch_pool *pool;
 	struct fc_exch *ep = NULL;
 
 	if ((xid >= mp->min_xid) && (xid <= mp->max_xid)) {
-		spin_lock_bh(&mp->em_lock);
-		ep = mp->exches[xid - mp->min_xid];
+		pool = per_cpu_ptr(mp->pool, xid & fc_cpu_mask);
+		spin_lock_bh(&pool->lock);
+		ep = fc_exch_ptr_get(pool, (xid - mp->min_xid) >> fc_cpu_order);
 		if (ep) {
 			fc_exch_hold(ep);
 			WARN_ON(ep->xid != xid);
 		}
-		spin_unlock_bh(&mp->em_lock);
+		spin_unlock_bh(&pool->lock);
 	}
 	return ep;
 }
@@ -602,7 +618,7 @@
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 }
 EXPORT_SYMBOL(fc_exch_done);
 
@@ -610,12 +626,14 @@
  * Allocate a new exchange as responder.
  * Sets the responder ID in the frame header.
  */
-static struct fc_exch *fc_exch_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
+static struct fc_exch *fc_exch_resp(struct fc_lport *lport,
+				    struct fc_exch_mgr *mp,
+				    struct fc_frame *fp)
 {
 	struct fc_exch *ep;
 	struct fc_frame_header *fh;
 
-	ep = mp->lp->tt.exch_get(mp->lp, fp);
+	ep = fc_exch_alloc(lport, fp);
 	if (ep) {
 		ep->class = fc_frame_class(fp);
 
@@ -641,7 +659,7 @@
 			ep->esb_stat &= ~ESB_ST_SEQ_INIT;
 
 		fc_exch_hold(ep);	/* hold for caller */
-		spin_unlock_bh(&ep->ex_lock);	/* lock from exch_get */
+		spin_unlock_bh(&ep->ex_lock);	/* lock from fc_exch_alloc */
 	}
 	return ep;
 }
@@ -651,7 +669,8 @@
  * If fc_pf_rjt_reason is FC_RJT_NONE then this function will have a hold
  * on the ep that should be released by the caller.
  */
-static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_exch_mgr *mp,
+static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
+						 struct fc_exch_mgr *mp,
 						 struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
@@ -705,7 +724,7 @@
 				reject = FC_RJT_RX_ID;
 				goto rel;
 			}
-			ep = fc_exch_resp(mp, fp);
+			ep = fc_exch_resp(lport, mp, fp);
 			if (!ep) {
 				reject = FC_RJT_EXCH_EST;	/* XXX */
 				goto out;
@@ -822,7 +841,6 @@
 	struct fc_exch *ep = fc_seq_exch(sp);
 
 	spin_lock_bh(&ep->ex_lock);
-	WARN_ON((ep->esb_stat & ESB_ST_COMPLETE) != 0);
 	sp = fc_seq_start_next_locked(sp);
 	spin_unlock_bh(&ep->ex_lock);
 
@@ -999,8 +1017,8 @@
 	 */
 	memcpy(fh->fh_s_id, rx_fh->fh_d_id, 3);
 	memcpy(fh->fh_d_id, rx_fh->fh_s_id, 3);
-	fh->fh_ox_id = rx_fh->fh_rx_id;
-	fh->fh_rx_id = rx_fh->fh_ox_id;
+	fh->fh_ox_id = rx_fh->fh_ox_id;
+	fh->fh_rx_id = rx_fh->fh_rx_id;
 	fh->fh_seq_cnt = rx_fh->fh_seq_cnt;
 	fh->fh_r_ctl = FC_RCTL_BA_RJT;
 	fh->fh_type = FC_TYPE_BLS;
@@ -1097,7 +1115,7 @@
 	enum fc_pf_rjt_reason reject;
 
 	fr_seq(fp) = NULL;
-	reject = fc_seq_lookup_recip(mp, fp);
+	reject = fc_seq_lookup_recip(lp, mp, fp);
 	if (reject == FC_RJT_NONE) {
 		sp = fr_seq(fp);	/* sequence will be held */
 		ep = fc_seq_exch(sp);
@@ -1123,7 +1141,7 @@
 			lp->tt.lport_recv(lp, sp, fp);
 		fc_exch_release(ep);	/* release from lookup */
 	} else {
-		FC_EM_DBG(mp, "exch/seq lookup failed: reject %x\n", reject);
+		FC_LPORT_DBG(lp, "exch/seq lookup failed: reject %x\n", reject);
 		fc_frame_free(fp);
 	}
 }
@@ -1193,7 +1211,7 @@
 		WARN_ON(fc_seq_exch(sp) != ep);
 		spin_unlock_bh(&ep->ex_lock);
 		if (!rc)
-			fc_exch_mgr_delete_ep(ep);
+			fc_exch_delete(ep);
 	}
 
 	/*
@@ -1229,13 +1247,12 @@
 	struct fc_seq *sp;
 
 	sp = fc_seq_lookup_orig(mp, fp);	/* doesn't hold sequence */
-	if (!sp) {
+
+	if (!sp)
 		atomic_inc(&mp->stats.xid_not_found);
-		FC_EM_DBG(mp, "seq lookup failed\n");
-	} else {
+	else
 		atomic_inc(&mp->stats.non_bls_resp);
-		FC_EM_DBG(mp, "non-BLS response to sequence");
-	}
+
 	fc_frame_free(fp);
 }
 
@@ -1304,7 +1321,7 @@
 		rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, fp, ex_resp_arg);
@@ -1447,44 +1464,77 @@
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 
 	if (resp)
 		resp(sp, ERR_PTR(-FC_EX_CLOSED), arg);
 }
 
-/*
- * Reset an exchange manager, releasing all sequences and exchanges.
- * If sid is non-zero, reset only exchanges we source from that FID.
- * If did is non-zero, reset only exchanges destined to that FID.
+/**
+ * fc_exch_pool_reset() - Resets a per cpu exchange pool.
+ * @lport:	ptr to the local port
+ * @pool:	ptr to the per cpu exches pool
+ * @sid:	source FC ID
+ * @did:	destination FC ID
+ *
+ * Resets a per cpu exchange pool, releasing all its sequences
+ * and exchanges. If sid is non-zero, then reset only exchanges
+ * we sourced from that FID. If did is non-zero, reset only
+ * exchanges destined to that FID.
  */
-void fc_exch_mgr_reset(struct fc_lport *lp, u32 sid, u32 did)
+static void fc_exch_pool_reset(struct fc_lport *lport,
+			       struct fc_exch_pool *pool,
+			       u32 sid, u32 did)
 {
 	struct fc_exch *ep;
 	struct fc_exch *next;
-	struct fc_exch_mgr *mp = lp->emp;
 
-	spin_lock_bh(&mp->em_lock);
+	spin_lock_bh(&pool->lock);
 restart:
-	list_for_each_entry_safe(ep, next, &mp->ex_list, ex_list) {
-		if ((sid == 0 || sid == ep->sid) &&
+	list_for_each_entry_safe(ep, next, &pool->ex_list, ex_list) {
+		if ((lport == ep->lp) &&
+		    (sid == 0 || sid == ep->sid) &&
 		    (did == 0 || did == ep->did)) {
 			fc_exch_hold(ep);
-			spin_unlock_bh(&mp->em_lock);
+			spin_unlock_bh(&pool->lock);
 
 			fc_exch_reset(ep);
 
 			fc_exch_release(ep);
-			spin_lock_bh(&mp->em_lock);
+			spin_lock_bh(&pool->lock);
 
 			/*
-			 * must restart loop incase while lock was down
-			 * multiple eps were released.
+			 * must restart the loop in case multiple
+			 * eps were released while the lock was down.
 			 */
 			goto restart;
 		}
 	}
-	spin_unlock_bh(&mp->em_lock);
+	spin_unlock_bh(&pool->lock);
+}
+
+/**
+ * fc_exch_mgr_reset() - Resets all EMs of a lport
+ * @lport:	ptr to the local port
+ * @sid:	source FC ID
+ * @did:	destination FC ID
+ *
+ * Reset all EMs of a lport, releasing all their sequences and
+ * exchanges. If sid is non-zero, then reset only exchanges
+ * we sourced from that FID. If did is non-zero, reset only
+ * exchanges destined to that FID.
+ */
+void fc_exch_mgr_reset(struct fc_lport *lport, u32 sid, u32 did)
+{
+	struct fc_exch_mgr_anchor *ema;
+	unsigned int cpu;
+
+	list_for_each_entry(ema, &lport->ema_list, ema_list) {
+		for_each_possible_cpu(cpu)
+			fc_exch_pool_reset(lport,
+					   per_cpu_ptr(ema->mp->pool, cpu),
+					   sid, did);
+	}
 }
 EXPORT_SYMBOL(fc_exch_mgr_reset);
 
@@ -1730,85 +1780,129 @@
 	fc_frame_free(fp);
 }
 
+struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
+					   struct fc_exch_mgr *mp,
+					   bool (*match)(struct fc_frame *))
+{
+	struct fc_exch_mgr_anchor *ema;
+
+	ema = kmalloc(sizeof(*ema), GFP_ATOMIC);
+	if (!ema)
+		return ema;
+
+	ema->mp = mp;
+	ema->match = match;
+	/* add EM anchor to EM anchors list */
+	list_add_tail(&ema->ema_list, &lport->ema_list);
+	kref_get(&mp->kref);
+	return ema;
+}
+EXPORT_SYMBOL(fc_exch_mgr_add);
+
+static void fc_exch_mgr_destroy(struct kref *kref)
+{
+	struct fc_exch_mgr *mp = container_of(kref, struct fc_exch_mgr, kref);
+
+	mempool_destroy(mp->ep_pool);
+	free_percpu(mp->pool);
+	kfree(mp);
+}
+
+void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema)
+{
+	/* remove EM anchor from EM anchors list */
+	list_del(&ema->ema_list);
+	kref_put(&ema->mp->kref, fc_exch_mgr_destroy);
+	kfree(ema);
+}
+EXPORT_SYMBOL(fc_exch_mgr_del);
+
 struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 				      enum fc_class class,
-				      u16 min_xid, u16 max_xid)
+				      u16 min_xid, u16 max_xid,
+				      bool (*match)(struct fc_frame *))
 {
 	struct fc_exch_mgr *mp;
-	size_t len;
+	u16 pool_exch_range;
+	size_t pool_size;
+	unsigned int cpu;
+	struct fc_exch_pool *pool;
 
-	if (max_xid <= min_xid || min_xid == 0 || max_xid == FC_XID_UNKNOWN) {
+	if (max_xid <= min_xid || max_xid == FC_XID_UNKNOWN ||
+	    (min_xid & fc_cpu_mask) != 0) {
 		FC_LPORT_DBG(lp, "Invalid min_xid 0x:%x and max_xid 0x:%x\n",
 			     min_xid, max_xid);
 		return NULL;
 	}
 
 	/*
-	 * Memory need for EM
+	 * allocate memory for EM
 	 */
-#define xid_ok(i, m1, m2) (((i) >= (m1)) && ((i) <= (m2)))
-	len = (max_xid - min_xid + 1) * (sizeof(struct fc_exch *));
-	len += sizeof(struct fc_exch_mgr);
-
-	mp = kzalloc(len, GFP_ATOMIC);
+	mp = kzalloc(sizeof(struct fc_exch_mgr), GFP_ATOMIC);
 	if (!mp)
 		return NULL;
 
 	mp->class = class;
-	mp->total_exches = 0;
-	mp->exches = (struct fc_exch **)(mp + 1);
-	mp->lp = lp;
 	/* adjust em exch xid range for offload */
 	mp->min_xid = min_xid;
 	mp->max_xid = max_xid;
-	mp->last_xid = min_xid - 1;
-	mp->max_read = 0;
-	mp->last_read = 0;
-	if (lp->lro_enabled && xid_ok(lp->lro_xid, min_xid, max_xid)) {
-		mp->max_read = lp->lro_xid;
-		mp->last_read = min_xid - 1;
-		mp->last_xid = mp->max_read;
-	} else {
-		/* disable lro if no xid control over read */
-		lp->lro_enabled = 0;
-	}
-
-	INIT_LIST_HEAD(&mp->ex_list);
-	spin_lock_init(&mp->em_lock);
 
 	mp->ep_pool = mempool_create_slab_pool(2, fc_em_cachep);
 	if (!mp->ep_pool)
 		goto free_mp;
 
+	/*
+	 * Set up the per cpu exch pools with the entire exchange id range
+	 * divided equally across all cpus. The exch pointer array memory
+	 * is allocated per pool for its assigned exch range.
+	 */
+	pool_exch_range = (mp->max_xid - mp->min_xid + 1) / (fc_cpu_mask + 1);
+	mp->pool_max_index = pool_exch_range - 1;
+
+	/*
+	 * Allocate and initialize per cpu exch pool
+	 */
+	pool_size = sizeof(*pool) + pool_exch_range * sizeof(struct fc_exch *);
+	mp->pool = __alloc_percpu(pool_size, __alignof__(struct fc_exch_pool));
+	if (!mp->pool)
+		goto free_mempool;
+	for_each_possible_cpu(cpu) {
+		pool = per_cpu_ptr(mp->pool, cpu);
+		spin_lock_init(&pool->lock);
+		INIT_LIST_HEAD(&pool->ex_list);
+	}
+
+	kref_init(&mp->kref);
+	if (!fc_exch_mgr_add(lp, mp, match)) {
+		free_percpu(mp->pool);
+		goto free_mempool;
+	}
+
+	/*
+	 * The kref_init() above set mp->kref to 1, and the call to
+	 * fc_exch_mgr_add() then incremented it again, so drop that
+	 * extra reference here.
+	 */
+	kref_put(&mp->kref, fc_exch_mgr_destroy);
 	return mp;
 
+free_mempool:
+	mempool_destroy(mp->ep_pool);
 free_mp:
 	kfree(mp);
 	return NULL;
 }
 EXPORT_SYMBOL(fc_exch_mgr_alloc);
 
-void fc_exch_mgr_free(struct fc_exch_mgr *mp)
+void fc_exch_mgr_free(struct fc_lport *lport)
 {
-	WARN_ON(!mp);
-	/*
-	 * The total exch count must be zero
-	 * before freeing exchange manager.
-	 */
-	WARN_ON(mp->total_exches != 0);
-	mempool_destroy(mp->ep_pool);
-	kfree(mp);
+	struct fc_exch_mgr_anchor *ema, *next;
+
+	list_for_each_entry_safe(ema, next, &lport->ema_list, ema_list)
+		fc_exch_mgr_del(ema);
 }
 EXPORT_SYMBOL(fc_exch_mgr_free);
 
-struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp)
-{
-	if (!lp || !lp->emp)
-		return NULL;
-
-	return fc_exch_alloc(lp->emp, fp, 0);
-}
-EXPORT_SYMBOL(fc_exch_get);
 
 struct fc_seq *fc_exch_seq_send(struct fc_lport *lp,
 				struct fc_frame *fp,
@@ -1823,7 +1917,7 @@
 	struct fc_frame_header *fh;
 	int rc = 1;
 
-	ep = lp->tt.exch_get(lp, fp);
+	ep = fc_exch_alloc(lp, fp);
 	if (!ep) {
 		fc_frame_free(fp);
 		return NULL;
@@ -1843,7 +1937,8 @@
 	fc_exch_setup_hdr(ep, fp, ep->f_ctl);
 	sp->cnt++;
 
-	fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
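+	/* set up DDP only for xids within the offload (lro_xid) range */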
+	if (ep->xid <= lp->lro_xid)
+		fc_fcp_ddp_setup(fr_fsp(fp), ep->xid);
 
 	if (unlikely(lp->tt.frame_send(lp, fp)))
 		goto err;
@@ -1860,7 +1955,7 @@
 	rc = fc_exch_done_locked(ep);
 	spin_unlock_bh(&ep->ex_lock);
 	if (!rc)
-		fc_exch_mgr_delete_ep(ep);
+		fc_exch_delete(ep);
 	return NULL;
 }
 EXPORT_SYMBOL(fc_exch_seq_send);
@@ -1868,24 +1963,44 @@
 /*
  * Receive a frame
  */
-void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
-		  struct fc_frame *fp)
+void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp)
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
-	u32 f_ctl;
+	struct fc_exch_mgr_anchor *ema;
+	u32 f_ctl, found = 0;
+	u16 oxid;
 
 	/* lport lock ? */
-	if (!lp || !mp || (lp->state == LPORT_ST_NONE)) {
+	if (!lp || lp->state == LPORT_ST_DISABLED) {
 		FC_LPORT_DBG(lp, "Receiving frames for an lport that "
 			     "has not been initialized correctly\n");
 		fc_frame_free(fp);
 		return;
 	}
 
+	f_ctl = ntoh24(fh->fh_f_ctl);
+	oxid = ntohs(fh->fh_ox_id);
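+	/*
+	 * Responses (FC_FC_EX_CTX set) are matched to the EM anchor whose
+	 * xid range contains the OX_ID; new requests are handled by the
+	 * most recently added EM anchor, the tail of the lport's ema_list.
+	 */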
+	if (f_ctl & FC_FC_EX_CTX) {
+		list_for_each_entry(ema, &lp->ema_list, ema_list) {
+			if ((oxid >= ema->mp->min_xid) &&
+			    (oxid <= ema->mp->max_xid)) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (!found) {
+			FC_LPORT_DBG(lp, "Received response for out "
+				     "of range oxid:%hx\n", oxid);
+			fc_frame_free(fp);
+			return;
+		}
+	} else
+		ema = list_entry(lp->ema_list.prev, typeof(*ema), ema_list);
+
 	/*
 	 * If frame is marked invalid, just drop it.
 	 */
-	f_ctl = ntoh24(fh->fh_f_ctl);
 	switch (fr_eof(fp)) {
 	case FC_EOF_T:
 		if (f_ctl & FC_FC_END_SEQ)
@@ -1893,34 +2008,24 @@
 		/* fall through */
 	case FC_EOF_N:
 		if (fh->fh_type == FC_TYPE_BLS)
-			fc_exch_recv_bls(mp, fp);
+			fc_exch_recv_bls(ema->mp, fp);
 		else if ((f_ctl & (FC_FC_EX_CTX | FC_FC_SEQ_CTX)) ==
 			 FC_FC_EX_CTX)
-			fc_exch_recv_seq_resp(mp, fp);
+			fc_exch_recv_seq_resp(ema->mp, fp);
 		else if (f_ctl & FC_FC_SEQ_CTX)
-			fc_exch_recv_resp(mp, fp);
+			fc_exch_recv_resp(ema->mp, fp);
 		else
-			fc_exch_recv_req(lp, mp, fp);
+			fc_exch_recv_req(lp, ema->mp, fp);
 		break;
 	default:
-		FC_EM_DBG(mp, "dropping invalid frame (eof %x)", fr_eof(fp));
+		FC_LPORT_DBG(lp, "dropping invalid frame (eof %x)", fr_eof(fp));
 		fc_frame_free(fp);
-		break;
 	}
 }
 EXPORT_SYMBOL(fc_exch_recv);
 
 int fc_exch_init(struct fc_lport *lp)
 {
-	if (!lp->tt.exch_get) {
-		/*
-		 *  exch_put() should be NULL if
-		 *  exch_get() is NULL
-		 */
-		WARN_ON(lp->tt.exch_put);
-		lp->tt.exch_get = fc_exch_get;
-	}
-
 	if (!lp->tt.seq_start_next)
 		lp->tt.seq_start_next = fc_seq_start_next;
 
@@ -1942,6 +2047,28 @@
 	if (!lp->tt.seq_exch_abort)
 		lp->tt.seq_exch_abort = fc_seq_exch_abort;
 
+	/*
+	 * Initialize fc_cpu_mask and fc_cpu_order.  fc_cpu_mask is
+	 * nr_cpu_ids rounded up to the next power of 2, minus one, and
+	 * the order of that power of 2 is stored in fc_cpu_order; both
+	 * are needed later to map an exchange id to an exchange array
+	 * index in the per cpu exch pool.
+	 *
+	 * The round up is required so that fc_cpu_mask lines up with
+	 * the exchange id's lower bits, ensuring that all incoming
+	 * frames of an exchange are delivered to the same cpu on which
+	 * the exchange originated, by a simple bitwise AND of
+	 * fc_cpu_mask and the exchange id.
+	 */
+	fc_cpu_mask = 1;
+	fc_cpu_order = 0;
+	while (fc_cpu_mask < nr_cpu_ids) {
+		fc_cpu_mask <<= 1;
+		fc_cpu_order++;
+	}
+	fc_cpu_mask--;
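+	/*
+	 * Example: nr_cpu_ids = 6 rounds up to 8, giving fc_cpu_mask = 0x7
+	 * and fc_cpu_order = 3.
+	 */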
+
 	return 0;
 }
 EXPORT_SYMBOL(fc_exch_init);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index e303e0d..59a4408 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -507,33 +507,6 @@
 	f_ctl = FC_FC_REL_OFF;
 	WARN_ON(!seq);
 
-	/*
-	 * If a get_page()/put_page() will fail, don't use sg lists
-	 * in the fc_frame structure.
-	 *
-	 * The put_page() may be long after the I/O has completed
-	 * in the case of FCoE, since the network driver does it
-	 * via free_skb().  See the test in free_pages_check().
-	 *
-	 * Test this case with 'dd </dev/zero >/dev/st0 bs=64k'.
-	 */
-	if (using_sg) {
-		for (sg = scsi_sglist(sc); sg; sg = sg_next(sg)) {
-			if (page_count(sg_page(sg)) == 0 ||
-			    (sg_page(sg)->flags & (1 << PG_lru |
-						   1 << PG_private |
-						   1 << PG_locked |
-						   1 << PG_active |
-						   1 << PG_slab |
-						   1 << PG_swapcache |
-						   1 << PG_writeback |
-						   1 << PG_reserved |
-						   1 << PG_buddy))) {
-				using_sg = 0;
-				break;
-			}
-		}
-	}
 	sg = scsi_sglist(sc);
 
 	while (remaining > 0 && sg) {
@@ -569,8 +542,6 @@
 		}
 		sg_bytes = min(tlen, sg->length - offset);
 		if (using_sg) {
-			WARN_ON(skb_shinfo(fp_skb(fp))->nr_frags >
-				FC_FRAME_SG_LEN);
 			get_page(sg_page(sg));
 			skb_fill_page_desc(fp_skb(fp),
 					   skb_shinfo(fp_skb(fp))->nr_frags,
@@ -1337,7 +1308,7 @@
 	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REQ, rport->port_id,
 		       fc_host_port_id(rp->local_port->host), FC_TYPE_ELS,
 		       FC_FC_FIRST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT, 0);
-	if (lp->tt.elsct_send(lp, rport, fp, ELS_REC, fc_fcp_rec_resp,
+	if (lp->tt.elsct_send(lp, rport->port_id, fp, ELS_REC, fc_fcp_rec_resp,
 			      fsp, jiffies_to_msecs(FC_SCSI_REC_TOV))) {
 		fc_fcp_pkt_hold(fsp);		/* hold while REC outstanding */
 		return;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index 745fa55..bd2f771 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -113,7 +113,7 @@
 static void fc_lport_enter_logo(struct fc_lport *);
 
 static const char *fc_lport_state_names[] = {
-	[LPORT_ST_NONE] =     "none",
+	[LPORT_ST_DISABLED] = "disabled",
 	[LPORT_ST_FLOGI] =    "FLOGI",
 	[LPORT_ST_DNS] =      "dNS",
 	[LPORT_ST_RPN_ID] =   "RPN_ID",
@@ -133,57 +133,44 @@
 /**
  * fc_lport_rport_callback() - Event handler for rport events
  * @lport: The lport which is receiving the event
- * @rport: The rport which the event has occured on
+ * @rdata: private remote port data
  * @event: The event that occurred
  *
  * Locking Note: The rport lock should not be held when calling
  *		 this function.
  */
 static void fc_lport_rport_callback(struct fc_lport *lport,
-				    struct fc_rport *rport,
+				    struct fc_rport_priv *rdata,
 				    enum fc_rport_event event)
 {
 	FC_LPORT_DBG(lport, "Received a %d event for port (%6x)\n", event,
-		     rport->port_id);
+		     rdata->ids.port_id);
 
+	mutex_lock(&lport->lp_mutex);
 	switch (event) {
-	case RPORT_EV_CREATED:
-		if (rport->port_id == FC_FID_DIR_SERV) {
-			mutex_lock(&lport->lp_mutex);
-			if (lport->state == LPORT_ST_DNS) {
-				lport->dns_rp = rport;
-				fc_lport_enter_rpn_id(lport);
-			} else {
-				FC_LPORT_DBG(lport, "Received an CREATED event "
-					     "on port (%6x) for the directory "
-					     "server, but the lport is not "
-					     "in the DNS state, it's in the "
-					     "%d state", rport->port_id,
-					     lport->state);
-				lport->tt.rport_logoff(rport);
-			}
-			mutex_unlock(&lport->lp_mutex);
-		} else
-			FC_LPORT_DBG(lport, "Received an event for port (%6x) "
-				     "which is not the directory server\n",
-				     rport->port_id);
+	case RPORT_EV_READY:
+		if (lport->state == LPORT_ST_DNS) {
+			lport->dns_rp = rdata;
+			fc_lport_enter_rpn_id(lport);
+		} else {
+			FC_LPORT_DBG(lport, "Received a READY event "
+				     "on port (%6x) for the directory "
+				     "server, but the lport is not "
+				     "in the DNS state, it's in the "
+				     "%d state", rdata->ids.port_id,
+				     lport->state);
+			lport->tt.rport_logoff(rdata);
+		}
 		break;
 	case RPORT_EV_LOGO:
 	case RPORT_EV_FAILED:
 	case RPORT_EV_STOP:
-		if (rport->port_id == FC_FID_DIR_SERV) {
-			mutex_lock(&lport->lp_mutex);
-			lport->dns_rp = NULL;
-			mutex_unlock(&lport->lp_mutex);
-
-		} else
-			FC_LPORT_DBG(lport, "Received an event for port (%6x) "
-				     "which is not the directory server\n",
-				     rport->port_id);
+		lport->dns_rp = NULL;
 		break;
 	case RPORT_EV_NONE:
 		break;
 	}
+	mutex_unlock(&lport->lp_mutex);
 }
 
 /**
@@ -211,20 +198,13 @@
 			       u32 remote_fid, u64 remote_wwpn,
 			       u64 remote_wwnn)
 {
-	struct fc_disc_port dp;
-
-	dp.lp = lport;
-	dp.ids.port_id = remote_fid;
-	dp.ids.port_name = remote_wwpn;
-	dp.ids.node_name = remote_wwnn;
-	dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
-
-	if (lport->ptp_rp) {
+	mutex_lock(&lport->disc.disc_mutex);
+	if (lport->ptp_rp)
 		lport->tt.rport_logoff(lport->ptp_rp);
-		lport->ptp_rp = NULL;
-	}
-
-	lport->ptp_rp = lport->tt.rport_create(&dp);
+	lport->ptp_rp = lport->tt.rport_create(lport, remote_fid);
+	lport->ptp_rp->ids.port_name = remote_wwpn;
+	lport->ptp_rp->ids.node_name = remote_wwnn;
+	mutex_unlock(&lport->disc.disc_mutex);
 
 	lport->tt.rport_login(lport->ptp_rp);
 
@@ -472,56 +452,6 @@
 }
 
 /**
- * fc_lport_recv_adisc_req() - Handle received Address Discovery Request
- * @lport: Fibre Channel local port recieving the ADISC
- * @sp: current sequence in the ADISC exchange
- * @fp: ADISC request frame
- *
- * Locking Note: The lport lock is expected to be held before calling
- * this function.
- */
-static void fc_lport_recv_adisc_req(struct fc_seq *sp, struct fc_frame *in_fp,
-				    struct fc_lport *lport)
-{
-	struct fc_frame *fp;
-	struct fc_exch *ep = fc_seq_exch(sp);
-	struct fc_els_adisc *req, *rp;
-	struct fc_seq_els_data rjt_data;
-	size_t len;
-	u32 f_ctl;
-
-	FC_LPORT_DBG(lport, "Received ADISC request while in state %s\n",
-		     fc_lport_state(lport));
-
-	req = fc_frame_payload_get(in_fp, sizeof(*req));
-	if (!req) {
-		rjt_data.fp = NULL;
-		rjt_data.reason = ELS_RJT_LOGIC;
-		rjt_data.explan = ELS_EXPL_NONE;
-		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
-	} else {
-		len = sizeof(*rp);
-		fp = fc_frame_alloc(lport, len);
-		if (fp) {
-			rp = fc_frame_payload_get(fp, len);
-			memset(rp, 0, len);
-			rp->adisc_cmd = ELS_LS_ACC;
-			rp->adisc_wwpn = htonll(lport->wwpn);
-			rp->adisc_wwnn = htonll(lport->wwnn);
-			hton24(rp->adisc_port_id,
-			       fc_host_port_id(lport->host));
-			sp = lport->tt.seq_start_next(sp);
-			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
-			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
-			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
-				       FC_TYPE_ELS, f_ctl, 0);
-			lport->tt.seq_send(lport, sp, fp);
-		}
-	}
-	fc_frame_free(in_fp);
-}
-
-/**
  * fc_lport_recv_logo_req() - Handle received fabric LOGO request
  * @lport: Fibre Channel local port receiving the LOGO
  * @sp: current sequence in the LOGO exchange
@@ -550,7 +480,7 @@
 	int rc = -1;
 
 	mutex_lock(&lport->lp_mutex);
-	if (lport->state == LPORT_ST_NONE) {
+	if (lport->state == LPORT_ST_DISABLED) {
 		fc_lport_enter_reset(lport);
 		rc = 0;
 	}
@@ -637,12 +567,13 @@
 int fc_lport_destroy(struct fc_lport *lport)
 {
 	mutex_lock(&lport->lp_mutex);
-	lport->state = LPORT_ST_NONE;
+	lport->state = LPORT_ST_DISABLED;
 	lport->link_up = 0;
 	lport->tt.frame_send = fc_frame_drop;
 	mutex_unlock(&lport->lp_mutex);
 
 	lport->tt.fcp_abort_io(lport);
+	lport->tt.disc_stop_final(lport);
 	lport->tt.exch_mgr_reset(lport, 0, 0);
 	return 0;
 }
@@ -722,7 +653,8 @@
 
 	fc_lport_state_enter(lport, LPORT_ST_READY);
 
-	lport->tt.disc_start(fc_lport_disc_callback, lport);
+	if (!lport->ptp_rp)
+		lport->tt.disc_start(fc_lport_disc_callback, lport);
 }
 
 /**
@@ -808,8 +740,6 @@
 	fc_lport_ptp_setup(lport, remote_fid, remote_wwpn,
 			   get_unaligned_be64(&flp->fl_wwnn));
 
-	lport->tt.disc_start(fc_lport_disc_callback, lport);
-
 out:
 	sp = fr_seq(rx_fp);
 	fc_frame_free(rx_fp);
@@ -832,10 +762,6 @@
 {
 	struct fc_frame_header *fh = fc_frame_header_get(fp);
 	void (*recv) (struct fc_seq *, struct fc_frame *, struct fc_lport *);
-	struct fc_rport *rport;
-	u32 s_id;
-	u32 d_id;
-	struct fc_seq_els_data rjt_data;
 
 	mutex_lock(&lport->lp_mutex);
 
@@ -844,11 +770,14 @@
 	 * RSCN here.  These don't require a session.
 	 * Even if we had a session, it might not be ready.
 	 */
-	if (fh->fh_type == FC_TYPE_ELS && fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
+	if (!lport->link_up)
+		fc_frame_free(fp);
+	else if (fh->fh_type == FC_TYPE_ELS &&
+		 fh->fh_r_ctl == FC_RCTL_ELS_REQ) {
 		/*
 		 * Check opcode.
 		 */
-		recv = NULL;
+		recv = lport->tt.rport_recv_req;
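+		/*
+		 * ELS opcodes recognized below are handled by the lport
+		 * itself; any other ELS request is passed on to the
+		 * rport layer via rport_recv_req.
+		 */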
 		switch (fc_frame_payload_op(fp)) {
 		case ELS_FLOGI:
 			recv = fc_lport_recv_flogi_req;
@@ -870,34 +799,9 @@
 		case ELS_RNID:
 			recv = fc_lport_recv_rnid_req;
 			break;
-		case ELS_ADISC:
-			recv = fc_lport_recv_adisc_req;
-			break;
 		}
 
-		if (recv)
-			recv(sp, fp, lport);
-		else {
-			/*
-			 * Find session.
-			 * If this is a new incoming PLOGI, we won't find it.
-			 */
-			s_id = ntoh24(fh->fh_s_id);
-			d_id = ntoh24(fh->fh_d_id);
-
-			rport = lport->tt.rport_lookup(lport, s_id);
-			if (rport)
-				lport->tt.rport_recv_req(sp, fp, rport);
-			else {
-				rjt_data.fp = NULL;
-				rjt_data.reason = ELS_RJT_UNAB;
-				rjt_data.explan = ELS_EXPL_NONE;
-				lport->tt.seq_els_rsp_send(sp,
-							   ELS_LS_RJT,
-							   &rjt_data);
-				fc_frame_free(fp);
-			}
-		}
+		recv(sp, fp, lport);
 	} else {
 		FC_LPORT_DBG(lport, "dropping invalid frame (eof %x)\n",
 			     fr_eof(fp));
@@ -930,7 +834,28 @@
 EXPORT_SYMBOL(fc_lport_reset);
 
 /**
- * fc_rport_enter_reset() - Reset the local port
+ * fc_lport_reset_locked() - Reset the local port
+ * @lport: Fibre Channel local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_reset_locked(struct fc_lport *lport)
+{
+	if (lport->dns_rp)
+		lport->tt.rport_logoff(lport->dns_rp);
+
+	lport->ptp_rp = NULL;
+
+	lport->tt.disc_stop(lport);
+
+	lport->tt.exch_mgr_reset(lport, 0, 0);
+	fc_host_fabric_name(lport->host) = 0;
+	fc_host_port_id(lport->host) = 0;
+}
+
+/**
+ * fc_lport_enter_reset() - Reset the local port
  * @lport: Fibre Channel local port to be reset
  *
  * Locking Note: The lport lock is expected to be held before calling
@@ -942,26 +867,28 @@
 		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_RESET);
-
-	if (lport->dns_rp)
-		lport->tt.rport_logoff(lport->dns_rp);
-
-	if (lport->ptp_rp) {
-		lport->tt.rport_logoff(lport->ptp_rp);
-		lport->ptp_rp = NULL;
-	}
-
-	lport->tt.disc_stop(lport);
-
-	lport->tt.exch_mgr_reset(lport, 0, 0);
-	fc_host_fabric_name(lport->host) = 0;
-	fc_host_port_id(lport->host) = 0;
-
+	fc_lport_reset_locked(lport);
 	if (lport->link_up)
 		fc_lport_enter_flogi(lport);
 }
 
 /**
+ * fc_lport_enter_disabled() - disable the local port
+ * @lport: Fibre Channel local port to be reset
+ *
+ * Locking Note: The lport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_lport_enter_disabled(struct fc_lport *lport)
+{
+	FC_LPORT_DBG(lport, "Entered disabled state from %s state\n",
+		     fc_lport_state(lport));
+
+	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
+	fc_lport_reset_locked(lport);
+}
+
+/**
  * fc_lport_error() - Handler for any errors
  * @lport: The fc_lport object
  * @fp: The frame pointer
@@ -992,7 +919,7 @@
 			schedule_delayed_work(&lport->retry_work, delay);
 		} else {
 			switch (lport->state) {
-			case LPORT_ST_NONE:
+			case LPORT_ST_DISABLED:
 			case LPORT_ST_READY:
 			case LPORT_ST_RESET:
 			case LPORT_ST_RPN_ID:
@@ -1026,13 +953,13 @@
 	struct fc_frame_header *fh;
 	struct fc_ct_hdr *ct;
 
+	FC_LPORT_DBG(lport, "Received a RFT_ID %s\n", fc_els_resp_type(fp));
+
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_LPORT_DBG(lport, "Received a RFT_ID response\n");
-
 	if (lport->state != LPORT_ST_RFT_ID) {
 		FC_LPORT_DBG(lport, "Received a RFT_ID response, but in state "
 			     "%s\n", fc_lport_state(lport));
@@ -1080,13 +1007,13 @@
 	struct fc_frame_header *fh;
 	struct fc_ct_hdr *ct;
 
+	FC_LPORT_DBG(lport, "Received a RPN_ID %s\n", fc_els_resp_type(fp));
+
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_LPORT_DBG(lport, "Received a RPN_ID response\n");
-
 	if (lport->state != LPORT_ST_RPN_ID) {
 		FC_LPORT_DBG(lport, "Received a RPN_ID response, but in state "
 			     "%s\n", fc_lport_state(lport));
@@ -1132,13 +1059,13 @@
 	struct fc_lport *lport = lp_arg;
 	u8 op;
 
+	FC_LPORT_DBG(lport, "Received a SCR %s\n", fc_els_resp_type(fp));
+
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_LPORT_DBG(lport, "Received a SCR response\n");
-
 	if (lport->state != LPORT_ST_SCR) {
 		FC_LPORT_DBG(lport, "Received a SCR response, but in state "
 			     "%s\n", fc_lport_state(lport));
@@ -1186,7 +1113,7 @@
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_SCR,
+	if (!lport->tt.elsct_send(lport, FC_FID_FCTRL, fp, ELS_SCR,
 				  fc_lport_scr_resp, lport, lport->e_d_tov))
 		fc_lport_error(lport, fp);
 }
@@ -1227,7 +1154,7 @@
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RFT_ID,
+	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RFT_ID,
 				  fc_lport_rft_id_resp,
 				  lport, lport->e_d_tov))
 		fc_lport_error(lport, fp);
@@ -1256,7 +1183,7 @@
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, NULL, fp, FC_NS_RPN_ID,
+	if (!lport->tt.elsct_send(lport, FC_FID_DIR_SERV, fp, FC_NS_RPN_ID,
 				  fc_lport_rpn_id_resp,
 				  lport, lport->e_d_tov))
 		fc_lport_error(lport, fp);
@@ -1275,28 +1202,21 @@
  */
 static void fc_lport_enter_dns(struct fc_lport *lport)
 {
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata;
-	struct fc_disc_port dp;
-
-	dp.ids.port_id = FC_FID_DIR_SERV;
-	dp.ids.port_name = -1;
-	dp.ids.node_name = -1;
-	dp.ids.roles = FC_RPORT_ROLE_UNKNOWN;
-	dp.lp = lport;
+	struct fc_rport_priv *rdata;
 
 	FC_LPORT_DBG(lport, "Entered DNS state from %s state\n",
 		     fc_lport_state(lport));
 
 	fc_lport_state_enter(lport, LPORT_ST_DNS);
 
-	rport = lport->tt.rport_create(&dp);
-	if (!rport)
+	mutex_lock(&lport->disc.disc_mutex);
+	rdata = lport->tt.rport_create(lport, FC_FID_DIR_SERV);
+	mutex_unlock(&lport->disc.disc_mutex);
+	if (!rdata)
 		goto err;
 
-	rdata = rport->dd_data;
 	rdata->ops = &fc_lport_rport_ops;
-	lport->tt.rport_login(rport);
+	lport->tt.rport_login(rdata);
 	return;
 
 err:
@@ -1316,7 +1236,7 @@
 	mutex_lock(&lport->lp_mutex);
 
 	switch (lport->state) {
-	case LPORT_ST_NONE:
+	case LPORT_ST_DISABLED:
 	case LPORT_ST_READY:
 	case LPORT_ST_RESET:
 		WARN_ON(1);
@@ -1360,13 +1280,13 @@
 	struct fc_lport *lport = lp_arg;
 	u8 op;
 
+	FC_LPORT_DBG(lport, "Received a LOGO %s\n", fc_els_resp_type(fp));
+
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_LPORT_DBG(lport, "Received a LOGO response\n");
-
 	if (lport->state != LPORT_ST_LOGO) {
 		FC_LPORT_DBG(lport, "Received a LOGO response, but in state "
 			     "%s\n", fc_lport_state(lport));
@@ -1382,7 +1302,7 @@
 
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC)
-		fc_lport_enter_reset(lport);
+		fc_lport_enter_disabled(lport);
 	else
 		fc_lport_error(lport, fp);
 
@@ -1415,8 +1335,8 @@
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_LOGO, fc_lport_logo_resp,
-				  lport, lport->e_d_tov))
+	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_LOGO,
+				  fc_lport_logo_resp, lport, lport->e_d_tov))
 		fc_lport_error(lport, fp);
 }
 
@@ -1442,13 +1362,13 @@
 	unsigned int e_d_tov;
 	u16 mfs;
 
+	FC_LPORT_DBG(lport, "Received a FLOGI %s\n", fc_els_resp_type(fp));
+
 	if (fp == ERR_PTR(-FC_EX_CLOSED))
 		return;
 
 	mutex_lock(&lport->lp_mutex);
 
-	FC_LPORT_DBG(lport, "Received a FLOGI response\n");
-
 	if (lport->state != LPORT_ST_FLOGI) {
 		FC_LPORT_DBG(lport, "Received a FLOGI response, but in state "
 			     "%s\n", fc_lport_state(lport));
@@ -1501,14 +1421,6 @@
 				fc_lport_enter_dns(lport);
 			}
 		}
-
-		if (flp) {
-			csp_flags = ntohs(flp->fl_csp.sp_features);
-			if ((csp_flags & FC_SP_FT_FPORT) == 0) {
-				lport->tt.disc_start(fc_lport_disc_callback,
-						     lport);
-			}
-		}
 	} else {
 		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
 	}
@@ -1539,7 +1451,7 @@
 	if (!fp)
 		return fc_lport_error(lport, fp);
 
-	if (!lport->tt.elsct_send(lport, NULL, fp, ELS_FLOGI,
+	if (!lport->tt.elsct_send(lport, FC_FID_FLOGI, fp, ELS_FLOGI,
 				  fc_lport_flogi_resp, lport, lport->e_d_tov))
 		fc_lport_error(lport, fp);
 }
@@ -1550,7 +1462,7 @@
 	INIT_DELAYED_WORK(&lport->retry_work, fc_lport_timeout);
 	mutex_init(&lport->lp_mutex);
 
-	fc_lport_state_enter(lport, LPORT_ST_NONE);
+	fc_lport_state_enter(lport, LPORT_ST_DISABLED);
 
 	fc_lport_add_fc4_type(lport, FC_TYPE_FCP);
 	fc_lport_add_fc4_type(lport, FC_TYPE_CT);
@@ -1588,6 +1500,7 @@
 	if (lport->link_supported_speeds & FC_PORTSPEED_10GBIT)
 		fc_host_supported_speeds(lport->host) |= FC_PORTSPEED_10GBIT;
 
+	INIT_LIST_HEAD(&lport->ema_list);
 	return 0;
 }
 EXPORT_SYMBOL(fc_lport_init);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 7162385f..03ea674 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -57,94 +57,114 @@
 
 struct workqueue_struct *rport_event_queue;
 
-static void fc_rport_enter_plogi(struct fc_rport *);
-static void fc_rport_enter_prli(struct fc_rport *);
-static void fc_rport_enter_rtv(struct fc_rport *);
-static void fc_rport_enter_ready(struct fc_rport *);
-static void fc_rport_enter_logo(struct fc_rport *);
+static void fc_rport_enter_plogi(struct fc_rport_priv *);
+static void fc_rport_enter_prli(struct fc_rport_priv *);
+static void fc_rport_enter_rtv(struct fc_rport_priv *);
+static void fc_rport_enter_ready(struct fc_rport_priv *);
+static void fc_rport_enter_logo(struct fc_rport_priv *);
+static void fc_rport_enter_adisc(struct fc_rport_priv *);
 
-static void fc_rport_recv_plogi_req(struct fc_rport *,
+static void fc_rport_recv_plogi_req(struct fc_lport *,
 				    struct fc_seq *, struct fc_frame *);
-static void fc_rport_recv_prli_req(struct fc_rport *,
+static void fc_rport_recv_prli_req(struct fc_rport_priv *,
 				   struct fc_seq *, struct fc_frame *);
-static void fc_rport_recv_prlo_req(struct fc_rport *,
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *,
 				   struct fc_seq *, struct fc_frame *);
-static void fc_rport_recv_logo_req(struct fc_rport *,
+static void fc_rport_recv_logo_req(struct fc_lport *,
 				   struct fc_seq *, struct fc_frame *);
 static void fc_rport_timeout(struct work_struct *);
-static void fc_rport_error(struct fc_rport *, struct fc_frame *);
-static void fc_rport_error_retry(struct fc_rport *, struct fc_frame *);
+static void fc_rport_error(struct fc_rport_priv *, struct fc_frame *);
+static void fc_rport_error_retry(struct fc_rport_priv *, struct fc_frame *);
 static void fc_rport_work(struct work_struct *);
 
 static const char *fc_rport_state_names[] = {
-	[RPORT_ST_NONE] = "None",
 	[RPORT_ST_INIT] = "Init",
 	[RPORT_ST_PLOGI] = "PLOGI",
 	[RPORT_ST_PRLI] = "PRLI",
 	[RPORT_ST_RTV] = "RTV",
 	[RPORT_ST_READY] = "Ready",
 	[RPORT_ST_LOGO] = "LOGO",
+	[RPORT_ST_ADISC] = "ADISC",
+	[RPORT_ST_DELETE] = "Delete",
 };
 
-static void fc_rport_rogue_destroy(struct device *dev)
+/**
+ * fc_rport_lookup() - lookup a remote port by port_id
+ * @lport: Fibre Channel host port instance
+ * @port_id: remote port port_id to match
+ */
+static struct fc_rport_priv *fc_rport_lookup(const struct fc_lport *lport,
+					     u32 port_id)
 {
-	struct fc_rport *rport = dev_to_rport(dev);
-	FC_RPORT_DBG(rport, "Destroying rogue rport\n");
-	kfree(rport);
+	struct fc_rport_priv *rdata;
+
+	list_for_each_entry(rdata, &lport->disc.rports, peers)
+		if (rdata->ids.port_id == port_id &&
+		    rdata->rp_state != RPORT_ST_DELETE)
+			return rdata;
+	return NULL;
 }
 
-struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *dp)
+/**
+ * fc_rport_create() - Create a new remote port
+ * @lport:   The local port that the new remote port is for
+ * @port_id: The port ID for the new remote port
+ *
+ * Locking note:  must be called with the disc_mutex held.
+ */
+static struct fc_rport_priv *fc_rport_create(struct fc_lport *lport,
+					     u32 port_id)
 {
-	struct fc_rport *rport;
-	struct fc_rport_libfc_priv *rdata;
-	rport = kzalloc(sizeof(*rport) + sizeof(*rdata), GFP_KERNEL);
+	struct fc_rport_priv *rdata;
 
-	if (!rport)
+	rdata = lport->tt.rport_lookup(lport, port_id);
+	if (rdata)
+		return rdata;
+
+	rdata = kzalloc(sizeof(*rdata), GFP_KERNEL);
+	if (!rdata)
 		return NULL;
 
-	rdata = RPORT_TO_PRIV(rport);
+	rdata->ids.node_name = -1;
+	rdata->ids.port_name = -1;
+	rdata->ids.port_id = port_id;
+	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
 
-	rport->dd_data = rdata;
-	rport->port_id = dp->ids.port_id;
-	rport->port_name = dp->ids.port_name;
-	rport->node_name = dp->ids.node_name;
-	rport->roles = dp->ids.roles;
-	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
-	/*
-	 * Note: all this libfc rogue rport code will be removed for
-	 * upstream so it fine that this is really ugly and hacky right now.
-	 */
-	device_initialize(&rport->dev);
-	rport->dev.release = fc_rport_rogue_destroy;
-
+	kref_init(&rdata->kref);
 	mutex_init(&rdata->rp_mutex);
-	rdata->local_port = dp->lp;
-	rdata->trans_state = FC_PORTSTATE_ROGUE;
+	rdata->local_port = lport;
 	rdata->rp_state = RPORT_ST_INIT;
 	rdata->event = RPORT_EV_NONE;
 	rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
-	rdata->ops = NULL;
-	rdata->e_d_tov = dp->lp->e_d_tov;
-	rdata->r_a_tov = dp->lp->r_a_tov;
+	rdata->e_d_tov = lport->e_d_tov;
+	rdata->r_a_tov = lport->r_a_tov;
+	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	INIT_DELAYED_WORK(&rdata->retry_work, fc_rport_timeout);
 	INIT_WORK(&rdata->event_work, fc_rport_work);
-	/*
-	 * For good measure, but not necessary as we should only
-	 * add REAL rport to the lport list.
-	 */
-	INIT_LIST_HEAD(&rdata->peers);
+	if (port_id != FC_FID_DIR_SERV)
+		list_add(&rdata->peers, &lport->disc.rports);
+	return rdata;
+}
 
-	return rport;
+/**
+ * fc_rport_destroy() - free a remote port after last reference is released.
+ * @kref: pointer to kref inside struct fc_rport_priv
+ */
+static void fc_rport_destroy(struct kref *kref)
+{
+	struct fc_rport_priv *rdata;
+
+	rdata = container_of(kref, struct fc_rport_priv, kref);
+	kfree(rdata);
 }
 
 /**
  * fc_rport_state() - return a string for the state the rport is in
- * @rport: The rport whose state we want to get a string for
+ * @rdata: remote port private data
  */
-static const char *fc_rport_state(struct fc_rport *rport)
+static const char *fc_rport_state(struct fc_rport_priv *rdata)
 {
 	const char *cp;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 
 	cp = fc_rport_state_names[rdata->rp_state];
 	if (!cp)
@@ -191,15 +211,14 @@
 
 /**
  * fc_rport_state_enter() - Change the rport's state
- * @rport: The rport whose state should change
+ * @rdata: The rport whose state should change
  * @new: The new state of the rport
  *
  * Locking Note: Called with the rport lock held
  */
-static void fc_rport_state_enter(struct fc_rport *rport,
+static void fc_rport_state_enter(struct fc_rport_priv *rdata,
 				 enum fc_rport_state new)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	if (rdata->rp_state != new)
 		rdata->retries = 0;
 	rdata->rp_state = new;
@@ -208,147 +227,187 @@
 static void fc_rport_work(struct work_struct *work)
 {
 	u32 port_id;
-	struct fc_rport_libfc_priv *rdata =
-		container_of(work, struct fc_rport_libfc_priv, event_work);
+	struct fc_rport_priv *rdata =
+		container_of(work, struct fc_rport_priv, event_work);
+	struct fc_rport_libfc_priv *rp;
 	enum fc_rport_event event;
-	enum fc_rport_trans_state trans_state;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_rport_operations *rport_ops;
-	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+	struct fc_rport_identifiers ids;
+	struct fc_rport *rport;
 
 	mutex_lock(&rdata->rp_mutex);
 	event = rdata->event;
 	rport_ops = rdata->ops;
+	rport = rdata->rport;
 
-	if (event == RPORT_EV_CREATED) {
-		struct fc_rport *new_rport;
-		struct fc_rport_libfc_priv *new_rdata;
-		struct fc_rport_identifiers ids;
+	FC_RPORT_DBG(rdata, "work event %u\n", event);
 
-		ids.port_id = rport->port_id;
-		ids.roles = rport->roles;
-		ids.port_name = rport->port_name;
-		ids.node_name = rport->node_name;
-
+	switch (event) {
+	case RPORT_EV_READY:
+		ids = rdata->ids;
+		rdata->event = RPORT_EV_NONE;
+		kref_get(&rdata->kref);
 		mutex_unlock(&rdata->rp_mutex);
 
-		new_rport = fc_remote_port_add(lport->host, 0, &ids);
-		if (new_rport) {
-			/*
-			 * Switch from the rogue rport to the rport
-			 * returned by the FC class.
-			 */
-			new_rport->maxframe_size = rport->maxframe_size;
-
-			new_rdata = new_rport->dd_data;
-			new_rdata->e_d_tov = rdata->e_d_tov;
-			new_rdata->r_a_tov = rdata->r_a_tov;
-			new_rdata->ops = rdata->ops;
-			new_rdata->local_port = rdata->local_port;
-			new_rdata->flags = FC_RP_FLAGS_REC_SUPPORTED;
-			new_rdata->trans_state = FC_PORTSTATE_REAL;
-			mutex_init(&new_rdata->rp_mutex);
-			INIT_DELAYED_WORK(&new_rdata->retry_work,
-					  fc_rport_timeout);
-			INIT_LIST_HEAD(&new_rdata->peers);
-			INIT_WORK(&new_rdata->event_work, fc_rport_work);
-
-			fc_rport_state_enter(new_rport, RPORT_ST_READY);
-		} else {
-			printk(KERN_WARNING "libfc: Failed to allocate "
-			       " memory for rport (%6x)\n", ids.port_id);
-			event = RPORT_EV_FAILED;
+		if (!rport)
+			rport = fc_remote_port_add(lport->host, 0, &ids);
+		if (!rport) {
+			FC_RPORT_DBG(rdata, "Failed to add the rport\n");
+			lport->tt.rport_logoff(rdata);
+			kref_put(&rdata->kref, lport->tt.rport_destroy);
+			return;
 		}
-		if (rport->port_id != FC_FID_DIR_SERV)
-			if (rport_ops->event_callback)
-				rport_ops->event_callback(lport, rport,
-							  RPORT_EV_FAILED);
-		put_device(&rport->dev);
-		rport = new_rport;
-		rdata = new_rport->dd_data;
-		if (rport_ops->event_callback)
-			rport_ops->event_callback(lport, rport, event);
-	} else if ((event == RPORT_EV_FAILED) ||
-		   (event == RPORT_EV_LOGO) ||
-		   (event == RPORT_EV_STOP)) {
-		trans_state = rdata->trans_state;
+		mutex_lock(&rdata->rp_mutex);
+		if (rdata->rport)
+			FC_RPORT_DBG(rdata, "rport already allocated\n");
+		rdata->rport = rport;
+		rport->maxframe_size = rdata->maxframe_size;
+		rport->supported_classes = rdata->supported_classes;
+
+		rp = rport->dd_data;
+		rp->local_port = lport;
+		rp->rp_state = rdata->rp_state;
+		rp->flags = rdata->flags;
+		rp->e_d_tov = rdata->e_d_tov;
+		rp->r_a_tov = rdata->r_a_tov;
 		mutex_unlock(&rdata->rp_mutex);
-		if (rport_ops->event_callback)
-			rport_ops->event_callback(lport, rport, event);
-		if (trans_state == FC_PORTSTATE_ROGUE)
-			put_device(&rport->dev);
-		else {
-			port_id = rport->port_id;
+
+		if (rport_ops && rport_ops->event_callback) {
+			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+			rport_ops->event_callback(lport, rdata, event);
+		}
+		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		break;
+
+	case RPORT_EV_FAILED:
+	case RPORT_EV_LOGO:
+	case RPORT_EV_STOP:
+		port_id = rdata->ids.port_id;
+		mutex_unlock(&rdata->rp_mutex);
+
+		if (port_id != FC_FID_DIR_SERV) {
+			mutex_lock(&lport->disc.disc_mutex);
+			list_del(&rdata->peers);
+			mutex_unlock(&lport->disc.disc_mutex);
+		}
+
+		if (rport_ops && rport_ops->event_callback) {
+			FC_RPORT_DBG(rdata, "callback ev %d\n", event);
+			rport_ops->event_callback(lport, rdata, event);
+		}
+		cancel_delayed_work_sync(&rdata->retry_work);
+
+		/*
+		 * Reset any outstanding exchanges before freeing rport.
+		 */
+		lport->tt.exch_mgr_reset(lport, 0, port_id);
+		lport->tt.exch_mgr_reset(lport, port_id, 0);
+
+		if (rport) {
+			rp = rport->dd_data;
+			rp->rp_state = RPORT_ST_DELETE;
+			mutex_lock(&rdata->rp_mutex);
+			rdata->rport = NULL;
+			mutex_unlock(&rdata->rp_mutex);
 			fc_remote_port_delete(rport);
-			lport->tt.exch_mgr_reset(lport, 0, port_id);
-			lport->tt.exch_mgr_reset(lport, port_id, 0);
 		}
-	} else
+		kref_put(&rdata->kref, lport->tt.rport_destroy);
+		break;
+
+	default:
 		mutex_unlock(&rdata->rp_mutex);
+		break;
+	}
 }
 
 /**
  * fc_rport_login() - Start the remote port login state machine
- * @rport: Fibre Channel remote port
+ * @rdata: private remote port
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
+ *
+ * This indicates the intent to be logged into the remote port.
+ * If it appears we are already logged in, ADISC is used to verify
+ * the setup.
  */
-int fc_rport_login(struct fc_rport *rport)
+int fc_rport_login(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Login to port\n");
-
-	fc_rport_enter_plogi(rport);
-
+	switch (rdata->rp_state) {
+	case RPORT_ST_READY:
+		FC_RPORT_DBG(rdata, "ADISC port\n");
+		fc_rport_enter_adisc(rdata);
+		break;
+	default:
+		FC_RPORT_DBG(rdata, "Login to port\n");
+		fc_rport_enter_plogi(rdata);
+		break;
+	}
 	mutex_unlock(&rdata->rp_mutex);
 
 	return 0;
 }
 
 /**
+ * fc_rport_enter_delete() - schedule a remote port to be deleted.
+ * @rdata: private remote port
+ * @event: event to report as the reason for deletion
+ *
+ * Locking Note: Called with the rport lock held.
+ *
+ * Allow state change into DELETE only once.
+ *
+ * Call queue_work only if there's no event already pending.
+ * Set the new event so that the old pending event will not occur.
+ * Since we have the mutex, even if fc_rport_work() is already started,
+ * it'll see the new event.
+ */
+static void fc_rport_enter_delete(struct fc_rport_priv *rdata,
+				  enum fc_rport_event event)
+{
+	if (rdata->rp_state == RPORT_ST_DELETE)
+		return;
+
+	FC_RPORT_DBG(rdata, "Delete port\n");
+
+	fc_rport_state_enter(rdata, RPORT_ST_DELETE);
+
+	if (rdata->event == RPORT_EV_NONE)
+		queue_work(rport_event_queue, &rdata->event_work);
+	rdata->event = event;
+}
+
+/**
  * fc_rport_logoff() - Logoff and remove an rport
- * @rport: Fibre Channel remote port to be removed
+ * @rdata: private remote port
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
  * function and then unlock the rport.
  */
-int fc_rport_logoff(struct fc_rport *rport)
+int fc_rport_logoff(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Remove port\n");
+	FC_RPORT_DBG(rdata, "Remove port\n");
 
-	if (rdata->rp_state == RPORT_ST_NONE) {
-		FC_RPORT_DBG(rport, "Port in NONE state, not removing\n");
+	if (rdata->rp_state == RPORT_ST_DELETE) {
+		FC_RPORT_DBG(rdata, "Port in Delete state, not removing\n");
 		mutex_unlock(&rdata->rp_mutex);
 		goto out;
 	}
 
-	fc_rport_enter_logo(rport);
+	fc_rport_enter_logo(rdata);
 
 	/*
-	 * Change the state to NONE so that we discard
+	 * Change the state to Delete so that we discard
 	 * the response.
 	 */
-	fc_rport_state_enter(rport, RPORT_ST_NONE);
-
-	mutex_unlock(&rdata->rp_mutex);
-
-	cancel_delayed_work_sync(&rdata->retry_work);
-
-	mutex_lock(&rdata->rp_mutex);
-
-	rdata->event = RPORT_EV_STOP;
-	queue_work(rport_event_queue, &rdata->event_work);
-
+	fc_rport_enter_delete(rdata, RPORT_EV_STOP);
 	mutex_unlock(&rdata->rp_mutex);
 
 out:
@@ -357,26 +416,25 @@
 
 /**
  * fc_rport_enter_ready() - The rport is ready
- * @rport: Fibre Channel remote port that is ready
+ * @rdata: private remote port
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
  */
-static void fc_rport_enter_ready(struct fc_rport *rport)
+static void fc_rport_enter_ready(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	fc_rport_state_enter(rdata, RPORT_ST_READY);
 
-	fc_rport_state_enter(rport, RPORT_ST_READY);
+	FC_RPORT_DBG(rdata, "Port is Ready\n");
 
-	FC_RPORT_DBG(rport, "Port is Ready\n");
-
-	rdata->event = RPORT_EV_CREATED;
-	queue_work(rport_event_queue, &rdata->event_work);
+	if (rdata->event == RPORT_EV_NONE)
+		queue_work(rport_event_queue, &rdata->event_work);
+	rdata->event = RPORT_EV_READY;
 }
 
 /**
  * fc_rport_timeout() - Handler for the retry_work timer.
- * @work: The work struct of the fc_rport_libfc_priv
+ * @work: The work struct of the fc_rport_priv
  *
  * Locking Note: Called without the rport lock held. This
  * function will hold the rport lock, call an _enter_*
@@ -384,63 +442,63 @@
  */
 static void fc_rport_timeout(struct work_struct *work)
 {
-	struct fc_rport_libfc_priv *rdata =
-		container_of(work, struct fc_rport_libfc_priv, retry_work.work);
-	struct fc_rport *rport = PRIV_TO_RPORT(rdata);
+	struct fc_rport_priv *rdata =
+		container_of(work, struct fc_rport_priv, retry_work.work);
 
 	mutex_lock(&rdata->rp_mutex);
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_PLOGI:
-		fc_rport_enter_plogi(rport);
+		fc_rport_enter_plogi(rdata);
 		break;
 	case RPORT_ST_PRLI:
-		fc_rport_enter_prli(rport);
+		fc_rport_enter_prli(rdata);
 		break;
 	case RPORT_ST_RTV:
-		fc_rport_enter_rtv(rport);
+		fc_rport_enter_rtv(rdata);
 		break;
 	case RPORT_ST_LOGO:
-		fc_rport_enter_logo(rport);
+		fc_rport_enter_logo(rdata);
+		break;
+	case RPORT_ST_ADISC:
+		fc_rport_enter_adisc(rdata);
 		break;
 	case RPORT_ST_READY:
 	case RPORT_ST_INIT:
-	case RPORT_ST_NONE:
+	case RPORT_ST_DELETE:
 		break;
 	}
 
 	mutex_unlock(&rdata->rp_mutex);
-	put_device(&rport->dev);
 }
 
 /**
  * fc_rport_error() - Error handler, called once retries have been exhausted
- * @rport: The fc_rport object
+ * @rdata: private remote port
  * @fp: The frame pointer
  *
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
  */
-static void fc_rport_error(struct fc_rport *rport, struct fc_frame *fp)
+static void fc_rport_error(struct fc_rport_priv *rdata, struct fc_frame *fp)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-
-	FC_RPORT_DBG(rport, "Error %ld in state %s, retries %d\n",
-		     PTR_ERR(fp), fc_rport_state(rport), rdata->retries);
+	FC_RPORT_DBG(rdata, "Error %ld in state %s, retries %d\n",
+		     IS_ERR(fp) ? -PTR_ERR(fp) : 0,
+		     fc_rport_state(rdata), rdata->retries);
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_PLOGI:
-	case RPORT_ST_PRLI:
 	case RPORT_ST_LOGO:
-		rdata->event = RPORT_EV_FAILED;
-		fc_rport_state_enter(rport, RPORT_ST_NONE);
-		queue_work(rport_event_queue,
-			   &rdata->event_work);
+		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
 		break;
 	case RPORT_ST_RTV:
-		fc_rport_enter_ready(rport);
+		fc_rport_enter_ready(rdata);
 		break;
-	case RPORT_ST_NONE:
+	case RPORT_ST_PRLI:
+	case RPORT_ST_ADISC:
+		fc_rport_enter_logo(rdata);
+		break;
+	case RPORT_ST_DELETE:
 	case RPORT_ST_READY:
 	case RPORT_ST_INIT:
 		break;
@@ -449,7 +507,7 @@
 
 /**
  * fc_rport_error_retry() - Error handler when retries are desired
- * @rport: The fc_rport object
+ * @rdata: private remote port data
  * @fp: The frame pointer
  *
  * If the error was an exchange timeout retry immediately,
@@ -458,45 +516,43 @@
  * Locking Note: The rport lock is expected to be held before
  * calling this routine
  */
-static void fc_rport_error_retry(struct fc_rport *rport, struct fc_frame *fp)
+static void fc_rport_error_retry(struct fc_rport_priv *rdata,
+				 struct fc_frame *fp)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	unsigned long delay = FC_DEF_E_D_TOV;
 
 	/* make sure this isn't an FC_EX_CLOSED error, never retry those */
 	if (PTR_ERR(fp) == -FC_EX_CLOSED)
-		return fc_rport_error(rport, fp);
+		return fc_rport_error(rdata, fp);
 
 	if (rdata->retries < rdata->local_port->max_rport_retry_count) {
-		FC_RPORT_DBG(rport, "Error %ld in state %s, retrying\n",
-			     PTR_ERR(fp), fc_rport_state(rport));
+		FC_RPORT_DBG(rdata, "Error %ld in state %s, retrying\n",
+			     PTR_ERR(fp), fc_rport_state(rdata));
 		rdata->retries++;
 		/* no additional delay on exchange timeouts */
 		if (PTR_ERR(fp) == -FC_EX_TIMEOUT)
 			delay = 0;
-		get_device(&rport->dev);
 		schedule_delayed_work(&rdata->retry_work, delay);
 		return;
 	}
 
-	return fc_rport_error(rport, fp);
+	return fc_rport_error(rdata, fp);
 }
 
 /**
  * fc_rport_plogi_recv_resp() - Handle incoming ELS PLOGI response
  * @sp: current sequence in the PLOGI exchange
  * @fp: response frame
- * @rp_arg: Fibre Channel remote port
+ * @rdata_arg: private remote port data
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
  * and then unlock the rport.
  */
 static void fc_rport_plogi_resp(struct fc_seq *sp, struct fc_frame *fp,
-				void *rp_arg)
+				void *rdata_arg)
 {
-	struct fc_rport *rport = rp_arg;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_rport_priv *rdata = rdata_arg;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_els_flogi *plp = NULL;
 	unsigned int tov;
@@ -506,26 +562,26 @@
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Received a PLOGI response\n");
+	FC_RPORT_DBG(rdata, "Received a PLOGI %s\n", fc_els_resp_type(fp));
 
 	if (rdata->rp_state != RPORT_ST_PLOGI) {
-		FC_RPORT_DBG(rport, "Received a PLOGI response, but in state "
-			     "%s\n", fc_rport_state(rport));
+		FC_RPORT_DBG(rdata, "Received a PLOGI response, but in state "
+			     "%s\n", fc_rport_state(rdata));
 		if (IS_ERR(fp))
 			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		goto err;
 	}
 
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC &&
 	    (plp = fc_frame_payload_get(fp, sizeof(*plp))) != NULL) {
-		rport->port_name = get_unaligned_be64(&plp->fl_wwpn);
-		rport->node_name = get_unaligned_be64(&plp->fl_wwnn);
+		rdata->ids.port_name = get_unaligned_be64(&plp->fl_wwpn);
+		rdata->ids.node_name = get_unaligned_be64(&plp->fl_wwnn);
 
 		tov = ntohl(plp->fl_csp.sp_e_d_tov);
 		if (ntohs(plp->fl_csp.sp_features) & FC_SP_FT_EDTR)
@@ -537,75 +593,64 @@
 		if (cssp_seq < csp_seq)
 			csp_seq = cssp_seq;
 		rdata->max_seq = csp_seq;
-		rport->maxframe_size =
-			fc_plogi_get_maxframe(plp, lport->mfs);
-
-		/*
-		 * If the rport is one of the well known addresses
-		 * we skip PRLI and RTV and go straight to READY.
-		 */
-		if (rport->port_id >= FC_FID_DOM_MGR)
-			fc_rport_enter_ready(rport);
-		else
-			fc_rport_enter_prli(rport);
+		rdata->maxframe_size = fc_plogi_get_maxframe(plp, lport->mfs);
+		fc_rport_enter_prli(rdata);
 	} else
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	put_device(&rport->dev);
+	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 }
 
 /**
  * fc_rport_enter_plogi() - Send Port Login (PLOGI) request to peer
- * @rport: Fibre Channel remote port to send PLOGI to
+ * @rdata: private remote port data
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
  */
-static void fc_rport_enter_plogi(struct fc_rport *rport)
+static void fc_rport_enter_plogi(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_frame *fp;
 
-	FC_RPORT_DBG(rport, "Port entered PLOGI state from %s state\n",
-		     fc_rport_state(rport));
+	FC_RPORT_DBG(rdata, "Port entered PLOGI state from %s state\n",
+		     fc_rport_state(rdata));
 
-	fc_rport_state_enter(rport, RPORT_ST_PLOGI);
+	fc_rport_state_enter(rdata, RPORT_ST_PLOGI);
 
-	rport->maxframe_size = FC_MIN_MAX_PAYLOAD;
+	rdata->maxframe_size = FC_MIN_MAX_PAYLOAD;
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_flogi));
 	if (!fp) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		return;
 	}
 	rdata->e_d_tov = lport->e_d_tov;
 
-	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PLOGI,
-				  fc_rport_plogi_resp, rport, lport->e_d_tov))
-		fc_rport_error_retry(rport, fp);
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PLOGI,
+				  fc_rport_plogi_resp, rdata, lport->e_d_tov))
+		fc_rport_error_retry(rdata, fp);
 	else
-		get_device(&rport->dev);
+		kref_get(&rdata->kref);
 }
 
 /**
  * fc_rport_prli_resp() - Process Login (PRLI) response handler
  * @sp: current sequence in the PRLI exchange
  * @fp: response frame
- * @rp_arg: Fibre Channel remote port
+ * @rdata_arg: private remote port data
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
  * and then unlock the rport.
  */
 static void fc_rport_prli_resp(struct fc_seq *sp, struct fc_frame *fp,
-			       void *rp_arg)
+			       void *rdata_arg)
 {
-	struct fc_rport *rport = rp_arg;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_rport_priv *rdata = rdata_arg;
 	struct {
 		struct fc_els_prli prli;
 		struct fc_els_spp spp;
@@ -616,21 +661,24 @@
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Received a PRLI response\n");
+	FC_RPORT_DBG(rdata, "Received a PRLI %s\n", fc_els_resp_type(fp));
 
 	if (rdata->rp_state != RPORT_ST_PRLI) {
-		FC_RPORT_DBG(rport, "Received a PRLI response, but in state "
-			     "%s\n", fc_rport_state(rport));
+		FC_RPORT_DBG(rdata, "Received a PRLI response, but in state "
+			     "%s\n", fc_rport_state(rdata));
 		if (IS_ERR(fp))
 			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		goto err;
 	}
 
+	/* reinitialize remote port roles */
+	rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
 	op = fc_frame_payload_op(fp);
 	if (op == ELS_LS_ACC) {
 		pp = fc_frame_payload_get(fp, sizeof(*pp));
@@ -640,90 +688,82 @@
 				rdata->flags |= FC_RP_FLAGS_RETRY;
 		}
 
-		rport->supported_classes = FC_COS_CLASS3;
+		rdata->supported_classes = FC_COS_CLASS3;
 		if (fcp_parm & FCP_SPPF_INIT_FCN)
 			roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 		if (fcp_parm & FCP_SPPF_TARG_FCN)
 			roles |= FC_RPORT_ROLE_FCP_TARGET;
 
-		rport->roles = roles;
-		fc_rport_enter_rtv(rport);
+		rdata->ids.roles = roles;
+		fc_rport_enter_rtv(rdata);
 
 	} else {
-		FC_RPORT_DBG(rport, "Bad ELS response for PRLI command\n");
-		rdata->event = RPORT_EV_FAILED;
-		fc_rport_state_enter(rport, RPORT_ST_NONE);
-		queue_work(rport_event_queue, &rdata->event_work);
+		FC_RPORT_DBG(rdata, "Bad ELS response for PRLI command\n");
+		fc_rport_enter_delete(rdata, RPORT_EV_FAILED);
 	}
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	put_device(&rport->dev);
+	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 }
 
 /**
  * fc_rport_logo_resp() - Logout (LOGO) response handler
  * @sp: current sequence in the LOGO exchange
  * @fp: response frame
- * @rp_arg: Fibre Channel remote port
+ * @rdata_arg: private remote port data
  *
  * Locking Note: This function will be called without the rport lock
  * held, but it will lock, call an _enter_* function or fc_rport_error
  * and then unlock the rport.
  */
 static void fc_rport_logo_resp(struct fc_seq *sp, struct fc_frame *fp,
-			       void *rp_arg)
+			       void *rdata_arg)
 {
-	struct fc_rport *rport = rp_arg;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_rport_priv *rdata = rdata_arg;
 	u8 op;
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Received a LOGO response\n");
+	FC_RPORT_DBG(rdata, "Received a LOGO %s\n", fc_els_resp_type(fp));
 
 	if (rdata->rp_state != RPORT_ST_LOGO) {
-		FC_RPORT_DBG(rport, "Received a LOGO response, but in state "
-			     "%s\n", fc_rport_state(rport));
+		FC_RPORT_DBG(rdata, "Received a LOGO response, but in state "
+			     "%s\n", fc_rport_state(rdata));
 		if (IS_ERR(fp))
 			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		goto err;
 	}
 
 	op = fc_frame_payload_op(fp);
-	if (op == ELS_LS_ACC) {
-		fc_rport_enter_rtv(rport);
-	} else {
-		FC_RPORT_DBG(rport, "Bad ELS response for LOGO command\n");
-		rdata->event = RPORT_EV_LOGO;
-		fc_rport_state_enter(rport, RPORT_ST_NONE);
-		queue_work(rport_event_queue, &rdata->event_work);
-	}
+	if (op != ELS_LS_ACC)
+		FC_RPORT_DBG(rdata, "Bad ELS response op %x for LOGO command\n",
+			     op);
+	fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	put_device(&rport->dev);
+	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 }
 
 /**
  * fc_rport_enter_prli() - Send Process Login (PRLI) request to peer
- * @rport: Fibre Channel remote port to send PRLI to
+ * @rdata: private remote port data
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
  */
-static void fc_rport_enter_prli(struct fc_rport *rport)
+static void fc_rport_enter_prli(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 	struct {
 		struct fc_els_prli prli;
@@ -731,29 +771,38 @@
 	} *pp;
 	struct fc_frame *fp;
 
-	FC_RPORT_DBG(rport, "Port entered PRLI state from %s state\n",
-		     fc_rport_state(rport));
-
-	fc_rport_state_enter(rport, RPORT_ST_PRLI);
-
-	fp = fc_frame_alloc(lport, sizeof(*pp));
-	if (!fp) {
-		fc_rport_error_retry(rport, fp);
+	/*
+	 * If the rport is one of the well known addresses
+	 * we skip PRLI and RTV and go straight to READY.
+	 */
+	if (rdata->ids.port_id >= FC_FID_DOM_MGR) {
+		fc_rport_enter_ready(rdata);
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, rport, fp, ELS_PRLI,
-				  fc_rport_prli_resp, rport, lport->e_d_tov))
-		fc_rport_error_retry(rport, fp);
+	FC_RPORT_DBG(rdata, "Port entered PRLI state from %s state\n",
+		     fc_rport_state(rdata));
+
+	fc_rport_state_enter(rdata, RPORT_ST_PRLI);
+
+	fp = fc_frame_alloc(lport, sizeof(*pp));
+	if (!fp) {
+		fc_rport_error_retry(rdata, fp);
+		return;
+	}
+
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_PRLI,
+				  fc_rport_prli_resp, rdata, lport->e_d_tov))
+		fc_rport_error_retry(rdata, fp);
 	else
-		get_device(&rport->dev);
+		kref_get(&rdata->kref);
 }
 
 /**
  * fc_rport_els_rtv_resp() - Request Timeout Value response handler
  * @sp: current sequence in the RTV exchange
  * @fp: response frame
- * @rp_arg: Fibre Channel remote port
+ * @rdata_arg: private remote port data
  *
  * Many targets don't seem to support this.
  *
@@ -762,26 +811,25 @@
  * and then unlock the rport.
  */
 static void fc_rport_rtv_resp(struct fc_seq *sp, struct fc_frame *fp,
-			      void *rp_arg)
+			      void *rdata_arg)
 {
-	struct fc_rport *rport = rp_arg;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
+	struct fc_rport_priv *rdata = rdata_arg;
 	u8 op;
 
 	mutex_lock(&rdata->rp_mutex);
 
-	FC_RPORT_DBG(rport, "Received a RTV response\n");
+	FC_RPORT_DBG(rdata, "Received a RTV %s\n", fc_els_resp_type(fp));
 
 	if (rdata->rp_state != RPORT_ST_RTV) {
-		FC_RPORT_DBG(rport, "Received a RTV response, but in state "
-			     "%s\n", fc_rport_state(rport));
+		FC_RPORT_DBG(rdata, "Received a RTV response, but in state "
+			     "%s\n", fc_rport_state(rdata));
 		if (IS_ERR(fp))
 			goto err;
 		goto out;
 	}
 
 	if (IS_ERR(fp)) {
-		fc_rport_error(rport, fp);
+		fc_rport_error(rdata, fp);
 		goto err;
 	}
 
@@ -807,184 +855,376 @@
 		}
 	}
 
-	fc_rport_enter_ready(rport);
+	fc_rport_enter_ready(rdata);
 
 out:
 	fc_frame_free(fp);
 err:
 	mutex_unlock(&rdata->rp_mutex);
-	put_device(&rport->dev);
+	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
 }
 
 /**
  * fc_rport_enter_rtv() - Send Request Timeout Value (RTV) request to peer
- * @rport: Fibre Channel remote port to send RTV to
+ * @rdata: private remote port data
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
  */
-static void fc_rport_enter_rtv(struct fc_rport *rport)
+static void fc_rport_enter_rtv(struct fc_rport_priv *rdata)
 {
 	struct fc_frame *fp;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 
-	FC_RPORT_DBG(rport, "Port entered RTV state from %s state\n",
-		     fc_rport_state(rport));
+	FC_RPORT_DBG(rdata, "Port entered RTV state from %s state\n",
+		     fc_rport_state(rdata));
 
-	fc_rport_state_enter(rport, RPORT_ST_RTV);
+	fc_rport_state_enter(rdata, RPORT_ST_RTV);
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_rtv));
 	if (!fp) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, rport, fp, ELS_RTV,
-				     fc_rport_rtv_resp, rport, lport->e_d_tov))
-		fc_rport_error_retry(rport, fp);
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_RTV,
+				     fc_rport_rtv_resp, rdata, lport->e_d_tov))
+		fc_rport_error_retry(rdata, fp);
 	else
-		get_device(&rport->dev);
+		kref_get(&rdata->kref);
 }
 
 /**
  * fc_rport_enter_logo() - Send Logout (LOGO) request to peer
- * @rport: Fibre Channel remote port to send LOGO to
+ * @rdata: private remote port data
  *
  * Locking Note: The rport lock is expected to be held before calling
  * this routine.
  */
-static void fc_rport_enter_logo(struct fc_rport *rport)
+static void fc_rport_enter_logo(struct fc_rport_priv *rdata)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_frame *fp;
 
-	FC_RPORT_DBG(rport, "Port entered LOGO state from %s state\n",
-		     fc_rport_state(rport));
+	FC_RPORT_DBG(rdata, "Port entered LOGO state from %s state\n",
+		     fc_rport_state(rdata));
 
-	fc_rport_state_enter(rport, RPORT_ST_LOGO);
+	fc_rport_state_enter(rdata, RPORT_ST_LOGO);
 
 	fp = fc_frame_alloc(lport, sizeof(struct fc_els_logo));
 	if (!fp) {
-		fc_rport_error_retry(rport, fp);
+		fc_rport_error_retry(rdata, fp);
 		return;
 	}
 
-	if (!lport->tt.elsct_send(lport, rport, fp, ELS_LOGO,
-				  fc_rport_logo_resp, rport, lport->e_d_tov))
-		fc_rport_error_retry(rport, fp);
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_LOGO,
+				  fc_rport_logo_resp, rdata, lport->e_d_tov))
+		fc_rport_error_retry(rdata, fp);
 	else
-		get_device(&rport->dev);
+		kref_get(&rdata->kref);
 }
 
-
 /**
- * fc_rport_recv_req() - Receive a request from a rport
- * @sp: current sequence in the PLOGI exchange
+ * fc_rport_els_adisc_resp() - Address Discovery response handler
+ * @sp: current sequence in the ADISC exchange
  * @fp: response frame
- * @rp_arg: Fibre Channel remote port
+ * @rdata_arg: remote port private.
  *
- * Locking Note: Called without the rport lock held. This
- * function will hold the rport lock, call an _enter_*
- * function and then unlock the rport.
+ * Locking Note: This function will be called without the rport lock
+ * held, but it will lock, call an _enter_* function or fc_rport_error
+ * and then unlock the rport.
  */
-void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
-		       struct fc_rport *rport)
+static void fc_rport_adisc_resp(struct fc_seq *sp, struct fc_frame *fp,
+			      void *rdata_arg)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
-
-	struct fc_frame_header *fh;
-	struct fc_seq_els_data els_data;
+	struct fc_rport_priv *rdata = rdata_arg;
+	struct fc_els_adisc *adisc;
 	u8 op;
 
 	mutex_lock(&rdata->rp_mutex);
 
+	FC_RPORT_DBG(rdata, "Received a ADISC response\n");
+
+	if (rdata->rp_state != RPORT_ST_ADISC) {
+		FC_RPORT_DBG(rdata, "Received a ADISC resp but in state %s\n",
+			     fc_rport_state(rdata));
+		if (IS_ERR(fp))
+			goto err;
+		goto out;
+	}
+
+	if (IS_ERR(fp)) {
+		fc_rport_error(rdata, fp);
+		goto err;
+	}
+
+	/*
+	 * If address verification failed, consider us logged out of the rport.
+	 * Since the rport is still in discovery, we want to be
+	 * logged in, so go to PLOGI state.  Otherwise, go back to READY.
+	 */
+	op = fc_frame_payload_op(fp);
+	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+	if (op != ELS_LS_ACC || !adisc ||
+	    ntoh24(adisc->adisc_port_id) != rdata->ids.port_id ||
+	    get_unaligned_be64(&adisc->adisc_wwpn) != rdata->ids.port_name ||
+	    get_unaligned_be64(&adisc->adisc_wwnn) != rdata->ids.node_name) {
+		FC_RPORT_DBG(rdata, "ADISC error or mismatch\n");
+		fc_rport_enter_plogi(rdata);
+	} else {
+		FC_RPORT_DBG(rdata, "ADISC OK\n");
+		fc_rport_enter_ready(rdata);
+	}
+out:
+	fc_frame_free(fp);
+err:
+	mutex_unlock(&rdata->rp_mutex);
+	kref_put(&rdata->kref, rdata->local_port->tt.rport_destroy);
+}
+
+/**
+ * fc_rport_enter_adisc() - Send Address Discovery (ADISC) request to peer
+ * @rdata: remote port private data
+ *
+ * Locking Note: The rport lock is expected to be held before calling
+ * this routine.
+ */
+static void fc_rport_enter_adisc(struct fc_rport_priv *rdata)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+
+	FC_RPORT_DBG(rdata, "sending ADISC from %s state\n",
+		     fc_rport_state(rdata));
+
+	fc_rport_state_enter(rdata, RPORT_ST_ADISC);
+
+	fp = fc_frame_alloc(lport, sizeof(struct fc_els_adisc));
+	if (!fp) {
+		fc_rport_error_retry(rdata, fp);
+		return;
+	}
+	if (!lport->tt.elsct_send(lport, rdata->ids.port_id, fp, ELS_ADISC,
+				  fc_rport_adisc_resp, rdata, lport->e_d_tov))
+		fc_rport_error_retry(rdata, fp);
+	else
+		kref_get(&rdata->kref);
+}
+
+/**
+ * fc_rport_recv_adisc_req() - Handle incoming Address Discovery (ADISC) Request
+ * @rdata: remote port private
+ * @sp: current sequence in the ADISC exchange
+ * @in_fp: ADISC request frame
+ *
+ * Locking Note:  Called with the lport and rport locks held.
+ */
+static void fc_rport_recv_adisc_req(struct fc_rport_priv *rdata,
+				    struct fc_seq *sp, struct fc_frame *in_fp)
+{
+	struct fc_lport *lport = rdata->local_port;
+	struct fc_frame *fp;
+	struct fc_exch *ep = fc_seq_exch(sp);
+	struct fc_els_adisc *adisc;
+	struct fc_seq_els_data rjt_data;
+	u32 f_ctl;
+
+	FC_RPORT_DBG(rdata, "Received ADISC request\n");
+
+	adisc = fc_frame_payload_get(in_fp, sizeof(*adisc));
+	if (!adisc) {
+		rjt_data.fp = NULL;
+		rjt_data.reason = ELS_RJT_PROT;
+		rjt_data.explan = ELS_EXPL_INV_LEN;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+		goto drop;
+	}
+
+	fp = fc_frame_alloc(lport, sizeof(*adisc));
+	if (!fp)
+		goto drop;
+	fc_adisc_fill(lport, fp);
+	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+	adisc->adisc_cmd = ELS_LS_ACC;
+	sp = lport->tt.seq_start_next(sp);
+	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+		       FC_TYPE_ELS, f_ctl, 0);
+	lport->tt.seq_send(lport, sp, fp);
+drop:
+	fc_frame_free(in_fp);
+}
+
+/**
+ * fc_rport_recv_els_req() - handle a validated ELS request.
+ * @lport: Fibre Channel local port
+ * @sp: current sequence in the PLOGI exchange
+ * @fp: response frame
+ *
+ * Handle incoming ELS requests that require port login.
+ * The ELS opcode has already been validated by the caller.
+ *
+ * Locking Note: Called with the lport lock held.
+ */
+static void fc_rport_recv_els_req(struct fc_lport *lport,
+				  struct fc_seq *sp, struct fc_frame *fp)
+{
+	struct fc_rport_priv *rdata;
+	struct fc_frame_header *fh;
+	struct fc_seq_els_data els_data;
+
 	els_data.fp = NULL;
-	els_data.explan = ELS_EXPL_NONE;
-	els_data.reason = ELS_RJT_NONE;
+	els_data.reason = ELS_RJT_UNAB;
+	els_data.explan = ELS_EXPL_PLOGI_REQD;
 
 	fh = fc_frame_header_get(fp);
 
-	if (fh->fh_r_ctl == FC_RCTL_ELS_REQ && fh->fh_type == FC_TYPE_ELS) {
-		op = fc_frame_payload_op(fp);
-		switch (op) {
-		case ELS_PLOGI:
-			fc_rport_recv_plogi_req(rport, sp, fp);
-			break;
-		case ELS_PRLI:
-			fc_rport_recv_prli_req(rport, sp, fp);
-			break;
-		case ELS_PRLO:
-			fc_rport_recv_prlo_req(rport, sp, fp);
-			break;
-		case ELS_LOGO:
-			fc_rport_recv_logo_req(rport, sp, fp);
-			break;
-		case ELS_RRQ:
-			els_data.fp = fp;
-			lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
-			break;
-		case ELS_REC:
-			els_data.fp = fp;
-			lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
-			break;
-		default:
-			els_data.reason = ELS_RJT_UNSUP;
-			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
-			break;
-		}
+	mutex_lock(&lport->disc.disc_mutex);
+	rdata = lport->tt.rport_lookup(lport, ntoh24(fh->fh_s_id));
+	if (!rdata) {
+		mutex_unlock(&lport->disc.disc_mutex);
+		goto reject;
+	}
+	mutex_lock(&rdata->rp_mutex);
+	mutex_unlock(&lport->disc.disc_mutex);
+
+	switch (rdata->rp_state) {
+	case RPORT_ST_PRLI:
+	case RPORT_ST_RTV:
+	case RPORT_ST_READY:
+	case RPORT_ST_ADISC:
+		break;
+	default:
+		mutex_unlock(&rdata->rp_mutex);
+		goto reject;
+	}
+
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_PRLI:
+		fc_rport_recv_prli_req(rdata, sp, fp);
+		break;
+	case ELS_PRLO:
+		fc_rport_recv_prlo_req(rdata, sp, fp);
+		break;
+	case ELS_ADISC:
+		fc_rport_recv_adisc_req(rdata, sp, fp);
+		break;
+	case ELS_RRQ:
+		els_data.fp = fp;
+		lport->tt.seq_els_rsp_send(sp, ELS_RRQ, &els_data);
+		break;
+	case ELS_REC:
+		els_data.fp = fp;
+		lport->tt.seq_els_rsp_send(sp, ELS_REC, &els_data);
+		break;
+	default:
+		fc_frame_free(fp);	/* can't happen */
+		break;
 	}
 
 	mutex_unlock(&rdata->rp_mutex);
+	return;
+
+reject:
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+	fc_frame_free(fp);
+}
+
+/**
+ * fc_rport_recv_req() - Handle a received ELS request from a rport
+ * @sp: current sequence in the PLOGI exchange
+ * @fp: response frame
+ * @lport: Fibre Channel local port
+ *
+ * Locking Note: Called with the lport lock held.
+ */
+void fc_rport_recv_req(struct fc_seq *sp, struct fc_frame *fp,
+		       struct fc_lport *lport)
+{
+	struct fc_seq_els_data els_data;
+
+	/*
+	 * Handle PLOGI and LOGO requests separately, since they
+	 * don't require prior login.
+	 * Check for unsupported opcodes first and reject them.
+	 * For some ops, it would be incorrect to reject with "PLOGI required".
+	 */
+	switch (fc_frame_payload_op(fp)) {
+	case ELS_PLOGI:
+		fc_rport_recv_plogi_req(lport, sp, fp);
+		break;
+	case ELS_LOGO:
+		fc_rport_recv_logo_req(lport, sp, fp);
+		break;
+	case ELS_PRLI:
+	case ELS_PRLO:
+	case ELS_ADISC:
+	case ELS_RRQ:
+	case ELS_REC:
+		fc_rport_recv_els_req(lport, sp, fp);
+		break;
+	default:
+		fc_frame_free(fp);
+		els_data.fp = NULL;
+		els_data.reason = ELS_RJT_UNSUP;
+		els_data.explan = ELS_EXPL_NONE;
+		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &els_data);
+		break;
+	}
 }
 
 /**
  * fc_rport_recv_plogi_req() - Handle incoming Port Login (PLOGI) request
- * @rport: Fibre Channel remote port that initiated PLOGI
+ * @lport: local port
  * @sp: current sequence in the PLOGI exchange
  * @fp: PLOGI request frame
  *
- * Locking Note: The rport lock is exected to be held before calling
- * this function.
+ * Locking Note: The rport lock is held before calling this function.
  */
-static void fc_rport_recv_plogi_req(struct fc_rport *rport,
+static void fc_rport_recv_plogi_req(struct fc_lport *lport,
 				    struct fc_seq *sp, struct fc_frame *rx_fp)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
+	struct fc_disc *disc;
+	struct fc_rport_priv *rdata;
 	struct fc_frame *fp = rx_fp;
 	struct fc_exch *ep;
 	struct fc_frame_header *fh;
 	struct fc_els_flogi *pl;
 	struct fc_seq_els_data rjt_data;
-	u32 sid;
-	u64 wwpn;
-	u64 wwnn;
-	enum fc_els_rjt_reason reject = 0;
-	u32 f_ctl;
+	u32 sid, f_ctl;
+
 	rjt_data.fp = NULL;
-
 	fh = fc_frame_header_get(fp);
-
-	FC_RPORT_DBG(rport, "Received PLOGI request while in state %s\n",
-		     fc_rport_state(rport));
-
 	sid = ntoh24(fh->fh_s_id);
+
+	FC_RPORT_ID_DBG(lport, sid, "Received PLOGI request\n");
+
 	pl = fc_frame_payload_get(fp, sizeof(*pl));
 	if (!pl) {
-		FC_RPORT_DBG(rport, "Received PLOGI too short\n");
-		WARN_ON(1);
-		/* XXX TBD: send reject? */
-		fc_frame_free(fp);
-		return;
+		FC_RPORT_ID_DBG(lport, sid, "Received PLOGI too short\n");
+		rjt_data.reason = ELS_RJT_PROT;
+		rjt_data.explan = ELS_EXPL_INV_LEN;
+		goto reject;
 	}
-	wwpn = get_unaligned_be64(&pl->fl_wwpn);
-	wwnn = get_unaligned_be64(&pl->fl_wwnn);
+
+	disc = &lport->disc;
+	mutex_lock(&disc->disc_mutex);
+	rdata = lport->tt.rport_create(lport, sid);
+	if (!rdata) {
+		mutex_unlock(&disc->disc_mutex);
+		rjt_data.reason = ELS_RJT_UNAB;
+		rjt_data.explan = ELS_EXPL_INSUF_RES;
+		goto reject;
+	}
+
+	mutex_lock(&rdata->rp_mutex);
+	mutex_unlock(&disc->disc_mutex);
+
+	rdata->ids.port_name = get_unaligned_be64(&pl->fl_wwpn);
+	rdata->ids.node_name = get_unaligned_be64(&pl->fl_wwnn);
 
 	/*
-	 * If the session was just created, possibly due to the incoming PLOGI,
+	 * If the rport was just created, possibly due to the incoming PLOGI,
 	 * set the state appropriately and accept the PLOGI.
 	 *
 	 * If we had also sent a PLOGI, and if the received PLOGI is from a
@@ -996,86 +1236,76 @@
 	 */
 	switch (rdata->rp_state) {
 	case RPORT_ST_INIT:
-		FC_RPORT_DBG(rport, "Received PLOGI, wwpn %llx state INIT "
-			     "- reject\n", (unsigned long long)wwpn);
-		reject = ELS_RJT_UNSUP;
+		FC_RPORT_DBG(rdata, "Received PLOGI in INIT state\n");
 		break;
 	case RPORT_ST_PLOGI:
-		FC_RPORT_DBG(rport, "Received PLOGI in PLOGI state %d\n",
-			     rdata->rp_state);
-		if (wwpn < lport->wwpn)
-			reject = ELS_RJT_INPROG;
+		FC_RPORT_DBG(rdata, "Received PLOGI in PLOGI state\n");
+		if (rdata->ids.port_name < lport->wwpn) {
+			mutex_unlock(&rdata->rp_mutex);
+			rjt_data.reason = ELS_RJT_INPROG;
+			rjt_data.explan = ELS_EXPL_NONE;
+			goto reject;
+		}
 		break;
 	case RPORT_ST_PRLI:
 	case RPORT_ST_READY:
-		FC_RPORT_DBG(rport, "Received PLOGI in logged-in state %d "
+	case RPORT_ST_ADISC:
+		FC_RPORT_DBG(rdata, "Received PLOGI in logged-in state %d "
 			     "- ignored for now\n", rdata->rp_state);
 		/* XXX TBD - should reset */
 		break;
-	case RPORT_ST_NONE:
+	case RPORT_ST_DELETE:
 	default:
-		FC_RPORT_DBG(rport, "Received PLOGI in unexpected "
-			     "state %d\n", rdata->rp_state);
-		fc_frame_free(fp);
-		return;
-		break;
+		FC_RPORT_DBG(rdata, "Received PLOGI in unexpected state %d\n",
+			     rdata->rp_state);
+		fc_frame_free(rx_fp);
+		goto out;
 	}
 
-	if (reject) {
-		rjt_data.reason = reject;
-		rjt_data.explan = ELS_EXPL_NONE;
-		lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
-		fc_frame_free(fp);
-	} else {
-		fp = fc_frame_alloc(lport, sizeof(*pl));
-		if (fp == NULL) {
-			fp = rx_fp;
-			rjt_data.reason = ELS_RJT_UNAB;
-			rjt_data.explan = ELS_EXPL_NONE;
-			lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
-			fc_frame_free(fp);
-		} else {
-			sp = lport->tt.seq_start_next(sp);
-			WARN_ON(!sp);
-			fc_rport_set_name(rport, wwpn, wwnn);
+	/*
+	 * Get session payload size from incoming PLOGI.
+	 */
+	rdata->maxframe_size = fc_plogi_get_maxframe(pl, lport->mfs);
+	fc_frame_free(rx_fp);
 
-			/*
-			 * Get session payload size from incoming PLOGI.
-			 */
-			rport->maxframe_size =
-				fc_plogi_get_maxframe(pl, lport->mfs);
-			fc_frame_free(rx_fp);
-			fc_plogi_fill(lport, fp, ELS_LS_ACC);
+	/*
+	 * Send LS_ACC.	 If this fails, the originator should retry.
+	 */
+	sp = lport->tt.seq_start_next(sp);
+	if (!sp)
+		goto out;
+	fp = fc_frame_alloc(lport, sizeof(*pl));
+	if (!fp)
+		goto out;
 
-			/*
-			 * Send LS_ACC.	 If this fails,
-			 * the originator should retry.
-			 */
-			f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ;
-			f_ctl |= FC_FC_END_SEQ | FC_FC_SEQ_INIT;
-			ep = fc_seq_exch(sp);
-			fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
-				       FC_TYPE_ELS, f_ctl, 0);
-			lport->tt.seq_send(lport, sp, fp);
-			if (rdata->rp_state == RPORT_ST_PLOGI)
-				fc_rport_enter_prli(rport);
-		}
-	}
+	fc_plogi_fill(lport, fp, ELS_LS_ACC);
+	f_ctl = FC_FC_EX_CTX | FC_FC_LAST_SEQ | FC_FC_END_SEQ | FC_FC_SEQ_INIT;
+	ep = fc_seq_exch(sp);
+	fc_fill_fc_hdr(fp, FC_RCTL_ELS_REP, ep->did, ep->sid,
+		       FC_TYPE_ELS, f_ctl, 0);
+	lport->tt.seq_send(lport, sp, fp);
+	fc_rport_enter_prli(rdata);
+out:
+	mutex_unlock(&rdata->rp_mutex);
+	return;
+
+reject:
+	lport->tt.seq_els_rsp_send(sp, ELS_LS_RJT, &rjt_data);
+	fc_frame_free(fp);
 }
 
 /**
  * fc_rport_recv_prli_req() - Handle incoming Process Login (PRLI) request
- * @rport: Fibre Channel remote port that initiated PRLI
+ * @rdata: private remote port data
  * @sp: current sequence in the PRLI exchange
  * @fp: PRLI request frame
  *
 * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
-static void fc_rport_recv_prli_req(struct fc_rport *rport,
+static void fc_rport_recv_prli_req(struct fc_rport_priv *rdata,
 				   struct fc_seq *sp, struct fc_frame *rx_fp)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 	struct fc_exch *ep;
 	struct fc_frame *fp;
@@ -1099,12 +1329,14 @@
 
 	fh = fc_frame_header_get(rx_fp);
 
-	FC_RPORT_DBG(rport, "Received PRLI request while in state %s\n",
-		     fc_rport_state(rport));
+	FC_RPORT_DBG(rdata, "Received PRLI request while in state %s\n",
+		     fc_rport_state(rdata));
 
 	switch (rdata->rp_state) {
 	case RPORT_ST_PRLI:
+	case RPORT_ST_RTV:
 	case RPORT_ST_READY:
+	case RPORT_ST_ADISC:
 		reason = ELS_RJT_NONE;
 		break;
 	default:
@@ -1149,6 +1381,9 @@
 		pp->prli.prli_len = htons(len);
 		len -= sizeof(struct fc_els_prli);
 
+		/* reinitialize remote port roles */
+		rdata->ids.roles = FC_RPORT_ROLE_UNKNOWN;
+
 		/*
 		 * Go through all the service parameter pages and build
 		 * response.  If plen indicates longer SPP than standard,
@@ -1169,12 +1404,12 @@
 				fcp_parm = ntohl(rspp->spp_params);
 				if (fcp_parm & FCP_SPPF_RETRY)
 					rdata->flags |= FC_RP_FLAGS_RETRY;
-				rport->supported_classes = FC_COS_CLASS3;
+				rdata->supported_classes = FC_COS_CLASS3;
 				if (fcp_parm & FCP_SPPF_INIT_FCN)
 					roles |= FC_RPORT_ROLE_FCP_INITIATOR;
 				if (fcp_parm & FCP_SPPF_TARG_FCN)
 					roles |= FC_RPORT_ROLE_FCP_TARGET;
-				rport->roles = roles;
+				rdata->ids.roles = roles;
 
 				spp->spp_params =
 					htonl(lport->service_params);
@@ -1204,9 +1439,10 @@
 		 */
 		switch (rdata->rp_state) {
 		case RPORT_ST_PRLI:
-			fc_rport_enter_ready(rport);
+			fc_rport_enter_ready(rdata);
 			break;
 		case RPORT_ST_READY:
+		case RPORT_ST_ADISC:
 			break;
 		default:
 			break;
@@ -1217,17 +1453,17 @@
 
 /**
  * fc_rport_recv_prlo_req() - Handle incoming Process Logout (PRLO) request
- * @rport: Fibre Channel remote port that initiated PRLO
+ * @rdata: private remote port data
  * @sp: current sequence in the PRLO exchange
  * @fp: PRLO request frame
  *
 * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
-static void fc_rport_recv_prlo_req(struct fc_rport *rport, struct fc_seq *sp,
+static void fc_rport_recv_prlo_req(struct fc_rport_priv *rdata,
+				   struct fc_seq *sp,
 				   struct fc_frame *fp)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
 	struct fc_lport *lport = rdata->local_port;
 
 	struct fc_frame_header *fh;
@@ -1235,13 +1471,8 @@
 
 	fh = fc_frame_header_get(fp);
 
-	FC_RPORT_DBG(rport, "Received PRLO request while in state %s\n",
-		     fc_rport_state(rport));
-
-	if (rdata->rp_state == RPORT_ST_NONE) {
-		fc_frame_free(fp);
-		return;
-	}
+	FC_RPORT_DBG(rdata, "Received PRLO request while in state %s\n",
+		     fc_rport_state(rdata));
 
 	rjt_data.fp = NULL;
 	rjt_data.reason = ELS_RJT_UNAB;
@@ -1252,35 +1483,46 @@
 
 /**
  * fc_rport_recv_logo_req() - Handle incoming Logout (LOGO) request
- * @rport: Fibre Channel remote port that initiated LOGO
+ * @lport: local port.
  * @sp: current sequence in the LOGO exchange
  * @fp: LOGO request frame
  *
 * Locking Note: The rport lock is expected to be held before calling
  * this function.
  */
-static void fc_rport_recv_logo_req(struct fc_rport *rport, struct fc_seq *sp,
+static void fc_rport_recv_logo_req(struct fc_lport *lport,
+				   struct fc_seq *sp,
 				   struct fc_frame *fp)
 {
 	struct fc_frame_header *fh;
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
-
-	fh = fc_frame_header_get(fp);
-
-	FC_RPORT_DBG(rport, "Received LOGO request while in state %s\n",
-		     fc_rport_state(rport));
-
-	if (rdata->rp_state == RPORT_ST_NONE) {
-		fc_frame_free(fp);
-		return;
-	}
-
-	rdata->event = RPORT_EV_LOGO;
-	fc_rport_state_enter(rport, RPORT_ST_NONE);
-	queue_work(rport_event_queue, &rdata->event_work);
+	struct fc_rport_priv *rdata;
+	u32 sid;
 
 	lport->tt.seq_els_rsp_send(sp, ELS_LS_ACC, NULL);
+
+	fh = fc_frame_header_get(fp);
+	sid = ntoh24(fh->fh_s_id);
+
+	mutex_lock(&lport->disc.disc_mutex);
+	rdata = lport->tt.rport_lookup(lport, sid);
+	if (rdata) {
+		mutex_lock(&rdata->rp_mutex);
+		FC_RPORT_DBG(rdata, "Received LOGO request while in state %s\n",
+			     fc_rport_state(rdata));
+
+		/*
+		 * If the remote port was created due to discovery,
+		 * log back in.  It may have seen a stale RSCN about us.
+		 */
+		if (rdata->rp_state != RPORT_ST_DELETE && rdata->disc_id)
+			fc_rport_enter_plogi(rdata);
+		else
+			fc_rport_enter_delete(rdata, RPORT_EV_LOGO);
+		mutex_unlock(&rdata->rp_mutex);
+	} else
+		FC_RPORT_ID_DBG(lport, sid,
+				"Received LOGO from non-logged-in port\n");
+	mutex_unlock(&lport->disc.disc_mutex);
 	fc_frame_free(fp);
 }
 
@@ -1291,8 +1533,11 @@
 
 int fc_rport_init(struct fc_lport *lport)
 {
+	if (!lport->tt.rport_lookup)
+		lport->tt.rport_lookup = fc_rport_lookup;
+
 	if (!lport->tt.rport_create)
-		lport->tt.rport_create = fc_rport_rogue_create;
+		lport->tt.rport_create = fc_rport_create;
 
 	if (!lport->tt.rport_login)
 		lport->tt.rport_login = fc_rport_login;
@@ -1306,6 +1551,9 @@
 	if (!lport->tt.rport_flush_queue)
 		lport->tt.rport_flush_queue = fc_rport_flush_queue;
 
+	if (!lport->tt.rport_destroy)
+		lport->tt.rport_destroy = fc_rport_destroy;
+
 	return 0;
 }
 EXPORT_SYMBOL(fc_rport_init);
@@ -1327,8 +1575,8 @@
 
 void fc_rport_terminate_io(struct fc_rport *rport)
 {
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;
-	struct fc_lport *lport = rdata->local_port;
+	struct fc_rport_libfc_priv *rp = rport->dd_data;
+	struct fc_lport *lport = rp->local_port;
 
 	lport->tt.exch_mgr_reset(lport, 0, rport->port_id);
 	lport->tt.exch_mgr_reset(lport, rport->port_id, 0);
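
The conversion above changes every asynchronous ELS send (RTV, LOGO, ADISC) to take a reference on the rport private data with kref_get() before handing it to the response callback, which drops it with kref_put() against the transport's rport_destroy hook after unlocking. A minimal sketch of that lifetime rule follows; the my_obj/my_send/my_obj_destroy names are invented for illustration and are not part of libfc.

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct my_obj {
	struct kref kref;
	struct mutex lock;
};

/* hypothetical async send hook; pretend 0 means the request was queued */
static int my_send(struct my_obj *obj)
{
	return 0;
}

static struct my_obj *my_obj_alloc(void)
{
	struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (obj) {
		kref_init(&obj->kref);		/* starts at one reference */
		mutex_init(&obj->lock);
	}
	return obj;
}

static void my_obj_destroy(struct kref *kref)
{
	struct my_obj *obj = container_of(kref, struct my_obj, kref);

	kfree(obj);
}

/* caller holds obj->lock, as the fc_rport_enter_* routines hold rp_mutex */
static void my_obj_start_request(struct my_obj *obj)
{
	if (my_send(obj) == 0)
		kref_get(&obj->kref);	/* reference now owned by the callback */
}

/* response callback: runs without the lock, takes it, then drops its ref */
static void my_obj_resp(void *arg)
{
	struct my_obj *obj = arg;

	mutex_lock(&obj->lock);
	/* ... handle the response or error frame ... */
	mutex_unlock(&obj->lock);
	kref_put(&obj->kref, my_obj_destroy);
}
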
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index a751f62..8dc73c4 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -109,12 +109,9 @@
 }
 EXPORT_SYMBOL_GPL(iscsi_conn_queue_work);
 
-void
-iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+static void __iscsi_update_cmdsn(struct iscsi_session *session,
+				 uint32_t exp_cmdsn, uint32_t max_cmdsn)
 {
-	uint32_t max_cmdsn = be32_to_cpu(hdr->max_cmdsn);
-	uint32_t exp_cmdsn = be32_to_cpu(hdr->exp_cmdsn);
-
 	/*
 	 * standard specifies this check for when to update expected and
 	 * max sequence numbers
@@ -138,6 +135,12 @@
 			iscsi_conn_queue_work(session->leadconn);
 	}
 }
+
+void iscsi_update_cmdsn(struct iscsi_session *session, struct iscsi_nopin *hdr)
+{
+	__iscsi_update_cmdsn(session, be32_to_cpu(hdr->exp_cmdsn),
+			     be32_to_cpu(hdr->max_cmdsn));
+}
 EXPORT_SYMBOL_GPL(iscsi_update_cmdsn);
 
 /**
@@ -301,8 +304,6 @@
 	hdr->flags = ISCSI_ATTR_SIMPLE;
 	int_to_scsilun(sc->device->lun, (struct scsi_lun *)hdr->lun);
 	memcpy(task->lun, hdr->lun, sizeof(task->lun));
-	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
-	session->cmdsn++;
 	hdr->exp_statsn = cpu_to_be32(conn->exp_statsn);
 	cmd_len = sc->cmd_len;
 	if (cmd_len < ISCSI_CDB_SIZE)
@@ -388,6 +389,8 @@
 		return -EIO;
 
 	task->state = ISCSI_TASK_RUNNING;
+	hdr->cmdsn = task->cmdsn = cpu_to_be32(session->cmdsn);
+	session->cmdsn++;
 
 	conn->scsicmd_pdus_cnt++;
 	ISCSI_DBG_SESSION(session, "iscsi prep [%s cid %d sc %p cdb 0x%x "
@@ -499,6 +502,31 @@
 	__iscsi_put_task(task);
 }
 
+/**
+ * iscsi_complete_scsi_task - finish scsi task normally
+ * @task: iscsi task for scsi cmd
+ * @exp_cmdsn: expected cmd sn in cpu format
+ * @max_cmdsn: max cmd sn in cpu format
+ *
+ * This is used when drivers do not need or cannot perform
+ * lower level pdu processing.
+ *
+ * Called with the session lock held.
+ */
+void iscsi_complete_scsi_task(struct iscsi_task *task,
+			      uint32_t exp_cmdsn, uint32_t max_cmdsn)
+{
+	struct iscsi_conn *conn = task->conn;
+
+	ISCSI_DBG_SESSION(conn->session, "[itt 0x%x]\n", task->itt);
+
+	conn->last_recv = jiffies;
+	__iscsi_update_cmdsn(conn->session, exp_cmdsn, max_cmdsn);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+}
+EXPORT_SYMBOL_GPL(iscsi_complete_scsi_task);
+
+
 /*
  * session lock must be held and if not called for a task that is
  * still pending or from the xmit thread, then xmit thread must
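
iscsi_complete_scsi_task() above gives offload drivers a way to finish a SCSI command without feeding a response PDU through the libiscsi processing paths. A rough sketch of how a driver's hardware-completion handler might call it; the my_hw_rsp structure is a made-up stand-in for whatever the hardware reports, carrying the CmdSN window already in CPU byte order, and per the kernel-doc the call runs under the session lock.

#include <scsi/libiscsi.h>

/* hypothetical completion descriptor produced by the offload hardware */
struct my_hw_rsp {
	uint32_t itt;
	uint32_t exp_cmdsn;
	uint32_t max_cmdsn;
};

static void my_hw_scsi_done(struct iscsi_conn *conn, struct my_hw_rsp *rsp)
{
	struct iscsi_task *task;

	spin_lock(&conn->session->lock);
	task = iscsi_itt_to_task(conn, rsp->itt);
	if (task)
		iscsi_complete_scsi_task(task, rsp->exp_cmdsn, rsp->max_cmdsn);
	spin_unlock(&conn->session->lock);
}
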
@@ -857,27 +885,102 @@
 	}
 }
 
+static int iscsi_nop_out_rsp(struct iscsi_task *task,
+			     struct iscsi_nopin *nop, char *data, int datalen)
+{
+	struct iscsi_conn *conn = task->conn;
+	int rc = 0;
+
+	if (conn->ping_task != task) {
+		/*
+		 * If this is not in response to one of our
+		 * nops then it must be from userspace.
+		 */
+		if (iscsi_recv_pdu(conn->cls_conn, (struct iscsi_hdr *)nop,
+				   data, datalen))
+			rc = ISCSI_ERR_CONN_FAILED;
+	} else
+		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
+	iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+	return rc;
+}
+
 static int iscsi_handle_reject(struct iscsi_conn *conn, struct iscsi_hdr *hdr,
 			       char *data, int datalen)
 {
 	struct iscsi_reject *reject = (struct iscsi_reject *)hdr;
 	struct iscsi_hdr rejected_pdu;
+	int opcode, rc = 0;
 
 	conn->exp_statsn = be32_to_cpu(reject->statsn) + 1;
 
-	if (reject->reason == ISCSI_REASON_DATA_DIGEST_ERROR) {
-		if (ntoh24(reject->dlength) > datalen)
-			return ISCSI_ERR_PROTO;
-
-		if (ntoh24(reject->dlength) >= sizeof(struct iscsi_hdr)) {
-			memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
-			iscsi_conn_printk(KERN_ERR, conn,
-					  "pdu (op 0x%x) rejected "
-					  "due to DataDigest error.\n",
-					  rejected_pdu.opcode);
-		}
+	if (ntoh24(reject->dlength) > datalen ||
+	    ntoh24(reject->dlength) < sizeof(struct iscsi_hdr)) {
+		iscsi_conn_printk(KERN_ERR, conn, "Cannot handle rejected "
+				  "pdu. Invalid data length (pdu dlength "
+				  "%u, datalen %d\n", ntoh24(reject->dlength),
+				  datalen);
+		return ISCSI_ERR_PROTO;
 	}
-	return 0;
+	memcpy(&rejected_pdu, data, sizeof(struct iscsi_hdr));
+	opcode = rejected_pdu.opcode & ISCSI_OPCODE_MASK;
+
+	switch (reject->reason) {
+	case ISCSI_REASON_DATA_DIGEST_ERROR:
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "pdu (op 0x%x itt 0x%x) rejected "
+				  "due to DataDigest error.\n",
+				  opcode, rejected_pdu.itt);
+		break;
+	case ISCSI_REASON_IMM_CMD_REJECT:
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "pdu (op 0x%x itt 0x%x) rejected. Too many "
+				  "immediate commands.\n",
+				  opcode, rejected_pdu.itt);
+		/*
+		 * We only send one TMF at a time so if the target could not
+		 * handle it, then it should get fixed (RFC mandates that
+		 * a target can handle one immediate TMF per conn).
+		 *
+		 * For nops-outs, we could have sent more than one if
+		 * the target is sending us lots of nop-ins
+		 */
+		if (opcode != ISCSI_OP_NOOP_OUT)
+			return 0;
+
+		if (rejected_pdu.itt == cpu_to_be32(ISCSI_RESERVED_TAG))
+			/*
+			 * nop-out in response to target's nop-out rejected.
+			 * Just resend.
+			 */
+			iscsi_send_nopout(conn,
+					  (struct iscsi_nopin*)&rejected_pdu);
+		else {
+			struct iscsi_task *task;
+			/*
+			 * Our nop as ping got dropped. We know the target
+			 * and transport are ok so just clean up
+			 */
+			task = iscsi_itt_to_task(conn, rejected_pdu.itt);
+			if (!task) {
+				iscsi_conn_printk(KERN_ERR, conn,
+						 "Invalid pdu reject. Could "
+						 "not lookup rejected task.\n");
+				rc = ISCSI_ERR_BAD_ITT;
+			} else
+				rc = iscsi_nop_out_rsp(task,
+					(struct iscsi_nopin*)&rejected_pdu,
+					NULL, 0);
+		}
+		break;
+	default:
+		iscsi_conn_printk(KERN_ERR, conn,
+				  "pdu (op 0x%x itt 0x%x) rejected. Reason "
+				  "code 0x%x\n", rejected_pdu.itt,
+				  rejected_pdu.opcode, reject->reason);
+		break;
+	}
+	return rc;
 }
 
 /**
@@ -1038,15 +1141,8 @@
 		}
 		conn->exp_statsn = be32_to_cpu(hdr->statsn) + 1;
 
-		if (conn->ping_task != task)
-			/*
-			 * If this is not in response to one of our
-			 * nops then it must be from userspace.
-			 */
-			goto recv_pdu;
-
-		mod_timer(&conn->transport_timer, jiffies + conn->recv_timeout);
-		iscsi_complete_task(task, ISCSI_TASK_COMPLETED);
+		rc = iscsi_nop_out_rsp(task, (struct iscsi_nopin*)hdr,
+				       data, datalen);
 		break;
 	default:
 		rc = ISCSI_ERR_BAD_OPCODE;
@@ -1212,6 +1308,9 @@
 	struct iscsi_task *task = conn->task;
 	int rc;
 
+	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx))
+		return -ENODATA;
+
 	__iscsi_get_task(task);
 	spin_unlock_bh(&conn->session->lock);
 	rc = conn->session->tt->xmit_task(task);
@@ -1261,7 +1360,7 @@
 	int rc = 0;
 
 	spin_lock_bh(&conn->session->lock);
-	if (unlikely(conn->suspend_tx)) {
+	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
 		ISCSI_DBG_SESSION(conn->session, "Tx suspended!\n");
 		spin_unlock_bh(&conn->session->lock);
 		return -ENODATA;
@@ -1270,7 +1369,7 @@
 	if (conn->task) {
 		rc = iscsi_xmit_task(conn);
 	        if (rc)
-		        goto again;
+		        goto done;
 	}
 
 	/*
@@ -1290,7 +1389,7 @@
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
-			goto again;
+			goto done;
 	}
 
 	/* process pending command queue */
@@ -1311,14 +1410,14 @@
 				list_add_tail(&conn->task->running,
 					      &conn->cmdqueue);
 				conn->task = NULL;
-				goto again;
+				goto done;
 			} else
 				fail_scsi_task(conn->task, DID_ABORT);
 			continue;
 		}
 		rc = iscsi_xmit_task(conn);
 		if (rc)
-			goto again;
+			goto done;
 		/*
 		 * we could continuously get new task requests so
 		 * we need to check the mgmt queue for nops that need to
@@ -1344,16 +1443,14 @@
 		conn->task->state = ISCSI_TASK_RUNNING;
 		rc = iscsi_xmit_task(conn);
 		if (rc)
-			goto again;
+			goto done;
 		if (!list_empty(&conn->mgmtqueue))
 			goto check_mgmt;
 	}
 	spin_unlock_bh(&conn->session->lock);
 	return -ENODATA;
 
-again:
-	if (unlikely(conn->suspend_tx))
-		rc = -ENODATA;
+done:
 	spin_unlock_bh(&conn->session->lock);
 	return rc;
 }
@@ -1474,6 +1571,12 @@
 		goto fault;
 	}
 
+	if (test_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx)) {
+		reason = FAILURE_SESSION_IN_RECOVERY;
+		sc->result = DID_REQUEUE;
+		goto fault;
+	}
+
 	if (iscsi_check_cmdsn_window_closed(conn)) {
 		reason = FAILURE_WINDOW_CLOSED;
 		goto reject;
@@ -1497,6 +1600,7 @@
 			}
 		}
 		if (session->tt->xmit_task(task)) {
+			session->cmdsn--;
 			reason = FAILURE_SESSION_NOT_READY;
 			goto prepd_reject;
 		}
@@ -1712,6 +1816,33 @@
 	}
 }
 
+/**
+ * iscsi_suspend_queue - suspend iscsi_queuecommand
+ * @conn: iscsi conn to stop queueing IO on
+ *
+ * This grabs the session lock to make sure no one is in
+ * xmit_task/queuecommand, and then sets suspend to prevent
+ * new commands from being queued. This only needs to be called
+ * by offload drivers that need to sync a path like ep disconnect
+ * with the iscsi_queuecommand/xmit_task. To start IO again libiscsi
+ * will call iscsi_start_tx and iscsi_unblock_session when in FFP.
+ */
+void iscsi_suspend_queue(struct iscsi_conn *conn)
+{
+	spin_lock_bh(&conn->session->lock);
+	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_tx);
+	spin_unlock_bh(&conn->session->lock);
+}
+EXPORT_SYMBOL_GPL(iscsi_suspend_queue);
+
+/**
+ * iscsi_suspend_tx - suspend iscsi_data_xmit
+ * @conn: iscsi conn to stop processing IO on.
+ *
+ * This function sets the suspend bit to prevent iscsi_data_xmit
+ * from sending new IO, and if work is queued on the xmit thread
+ * it will wait for it to be completed.
+ */
 void iscsi_suspend_tx(struct iscsi_conn *conn)
 {
 	struct Scsi_Host *shost = conn->session->host;
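
The new ISCSI_SUSPEND_BIT checks in iscsi_queuecommand() and iscsi_xmit_task(), together with iscsi_suspend_queue(), let an offload driver quiesce I/O before tearing down a connection endpoint. A sketch of how an ep_disconnect path might use the helper; struct my_ep and my_hw_conn_teardown() are invented placeholders, not part of any real driver.

#include <scsi/libiscsi.h>
#include <scsi/scsi_transport_iscsi.h>

struct my_ep {				/* hypothetical per-endpoint state */
	struct iscsi_conn *conn;
};

static void my_hw_conn_teardown(struct my_ep *my_ep);	/* hypothetical */

static void my_ep_disconnect(struct iscsi_endpoint *ep)
{
	struct my_ep *my_ep = ep->dd_data;

	/*
	 * Stop queuecommand/xmit_task from touching the connection
	 * before the hardware path underneath it is destroyed.
	 */
	if (my_ep->conn)
		iscsi_suspend_queue(my_ep->conn);

	my_hw_conn_teardown(my_ep);
}
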
diff --git a/drivers/scsi/libsrp.c b/drivers/scsi/libsrp.c
index 2742ae8..9ad38e8 100644
--- a/drivers/scsi/libsrp.c
+++ b/drivers/scsi/libsrp.c
@@ -124,6 +124,7 @@
 		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
 		kfree(ring[i]);
 	}
+	kfree(ring);
 }
 
 int srp_target_alloc(struct srp_target *target, struct device *dev,
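
The one-line fix above plugs the leak named in the merge summary: srp_ring_free() released each ring entry and its coherent buffer but never the array of pointers itself. A stripped-down sketch of the pairing, with an illustrative ring_buf type standing in for the libsrp ring entry:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

struct ring_buf {			/* stand-in for the libsrp ring entry */
	void *buf;
	dma_addr_t dma;
};

static void ring_free(struct device *dev, struct ring_buf **ring,
		      size_t num, size_t size)
{
	size_t i;

	for (i = 0; i < num; i++) {
		dma_free_coherent(dev, size, ring[i]->buf, ring[i]->dma);
		kfree(ring[i]);		/* each entry */
	}
	kfree(ring);			/* the pointer array - the missing free */
}
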
diff --git a/drivers/scsi/lpfc/Makefile b/drivers/scsi/lpfc/Makefile
index 1c28670..ad05d6e 100644
--- a/drivers/scsi/lpfc/Makefile
+++ b/drivers/scsi/lpfc/Makefile
@@ -28,4 +28,4 @@
 
 lpfc-objs := lpfc_mem.o lpfc_sli.o lpfc_ct.o lpfc_els.o lpfc_hbadisc.o	\
 	lpfc_init.o lpfc_mbox.o lpfc_nportdisc.o lpfc_scsi.o lpfc_attr.o \
-	lpfc_vport.o lpfc_debugfs.o
+	lpfc_vport.o lpfc_debugfs.o lpfc_bsg.o
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 1877d98..aa10f79 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -312,6 +312,7 @@
 #define FC_BYPASSED_MODE        0x20000	 /* NPort is in bypassed mode */
 #define FC_VPORT_NEEDS_REG_VPI	0x80000  /* Needs to have its vpi registered */
 #define FC_RSCN_DEFERRED	0x100000 /* A deferred RSCN being processed */
+#define FC_VPORT_NEEDS_INIT_VPI 0x200000 /* Need to INIT_VPI before FDISC */
 
 	uint32_t ct_flags;
 #define FC_CT_RFF_ID		0x1	 /* RFF_ID accepted by switch */
@@ -440,6 +441,12 @@
 	MSIX,
 };
 
+struct unsol_rcv_ct_ctx {
+	uint32_t ctxt_id;
+	uint32_t SID;
+	uint32_t oxid;
+};
+
 struct lpfc_hba {
 	/* SCSI interface function jump table entries */
 	int (*lpfc_new_scsi_buf)
@@ -525,6 +532,8 @@
 #define FCP_XRI_ABORT_EVENT	0x20
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
+#define LINK_DISABLED		0x100 /* Link disabled by user */
+#define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
 	struct lpfc_dmabuf slim2p;
 
 	MAILBOX_t *mbox;
@@ -616,6 +625,8 @@
 	uint32_t hbq_count;	        /* Count of configured HBQs */
 	struct hbq_s hbqs[LPFC_MAX_HBQS]; /* local copy of hbq indicies  */
 
+	uint32_t fcp_qidx;		/* next work queue to post work to */
+
 	unsigned long pci_bar0_map;     /* Physical address for PCI BAR0 */
 	unsigned long pci_bar1_map;     /* Physical address for PCI BAR1 */
 	unsigned long pci_bar2_map;     /* Physical address for PCI BAR2 */
@@ -682,6 +693,7 @@
 	struct pci_pool *lpfc_mbuf_pool;
 	struct pci_pool *lpfc_hrb_pool;	/* header receive buffer pool */
 	struct pci_pool *lpfc_drb_pool; /* data receive buffer pool */
+	struct pci_pool *lpfc_hbq_pool;	/* SLI3 hbq buffer pool */
 	struct lpfc_dma_pool lpfc_mbuf_safety_pool;
 
 	mempool_t *mbox_mem_pool;
@@ -763,11 +775,18 @@
 /* Maximum number of events that can be outstanding at any time*/
 #define LPFC_MAX_EVT_COUNT 512
 	atomic_t fast_event_count;
+	uint32_t fcoe_eventtag;
+	uint32_t fcoe_eventtag_at_fcf_scan;
 	struct lpfc_fcf fcf;
 	uint8_t fc_map[3];
 	uint8_t valid_vlan;
 	uint16_t vlan_id;
 	struct list_head fcf_conn_rec_list;
+
+	struct mutex ct_event_mutex; /* synchronize access to ct_ev_waiters */
+	struct list_head ct_ev_waiters;
+	struct unsol_rcv_ct_ctx ct_ctx[64];
+	uint32_t ctx_idx;
 };
 
 static inline struct Scsi_Host *
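
The new ct_ctx[64]/ctx_idx members give the SLI-4 path a small ring in which to remember the S_ID and OXID of each unsolicited CT exchange, so that a later userspace-built response can be matched back to its exchange by index. A simplified sketch of that overwrite-oldest ring, using invented names rather than the lpfc fields:

#include <linux/types.h>

#define MY_CTX_COUNT	64		/* mirrors ct_ctx[64] above */

struct my_ct_ctx {
	uint32_t sid;			/* source ID of the unsolicited frame */
	uint32_t oxid;			/* originator exchange ID */
};

static struct my_ct_ctx my_ctx_ring[MY_CTX_COUNT];
static uint32_t my_ctx_idx;

/* remember an exchange; the returned slot is what reaches userspace */
static uint32_t my_ctx_save(uint32_t sid, uint32_t oxid)
{
	uint32_t slot = my_ctx_idx;

	my_ctx_ring[slot].sid = sid;
	my_ctx_ring[slot].oxid = oxid;
	my_ctx_idx = (my_ctx_idx + 1) % MY_CTX_COUNT;	/* wrap, reuse oldest */
	return slot;
}
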
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index fc07be5..e1a30a1 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -394,7 +394,12 @@
 	case LPFC_INIT_MBX_CMDS:
 	case LPFC_LINK_DOWN:
 	case LPFC_HBA_ERROR:
-		len += snprintf(buf + len, PAGE_SIZE-len, "Link Down\n");
+		if (phba->hba_flag & LINK_DISABLED)
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down - User disabled\n");
+		else
+			len += snprintf(buf + len, PAGE_SIZE-len,
+				"Link Down\n");
 		break;
 	case LPFC_LINK_UP:
 	case LPFC_CLEAR_LA:
@@ -4127,6 +4132,9 @@
 	.vport_disable = lpfc_vport_disable,
 
 	.set_vport_symbolic_name = lpfc_set_vport_symbolic_name,
+
+	.bsg_request = lpfc_bsg_request,
+	.bsg_timeout = lpfc_bsg_timeout,
 };
 
 struct fc_function_template lpfc_vport_transport_functions = {
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
new file mode 100644
index 0000000..da6bf5a
--- /dev/null
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -0,0 +1,904 @@
+/*******************************************************************
+ * This file is part of the Emulex Linux Device Driver for         *
+ * Fibre Channel Host Bus Adapters.                                *
+ * Copyright (C) 2009 Emulex.  All rights reserved.                *
+ * EMULEX and SLI are trademarks of Emulex.                        *
+ * www.emulex.com                                                  *
+ *                                                                 *
+ * This program is free software; you can redistribute it and/or   *
+ * modify it under the terms of version 2 of the GNU General       *
+ * Public License as published by the Free Software Foundation.    *
+ * This program is distributed in the hope that it will be useful. *
+ * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND          *
+ * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY,  *
+ * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE      *
+ * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
+ * TO BE LEGALLY INVALID.  See the GNU General Public License for  *
+ * more details, a copy of which can be found in the file COPYING  *
+ * included with this package.                                     *
+ *******************************************************************/
+
+#include <linux/interrupt.h>
+#include <linux/mempool.h>
+#include <linux/pci.h>
+
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_bsg_fc.h>
+
+#include "lpfc_hw4.h"
+#include "lpfc_hw.h"
+#include "lpfc_sli.h"
+#include "lpfc_sli4.h"
+#include "lpfc_nl.h"
+#include "lpfc_disc.h"
+#include "lpfc_scsi.h"
+#include "lpfc.h"
+#include "lpfc_logmsg.h"
+#include "lpfc_crtn.h"
+#include "lpfc_vport.h"
+#include "lpfc_version.h"
+
+/**
+ * lpfc_bsg_rport_ct - send a CT command from a bsg request
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_rport_ct(struct fc_bsg_job *job)
+{
+	struct Scsi_Host *shost = job->shost;
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+	struct ulp_bde64 *bpl = NULL;
+	uint32_t timeout;
+	struct lpfc_iocbq *cmdiocbq = NULL;
+	struct lpfc_iocbq *rspiocbq = NULL;
+	IOCB_t *cmd;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (!lpfc_nlp_get(ndlp)) {
+		job->reply->result = -ENODEV;
+		return 0;
+	}
+
+	if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
+		rc = -ENODEV;
+		goto free_ndlp_exit;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		spin_unlock_irq(shost->host_lock);
+		goto free_ndlp_exit;
+	}
+	cmd = &cmdiocbq->iocb;
+
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	rsp = &rspiocbq->iocb;
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		spin_lock_irq(shost->host_lock);
+		goto free_rspiocbq;
+	}
+
+	spin_lock_irq(shost->host_lock);
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+	spin_unlock_irq(shost->host_lock);
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_UNSOL_CTL;
+	cmd->un.genreq64.w5.hcsw.Type = FC_COMMON_TRANSPORT_ULP;
+	cmd->ulpBdeCount = 1;
+	cmd->ulpLe = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpContext = ndlp->nlp_rpi;
+	cmd->ulpOwner = OWN_CHIP;
+	cmdiocbq->vport = phba->pport;
+	cmdiocbq->context1 = NULL;
+	cmdiocbq->context2 = NULL;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+
+	timeout = phba->fc_ratov * 2;
+	job->dd_data = cmdiocbq;
+
+	rc = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq, rspiocbq,
+					timeout + LPFC_DRVR_TIMEOUT);
+
+	if (rc != IOCB_TIMEDOUT) {
+		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	}
+
+	if (rc == IOCB_TIMEDOUT) {
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+		rc = -EACCES;
+		goto free_ndlp_exit;
+	}
+
+	if (rc != IOCB_SUCCESS) {
+		rc = -EACCES;
+		goto free_outdmp;
+	}
+
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+			goto free_outdmp;
+		}
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+free_outdmp:
+	spin_lock_irq(shost->host_lock);
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+free_bmp:
+	kfree(bmp);
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	spin_unlock_irq(shost->host_lock);
+free_ndlp_exit:
+	lpfc_nlp_put(ndlp);
+
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+
+	return 0;
+}
+
+/**
+ * lpfc_bsg_rport_els - send an ELS command from a bsg request
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_rport_els(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_rport_data *rdata = job->rport->dd_data;
+	struct lpfc_nodelist *ndlp = rdata->pnode;
+
+	uint32_t elscmd;
+	uint32_t cmdsize;
+	uint32_t rspsize;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_iocbq *cmdiocbq;
+	IOCB_t *rsp;
+	uint16_t rpi = 0;
+	struct lpfc_dmabuf *pcmd;
+	struct lpfc_dmabuf *prsp;
+	struct lpfc_dmabuf *pbuflist = NULL;
+	struct ulp_bde64 *bpl;
+	int iocb_status;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	int rc = 0;
+
+	/* in case no data is transferred */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (!lpfc_nlp_get(ndlp)) {
+		rc = -ENODEV;
+		goto out;
+	}
+
+	elscmd = job->request->rqst_data.r_els.els_code;
+	cmdsize = job->request_payload.payload_len;
+	rspsize = job->reply_payload.payload_len;
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		lpfc_nlp_put(ndlp);
+		rc = -ENOMEM;
+		goto out;
+	}
+
+	rsp = &rspiocbq->iocb;
+	rpi = ndlp->nlp_rpi;
+
+	cmdiocbq = lpfc_prep_els_iocb(phba->pport, 1, cmdsize, 0, ndlp,
+				      ndlp->nlp_DID, elscmd);
+
+	if (!cmdiocbq) {
+		lpfc_sli_release_iocbq(phba, rspiocbq);
+		return -EIO;
+	}
+
+	job->dd_data = cmdiocbq;
+	pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
+	prsp = (struct lpfc_dmabuf *) pcmd->list.next;
+
+	lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
+	kfree(pcmd);
+	lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
+	kfree(prsp);
+	cmdiocbq->context2 = NULL;
+
+	pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
+	bpl = (struct ulp_bde64 *) pbuflist->virt;
+
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
+		(request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmdiocbq->iocb.ulpContext = rpi;
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->context1 = NULL;
+	cmdiocbq->context2 = NULL;
+
+	iocb_status = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
+					rspiocbq, (phba->fc_ratov * 2)
+					       + LPFC_DRVR_TIMEOUT);
+
+	/* release the new ndlp once the iocb completes */
+	lpfc_nlp_put(ndlp);
+	if (iocb_status != IOCB_TIMEDOUT) {
+		pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+			     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+		pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+			     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	}
+
+	if (iocb_status == IOCB_SUCCESS) {
+		if (rsp->ulpStatus == IOSTAT_SUCCESS) {
+			job->reply->reply_payload_rcv_len =
+				rsp->un.elsreq64.bdl.bdeSize;
+			rc = 0;
+		} else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
+			struct fc_bsg_ctels_reply *els_reply;
+			/* LS_RJT data returned in word 4 */
+			uint8_t *rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
+
+			els_reply = &job->reply->reply_data.ctels_reply;
+			job->reply->result = 0;
+			els_reply->status = FC_CTELS_STATUS_REJECT;
+			els_reply->rjt_data.action = rjt_data[0];
+			els_reply->rjt_data.reason_code = rjt_data[1];
+			els_reply->rjt_data.reason_explanation = rjt_data[2];
+			els_reply->rjt_data.vendor_unique = rjt_data[3];
+		} else
+			rc = -EIO;
+	} else
+		rc = -EIO;
+
+	if (iocb_status != IOCB_TIMEDOUT)
+		lpfc_els_free_iocb(phba, cmdiocbq);
+
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+
+out:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+
+	return 0;
+}
+
+struct lpfc_ct_event {
+	struct list_head node;
+	int ref;
+	wait_queue_head_t wq;
+
+	/* Event type and waiter identifiers */
+	uint32_t type_mask;
+	uint32_t req_id;
+	uint32_t reg_id;
+
+	/* next two flags are here for the auto-delete logic */
+	unsigned long wait_time_stamp;
+	int waiting;
+
+	/* seen and not seen events */
+	struct list_head events_to_get;
+	struct list_head events_to_see;
+};
+
+struct event_data {
+	struct list_head node;
+	uint32_t type;
+	uint32_t immed_dat;
+	void *data;
+	uint32_t len;
+};
+
+static struct lpfc_ct_event *
+lpfc_ct_event_new(int ev_reg_id, uint32_t ev_req_id)
+{
+	struct lpfc_ct_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
+	if (!evt)
+		return NULL;
+
+	INIT_LIST_HEAD(&evt->events_to_get);
+	INIT_LIST_HEAD(&evt->events_to_see);
+	evt->req_id = ev_req_id;
+	evt->reg_id = ev_reg_id;
+	evt->wait_time_stamp = jiffies;
+	init_waitqueue_head(&evt->wq);
+
+	return evt;
+}
+
+static void
+lpfc_ct_event_free(struct lpfc_ct_event *evt)
+{
+	struct event_data *ed;
+
+	list_del(&evt->node);
+
+	while (!list_empty(&evt->events_to_get)) {
+		ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	while (!list_empty(&evt->events_to_see)) {
+		ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
+		list_del(&ed->node);
+		kfree(ed->data);
+		kfree(ed);
+	}
+
+	kfree(evt);
+}
+
+static inline void
+lpfc_ct_event_ref(struct lpfc_ct_event *evt)
+{
+	evt->ref++;
+}
+
+static inline void
+lpfc_ct_event_unref(struct lpfc_ct_event *evt)
+{
+	if (--evt->ref < 0)
+		lpfc_ct_event_free(evt);
+}
+
+#define SLI_CT_ELX_LOOPBACK 0x10
+
+enum ELX_LOOPBACK_CMD {
+	ELX_LOOPBACK_XRI_SETUP,
+	ELX_LOOPBACK_DATA,
+};
+
+/**
+ * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
+ * @phba: pointer to the HBA that received the CT command
+ * @pring: SLI ring the unsolicited iocb arrived on
+ * @piocbq: the unsolicited iocb
+ *
+ * This function is called when an unsolicited CT command is received.  It
+ * forwards the event to any processes registered to receive CT events.
+ */
+void
+lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			struct lpfc_iocbq *piocbq)
+{
+	uint32_t evt_req_id = 0;
+	uint32_t cmd;
+	uint32_t len;
+	struct lpfc_dmabuf *dmabuf = NULL;
+	struct lpfc_ct_event *evt;
+	struct event_data *evt_dat = NULL;
+	struct lpfc_iocbq *iocbq;
+	size_t offset = 0;
+	struct list_head head;
+	struct ulp_bde64 *bde;
+	dma_addr_t dma_addr;
+	int i;
+	struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
+	struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
+	struct lpfc_hbq_entry *hbqe;
+	struct lpfc_sli_ct_request *ct_req;
+
+	INIT_LIST_HEAD(&head);
+	list_add_tail(&head, &piocbq->list);
+
+	if (piocbq->iocb.ulpBdeCount == 0 ||
+	    piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
+		goto error_ct_unsol_exit;
+
+	if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
+		dmabuf = bdeBuf1;
+	else {
+		dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
+				    piocbq->iocb.un.cont64[0].addrLow);
+		dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
+	}
+
+	ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
+	evt_req_id = ct_req->FsType;
+	cmd = ct_req->CommandResponse.bits.CmdRsp;
+	len = ct_req->CommandResponse.bits.Size;
+	if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
+		lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
+
+	mutex_lock(&phba->ct_event_mutex);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->req_id != evt_req_id)
+			continue;
+
+		lpfc_ct_event_ref(evt);
+
+		evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
+		if (!evt_dat) {
+			lpfc_ct_event_unref(evt);
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2614 Memory allocation failed for "
+					"CT event\n");
+			break;
+		}
+
+		mutex_unlock(&phba->ct_event_mutex);
+
+		if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+			/* take accumulated byte count from the last iocbq */
+			iocbq = list_entry(head.prev, typeof(*iocbq), list);
+			evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
+		} else {
+			list_for_each_entry(iocbq, &head, list) {
+				for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
+					evt_dat->len +=
+					iocbq->iocb.un.cont64[i].tus.f.bdeSize;
+			}
+		}
+
+		evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
+		if (!evt_dat->data) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2615 Memory allocation failed for "
+					"CT event data, size %d\n",
+					evt_dat->len);
+			kfree(evt_dat);
+			mutex_lock(&phba->ct_event_mutex);
+			lpfc_ct_event_unref(evt);
+			mutex_unlock(&phba->ct_event_mutex);
+			goto error_ct_unsol_exit;
+		}
+
+		list_for_each_entry(iocbq, &head, list) {
+			if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
+				bdeBuf1 = iocbq->context2;
+				bdeBuf2 = iocbq->context3;
+			}
+			for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
+				int size = 0;
+				if (phba->sli3_options &
+				    LPFC_SLI3_HBQ_ENABLED) {
+					if (i == 0) {
+						hbqe = (struct lpfc_hbq_entry *)
+						  &iocbq->iocb.un.ulpWord[0];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf1;
+					} else if (i == 1) {
+						hbqe = (struct lpfc_hbq_entry *)
+							&iocbq->iocb.unsli3.
+							sli3Words[4];
+						size = hbqe->bde.tus.f.bdeSize;
+						dmabuf = bdeBuf2;
+					}
+					if ((offset + size) > evt_dat->len)
+						size = evt_dat->len - offset;
+				} else {
+					size = iocbq->iocb.un.cont64[i].
+						tus.f.bdeSize;
+					bde = &iocbq->iocb.un.cont64[i];
+					dma_addr = getPaddr(bde->addrHigh,
+							    bde->addrLow);
+					dmabuf = lpfc_sli_ringpostbuf_get(phba,
+							pring, dma_addr);
+				}
+				if (!dmabuf) {
+					lpfc_printf_log(phba, KERN_ERR,
+						LOG_LIBDFC, "2616 No dmabuf "
+						"found for iocbq 0x%p\n",
+						iocbq);
+					kfree(evt_dat->data);
+					kfree(evt_dat);
+					mutex_lock(&phba->ct_event_mutex);
+					lpfc_ct_event_unref(evt);
+					mutex_unlock(&phba->ct_event_mutex);
+					goto error_ct_unsol_exit;
+				}
+				memcpy((char *)(evt_dat->data) + offset,
+				       dmabuf->virt, size);
+				offset += size;
+				if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
+				    !(phba->sli3_options &
+				      LPFC_SLI3_HBQ_ENABLED)) {
+					lpfc_sli_ringpostbuf_put(phba, pring,
+								 dmabuf);
+				} else {
+					switch (cmd) {
+					case ELX_LOOPBACK_XRI_SETUP:
+						if (!(phba->sli3_options &
+						      LPFC_SLI3_HBQ_ENABLED))
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						else
+							lpfc_in_buf_free(phba,
+									dmabuf);
+						break;
+					default:
+						if (!(phba->sli3_options &
+						      LPFC_SLI3_HBQ_ENABLED))
+							lpfc_post_buffer(phba,
+									 pring,
+									 1);
+						break;
+					}
+				}
+			}
+		}
+
+		mutex_lock(&phba->ct_event_mutex);
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			evt_dat->immed_dat = phba->ctx_idx;
+			phba->ctx_idx = (phba->ctx_idx + 1) % 64;
+			phba->ct_ctx[evt_dat->immed_dat].oxid =
+						piocbq->iocb.ulpContext;
+			phba->ct_ctx[evt_dat->immed_dat].SID =
+				piocbq->iocb.un.rcvels.remoteID;
+		} else
+			evt_dat->immed_dat = piocbq->iocb.ulpContext;
+
+		evt_dat->type = FC_REG_CT_EVENT;
+		list_add(&evt_dat->node, &evt->events_to_see);
+		wake_up_interruptible(&evt->wq);
+		lpfc_ct_event_unref(evt);
+		if (evt_req_id == SLI_CT_ELX_LOOPBACK)
+			break;
+	}
+	mutex_unlock(&phba->ct_event_mutex);
+
+error_ct_unsol_exit:
+	if (!list_empty(&head))
+		list_del(&head);
+
+	return;
+}
+
+/**
+ * lpfc_bsg_set_event - process a SET_EVENT bsg vendor command
+ * @job: SET_EVENT fc_bsg_job
+ */
+static int
+lpfc_bsg_set_event(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct set_ct_event *event_req;
+	struct lpfc_ct_event *evt;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2612 Received SET_CT_EVENT below minimum "
+				"size\n");
+		return -EINVAL;
+	}
+
+	event_req = (struct set_ct_event *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	mutex_lock(&phba->ct_event_mutex);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			lpfc_ct_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			break;
+		}
+	}
+	mutex_unlock(&phba->ct_event_mutex);
+
+	if (&evt->node == &phba->ct_ev_waiters) {
+		/* no event waiting struct yet - first call */
+		evt = lpfc_ct_event_new(event_req->ev_reg_id,
+					event_req->ev_req_id);
+		if (!evt) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+					"2617 Failed allocation of event "
+					"waiter\n");
+			return -ENOMEM;
+		}
+
+		mutex_lock(&phba->ct_event_mutex);
+		list_add(&evt->node, &phba->ct_ev_waiters);
+		lpfc_ct_event_ref(evt);
+		mutex_unlock(&phba->ct_event_mutex);
+	}
+
+	evt->waiting = 1;
+	if (wait_event_interruptible(evt->wq,
+				     !list_empty(&evt->events_to_see))) {
+		mutex_lock(&phba->ct_event_mutex);
+		lpfc_ct_event_unref(evt); /* release ref */
+		lpfc_ct_event_unref(evt); /* delete */
+		mutex_unlock(&phba->ct_event_mutex);
+		rc = -EINTR;
+		goto set_event_out;
+	}
+
+	evt->wait_time_stamp = jiffies;
+	evt->waiting = 0;
+
+	mutex_lock(&phba->ct_event_mutex);
+	list_move(evt->events_to_see.prev, &evt->events_to_get);
+	lpfc_ct_event_unref(evt); /* release ref */
+	mutex_unlock(&phba->ct_event_mutex);
+
+set_event_out:
+	/* set_event carries no reply payload */
+	job->reply->reply_payload_rcv_len = 0;
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+
+	return 0;
+}
+
+/**
+ * lpfc_bsg_get_event - process a GET_EVENT bsg vendor command
+ * @job: GET_EVENT fc_bsg_job
+ */
+static int
+lpfc_bsg_get_event(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct get_ct_event *event_req;
+	struct get_ct_event_reply *event_reply;
+	struct lpfc_ct_event *evt;
+	struct event_data *evt_dat = NULL;
+	int rc = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2613 Received GET_CT_EVENT request below "
+				"minimum size\n");
+		return -EINVAL;
+	}
+
+	event_req = (struct get_ct_event *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	event_reply = (struct get_ct_event_reply *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	mutex_lock(&phba->ct_event_mutex);
+	list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
+		if (evt->reg_id == event_req->ev_reg_id) {
+			if (list_empty(&evt->events_to_get))
+				break;
+			lpfc_ct_event_ref(evt);
+			evt->wait_time_stamp = jiffies;
+			evt_dat = list_entry(evt->events_to_get.prev,
+					     struct event_data, node);
+			list_del(&evt_dat->node);
+			break;
+		}
+	}
+	mutex_unlock(&phba->ct_event_mutex);
+
+	if (!evt_dat) {
+		job->reply->reply_payload_rcv_len = 0;
+		rc = -ENOENT;
+		goto error_get_event_exit;
+	}
+
+	if (evt_dat->len > job->reply_payload.payload_len) {
+		evt_dat->len = job->reply_payload.payload_len;
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2618 Truncated event data at %d "
+				"bytes\n",
+				job->reply_payload.payload_len);
+	}
+
+	event_reply->immed_data = evt_dat->immed_dat;
+
+	if (evt_dat->len > 0)
+		job->reply->reply_payload_rcv_len =
+			sg_copy_from_buffer(job->reply_payload.sg_list,
+					    job->reply_payload.sg_cnt,
+					    evt_dat->data, evt_dat->len);
+	else
+		job->reply->reply_payload_rcv_len = 0;
+	rc = 0;
+
+	if (evt_dat)
+		kfree(evt_dat->data);
+	kfree(evt_dat);
+	mutex_lock(&phba->ct_event_mutex);
+	lpfc_ct_event_unref(evt);
+	mutex_unlock(&phba->ct_event_mutex);
+
+error_get_event_exit:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
+ * @job: fc_bsg_job to handle
+ */
+static int
+lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
+{
+	int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
+
+	switch (command) {
+	case LPFC_BSG_VENDOR_SET_CT_EVENT:
+		return lpfc_bsg_set_event(job);
+		break;
+
+	case LPFC_BSG_VENDOR_GET_CT_EVENT:
+		return lpfc_bsg_get_event(job);
+		break;
+
+	default:
+		return -EINVAL;
+	}
+}
+
+/**
+ * lpfc_bsg_request - handle a bsg request from the FC transport
+ * @job: fc_bsg_job to handle
+ */
+int
+lpfc_bsg_request(struct fc_bsg_job *job)
+{
+	uint32_t msgcode;
+	int rc = -EINVAL;
+
+	msgcode = job->request->msgcode;
+
+	switch (msgcode) {
+	case FC_BSG_HST_VENDOR:
+		rc = lpfc_bsg_hst_vendor(job);
+		break;
+	case FC_BSG_RPT_ELS:
+		rc = lpfc_bsg_rport_els(job);
+		break;
+	case FC_BSG_RPT_CT:
+		rc = lpfc_bsg_rport_ct(job);
+		break;
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
+ * @job: fc_bsg_job that has timed out
+ *
+ * This function just aborts the job's IOCB.  The aborted IOCB will return to
+ * the waiting function, which will handle passing the error back to userspace.
+ */
+int
+lpfc_bsg_timeout(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocb = (struct lpfc_iocbq *)job->dd_data;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
+
+	if (cmdiocb)
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+
+	return 0;
+}
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index d2a9229..0830f37 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -21,9 +21,11 @@
 typedef int (*node_filter)(struct lpfc_nodelist *, void *);
 
 struct fc_rport;
-void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+void lpfc_down_link(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_sli_read_link_ste(struct lpfc_hba *);
+void lpfc_dump_mem(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t, uint16_t);
 void lpfc_dump_wakeup_param(struct lpfc_hba *, LPFC_MBOXQ_t *);
-void lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
+int lpfc_dump_static_vport(struct lpfc_hba *, LPFC_MBOXQ_t *, uint16_t);
 int lpfc_dump_fcoe_param(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_read_nv(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_config_async(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t);
@@ -135,6 +137,9 @@
 int lpfc_els_disc_plogi(struct lpfc_vport *);
 void lpfc_els_timeout(unsigned long);
 void lpfc_els_timeout_handler(struct lpfc_vport *);
+struct lpfc_iocbq *lpfc_prep_els_iocb(struct lpfc_vport *, uint8_t, uint16_t,
+				      uint8_t, struct lpfc_nodelist *,
+				      uint32_t, uint32_t);
 void lpfc_hb_timeout_handler(struct lpfc_hba *);
 
 void lpfc_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -182,11 +187,12 @@
 int lpfc_mbox_tmo_val(struct lpfc_hba *, int);
 void lpfc_init_vfi(struct lpfcMboxq *, struct lpfc_vport *);
 void lpfc_reg_vfi(struct lpfcMboxq *, struct lpfc_vport *, dma_addr_t);
-void lpfc_init_vpi(struct lpfcMboxq *, uint16_t);
+void lpfc_init_vpi(struct lpfc_hba *, struct lpfcMboxq *, uint16_t);
 void lpfc_unreg_vfi(struct lpfcMboxq *, uint16_t);
 void lpfc_reg_fcfi(struct lpfc_hba *, struct lpfcMboxq *);
 void lpfc_unreg_fcfi(struct lpfcMboxq *, uint16_t);
 void lpfc_resume_rpi(struct lpfcMboxq *, struct lpfc_nodelist *);
+int lpfc_check_pending_fcoe_event(struct lpfc_hba *, uint8_t);
 
 void lpfc_config_hbq(struct lpfc_hba *, uint32_t, struct lpfc_hbq_init *,
 	uint32_t , LPFC_MBOXQ_t *);
@@ -234,6 +240,7 @@
 int lpfc_sli_issue_iocb(struct lpfc_hba *, uint32_t,
 			struct lpfc_iocbq *, uint32_t);
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
+void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
@@ -360,3 +367,8 @@
 #define HBA_EVENT_LINK_UP                2
 #define HBA_EVENT_LINK_DOWN              3
 
+/* functions to support SGIOv4/bsg interface */
+int lpfc_bsg_request(struct fc_bsg_job *);
+int lpfc_bsg_timeout(struct fc_bsg_job *);
+void lpfc_bsg_ct_unsol_event(struct lpfc_hba *, struct lpfc_sli_ring *,
+			     struct lpfc_iocbq *);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index 0e532f0..9df7ed3 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -97,6 +97,8 @@
 	struct list_head head;
 	struct lpfc_dmabuf *bdeBuf;
 
+	lpfc_bsg_ct_unsol_event(phba, pring, piocbq);
+
 	if (unlikely(icmd->ulpStatus == IOSTAT_NEED_BUFFER)) {
 		lpfc_sli_hbqbuf_add_hbqs(phba, LPFC_ELS_HBQ);
 	} else if ((icmd->ulpStatus == IOSTAT_LOCAL_REJECT) &&
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index f72fdf2..45337cd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -146,7 +146,7 @@
  *   Pointer to the newly allocated/prepared els iocb data structure
  *   NULL - when els iocb data structure allocation/preparation failed
  **/
-static struct lpfc_iocbq *
+struct lpfc_iocbq *
 lpfc_prep_els_iocb(struct lpfc_vport *vport, uint8_t expectRsp,
 		   uint16_t cmdSize, uint8_t retry,
 		   struct lpfc_nodelist *ndlp, uint32_t did,
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index ed46b24..e6a47e2 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -61,6 +61,7 @@
 
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
+static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -1009,9 +1010,15 @@
 	spin_lock_irqsave(&phba->hbalock, flags);
 	phba->fcf.fcf_flag |= FCF_REGISTERED;
 	spin_unlock_irqrestore(&phba->hbalock, flags);
+	/* If there is a pending FCoE event, restart FCF table scan. */
+	if (lpfc_check_pending_fcoe_event(phba, 1)) {
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		return;
+	}
 	if (vport->port_state != LPFC_FLOGI) {
 		spin_lock_irqsave(&phba->hbalock, flags);
 		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		lpfc_initial_flogi(vport);
 	}
@@ -1054,6 +1061,39 @@
 }
 
 /**
+ * lpfc_sw_name_match - Check if the fcf switch name matches.
+ * @sw_name: pointer to switch name.
+ * @new_fcf_record: pointer to fcf record.
+ *
+ * This routine compares the fcf record's switch name with the provided
+ * switch name. If the switch names are identical this function
+ * returns 1, otherwise it returns 0.
+ **/
+static uint32_t
+lpfc_sw_name_match(uint8_t *sw_name, struct fcf_record *new_fcf_record)
+{
+	if ((sw_name[0] ==
+		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record)) &&
+	    (sw_name[1] ==
+		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record)) &&
+	    (sw_name[2] ==
+		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record)) &&
+	    (sw_name[3] ==
+		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record)) &&
+	    (sw_name[4] ==
+		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record)) &&
+	    (sw_name[5] ==
+		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record)) &&
+	    (sw_name[6] ==
+		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record)) &&
+	    (sw_name[7] ==
+		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record)))
+		return 1;
+	else
+		return 0;
+}
+
+/**
  * lpfc_mac_addr_match - Check if the fcf mac address match.
  * @phba: pointer to lpfc hba data structure.
  * @new_fcf_record: pointer to fcf record.
@@ -1123,6 +1163,22 @@
 		bf_get(lpfc_fcf_record_mac_5, new_fcf_record);
 	phba->fcf.fcf_indx = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
 	phba->fcf.priority = new_fcf_record->fip_priority;
+	phba->fcf.switch_name[0] =
+		bf_get(lpfc_fcf_record_switch_name_0, new_fcf_record);
+	phba->fcf.switch_name[1] =
+		bf_get(lpfc_fcf_record_switch_name_1, new_fcf_record);
+	phba->fcf.switch_name[2] =
+		bf_get(lpfc_fcf_record_switch_name_2, new_fcf_record);
+	phba->fcf.switch_name[3] =
+		bf_get(lpfc_fcf_record_switch_name_3, new_fcf_record);
+	phba->fcf.switch_name[4] =
+		bf_get(lpfc_fcf_record_switch_name_4, new_fcf_record);
+	phba->fcf.switch_name[5] =
+		bf_get(lpfc_fcf_record_switch_name_5, new_fcf_record);
+	phba->fcf.switch_name[6] =
+		bf_get(lpfc_fcf_record_switch_name_6, new_fcf_record);
+	phba->fcf.switch_name[7] =
+		bf_get(lpfc_fcf_record_switch_name_7, new_fcf_record);
 }
 
 /**
@@ -1150,6 +1206,7 @@
 	/* The FCF is already registered, start discovery */
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
 		phba->fcf.fcf_flag |= (FCF_DISCOVERED | FCF_IN_USE);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		if (phba->pport->port_state != LPFC_FLOGI)
 			lpfc_initial_flogi(phba->pport);
@@ -1239,9 +1296,12 @@
 
 		if ((conn_entry->conn_rec.flags & FCFCNCT_FBNM_VALID) &&
 			!lpfc_fab_name_match(conn_entry->conn_rec.fabric_name,
-				new_fcf_record))
+					     new_fcf_record))
 			continue;
-
+		if ((conn_entry->conn_rec.flags & FCFCNCT_SWNM_VALID) &&
+			!lpfc_sw_name_match(conn_entry->conn_rec.switch_name,
+					    new_fcf_record))
+			continue;
 		if (conn_entry->conn_rec.flags & FCFCNCT_VLAN_VALID) {
 			/*
 			 * If the vlan bit map does not have the bit set for the
@@ -1336,6 +1396,60 @@
 }
 
 /**
+ * lpfc_check_pending_fcoe_event - Check if there is a pending fcoe event.
+ * @phba: pointer to lpfc hba data structure.
+ * @unreg_fcf: Unregister FCF if the FCF table needs to be re-scanned.
+ *
+ * This function checks if any fcoe event is pending while the driver
+ * scans FCF entries. If there is a pending event, it restarts the
+ * FCF scanning and returns 1, otherwise it returns 0.
+ */
+int
+lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
+{
+	LPFC_MBOXQ_t *mbox;
+	int rc;
+	/*
+	 * If the link is up and no FCoE events occurred while in
+	 * FCF discovery, there is no need to restart FCF discovery.
+	 */
+	if ((phba->link_state  >= LPFC_LINK_UP) &&
+		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+		return 0;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
+	spin_unlock_irq(&phba->hbalock);
+
+	if (phba->link_state >= LPFC_LINK_UP)
+		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+
+	if (unreg_fcf) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
+		spin_unlock_irq(&phba->hbalock);
+		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+		if (!mbox) {
+			lpfc_printf_log(phba, KERN_ERR,
+				LOG_DISCOVERY|LOG_MBOX,
+				"2610 UNREG_FCFI mbox allocation failed\n");
+			return 1;
+		}
+		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
+		mbox->vport = phba->pport;
+		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
+				"2611 UNREG_FCFI issue mbox failed\n");
+			mempool_free(mbox, phba->mbox_mem_pool);
+		}
+	}
+
+	return 1;
+}
+
+/**
  * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
@@ -1367,6 +1481,12 @@
 	unsigned long flags;
 	uint16_t vlan_id;
 
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, 0)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
 	/* Get the first SGE entry from the non-embedded DMA memory. This
 	 * routine only uses a single SGE.
 	 */
@@ -1424,7 +1544,9 @@
 	spin_lock_irqsave(&phba->hbalock, flags);
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
 		if (lpfc_fab_name_match(phba->fcf.fabric_name,
-			new_fcf_record) &&
+					new_fcf_record) &&
+		    lpfc_sw_name_match(phba->fcf.switch_name,
+					new_fcf_record) &&
 		    lpfc_mac_addr_match(phba, new_fcf_record)) {
 			phba->fcf.fcf_flag |= FCF_AVAILABLE;
 			spin_unlock_irqrestore(&phba->hbalock, flags);
@@ -1464,9 +1586,9 @@
 		 * If there is a record with lower priority value for
 		 * the current FCF, use that record.
 		 */
-		if (lpfc_fab_name_match(phba->fcf.fabric_name, new_fcf_record)
-			&& (new_fcf_record->fip_priority <
-				phba->fcf.priority)) {
+		if (lpfc_fab_name_match(phba->fcf.fabric_name,
+					new_fcf_record) &&
+		    (new_fcf_record->fip_priority < phba->fcf.priority)) {
 			/* Use this FCF record */
 			lpfc_copy_fcf_record(phba, new_fcf_record);
 			phba->fcf.addr_mode = addr_mode;
@@ -1512,6 +1634,39 @@
 }
 
 /**
+ * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox data structure.
+ *
+ * This function handles completion of init vpi mailbox command.
+ */
+static void
+lpfc_init_vpi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct lpfc_vport *vport = mboxq->vport;
+	if (mboxq->u.mb.mbxStatus) {
+		lpfc_printf_vlog(vport, KERN_ERR,
+				LOG_MBOX,
+				"2609 Init VPI mailbox failed 0x%x\n",
+				mboxq->u.mb.mbxStatus);
+		mempool_free(mboxq, phba->mbox_mem_pool);
+		lpfc_vport_set_state(vport, FC_VPORT_FAILED);
+		return;
+	}
+	vport->fc_flag &= ~FC_VPORT_NEEDS_INIT_VPI;
+
+	if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
+		lpfc_initial_fdisc(vport);
+	else {
+		lpfc_vport_set_state(vport, FC_VPORT_NO_FABRIC_SUPP);
+		lpfc_printf_vlog(vport, KERN_ERR,
+			LOG_ELS,
+			"2606 No NPIV Fabric support\n");
+	}
+	return;
+}
+
+/**
  * lpfc_start_fdiscs - send fdiscs for each vports on this port.
  * @phba: pointer to lpfc hba data structure.
  *
@@ -1523,6 +1678,8 @@
 {
 	struct lpfc_vport **vports;
 	int i;
+	LPFC_MBOXQ_t *mboxq;
+	int rc;
 
 	vports = lpfc_create_vport_work_array(phba);
 	if (vports != NULL) {
@@ -1540,6 +1697,29 @@
 						     FC_VPORT_LINKDOWN);
 				continue;
 			}
+			if (vports[i]->fc_flag & FC_VPORT_NEEDS_INIT_VPI) {
+				mboxq = mempool_alloc(phba->mbox_mem_pool,
+					GFP_KERNEL);
+				if (!mboxq) {
+					lpfc_printf_vlog(vports[i], KERN_ERR,
+					LOG_MBOX, "2607 Failed to allocate "
+					"init_vpi mailbox\n");
+					continue;
+				}
+				lpfc_init_vpi(phba, mboxq, vports[i]->vpi);
+				mboxq->vport = vports[i];
+				mboxq->mbox_cmpl = lpfc_init_vpi_cmpl;
+				rc = lpfc_sli_issue_mbox(phba, mboxq,
+					MBX_NOWAIT);
+				if (rc == MBX_NOT_FINISHED) {
+					lpfc_printf_vlog(vports[i], KERN_ERR,
+					LOG_MBOX, "2608 Failed to issue "
+					"init_vpi mailbox\n");
+					mempool_free(mboxq,
+						phba->mbox_mem_pool);
+				}
+				continue;
+			}
 			if (phba->link_flag & LS_NPIV_FAB_SUPPORTED)
 				lpfc_initial_fdisc(vports[i]);
 			else {
@@ -1769,6 +1949,7 @@
 			goto out;
 		}
 	} else {
+		vport->port_state = LPFC_VPORT_UNKNOWN;
 		/*
 		 * Add the driver's default FCF record at FCF index 0 now. This
 		 * is phase 1 implementation that support FCF index 0 and driver
@@ -1804,6 +1985,12 @@
 		 * The driver is expected to do FIP/FCF. Call the port
 		 * and get the FCF Table.
 		 */
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+			spin_unlock_irq(&phba->hbalock);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
 		rc = lpfc_sli4_read_fcf_record(phba,
 					LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
@@ -2113,13 +2300,15 @@
 	LPFC_MBOXQ_t *pmb = NULL;
 	MAILBOX_t *mb;
 	struct static_vport_info *vport_info;
-	int rc, i;
+	int rc = 0, i;
 	struct fc_vport_identifiers vport_id;
 	struct fc_vport *new_fc_vport;
 	struct Scsi_Host *shost;
 	struct lpfc_vport *vport;
 	uint16_t offset = 0;
 	uint8_t *vport_buff;
+	struct lpfc_dmabuf *mp;
+	uint32_t byte_count = 0;
 
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
@@ -2142,7 +2331,9 @@
 
 	vport_buff = (uint8_t *) vport_info;
 	do {
-		lpfc_dump_static_vport(phba, pmb, offset);
+		if (lpfc_dump_static_vport(phba, pmb, offset))
+			goto out;
+
 		pmb->vport = phba->pport;
 		rc = lpfc_sli_issue_mbox_wait(phba, pmb, LPFC_MBOX_TMO);
 
@@ -2155,17 +2346,30 @@
 			goto out;
 		}
 
-		if (mb->un.varDmp.word_cnt >
-			sizeof(struct static_vport_info) - offset)
-			mb->un.varDmp.word_cnt =
-			sizeof(struct static_vport_info) - offset;
+		if (phba->sli_rev == LPFC_SLI_REV4) {
+			byte_count = pmb->u.mqe.un.mb_words[5];
+			mp = (struct lpfc_dmabuf *) pmb->context2;
+			if (byte_count > sizeof(struct static_vport_info) -
+					offset)
+				byte_count = sizeof(struct static_vport_info)
+					- offset;
+			memcpy(vport_buff + offset, mp->virt, byte_count);
+			offset += byte_count;
+		} else {
+			if (mb->un.varDmp.word_cnt >
+				sizeof(struct static_vport_info) - offset)
+				mb->un.varDmp.word_cnt =
+					sizeof(struct static_vport_info)
+						- offset;
+			byte_count = mb->un.varDmp.word_cnt;
+			lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+				vport_buff + offset,
+				byte_count);
 
-		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
-			vport_buff + offset,
-			mb->un.varDmp.word_cnt);
-		offset += mb->un.varDmp.word_cnt;
+			offset += byte_count;
+		}
 
-	} while (mb->un.varDmp.word_cnt &&
+	} while (byte_count &&
 		offset < sizeof(struct static_vport_info));
 
 
@@ -2198,7 +2402,7 @@
 		if (!new_fc_vport) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_INIT,
 				"0546 lpfc_create_static_vport failed to"
-				" create vport \n");
+				" create vport\n");
 			continue;
 		}
 
@@ -2207,16 +2411,15 @@
 	}
 
 out:
-	/*
-	 * If this is timed out command, setting NULL to context2 tell SLI
-	 * layer not to use this buffer.
-	 */
-	spin_lock_irq(&phba->hbalock);
-	pmb->context2 = NULL;
-	spin_unlock_irq(&phba->hbalock);
 	kfree(vport_info);
-	if (rc != MBX_TIMEOUT)
+	if (rc != MBX_TIMEOUT) {
+		if (pmb->context2) {
+			mp = (struct lpfc_dmabuf *) pmb->context2;
+			lpfc_mbuf_free(phba, mp->virt, mp->phys);
+			kfree(mp);
+		}
 		mempool_free(pmb, phba->mbox_mem_pool);
+	}
 
 	return;
 }
@@ -4360,7 +4563,7 @@
 	fcoe_param_hdr = (struct lpfc_fip_param_hdr *)
 		buff;
 	fcoe_param = (struct lpfc_fcoe_params *)
-		buff + sizeof(struct lpfc_fip_param_hdr);
+		(buff + sizeof(struct lpfc_fip_param_hdr));
 
 	if ((fcoe_param_hdr->parm_version != FIPP_VERSION) ||
 		(fcoe_param_hdr->length != FCOE_PARAM_LENGTH))
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index 8a3a026..ccb2672 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -2496,8 +2496,8 @@
 #define  DMP_VPORT_REGION_SIZE	 0x200
 #define  DMP_MBOX_OFFSET_WORD	 0x5
 
-#define  DMP_REGION_FCOEPARAM	 0x17   /* fcoe param region */
-#define  DMP_FCOEPARAM_RGN_SIZE	 0x400
+#define  DMP_REGION_23	 	 0x17   /* fcoe param  and port state region */
+#define  DMP_RGN23_SIZE	 	 0x400
 
 #define  WAKE_UP_PARMS_REGION_ID    4
 #define  WAKE_UP_PARMS_WORD_SIZE   15
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index 2995d12..3689eee0 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -52,6 +52,31 @@
 	uint32_t addr_hi;
 };
 
+#define LPFC_SLIREV_CONF_WORD	0x58
+struct lpfc_sli_intf {
+	uint32_t word0;
+#define lpfc_sli_intf_iftype_MASK 	0x00000007
+#define lpfc_sli_intf_iftype_SHIFT	0
+#define lpfc_sli_intf_iftype_WORD	word0
+#define lpfc_sli_intf_rev_MASK 		0x0000000f
+#define lpfc_sli_intf_rev_SHIFT		4
+#define lpfc_sli_intf_rev_WORD		word0
+#define LPFC_SLIREV_CONF_SLI4	4
+#define lpfc_sli_intf_family_MASK 	0x000000ff
+#define lpfc_sli_intf_family_SHIFT	8
+#define lpfc_sli_intf_family_WORD	word0
+#define lpfc_sli_intf_feat1_MASK 	0x000000ff
+#define lpfc_sli_intf_feat1_SHIFT	16
+#define lpfc_sli_intf_feat1_WORD	word0
+#define lpfc_sli_intf_feat2_MASK 	0x0000001f
+#define lpfc_sli_intf_feat2_SHIFT	24
+#define lpfc_sli_intf_feat2_WORD	word0
+#define lpfc_sli_intf_valid_MASK 	0x00000007
+#define lpfc_sli_intf_valid_SHIFT	29
+#define lpfc_sli_intf_valid_WORD	word0
+#define LPFC_SLI_INTF_VALID		6
+};
+
 #define LPFC_SLI4_BAR0		1
 #define LPFC_SLI4_BAR1		2
 #define LPFC_SLI4_BAR2		4
@@ -1181,6 +1206,32 @@
 #define lpfc_fcf_record_fcf_state_MASK		0x0000FFFF
 #define lpfc_fcf_record_fcf_state_WORD		word8
 	uint8_t vlan_bitmap[512];
+	uint32_t word137;
+#define lpfc_fcf_record_switch_name_0_SHIFT	0
+#define lpfc_fcf_record_switch_name_0_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_0_WORD	word137
+#define lpfc_fcf_record_switch_name_1_SHIFT	8
+#define lpfc_fcf_record_switch_name_1_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_1_WORD	word137
+#define lpfc_fcf_record_switch_name_2_SHIFT	16
+#define lpfc_fcf_record_switch_name_2_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_2_WORD	word137
+#define lpfc_fcf_record_switch_name_3_SHIFT	24
+#define lpfc_fcf_record_switch_name_3_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_3_WORD	word137
+	uint32_t word138;
+#define lpfc_fcf_record_switch_name_4_SHIFT	0
+#define lpfc_fcf_record_switch_name_4_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_4_WORD	word138
+#define lpfc_fcf_record_switch_name_5_SHIFT	8
+#define lpfc_fcf_record_switch_name_5_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_5_WORD	word138
+#define lpfc_fcf_record_switch_name_6_SHIFT	16
+#define lpfc_fcf_record_switch_name_6_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_6_WORD	word138
+#define lpfc_fcf_record_switch_name_7_SHIFT	24
+#define lpfc_fcf_record_switch_name_7_MASK	0x000000FF
+#define lpfc_fcf_record_switch_name_7_WORD	word138
 };
 
 struct lpfc_mbx_read_fcf_tbl {
@@ -1385,20 +1436,17 @@
 
 struct lpfc_mbx_resume_rpi {
 	uint32_t word1;
-#define lpfc_resume_rpi_rpi_SHIFT	0
-#define lpfc_resume_rpi_rpi_MASK	0x0000FFFF
-#define lpfc_resume_rpi_rpi_WORD	word1
+#define lpfc_resume_rpi_index_SHIFT	0
+#define lpfc_resume_rpi_index_MASK	0x0000FFFF
+#define lpfc_resume_rpi_index_WORD	word1
+#define lpfc_resume_rpi_ii_SHIFT	30
+#define lpfc_resume_rpi_ii_MASK		0x00000003
+#define lpfc_resume_rpi_ii_WORD		word1
+#define RESUME_INDEX_RPI		0
+#define RESUME_INDEX_VPI		1
+#define RESUME_INDEX_VFI		2
+#define RESUME_INDEX_FCFI		3
 	uint32_t event_tag;
-	uint32_t word3_rsvd;
-	uint32_t word4_rsvd;
-	uint32_t word5_rsvd;
-	uint32_t word6;
-#define lpfc_resume_rpi_vpi_SHIFT	0
-#define lpfc_resume_rpi_vpi_MASK	0x0000FFFF
-#define lpfc_resume_rpi_vpi_WORD	word6
-#define lpfc_resume_rpi_vfi_SHIFT	16
-#define lpfc_resume_rpi_vfi_MASK	0x0000FFFF
-#define lpfc_resume_rpi_vfi_WORD	word6
 };
 
 #define REG_FCF_INVALID_QID	0xFFFF
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index fc67cc6..562d8ce 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -211,7 +211,7 @@
 		goto out_free_mbox;
 
 	do {
-		lpfc_dump_mem(phba, pmb, offset);
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_VPD);
 		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
 
 		if (rc != MBX_SUCCESS) {
@@ -425,6 +425,9 @@
 		return -EIO;
 	}
 
+	/* Check if the port is disabled */
+	lpfc_sli_read_link_ste(phba);
+
 	/* Reset the DFT_HBA_Q_DEPTH to the max xri  */
 	if (phba->cfg_hba_queue_depth > (mb->un.varRdConfig.max_xri+1))
 		phba->cfg_hba_queue_depth =
@@ -524,27 +527,46 @@
 	/* Set up error attention (ERATT) polling timer */
 	mod_timer(&phba->eratt_poll, jiffies + HZ * LPFC_ERATT_POLL_INTERVAL);
 
-	lpfc_init_link(phba, pmb, phba->cfg_topology, phba->cfg_link_speed);
-	pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-	lpfc_set_loopback_flag(phba);
-	rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-	if (rc != MBX_SUCCESS) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+	if (phba->hba_flag & LINK_DISABLED) {
+		lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2598 Adapter Link is disabled.\n");
+		lpfc_down_link(phba, pmb);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if ((rc != MBX_SUCCESS) && (rc != MBX_BUSY)) {
+			lpfc_printf_log(phba,
+			KERN_ERR, LOG_INIT,
+			"2599 Adapter failed to issue DOWN_LINK"
+			" mbox command rc 0x%x\n", rc);
+
+			mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
+	} else {
+		lpfc_init_link(phba, pmb, phba->cfg_topology,
+			phba->cfg_link_speed);
+		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		lpfc_set_loopback_flag(phba);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0454 Adapter failed to init, mbxCmd x%x "
 				"INIT_LINK, mbxStatus x%x\n",
 				mb->mbxCommand, mb->mbxStatus);
 
-		/* Clear all interrupt enable conditions */
-		writel(0, phba->HCregaddr);
-		readl(phba->HCregaddr); /* flush */
-		/* Clear all pending interrupts */
-		writel(0xffffffff, phba->HAregaddr);
-		readl(phba->HAregaddr); /* flush */
+			/* Clear all interrupt enable conditions */
+			writel(0, phba->HCregaddr);
+			readl(phba->HCregaddr); /* flush */
+			/* Clear all pending interrupts */
+			writel(0xffffffff, phba->HAregaddr);
+			readl(phba->HAregaddr); /* flush */
 
-		phba->link_state = LPFC_HBA_ERROR;
-		if (rc != MBX_BUSY)
-			mempool_free(pmb, phba->mbox_mem_pool);
-		return -EIO;
+			phba->link_state = LPFC_HBA_ERROR;
+			if (rc != MBX_BUSY)
+				mempool_free(pmb, phba->mbox_mem_pool);
+			return -EIO;
+		}
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -558,7 +580,7 @@
 				KERN_ERR,
 				LOG_INIT,
 				"0456 Adapter failed to issue "
-				"ASYNCEVT_ENABLE mbox status x%x \n.",
+				"ASYNCEVT_ENABLE mbox status x%x\n",
 				rc);
 		mempool_free(pmb, phba->mbox_mem_pool);
 	}
@@ -572,7 +594,7 @@
 
 	if ((rc != MBX_BUSY) && (rc != MBX_SUCCESS)) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT, "0435 Adapter failed "
-				"to get Option ROM version status x%x\n.", rc);
+				"to get Option ROM version status x%x\n", rc);
 		mempool_free(pmb, phba->mbox_mem_pool);
 	}
 
@@ -2133,6 +2155,8 @@
 			vports[i]->fc_flag &= ~FC_OFFLINE_MODE;
 			if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED)
 				vports[i]->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				vports[i]->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 			spin_unlock_irq(shost->host_lock);
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
@@ -2807,6 +2831,7 @@
 	att_type = lpfc_sli4_parse_latt_type(phba, acqe_link);
 	if (att_type != AT_LINK_DOWN && att_type != AT_LINK_UP)
 		return;
+	phba->fcoe_eventtag = acqe_link->event_tag;
 	pmb = (LPFC_MBOXQ_t *)mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!pmb) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -2894,18 +2919,20 @@
 	uint8_t event_type = bf_get(lpfc_acqe_fcoe_event_type, acqe_fcoe);
 	int rc;
 
+	phba->fcoe_eventtag = acqe_fcoe->event_tag;
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x \n",
+			"2546 New FCF found index 0x%x tag 0x%x\n",
 			acqe_fcoe->fcf_index,
 			acqe_fcoe->event_tag);
 		/*
-		 * If the current FCF is in discovered state,
-		 * do nothing.
+		 * If the current FCF is in discovered state, or
+		 * FCF discovery is in progress do nothing.
 		 */
 		spin_lock_irq(&phba->hbalock);
-		if (phba->fcf.fcf_flag & FCF_DISCOVERED) {
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERED) ||
+		   (phba->hba_flag & FCF_DISC_INPROGRESS)) {
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
@@ -2922,7 +2949,7 @@
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
-			"2548 FCF Table full count 0x%x tag 0x%x \n",
+			"2548 FCF Table full count 0x%x tag 0x%x\n",
 			bf_get(lpfc_acqe_fcoe_fcf_count, acqe_fcoe),
 			acqe_fcoe->event_tag);
 		break;
@@ -2930,7 +2957,7 @@
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
 			"2549 FCF disconnected fron network index 0x%x"
-			" tag 0x%x \n", acqe_fcoe->fcf_index,
+			" tag 0x%x\n", acqe_fcoe->fcf_index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
 		if (phba->fcf.fcf_indx != acqe_fcoe->fcf_index)
@@ -4130,8 +4157,7 @@
 	/* Allocate memory for HBA structure */
 	phba = kzalloc(sizeof(struct lpfc_hba), GFP_KERNEL);
 	if (!phba) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-				"1417 Failed to allocate hba struct.\n");
+		dev_err(&pdev->dev, "failed to allocate hba struct\n");
 		return NULL;
 	}
 
@@ -4145,6 +4171,9 @@
 		return NULL;
 	}
 
+	mutex_init(&phba->ct_event_mutex);
+	INIT_LIST_HEAD(&phba->ct_ev_waiters);
+
 	return phba;
 }
 
@@ -4489,23 +4518,6 @@
 	if (!phba->sli4_hba.STAregaddr)
 		return -ENODEV;
 
-	/* With uncoverable error, log the error message and return error */
-	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
-	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
-	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
-		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
-		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
-		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"1422 HBA Unrecoverable error: "
-					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
-					"online0_reg=0x%x, online1_reg=0x%x\n",
-					uerrlo_reg.word0, uerrhi_reg.word0,
-					onlnreg0, onlnreg1);
-		}
-		return -ENODEV;
-	}
-
 	/* Wait up to 30 seconds for the SLI Port POST done and ready */
 	for (i = 0; i < 3000; i++) {
 		sta_reg.word0 = readl(phba->sli4_hba.STAregaddr);
@@ -4545,6 +4557,23 @@
 			bf_get(lpfc_scratchpad_featurelevel1, &scratchpad),
 			bf_get(lpfc_scratchpad_featurelevel2, &scratchpad));
 
+	/* With an unrecoverable error, log the error message and return error */
+	onlnreg0 = readl(phba->sli4_hba.ONLINE0regaddr);
+	onlnreg1 = readl(phba->sli4_hba.ONLINE1regaddr);
+	if ((onlnreg0 != LPFC_ONLINE_NERR) || (onlnreg1 != LPFC_ONLINE_NERR)) {
+		uerrlo_reg.word0 = readl(phba->sli4_hba.UERRLOregaddr);
+		uerrhi_reg.word0 = readl(phba->sli4_hba.UERRHIregaddr);
+		if (uerrlo_reg.word0 || uerrhi_reg.word0) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"1422 HBA Unrecoverable error: "
+					"uerr_lo_reg=0x%x, uerr_hi_reg=0x%x, "
+					"online0_reg=0x%x, online1_reg=0x%x\n",
+					uerrlo_reg.word0, uerrhi_reg.word0,
+					onlnreg0, onlnreg1);
+		}
+		return -ENODEV;
+	}
+
 	return port_error;
 }
 
@@ -7347,6 +7376,9 @@
 	/* Perform post initialization setup */
 	lpfc_post_init_setup(phba);
 
+	/* Check if there are static vports to be created. */
+	lpfc_create_static_vport(phba);
+
 	return 0;
 
 out_disable_intr:
@@ -7636,19 +7668,17 @@
 lpfc_pci_probe_one(struct pci_dev *pdev, const struct pci_device_id *pid)
 {
 	int rc;
-	uint16_t dev_id;
+	struct lpfc_sli_intf intf;
 
-	if (pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id))
+	if (pci_read_config_dword(pdev, LPFC_SLIREV_CONF_WORD, &intf.word0))
 		return -ENODEV;
 
-	switch (dev_id) {
-	case PCI_DEVICE_ID_TIGERSHARK:
+	if ((bf_get(lpfc_sli_intf_valid, &intf) == LPFC_SLI_INTF_VALID) &&
+		(bf_get(lpfc_sli_intf_rev, &intf) == LPFC_SLIREV_CONF_SLI4))
 		rc = lpfc_pci_probe_one_s4(pdev, pid);
-		break;
-	default:
+	else
 		rc = lpfc_pci_probe_one_s3(pdev, pid);
-		break;
-	}
+
 	return rc;
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 3423571..1ab4059 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -52,48 +52,85 @@
  * This routine prepares the mailbox command for dumping list of static
  * vports to be created.
  **/
-void
+int
 lpfc_dump_static_vport(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb,
 		uint16_t offset)
 {
 	MAILBOX_t *mb;
-	void *ctx;
+	struct lpfc_dmabuf *mp;
 
 	mb = &pmb->u.mb;
-	ctx = pmb->context2;
 
 	/* Setup to dump vport info region */
 	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
 	mb->mbxCommand = MBX_DUMP_MEMORY;
-	mb->un.varDmp.cv = 1;
 	mb->un.varDmp.type = DMP_NV_PARAMS;
 	mb->un.varDmp.entry_index = offset;
 	mb->un.varDmp.region_id = DMP_REGION_VPORT;
-	mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
-	mb->un.varDmp.co = 0;
-	mb->un.varDmp.resp_offset = 0;
-	pmb->context2 = ctx;
 	mb->mbxOwner = OWN_HOST;
 
-	return;
+	/* For SLI3 HBAs data is embedded in mailbox */
+	if (phba->sli_rev != LPFC_SLI_REV4) {
+		mb->un.varDmp.cv = 1;
+		mb->un.varDmp.word_cnt = DMP_RSP_SIZE/sizeof(uint32_t);
+		return 0;
+	}
+
+	/* For SLI4 HBAs the driver needs to allocate memory */
+	mp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (mp)
+		mp->virt = lpfc_mbuf_alloc(phba, 0, &mp->phys);
+
+	if (!mp || !mp->virt) {
+		kfree(mp);
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
+			"2605 lpfc_dump_static_vport: memory"
+			" allocation failed\n");
+		return 1;
+	}
+	memset(mp->virt, 0, LPFC_BPL_SIZE);
+	INIT_LIST_HEAD(&mp->list);
+	/* save address for completion */
+	pmb->context2 = (uint8_t *) mp;
+	mb->un.varWords[3] = putPaddrLow(mp->phys);
+	mb->un.varWords[4] = putPaddrHigh(mp->phys);
+	mb->un.varDmp.sli4_length = sizeof(struct static_vport_info);
+
+	return 0;
 }
 
 /**
- * lpfc_dump_mem - Prepare a mailbox command for retrieving HBA's VPD memory
+ * lpfc_down_link - Bring down the HBA's link.
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
- * @offset: offset for dumping VPD memory mailbox command.
+ *
+ * This routine prepares a mailbox command to bring down the HBA's link.
+ **/
+void
+lpfc_down_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	memset(pmb, 0, sizeof(LPFC_MBOXQ_t));
+	mb = &pmb->u.mb;
+	mb->mbxCommand = MBX_DOWN_LINK;
+	mb->mbxOwner = OWN_HOST;
+}
+
+/**
+ * lpfc_dump_mem - Prepare a mailbox command for reading a region.
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ * @offset: offset into the region.
+ * @region_id: config region id.
  *
  * The dump mailbox command provides a method for the device driver to obtain
  * various types of information from the HBA device.
  *
- * This routine prepares the mailbox command for dumping HBA Vital Product
- * Data (VPD) memory. This mailbox command is to be used for retrieving a
- * portion (DMP_RSP_SIZE bytes) of a HBA's VPD from the HBA at an address
- * offset specified by the offset parameter.
+ * This routine prepares the mailbox command for dumping the HBA's config region.
  **/
 void
-lpfc_dump_mem(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmb, uint16_t offset)
+lpfc_dump_mem(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb, uint16_t offset,
+		uint16_t region_id)
 {
 	MAILBOX_t *mb;
 	void *ctx;
@@ -107,7 +144,7 @@
 	mb->un.varDmp.cv = 1;
 	mb->un.varDmp.type = DMP_NV_PARAMS;
 	mb->un.varDmp.entry_index = offset;
-	mb->un.varDmp.region_id = DMP_REGION_VPD;
+	mb->un.varDmp.region_id = region_id;
 	mb->un.varDmp.word_cnt = (DMP_RSP_SIZE / sizeof (uint32_t));
 	mb->un.varDmp.co = 0;
 	mb->un.varDmp.resp_offset = 0;
@@ -1789,6 +1826,7 @@
 
 /**
  * lpfc_init_vpi - Initialize the INIT_VPI mailbox command
+ * @phba: pointer to the hba structure to init the VPI for.
  * @mbox: pointer to lpfc mbox command to initialize.
  * @vpi: VPI to be initialized.
  *
@@ -1799,11 +1837,14 @@
  * successful virtual NPort login.
  **/
 void
-lpfc_init_vpi(struct lpfcMboxq *mbox, uint16_t vpi)
+lpfc_init_vpi(struct lpfc_hba *phba, struct lpfcMboxq *mbox, uint16_t vpi)
 {
 	memset(mbox, 0, sizeof(*mbox));
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_INIT_VPI);
-	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi, vpi);
+	bf_set(lpfc_init_vpi_vpi, &mbox->u.mqe.un.init_vpi,
+	       vpi + phba->vpi_base);
+	bf_set(lpfc_init_vpi_vfi, &mbox->u.mqe.un.init_vpi,
+	       phba->pport->vfi + phba->vfi_base);
 }
 
 /**
@@ -1852,7 +1893,7 @@
 		/* dump_fcoe_param failed to allocate memory */
 		lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
 			"2569 lpfc_dump_fcoe_param: memory"
-			" allocation failed \n");
+			" allocation failed\n");
 		return 1;
 	}
 
@@ -1864,8 +1905,8 @@
 
 	mb->mbxCommand = MBX_DUMP_MEMORY;
 	mb->un.varDmp.type = DMP_NV_PARAMS;
-	mb->un.varDmp.region_id = DMP_REGION_FCOEPARAM;
-	mb->un.varDmp.sli4_length = DMP_FCOEPARAM_RGN_SIZE;
+	mb->un.varDmp.region_id = DMP_REGION_23;
+	mb->un.varDmp.sli4_length = DMP_RGN23_SIZE;
 	mb->un.varWords[3] = putPaddrLow(mp->phys);
 	mb->un.varWords[4] = putPaddrHigh(mp->phys);
 	return 0;
@@ -1938,9 +1979,7 @@
 	memset(mbox, 0, sizeof(*mbox));
 	resume_rpi = &mbox->u.mqe.un.resume_rpi;
 	bf_set(lpfc_mqe_command, &mbox->u.mqe, MBX_RESUME_RPI);
-	bf_set(lpfc_resume_rpi_rpi, resume_rpi, ndlp->nlp_rpi);
-	bf_set(lpfc_resume_rpi_vpi, resume_rpi,
-	       ndlp->vport->vpi + ndlp->vport->phba->vpi_base);
-	bf_set(lpfc_resume_rpi_vfi, resume_rpi,
-	       ndlp->vport->vfi + ndlp->vport->phba->vfi_base);
+	bf_set(lpfc_resume_rpi_index, resume_rpi, ndlp->nlp_rpi);
+	bf_set(lpfc_resume_rpi_ii, resume_rpi, RESUME_INDEX_RPI);
+	resume_rpi->event_tag = ndlp->phba->fc_eventTag;
 }
diff --git a/drivers/scsi/lpfc/lpfc_mem.c b/drivers/scsi/lpfc/lpfc_mem.c
index e198c91..a1b6db6 100644
--- a/drivers/scsi/lpfc/lpfc_mem.c
+++ b/drivers/scsi/lpfc/lpfc_mem.c
@@ -110,17 +110,28 @@
 						sizeof(struct lpfc_nodelist));
 	if (!phba->nlp_mem_pool)
 		goto fail_free_mbox_pool;
-	phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		phba->lpfc_hrb_pool = pci_pool_create("lpfc_hrb_pool",
 					      phba->pcidev,
 					      LPFC_HDR_BUF_SIZE, align, 0);
-	if (!phba->lpfc_hrb_pool)
-		goto fail_free_nlp_mem_pool;
-	phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
+		if (!phba->lpfc_hrb_pool)
+			goto fail_free_nlp_mem_pool;
+
+		phba->lpfc_drb_pool = pci_pool_create("lpfc_drb_pool",
 					      phba->pcidev,
 					      LPFC_DATA_BUF_SIZE, align, 0);
-	if (!phba->lpfc_drb_pool)
-		goto fail_free_hbq_pool;
-
+		if (!phba->lpfc_drb_pool)
+			goto fail_free_hrb_pool;
+		phba->lpfc_hbq_pool = NULL;
+	} else {
+		phba->lpfc_hbq_pool = pci_pool_create("lpfc_hbq_pool",
+			phba->pcidev, LPFC_BPL_SIZE, align, 0);
+		if (!phba->lpfc_hbq_pool)
+			goto fail_free_nlp_mem_pool;
+		phba->lpfc_hrb_pool = NULL;
+		phba->lpfc_drb_pool = NULL;
+	}
 	/* vpi zero is reserved for the physical port so add 1 to max */
 	longs = ((phba->max_vpi + 1) + BITS_PER_LONG - 1) / BITS_PER_LONG;
 	phba->vpi_bmask = kzalloc(longs * sizeof(unsigned long), GFP_KERNEL);
@@ -132,7 +143,7 @@
  fail_free_dbq_pool:
 	pci_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
- fail_free_hbq_pool:
+ fail_free_hrb_pool:
 	pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
  fail_free_nlp_mem_pool:
@@ -176,11 +187,17 @@
 
 	/* Free HBQ pools */
 	lpfc_sli_hbqbuf_free_all(phba);
-	pci_pool_destroy(phba->lpfc_drb_pool);
+	if (phba->lpfc_drb_pool)
+		pci_pool_destroy(phba->lpfc_drb_pool);
 	phba->lpfc_drb_pool = NULL;
-	pci_pool_destroy(phba->lpfc_hrb_pool);
+	if (phba->lpfc_hrb_pool)
+		pci_pool_destroy(phba->lpfc_hrb_pool);
 	phba->lpfc_hrb_pool = NULL;
 
+	if (phba->lpfc_hbq_pool)
+		pci_pool_destroy(phba->lpfc_hbq_pool);
+	phba->lpfc_hbq_pool = NULL;
+
 	/* Free NLP memory pool */
 	mempool_destroy(phba->nlp_mem_pool);
 	phba->nlp_mem_pool = NULL;
@@ -380,7 +397,7 @@
 	if (!hbqbp)
 		return NULL;
 
-	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hrb_pool, GFP_KERNEL,
+	hbqbp->dbuf.virt = pci_pool_alloc(phba->lpfc_hbq_pool, GFP_KERNEL,
 					  &hbqbp->dbuf.phys);
 	if (!hbqbp->dbuf.virt) {
 		kfree(hbqbp);
@@ -405,7 +422,7 @@
 void
 lpfc_els_hbq_free(struct lpfc_hba *phba, struct hbq_dmabuf *hbqbp)
 {
-	pci_pool_free(phba->lpfc_hrb_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
+	pci_pool_free(phba->lpfc_hbq_pool, hbqbp->dbuf.virt, hbqbp->dbuf.phys);
 	kfree(hbqbp);
 	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_nl.h b/drivers/scsi/lpfc/lpfc_nl.h
index 27d1a88..d655ed3 100644
--- a/drivers/scsi/lpfc/lpfc_nl.h
+++ b/drivers/scsi/lpfc/lpfc_nl.h
@@ -177,3 +177,23 @@
 	uint32_t data;
 };
 
+/* bsg definitions */
+#define LPFC_BSG_VENDOR_SET_CT_EVENT	1
+#define LPFC_BSG_VENDOR_GET_CT_EVENT	2
+
+struct set_ct_event {
+	uint32_t command;
+	uint32_t ev_req_id;
+	uint32_t ev_reg_id;
+};
+
+struct get_ct_event {
+	uint32_t command;
+	uint32_t ev_reg_id;
+	uint32_t ev_req_id;
+};
+
+struct get_ct_event_reply {
+	uint32_t immed_data;
+	uint32_t type;
+};
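The three structures above are the vendor-unique payloads the new
lpfc_bsg_set_event()/lpfc_bsg_get_event() handlers expect inside an
FC_BSG_HST_VENDOR request.  A minimal user-space sketch of polling for a
previously registered CT event is shown below; it assumes the fc_bsg_request
layout from scsi_bsg_fc.h (32-bit msgcode, 64-bit vendor_id that must match
the shost template's LPFC_NL_VENDOR_ID, then vendor_cmd[] words), and the
bsg node path and helper name are hypothetical.  It is an illustration only,
not part of this patch.

/* Illustrative only: poll for a CT event on e.g. /dev/bsg/fc_host0 */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <scsi/sg.h>		/* SG_IO */
#include <linux/bsg.h>		/* struct sg_io_v4 */

struct get_ct_event {		/* same layout as the lpfc_nl.h addition above */
	uint32_t command;	/* LPFC_BSG_VENDOR_GET_CT_EVENT == 2 */
	uint32_t ev_reg_id;
	uint32_t ev_req_id;
};

struct vendor_req {		/* simplified fc_bsg_request mirror (assumption) */
	uint32_t msgcode;	/* FC_BSG_HST_VENDOR (assumed 0x800000FF) */
	uint64_t vendor_id;	/* must equal the shost template's vendor_id */
	struct get_ct_event ct;	/* lands in rqst_data.h_vendor.vendor_cmd[] */
} __attribute__((packed));

static int lpfc_bsg_poll_ct_event(int bsg_fd, uint64_t vendor_id,
				  uint32_t reg_id, void *data, uint32_t len)
{
	union {
		struct vendor_req v;
		uint8_t pad[64];	/* satisfy the driver's minimum
					 * request_len check */
	} req;
	uint8_t rsp[64];		/* receives struct fc_bsg_reply; the
					 * get_ct_event_reply is embedded */
	struct sg_io_v4 io;

	memset(&req, 0, sizeof(req));
	req.v.msgcode = 0x800000FF;	/* FC_BSG_HST_VENDOR */
	req.v.vendor_id = vendor_id;	/* LPFC_NL_VENDOR_ID */
	req.v.ct.command = 2;		/* LPFC_BSG_VENDOR_GET_CT_EVENT */
	req.v.ct.ev_reg_id = reg_id;	/* id used with SET_CT_EVENT earlier */
	req.v.ct.ev_req_id = 1;

	memset(&io, 0, sizeof(io));
	io.guard = 'Q';
	io.protocol = BSG_PROTOCOL_SCSI;
	io.subprotocol = BSG_SUB_PROTOCOL_SCSI_TRANSPORT;
	io.request = (uintptr_t)&req;
	io.request_len = sizeof(req);
	io.response = (uintptr_t)rsp;
	io.max_response_len = sizeof(rsp);
	io.din_xferp = (uintptr_t)data;	/* event data; the driver truncates
					 * to len and logs message 2618 */
	io.din_xfer_len = len;
	io.timeout = 30 * 1000;		/* milliseconds */

	return ioctl(bsg_fd, SG_IO, &io);	/* 0 on success, -1 + errno */
}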
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index da59c4f..61d0897 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -2142,7 +2142,7 @@
 	} else if (resp_info & RESID_OVER) {
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_FCP,
 				 "9028 FCP command x%x residual overrun error. "
-				 "Data: x%x x%x \n", cmnd->cmnd[0],
+				 "Data: x%x x%x\n", cmnd->cmnd[0],
 				 scsi_bufflen(cmnd), scsi_get_resid(cmnd));
 		host_status = DID_ERROR;
 
@@ -2843,7 +2843,7 @@
 				dif_op_str[scsi_get_prot_op(cmnd)]);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 				"9034 BLKGRD: CDB: %02x %02x %02x %02x %02x "
-				"%02x %02x %02x %02x %02x \n",
+				"%02x %02x %02x %02x %02x\n",
 				cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
 				cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
 				cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -2871,7 +2871,7 @@
 				dif_op_str[scsi_get_prot_op(cmnd)]);
 		lpfc_printf_vlog(vport, KERN_WARNING, LOG_BG,
 				 "9039 BLKGRD: CDB: %02x %02x %02x %02x %02x "
-				 "%02x %02x %02x %02x %02x \n",
+				 "%02x %02x %02x %02x %02x\n",
 				 cmnd->cmnd[0], cmnd->cmnd[1], cmnd->cmnd[2],
 				 cmnd->cmnd[3], cmnd->cmnd[4], cmnd->cmnd[5],
 				 cmnd->cmnd[6], cmnd->cmnd[7], cmnd->cmnd[8],
@@ -3584,6 +3584,7 @@
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= lpfc_hba_attrs,
 	.max_sectors		= 0xFFFF,
+	.vendor_id		= LPFC_NL_VENDOR_ID,
 };
 
 struct scsi_host_template lpfc_vport_template = {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index acc43b0..43cbe33 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -4139,7 +4139,7 @@
 		return -EIO;
 	}
 	data_length = mqe->un.mb_words[5];
-	if (data_length > DMP_FCOEPARAM_RGN_SIZE) {
+	if (data_length > DMP_RGN23_SIZE) {
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
@@ -4304,7 +4304,7 @@
 	 */
 	if (lpfc_sli4_read_fcoe_params(phba, mboxq))
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_INIT,
-			"2570 Failed to read FCoE parameters \n");
+			"2570 Failed to read FCoE parameters\n");
 
 	/* Issue READ_REV to collect vpd and FW information. */
 	vpd_size = PAGE_SIZE;
@@ -4522,12 +4522,8 @@
 	lpfc_sli4_rb_setup(phba);
 
 	/* Start the ELS watchdog timer */
-	/*
-	 * The driver for SLI4 is not yet ready to process timeouts
-	 * or interrupts.  Once it is, the comment bars can be removed.
-	 */
-	/* mod_timer(&vport->els_tmofunc,
-	 *           jiffies + HZ * (phba->fc_ratov*2)); */
+	mod_timer(&vport->els_tmofunc,
+		  jiffies + HZ * (phba->fc_ratov * 2));
 
 	/* Start heart beat timer */
 	mod_timer(&phba->hb_tmofunc,
@@ -4706,13 +4702,13 @@
 
 	spin_lock_irqsave(&phba->hbalock, drvr_flag);
 	if (!pmbox) {
+		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 		/* processing mbox queue from intr_handler */
 		if (unlikely(psli->sli_flag & LPFC_SLI_ASYNC_MBX_BLK)) {
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
 			return MBX_SUCCESS;
 		}
 		processing_queue = 1;
-		phba->sli.sli_flag &= ~LPFC_SLI_MBOX_ACTIVE;
 		pmbox = lpfc_mbox_get(phba);
 		if (!pmbox) {
 			spin_unlock_irqrestore(&phba->hbalock, drvr_flag);
@@ -5279,6 +5275,18 @@
 	unsigned long iflags;
 	int rc;
 
+	rc = lpfc_mbox_dev_check(phba);
+	if (unlikely(rc)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
+				"(%d):2544 Mailbox command x%x (x%x) "
+				"cannot issue Data: x%x x%x\n",
+				mboxq->vport ? mboxq->vport->vpi : 0,
+				mboxq->u.mb.mbxCommand,
+				lpfc_sli4_mbox_opcode_get(phba, mboxq),
+				psli->sli_flag, flag);
+		goto out_not_finished;
+	}
+
 	/* Detect polling mode and jump to a handler */
 	if (!phba->sli4_hba.intr_enable) {
 		if (flag == MBX_POLL)
@@ -5338,17 +5346,6 @@
 				psli->sli_flag, flag);
 		goto out_not_finished;
 	}
-	rc = lpfc_mbox_dev_check(phba);
-	if (unlikely(rc)) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX | LOG_SLI,
-				"(%d):2544 Mailbox command x%x (x%x) "
-				"cannot issue Data: x%x x%x\n",
-				mboxq->vport ? mboxq->vport->vpi : 0,
-				mboxq->u.mb.mbxCommand,
-				lpfc_sli4_mbox_opcode_get(phba, mboxq),
-				psli->sli_flag, flag);
-		goto out_not_finished;
-	}
 
 	/* Put the mailbox command to the driver internal FIFO */
 	psli->slistat.mbox_busy++;
@@ -5817,19 +5814,21 @@
 /**
  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
  * @phba: Pointer to HBA context object.
- * @piocb: Pointer to command iocb.
  *
  * This routine performs a round robin SCSI command to SLI4 FCP WQ index
- * distribution.
+ * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
+ * held.
  *
  * Return: index into SLI4 fast-path FCP queue index.
  **/
 static uint32_t
-lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba, struct lpfc_iocbq *piocb)
+lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
-	static uint32_t fcp_qidx;
+	++phba->fcp_qidx;
+	if (phba->fcp_qidx >= phba->cfg_fcp_wq_count)
+		phba->fcp_qidx = 0;
 
-	return fcp_qidx++ % phba->cfg_fcp_wq_count;
+	return phba->fcp_qidx;
 }
 
 /**
@@ -6156,7 +6155,7 @@
 		return IOCB_ERROR;
 
 	if (piocb->iocb_flag &  LPFC_IO_FCP) {
-		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba, piocb);
+		fcp_wqidx = lpfc_sli4_scmd_to_wqidx_distr(phba);
 		if (lpfc_sli4_wq_put(phba->sli4_hba.fcp_wq[fcp_wqidx], &wqe))
 			return IOCB_ERROR;
 	} else {
@@ -6327,7 +6326,7 @@
 			KERN_ERR,
 			LOG_SLI,
 			"0346 Ring %d handler: unexpected ASYNC_STATUS"
-			" evt_code 0x%x \n"
+			" evt_code 0x%x\n"
 			"W0  0x%08x W1  0x%08x W2  0x%08x W3  0x%08x\n"
 			"W4  0x%08x W5  0x%08x W6  0x%08x W7  0x%08x\n"
 			"W8  0x%08x W9  0x%08x W10 0x%08x W11 0x%08x\n"
@@ -6790,6 +6789,33 @@
 
 
 /**
+ * lpfc_sli_bemem_bcopy - SLI memory copy function
+ * @srcp: Source memory pointer.
+ * @destp: Destination memory pointer.
+ * @cnt: Number of bytes to copy (processed one 32-bit word at a time).
+ *
+ * This function is used for copying data from a big endian representation
+ * to the local (host) endianness.
+ * This function can be called with or without a lock held.
+ **/
+void
+lpfc_sli_bemem_bcopy(void *srcp, void *destp, uint32_t cnt)
+{
+	uint32_t *src = srcp;
+	uint32_t *dest = destp;
+	uint32_t ldata;
+	int i;
+
+	for (i = 0; i < (int)cnt; i += sizeof(uint32_t)) {
+		ldata = *src;
+		ldata = be32_to_cpu(ldata);
+		*dest = ldata;
+		src++;
+		dest++;
+	}
+}
+
+/**
  * lpfc_sli_ringpostbuf_put - Function to add a buffer to postbufq
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
@@ -7678,12 +7704,6 @@
 					"online0_reg=0x%x, online1_reg=0x%x\n",
 					uerr_sta_lo, uerr_sta_hi,
 					onlnreg0, onlnreg1);
-			/* TEMP: as the driver error recover logic is not
-			 * fully developed, we just log the error message
-			 * and the device error attention action is now
-			 * temporarily disabled.
-			 */
-			return 0;
 			phba->work_status[0] = uerr_sta_lo;
 			phba->work_status[1] = uerr_sta_hi;
 			/* Set the driver HA work bitmap */
@@ -9499,8 +9519,7 @@
 	eq->host_index = 0;
 	eq->hba_index = 0;
 
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
+	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
 
@@ -9604,10 +9623,9 @@
 	cq->queue_id = bf_get(lpfc_mbx_cq_create_q_id, &cq_create->u.response);
 	cq->host_index = 0;
 	cq->hba_index = 0;
-out:
 
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
+out:
+	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
 
@@ -9712,8 +9730,7 @@
 	/* link the mq onto the parent cq child list */
 	list_add_tail(&mq->list, &cq->child_list);
 out:
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
+	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
 
@@ -9795,8 +9812,7 @@
 	/* link the wq onto the parent cq child list */
 	list_add_tail(&wq->list, &cq->child_list);
 out:
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
+	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
 
@@ -9970,8 +9986,7 @@
 	list_add_tail(&drq->list, &cq->child_list);
 
 out:
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, phba->mbox_mem_pool);
+	mempool_free(mbox, phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10026,8 +10041,7 @@
 
 	/* Remove eq from any list */
 	list_del_init(&eq->list);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, eq->phba->mbox_mem_pool);
+	mempool_free(mbox, eq->phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10080,8 +10094,7 @@
 	}
 	/* Remove cq from any list */
 	list_del_init(&cq->list);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, cq->phba->mbox_mem_pool);
+	mempool_free(mbox, cq->phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10134,8 +10147,7 @@
 	}
 	/* Remove mq from any list */
 	list_del_init(&mq->list);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, mq->phba->mbox_mem_pool);
+	mempool_free(mbox, mq->phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10187,8 +10199,7 @@
 	}
 	/* Remove wq from any list */
 	list_del_init(&wq->list);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, wq->phba->mbox_mem_pool);
+	mempool_free(mbox, wq->phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10258,8 +10269,7 @@
 	}
 	list_del_init(&hrq->list);
 	list_del_init(&drq->list);
-	if (rc != MBX_TIMEOUT)
-		mempool_free(mbox, hrq->phba->mbox_mem_pool);
+	mempool_free(mbox, hrq->phba->mbox_mem_pool);
 	return status;
 }
 
@@ -10933,6 +10943,7 @@
 	first_iocbq = lpfc_sli_get_iocbq(vport->phba);
 	if (first_iocbq) {
 		/* Initialize the first IOCB. */
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len = 0;
 		first_iocbq->iocb.ulpStatus = IOSTAT_SUCCESS;
 		first_iocbq->iocb.ulpCommand = CMD_IOCB_RCV_SEQ64_CX;
 		first_iocbq->iocb.ulpContext = be16_to_cpu(fc_hdr->fh_ox_id);
@@ -10945,6 +10956,8 @@
 		first_iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
 		first_iocbq->iocb.un.rcvels.remoteID = sid;
+		first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
 	}
 	iocbq = first_iocbq;
 	/*
@@ -10961,6 +10974,8 @@
 			iocbq->iocb.ulpBdeCount++;
 			iocbq->iocb.unsli3.rcvsli3.bde2.tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
+			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
 		} else {
 			iocbq = lpfc_sli_get_iocbq(vport->phba);
 			if (!iocbq) {
@@ -10978,6 +10993,8 @@
 			iocbq->iocb.ulpBdeCount = 1;
 			iocbq->iocb.un.cont64[0].tus.f.bdeSize =
 							LPFC_DATA_BUF_SIZE;
+			first_iocbq->iocb.unsli3.rcvsli3.acc_len +=
+				bf_get(lpfc_rcqe_length, &seq_dmabuf->rcqe);
 			iocbq->iocb.un.rcvels.remoteID = sid;
 			list_add_tail(&iocbq->list, &first_iocbq->list);
 		}
@@ -11324,7 +11341,7 @@
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq)
 		return -ENOMEM;
-	lpfc_init_vpi(mboxq, vpi);
+	lpfc_init_vpi(phba, mboxq, vpi);
 	mbox_tmo = lpfc_mbox_tmo_val(phba, MBX_INIT_VPI);
 	rc = lpfc_sli_issue_mbox_wait(phba, mboxq, mbox_tmo);
 	if (rc != MBX_TIMEOUT)
@@ -11519,6 +11536,7 @@
 	uint32_t alloc_len, req_len;
 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 
+	phba->fcoe_eventtag_at_fcf_scan = phba->fcoe_eventtag;
 	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mboxq) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -11570,7 +11588,140 @@
 	if (rc == MBX_NOT_FINISHED) {
 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		error = -EIO;
-	} else
+	} else {
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag |= FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
 		error = 0;
+	}
 	return error;
 }
+
+/**
+ * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function reads region 23 and parses the TLVs for the port state to
+ * decide whether the user disabled the port. If the TLV indicates the
+ * port is disabled, the hba_flag is set accordingly.
+ **/
+void
+lpfc_sli_read_link_ste(struct lpfc_hba *phba)
+{
+	LPFC_MBOXQ_t *pmb = NULL;
+	MAILBOX_t *mb;
+	uint8_t *rgn23_data = NULL;
+	uint32_t offset = 0, data_size, sub_tlv_len, tlv_offset;
+	int rc;
+
+	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2600 lpfc_sli_read_link_ste failed to"
+			" allocate mailbox memory\n");
+		goto out;
+	}
+	mb = &pmb->u.mb;
+
+	/* Get adapter Region 23 data */
+	rgn23_data = kzalloc(DMP_RGN23_SIZE, GFP_KERNEL);
+	if (!rgn23_data)
+		goto out;
+
+	do {
+		lpfc_dump_mem(phba, pmb, offset, DMP_REGION_23);
+		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_POLL);
+
+		if (rc != MBX_SUCCESS) {
+			lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+				"2601 lpfc_sli_read_link_ste failed to"
+				" read config region 23 rc 0x%x Status 0x%x\n",
+				rc, mb->mbxStatus);
+			mb->un.varDmp.word_cnt = 0;
+		}
+		/*
+		 * dump mem may return zero words when finished, or we may have
+		 * hit a mailbox error; either way we are done.
+		 */
+		if (mb->un.varDmp.word_cnt == 0)
+			break;
+		if (mb->un.varDmp.word_cnt > DMP_RGN23_SIZE - offset)
+			mb->un.varDmp.word_cnt = DMP_RGN23_SIZE - offset;
+
+		lpfc_sli_pcimem_bcopy(((uint8_t *)mb) + DMP_RSP_OFFSET,
+			rgn23_data + offset,
+			mb->un.varDmp.word_cnt);
+		offset += mb->un.varDmp.word_cnt;
+	} while (mb->un.varDmp.word_cnt && offset < DMP_RGN23_SIZE);
+
+	data_size = offset;
+	offset = 0;
+
+	if (!data_size)
+		goto out;
+
+	/* Check the region signature first */
+	if (memcmp(&rgn23_data[offset], LPFC_REGION23_SIGNATURE, 4)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2619 Config region 23 has bad signature\n");
+		goto out;
+	}
+	offset += 4;
+
+	/* Check the data structure version */
+	if (rgn23_data[offset] != LPFC_REGION23_VERSION) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+			"2620 Config region 23 has bad version\n");
+		goto out;
+	}
+	offset += 4;
+
+	/* Parse TLV entries in the region */
+	while (offset < data_size) {
+		if (rgn23_data[offset] == LPFC_REGION23_LAST_REC)
+			break;
+		/*
+		 * If the TLV is not a driver-specific TLV or the driver id is
+		 * not the Linux driver id, skip the record.
+		 */
+		if ((rgn23_data[offset] != DRIVER_SPECIFIC_TYPE) ||
+		    (rgn23_data[offset + 2] != LINUX_DRIVER_ID) ||
+		    (rgn23_data[offset + 3] != 0)) {
+			offset += rgn23_data[offset + 1] * 4 + 4;
+			continue;
+		}
+
+		/* Driver found a driver specific TLV in the config region */
+		sub_tlv_len = rgn23_data[offset + 1] * 4;
+		offset += 4;
+		tlv_offset = 0;
+
+		/*
+		 * Search for configured port state sub-TLV.
+		 */
+		while ((offset < data_size) &&
+			(tlv_offset < sub_tlv_len)) {
+			if (rgn23_data[offset] == LPFC_REGION23_LAST_REC) {
+				offset += 4;
+				tlv_offset += 4;
+				break;
+			}
+			if (rgn23_data[offset] != PORT_STE_TYPE) {
+				tlv_offset += rgn23_data[offset + 1] * 4 + 4;
+				offset += rgn23_data[offset + 1] * 4 + 4;
+				continue;
+			}
+
+			/* This HBA contains PORT_STE configured */
+			if (!rgn23_data[offset + 2])
+				phba->hba_flag |= LINK_DISABLED;
+
+			goto out;
+		}
+	}
+out:
+	if (pmb)
+		mempool_free(pmb, phba->mbox_mem_pool);
+	kfree(rgn23_data);
+	return;
+}
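For reference, the config region 23 layout that lpfc_sli_read_link_ste()
walks above can be summarized as follows; this is a sketch distilled from
the parsing code in this patch, not an authoritative description of the
region format.

/*
 * Region 23 layout as consumed by lpfc_sli_read_link_ste()
 * (byte offsets, derived from the parsing loop above):
 *
 *   [0..3]  signature "RG23"              (LPFC_REGION23_SIGNATURE)
 *   [4]     version, must be 1            (LPFC_REGION23_VERSION);
 *           the next three bytes are skipped by the parser
 *   [8..]   TLV records, each:
 *             byte 0:  type (0xff == LPFC_REGION23_LAST_REC ends the list)
 *             byte 1:  length in 32-bit words (record spans length*4+4 bytes)
 *             byte 2+: payload
 *
 * A driver-specific record (type DRIVER_SPECIFIC_TYPE 0xA2 whose first two
 * payload bytes are LINUX_DRIVER_ID 0x20 and 0) carries sub-TLVs; a
 * PORT_STE_TYPE (0x1) sub-TLV whose third byte is zero marks the port as
 * disabled by the user, which sets LINK_DISABLED in phba->hba_flag.
 */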
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 3b276b4..b5f4ba1 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -132,6 +132,7 @@
 
 struct lpfc_fcf {
 	uint8_t	 fabric_name[8];
+	uint8_t	 switch_name[8];
 	uint8_t  mac_addr[6];
 	uint16_t fcf_indx;
 	uint16_t fcfi;
@@ -150,6 +151,10 @@
 #define LPFC_REGION23_SIGNATURE "RG23"
 #define LPFC_REGION23_VERSION	1
 #define LPFC_REGION23_LAST_REC  0xff
+#define DRIVER_SPECIFIC_TYPE	0xA2
+#define LINUX_DRIVER_ID		0x20
+#define PORT_STE_TYPE		0x1
+
 struct lpfc_fip_param_hdr {
 	uint8_t type;
 #define FCOE_PARAM_TYPE		0xA0
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41094e0..9ae20af 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.3"
+#define LPFC_DRIVER_VERSION "8.3.4"
 
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index e0b4992..606efa7 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -313,22 +313,6 @@
 		goto error_out;
 	}
 
-	/*
-	 * In SLI4, the vpi must be activated before it can be used
-	 * by the port.
-	 */
-	if (phba->sli_rev == LPFC_SLI_REV4) {
-		rc = lpfc_sli4_init_vpi(phba, vpi);
-		if (rc) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
-					"1838 Failed to INIT_VPI on vpi %d "
-					"status %d\n", vpi, rc);
-			rc = VPORT_NORESOURCES;
-			lpfc_free_vpi(phba, vpi);
-			goto error_out;
-		}
-	}
-
 	/* Assign an unused board number */
 	if ((instance = lpfc_get_instance()) < 0) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
@@ -367,12 +351,8 @@
 		goto error_out;
 	}
 
-	memcpy(vport->fc_portname.u.wwn, vport->fc_sparam.portName.u.wwn, 8);
-	memcpy(vport->fc_nodename.u.wwn, vport->fc_sparam.nodeName.u.wwn, 8);
-	if (fc_vport->node_name != 0)
-		u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
-	if (fc_vport->port_name != 0)
-		u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
+	u64_to_wwn(fc_vport->node_name, vport->fc_nodename.u.wwn);
+	u64_to_wwn(fc_vport->port_name, vport->fc_portname.u.wwn);
 
 	memcpy(&vport->fc_sparam.portName, vport->fc_portname.u.wwn, 8);
 	memcpy(&vport->fc_sparam.nodeName, vport->fc_nodename.u.wwn, 8);
@@ -404,7 +384,34 @@
 	*(struct lpfc_vport **)fc_vport->dd_data = vport;
 	vport->fc_vport = fc_vport;
 
+	/*
+	 * In SLI4, the vpi must be activated before it can be used
+	 * by the port.
+	 */
+	if ((phba->sli_rev == LPFC_SLI_REV4) &&
+		(pport->vfi_state & LPFC_VFI_REGISTERED)) {
+		rc = lpfc_sli4_init_vpi(phba, vpi);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_VPORT,
+					"1838 Failed to INIT_VPI on vpi %d "
+					"status %d\n", vpi, rc);
+			rc = VPORT_NORESOURCES;
+			lpfc_free_vpi(phba, vpi);
+			goto error_out;
+		}
+	} else if (phba->sli_rev == LPFC_SLI_REV4) {
+		/*
+		 * Driver cannot INIT_VPI now. Set the flags to
+		 * init_vpi when reg_vfi complete.
+		 */
+		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
+		rc = VPORT_OK;
+		goto out;
+	}
+
 	if ((phba->link_state < LPFC_LINK_UP) ||
+	    (pport->port_state < LPFC_FABRIC_CFG_LINK) ||
 	    (phba->fc_topology == TOPOLOGY_LOOP)) {
 		lpfc_vport_set_state(vport, FC_VPORT_LINKDOWN);
 		rc = VPORT_OK;
@@ -661,7 +668,7 @@
 				lpfc_printf_log(vport->phba, KERN_WARNING,
 						LOG_VPORT,
 						"1829 CT command failed to "
-						"delete objects on fabric. \n");
+						"delete objects on fabric\n");
 		}
 		/* First look for the Fabric ndlp */
 		ndlp = lpfc_findnode_did(vport, Fabric_DID);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 35a1386..d95d2f2 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -94,7 +94,7 @@
 	int rc;
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->ioc_reset_in_progress)
+	if (ioc->shost_recovery)
 		goto rearm_timer;
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
@@ -687,6 +687,14 @@
 	ioc->mask_interrupts = 0;
 }
 
+union reply_descriptor {
+	u64 word;
+	struct {
+		u32 low;
+		u32 high;
+	} u;
+};
+
 /**
  * _base_interrupt - MPT adapter (IOC) specific interrupt handler.
  * @irq: irq number (not used)
@@ -698,47 +706,38 @@
 static irqreturn_t
 _base_interrupt(int irq, void *bus_id)
 {
-	union reply_descriptor {
-		u64 word;
-		struct {
-			u32 low;
-			u32 high;
-		} u;
-	};
 	union reply_descriptor rd;
-	u32 post_index, post_index_next, completed_cmds;
+	u32 completed_cmds;
 	u8 request_desript_type;
 	u16 smid;
 	u8 cb_idx;
 	u32 reply;
 	u8 VF_ID;
-	int i;
 	struct MPT2SAS_ADAPTER *ioc = bus_id;
+	Mpi2ReplyDescriptorsUnion_t *rpf;
 
 	if (ioc->mask_interrupts)
 		return IRQ_NONE;
 
-	post_index = ioc->reply_post_host_index;
-	request_desript_type = ioc->reply_post_free[post_index].
-	    Default.ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+	rpf = &ioc->reply_post_free[ioc->reply_post_host_index];
+	request_desript_type = rpf->Default.ReplyFlags
+	     & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 	if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 		return IRQ_NONE;
 
 	completed_cmds = 0;
 	do {
-		rd.word = ioc->reply_post_free[post_index].Words;
+		rd.word = rpf->Words;
 		if (rd.u.low == UINT_MAX || rd.u.high == UINT_MAX)
 			goto out;
 		reply = 0;
 		cb_idx = 0xFF;
-		smid = le16_to_cpu(ioc->reply_post_free[post_index].
-		    Default.DescriptorTypeDependent1);
-		VF_ID = ioc->reply_post_free[post_index].
-		    Default.VF_ID;
+		smid = le16_to_cpu(rpf->Default.DescriptorTypeDependent1);
+		VF_ID = rpf->Default.VF_ID;
 		if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_ADDRESS_REPLY) {
-			reply = le32_to_cpu(ioc->reply_post_free[post_index].
-			    AddressReply.ReplyFrameAddress);
+			reply = le32_to_cpu
+				(rpf->AddressReply.ReplyFrameAddress);
 		} else if (request_desript_type ==
 		    MPI2_RPY_DESCRIPT_FLAGS_TARGET_COMMAND_BUFFER)
 			goto next;
@@ -765,21 +764,27 @@
 			    0 : ioc->reply_free_host_index + 1;
 			ioc->reply_free[ioc->reply_free_host_index] =
 			    cpu_to_le32(reply);
+			wmb();
 			writel(ioc->reply_free_host_index,
 			    &ioc->chip->ReplyFreeHostIndex);
-			wmb();
 		}
 
  next:
-		post_index_next = (post_index == (ioc->reply_post_queue_depth -
-		    1)) ? 0 : post_index + 1;
+
+		rpf->Words = ULLONG_MAX;
+		ioc->reply_post_host_index = (ioc->reply_post_host_index ==
+		    (ioc->reply_post_queue_depth - 1)) ? 0 :
+		    ioc->reply_post_host_index + 1;
 		request_desript_type =
-		    ioc->reply_post_free[post_index_next].Default.ReplyFlags
-		    & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
+		    ioc->reply_post_free[ioc->reply_post_host_index].Default.
+		    ReplyFlags & MPI2_RPY_DESCRIPT_FLAGS_TYPE_MASK;
 		completed_cmds++;
 		if (request_desript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			goto out;
-		post_index = post_index_next;
+		if (!ioc->reply_post_host_index)
+			rpf = ioc->reply_post_free;
+		else
+			rpf++;
 	} while (1);
 
  out:
@@ -787,19 +792,8 @@
 	if (!completed_cmds)
 		return IRQ_NONE;
 
-	/* reply post descriptor handling */
-	post_index_next = ioc->reply_post_host_index;
-	for (i = 0 ; i < completed_cmds; i++) {
-		post_index = post_index_next;
-		/* poison the reply post descriptor */
-		ioc->reply_post_free[post_index_next].Words = ULLONG_MAX;
-		post_index_next = (post_index ==
-		    (ioc->reply_post_queue_depth - 1))
-		    ? 0 : post_index + 1;
-	}
-	ioc->reply_post_host_index = post_index_next;
-	writel(post_index_next, &ioc->chip->ReplyPostHostIndex);
 	wmb();
+	writel(ioc->reply_post_host_index, &ioc->chip->ReplyPostHostIndex);
 	return IRQ_HANDLED;
 }
 
@@ -1542,6 +1536,8 @@
 	   (ioc->bios_pg3.BiosVersion & 0x0000FF00) >> 8,
 	    ioc->bios_pg3.BiosVersion & 0x000000FF);
 
+	_base_display_dell_branding(ioc);
+
 	printk(MPT2SAS_INFO_FMT "Protocol=(", ioc->name);
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -1554,8 +1550,6 @@
 		i++;
 	}
 
-	_base_display_dell_branding(ioc);
-
 	i = 0;
 	printk("), ");
 	printk("Capabilities=(");
@@ -1627,6 +1621,9 @@
 	u32 iounit_pg1_flags;
 
 	mpt2sas_config_get_manufacturing_pg0(ioc, &mpi_reply, &ioc->manu_pg0);
+	if (ioc->ir_firmware)
+		mpt2sas_config_get_manufacturing_pg10(ioc, &mpi_reply,
+		    &ioc->manu_pg10);
 	mpt2sas_config_get_bios_pg2(ioc, &mpi_reply, &ioc->bios_pg2);
 	mpt2sas_config_get_bios_pg3(ioc, &mpi_reply, &ioc->bios_pg3);
 	mpt2sas_config_get_ioc_pg8(ioc, &mpi_reply, &ioc->ioc_pg8);
@@ -1647,7 +1644,7 @@
 		iounit_pg1_flags |=
 		    MPI2_IOUNITPAGE1_DISABLE_TASK_SET_FULL_HANDLING;
 	ioc->iounit_pg1.Flags = cpu_to_le32(iounit_pg1_flags);
-	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, ioc->iounit_pg1);
+	mpt2sas_config_set_iounit_pg1(ioc, &mpi_reply, &ioc->iounit_pg1);
 }
 
 /**
@@ -3303,13 +3300,11 @@
 	ioc->tm_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->tm_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_init(&ioc->tm_cmds.mutex);
-	init_completion(&ioc->tm_cmds.done);
 
 	/* config page internal command bits */
 	ioc->config_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
 	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_init(&ioc->config_cmds.mutex);
-	init_completion(&ioc->config_cmds.done);
 
 	/* ctl module internal command bits */
 	ioc->ctl_cmds.reply = kzalloc(ioc->reply_sz, GFP_KERNEL);
@@ -3433,6 +3428,7 @@
 		if (ioc->config_cmds.status & MPT2_CMD_PENDING) {
 			ioc->config_cmds.status |= MPT2_CMD_RESET;
 			mpt2sas_base_free_smid(ioc, ioc->config_cmds.smid);
+			ioc->config_cmds.smid = USHORT_MAX;
 			complete(&ioc->config_cmds.done);
 		}
 		break;
@@ -3501,20 +3497,13 @@
 	    __func__));
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
+	if (ioc->shost_recovery) {
 		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 		printk(MPT2SAS_ERR_FMT "%s: busy\n",
 		    ioc->name, __func__);
 		return -EBUSY;
 	}
-	ioc->ioc_reset_in_progress = 1;
 	ioc->shost_recovery = 1;
-	if (ioc->shost->shost_state == SHOST_RUNNING) {
-		/* set back to SHOST_RUNNING in mpt2sas_scsih.c */
-		scsi_host_set_state(ioc->shost, SHOST_RECOVERY);
-		printk(MPT2SAS_INFO_FMT "putting controller into "
-		    "SHOST_RECOVERY\n", ioc->name);
-	}
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	_base_reset_handler(ioc, MPT2_IOC_PRE_RESET);
@@ -3534,7 +3523,10 @@
 	    ioc->name, __func__, ((r == 0) ? "SUCCESS" : "FAILED")));
 
 	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	ioc->ioc_reset_in_progress = 0;
+	ioc->shost_recovery = 0;
 	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+
+	if (!r)
+		_base_reset_handler(ioc, MPT2_IOC_RUNNING);
 	return r;
 }
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index acdcff1..2faab1e 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -69,10 +69,10 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"01.100.04.00"
+#define MPT2SAS_DRIVER_VERSION		"01.100.06.00"
 #define MPT2SAS_MAJOR_VERSION		01
 #define MPT2SAS_MINOR_VERSION		100
-#define MPT2SAS_BUILD_VERSION		04
+#define MPT2SAS_BUILD_VERSION		06
 #define MPT2SAS_RELEASE_VERSION		00
 
 /*
@@ -119,6 +119,7 @@
 #define MPT2_IOC_PRE_RESET		1 /* prior to host reset */
 #define MPT2_IOC_AFTER_RESET		2 /* just after host reset */
 #define MPT2_IOC_DONE_RESET		3 /* links re-initialized */
+#define MPT2_IOC_RUNNING		4 /* shost running */
 
 /*
  * logging format
@@ -196,6 +197,38 @@
  * @block: device is in SDEV_BLOCK state
  * @tlr_snoop_check: flag used in determining whether to disable TLR
  */
+
+/* OEM Identifiers */
+#define MFG10_OEM_ID_INVALID                   (0x00000000)
+#define MFG10_OEM_ID_DELL                      (0x00000001)
+#define MFG10_OEM_ID_FSC                       (0x00000002)
+#define MFG10_OEM_ID_SUN                       (0x00000003)
+#define MFG10_OEM_ID_IBM                       (0x00000004)
+
+/* GENERIC Flags 0*/
+#define MFG10_GF0_OCE_DISABLED                 (0x00000001)
+#define MFG10_GF0_R1E_DRIVE_COUNT              (0x00000002)
+#define MFG10_GF0_R10_DISPLAY                  (0x00000004)
+#define MFG10_GF0_SSD_DATA_SCRUB_DISABLE       (0x00000008)
+#define MFG10_GF0_SINGLE_DRIVE_R0              (0x00000010)
+
+/* OEM Specific Flags will come from OEM specific header files */
+typedef struct _MPI2_CONFIG_PAGE_MAN_10 {
+    MPI2_CONFIG_PAGE_HEADER Header;                                 /* 00h */
+    U8                      OEMIdentifier;                          /* 04h */
+    U8                      Reserved1;                              /* 05h */
+    U16                     Reserved2;                              /* 08h */
+    U32                     Reserved3;                              /* 0Ch */
+    U32                     GenericFlags0;                          /* 10h */
+    U32                     GenericFlags1;                          /* 14h */
+    U32                     Reserved4;                              /* 18h */
+    U32                     OEMSpecificFlags0;                      /* 1Ch */
+    U32                     OEMSpecificFlags1;                      /* 20h */
+    U32                     Reserved5[18];                          /* 24h-60h*/
+} MPI2_CONFIG_PAGE_MAN_10,
+  MPI2_POINTER PTR_MPI2_CONFIG_PAGE_MAN_10,
+  Mpi2ManufacturingPage10_t, MPI2_POINTER pMpi2ManufacturingPage10_t;
+
 struct MPT2SAS_DEVICE {
 	struct MPT2SAS_TARGET *sas_target;
 	unsigned int	lun;
@@ -431,7 +464,7 @@
  * @fw_event_list: list of fw events
  * @aen_event_read_flag: event log was read
  * @broadcast_aen_busy: broadcast aen waiting to be serviced
- * @ioc_reset_in_progress: host reset in progress
+ * @shost_recovery: host reset in progress
  * @ioc_reset_in_progress_lock:
  * @ioc_link_reset_in_progress: phy/hard reset in progress
  * @ignore_loginfos: ignore loginfos during task managment
@@ -460,6 +493,7 @@
  * @facts: static facts data
  * @pfacts: static port facts data
  * @manu_pg0: static manufacturing page 0
+ * @manu_pg10: static manufacturing page 10
  * @bios_pg2: static bios page 2
  * @bios_pg3: static bios page 3
  * @ioc_pg8: static ioc page 8
@@ -544,7 +578,6 @@
 	 /* misc flags */
 	int		aen_event_read_flag;
 	u8		broadcast_aen_busy;
-	u8		ioc_reset_in_progress;
 	u8		shost_recovery;
 	spinlock_t 	ioc_reset_in_progress_lock;
 	u8		ioc_link_reset_in_progress;
@@ -663,6 +696,7 @@
 	dma_addr_t	diag_buffer_dma[MPI2_DIAG_BUF_TYPE_COUNT];
 	u8		diag_buffer_status[MPI2_DIAG_BUF_TYPE_COUNT];
 	u32		unique_id[MPI2_DIAG_BUF_TYPE_COUNT];
+	Mpi2ManufacturingPage10_t manu_pg10;
 	u32		product_specific[MPI2_DIAG_BUF_TYPE_COUNT][23];
 	u32		diagnostic_flags[MPI2_DIAG_BUF_TYPE_COUNT];
 };
@@ -734,6 +768,8 @@
 int mpt2sas_config_get_number_hba_phys(struct MPT2SAS_ADAPTER *ioc, u8 *num_phys);
 int mpt2sas_config_get_manufacturing_pg0(struct MPT2SAS_ADAPTER *ioc,
     Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage0_t *config_page);
+int mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
+    Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page);
 int mpt2sas_config_get_bios_pg2(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2BiosPage2_t *config_page);
 int mpt2sas_config_get_bios_pg3(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -749,7 +785,7 @@
 int mpt2sas_config_get_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
-    *mpi_reply, Mpi2IOUnitPage1_t config_page);
+    *mpi_reply, Mpi2IOUnitPage1_t *config_page);
 int mpt2sas_config_get_sas_iounit_pg1(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
     *mpi_reply, Mpi2SasIOUnitPage1_t *config_page, u16 sz);
 int mpt2sas_config_get_ioc_pg8(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigReply_t
@@ -776,7 +812,6 @@
     u16 *volume_handle);
 int mpt2sas_config_get_volume_wwid(struct MPT2SAS_ADAPTER *ioc, u16 volume_handle,
     u64 *wwid);
-
 /* ctl shared API */
 extern struct device_attribute *mpt2sas_host_attrs[];
 extern struct device_attribute *mpt2sas_dev_attrs[];
@@ -798,9 +833,11 @@
     *mpt2sas_phy, Mpi2SasPhyPage0_t phy_pg0, struct device *parent_dev);
 int mpt2sas_transport_add_expander_phy(struct MPT2SAS_ADAPTER *ioc, struct _sas_phy
     *mpt2sas_phy, Mpi2ExpanderPage1_t expander_pg1, struct device *parent_dev);
-void mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle,
+void mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc, u16 handle,
    u16 attached_handle, u8 phy_number, u8 link_rate);
 extern struct sas_function_template mpt2sas_transport_functions;
 extern struct scsi_transport_template *mpt2sas_transport_template;
+extern int scsi_internal_device_block(struct scsi_device *sdev);
+extern int scsi_internal_device_unblock(struct scsi_device *sdev);
 
 #endif /* MPT2SAS_BASE_H_INCLUDED */
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 6ddee16..ab8c560 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -72,15 +72,15 @@
 
 /**
  * struct config_request - obtain dma memory via routine
- * @config_page_sz: size
- * @config_page: virt pointer
- * @config_page_dma: phys pointer
+ * @sz: size
+ * @page: virt pointer
+ * @page_dma: phys pointer
  *
  */
 struct config_request{
-	u16			config_page_sz;
-	void			*config_page;
-	dma_addr_t		config_page_dma;
+	u16			sz;
+	void			*page;
+	dma_addr_t		page_dma;
 };
 
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
@@ -175,6 +175,55 @@
 #endif
 
 /**
+ * _config_alloc_config_dma_memory - obtain physical memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper for obtaining dma-able memory for config page request.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
+    struct config_request *mem)
+{
+	int r = 0;
+
+	if (mem->sz > ioc->config_page_sz) {
+		mem->page = dma_alloc_coherent(&ioc->pdev->dev, mem->sz,
+		    &mem->page_dma, GFP_KERNEL);
+		if (!mem->page) {
+			printk(MPT2SAS_ERR_FMT "%s: dma_alloc_coherent"
+			    " failed asking for (%d) bytes!!\n",
+			    ioc->name, __func__, mem->sz);
+			r = -ENOMEM;
+		}
+	} else { /* use tmp buffer if less than 512 bytes */
+		mem->page = ioc->config_page;
+		mem->page_dma = ioc->config_page_dma;
+	}
+	return r;
+}
+
+/**
+ * _config_free_config_dma_memory - wrapper to free the memory
+ * @ioc: per adapter object
+ * @mem: struct config_request
+ *
+ * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static void
+_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
+    struct config_request *mem)
+{
+	if (mem->sz > ioc->config_page_sz)
+		dma_free_coherent(&ioc->pdev->dev, mem->sz, mem->page,
+		    mem->page_dma);
+}
+
+/**
  * mpt2sas_config_done - config page completion routine
  * @ioc: per adapter object
  * @smid: system request message index
@@ -206,6 +255,7 @@
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	_config_display_some_debug(ioc, smid, "config_done", mpi_reply);
 #endif
+	ioc->config_cmds.smid = USHORT_MAX;
 	complete(&ioc->config_cmds.done);
 }
 
@@ -215,7 +265,9 @@
  * @mpi_request: request message frame
  * @mpi_reply: reply mf payload returned from firmware
  * @timeout: timeout in seconds
- * Context: sleep, the calling function needs to acquire the config_cmds.mutex
+ * @config_page: contents of the config page
+ * @config_page_sz: size of config page
+ * Context: sleep
  *
  * A generic API for config page requests to firmware.
  *
@@ -228,16 +280,17 @@
  */
 static int
 _config_request(struct MPT2SAS_ADAPTER *ioc, Mpi2ConfigRequest_t
-    *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout)
+    *mpi_request, Mpi2ConfigReply_t *mpi_reply, int timeout,
+    void *config_page, u16 config_page_sz)
 {
 	u16 smid;
 	u32 ioc_state;
 	unsigned long timeleft;
 	Mpi2ConfigRequest_t *config_request;
 	int r;
-	u8 retry_count;
-	u8 issue_host_reset = 0;
+	u8 retry_count, issue_host_reset = 0;
 	u16 wait_state_count;
+	struct config_request mem;
 
 	mutex_lock(&ioc->config_cmds.mutex);
 	if (ioc->config_cmds.status != MPT2_CMD_NOT_USED) {
@@ -246,12 +299,44 @@
 		mutex_unlock(&ioc->config_cmds.mutex);
 		return -EAGAIN;
 	}
+
 	retry_count = 0;
+	memset(&mem, 0, sizeof(struct config_request));
+
+	if (config_page) {
+		mpi_request->Header.PageVersion = mpi_reply->Header.PageVersion;
+		mpi_request->Header.PageNumber = mpi_reply->Header.PageNumber;
+		mpi_request->Header.PageType = mpi_reply->Header.PageType;
+		mpi_request->Header.PageLength = mpi_reply->Header.PageLength;
+		mpi_request->ExtPageLength = mpi_reply->ExtPageLength;
+		mpi_request->ExtPageType = mpi_reply->ExtPageType;
+		if (mpi_request->Header.PageLength)
+			mem.sz = mpi_request->Header.PageLength * 4;
+		else
+			mem.sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
+		r = _config_alloc_config_dma_memory(ioc, &mem);
+		if (r != 0)
+			goto out;
+		if (mpi_request->Action ==
+		    MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT) {
+			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+			    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.sz,
+			    mem.page_dma);
+			memcpy(mem.page, config_page, min_t(u16, mem.sz,
+			    config_page_sz));
+		} else {
+			memset(config_page, 0, config_page_sz);
+			ioc->base_add_sg_single(&mpi_request->PageBufferSGE,
+			    MPT2_CONFIG_COMMON_SGLFLAGS | mem.sz, mem.page_dma);
+		}
+	}
 
  retry_config:
 	if (retry_count) {
-		if (retry_count > 2) /* attempt only 2 retries */
-			return -EFAULT;
+		if (retry_count > 2) { /* attempt only 2 retries */
+			r = -EFAULT;
+			goto free_mem;
+		}
 		printk(MPT2SAS_INFO_FMT "%s: attempting retry (%d)\n",
 		    ioc->name, __func__, retry_count);
 	}
@@ -262,8 +347,9 @@
 			printk(MPT2SAS_ERR_FMT
 			    "%s: failed due to ioc not operational\n",
 			    ioc->name, __func__);
+			ioc->config_cmds.status = MPT2_CMD_NOT_USED;
 			r = -EFAULT;
-			goto out;
+			goto free_mem;
 		}
 		ssleep(1);
 		ioc_state = mpt2sas_base_get_iocstate(ioc, 1);
@@ -279,8 +365,9 @@
 	if (!smid) {
 		printk(MPT2SAS_ERR_FMT "%s: failed obtaining a smid\n",
 		    ioc->name, __func__);
+		ioc->config_cmds.status = MPT2_CMD_NOT_USED;
 		r = -EAGAIN;
-		goto out;
+		goto free_mem;
 	}
 
 	r = 0;
@@ -292,6 +379,7 @@
 #ifdef CONFIG_SCSI_MPT2SAS_LOGGING
 	_config_display_some_debug(ioc, smid, "config_request", NULL);
 #endif
+	init_completion(&ioc->config_cmds.done);
 	mpt2sas_base_put_smid_default(ioc, smid, config_request->VF_ID);
 	timeleft = wait_for_completion_timeout(&ioc->config_cmds.done,
 	    timeout*HZ);
@@ -303,22 +391,31 @@
 		retry_count++;
 		if (ioc->config_cmds.smid == smid)
 			mpt2sas_base_free_smid(ioc, smid);
-		if ((ioc->shost_recovery) ||
-		    (ioc->config_cmds.status & MPT2_CMD_RESET))
+		if ((ioc->shost_recovery) || (ioc->config_cmds.status &
+		    MPT2_CMD_RESET))
 			goto retry_config;
 		issue_host_reset = 1;
 		r = -EFAULT;
-		goto out;
+		goto free_mem;
 	}
+
 	if (ioc->config_cmds.status & MPT2_CMD_REPLY_VALID)
 		memcpy(mpi_reply, ioc->config_cmds.reply,
 		    sizeof(Mpi2ConfigReply_t));
 	if (retry_count)
-		printk(MPT2SAS_INFO_FMT "%s: retry completed!!\n",
-		    ioc->name, __func__);
-out:
+		printk(MPT2SAS_INFO_FMT "%s: retry (%d) completed!!\n",
+		    ioc->name, __func__, retry_count);
+	if (config_page && mpi_request->Action ==
+	    MPI2_CONFIG_ACTION_PAGE_READ_CURRENT)
+		memcpy(config_page, mem.page, min_t(u16, mem.sz,
+		    config_page_sz));
+ free_mem:
+	if (config_page)
+		_config_free_config_dma_memory(ioc, &mem);
+ out:
 	ioc->config_cmds.status = MPT2_CMD_NOT_USED;
 	mutex_unlock(&ioc->config_cmds.mutex);
+
 	if (issue_host_reset)
 		mpt2sas_base_hard_reset_handler(ioc, CAN_SLEEP,
 		    FORCE_BIG_HAMMER);
@@ -326,45 +423,6 @@
 }
 
 /**
- * _config_alloc_config_dma_memory - obtain physical memory
- * @ioc: per adapter object
- * @mem: struct config_request
- *
- * A wrapper for obtaining dma-able memory for config page request.
- *
- * Returns 0 for success, non-zero for failure.
- */
-static int
-_config_alloc_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
-    struct config_request *mem)
-{
-	int r = 0;
-
-	mem->config_page = pci_alloc_consistent(ioc->pdev, mem->config_page_sz,
-	    &mem->config_page_dma);
-	if (!mem->config_page)
-		r = -ENOMEM;
-	return r;
-}
-
-/**
- * _config_free_config_dma_memory - wrapper to free the memory
- * @ioc: per adapter object
- * @mem: struct config_request
- *
- * A wrapper to free dma-able memory when using _config_alloc_config_dma_memory.
- *
- * Returns 0 for success, non-zero for failure.
- */
-static void
-_config_free_config_dma_memory(struct MPT2SAS_ADAPTER *ioc,
-    struct config_request *mem)
-{
-	pci_free_consistent(ioc->pdev, mem->config_page_sz, mem->config_page,
-	    mem->config_page_dma);
-}
-
-/**
  * mpt2sas_config_get_manufacturing_pg0 - obtain manufacturing page 0
  * @ioc: per adapter object
  * @mpi_reply: reply mf payload returned from firmware
@@ -379,9 +437,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2ManufacturingPage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -390,37 +446,50 @@
 	mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2ManufacturingPage0_t)));
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
+ out:
+	return r;
+}
 
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
+/**
+ * mpt2sas_config_get_manufacturing_pg10 - obtain manufacturing page 10
+ * @ioc: per adapter object
+ * @mpi_reply: reply mf payload returned from firmware
+ * @config_page: contents of the config page
+ * Context: sleep.
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+int
+mpt2sas_config_get_manufacturing_pg10(struct MPT2SAS_ADAPTER *ioc,
+    Mpi2ConfigReply_t *mpi_reply, Mpi2ManufacturingPage10_t *config_page)
+{
+	Mpi2ConfigRequest_t mpi_request;
+	int r;
 
+	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_CONFIG;
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
+	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_MANUFACTURING;
+	mpi_request.Header.PageNumber = 10;
+	mpi_request.Header.PageVersion = MPI2_MANUFACTURING0_PAGEVERSION;
+	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
+	if (r)
+		goto out;
+
+	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
+	r = _config_request(ioc, &mpi_request, mpi_reply,
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -440,9 +509,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2BiosPage2_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -451,37 +518,14 @@
 	mpi_request.Header.PageVersion = MPI2_BIOSPAGE2_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2BiosPage2_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -501,9 +545,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2BiosPage3_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -512,37 +554,14 @@
 	mpi_request.Header.PageVersion = MPI2_BIOSPAGE3_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2BiosPage3_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -562,9 +581,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2IOUnitPage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -573,37 +590,14 @@
 	mpi_request.Header.PageVersion = MPI2_IOUNITPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2IOUnitPage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -623,9 +617,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2IOUnitPage1_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -634,37 +626,14 @@
 	mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2IOUnitPage1_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -680,11 +649,10 @@
  */
 int
 mpt2sas_config_set_iounit_pg1(struct MPT2SAS_ADAPTER *ioc,
-    Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t config_page)
+    Mpi2ConfigReply_t *mpi_reply, Mpi2IOUnitPage1_t *config_page)
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
@@ -694,38 +662,14 @@
 	mpi_request.Header.PageVersion = MPI2_IOUNITPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_WRITE_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-
-	memset(mem.config_page, 0, mem.config_page_sz);
-	memcpy(mem.config_page, &config_page,
-	    sizeof(Mpi2IOUnitPage1_t));
-
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_WRITE_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -745,9 +689,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2IOCPage8_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -756,37 +698,14 @@
 	mpi_request.Header.PageVersion = MPI2_IOCPAGE8_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2IOCPage8_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -808,9 +727,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2SasDevicePage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -820,39 +737,15 @@
 	mpi_request.Header.PageNumber = 0;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2SasDevicePage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -874,9 +767,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2SasDevicePage1_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -886,39 +777,15 @@
 	mpi_request.Header.PageNumber = 1;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2SasDevicePage1_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -936,11 +803,11 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 	u16 ioc_status;
 	Mpi2ConfigReply_t mpi_reply;
 	Mpi2SasIOUnitPage0_t config_page;
 
+	*num_phys = 0;
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -950,44 +817,20 @@
 	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply.Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply.ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply.ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+	    sizeof(Mpi2SasIOUnitPage0_t));
 	if (!r) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
-			memcpy(&config_page, mem.config_page,
-			    min_t(u16, mem.config_page_sz,
-			    sizeof(Mpi2SasIOUnitPage0_t)));
+		if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
 			*num_phys = config_page.NumPhys;
-		}
 	}
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
  out:
 	return r;
 }
@@ -1011,8 +854,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
-	memset(config_page, 0, sz);
+
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1022,37 +864,13 @@
 	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, sz, mem.config_page_sz));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
  out:
 	return r;
 }
@@ -1076,9 +894,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sz);
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1088,37 +904,13 @@
 	mpi_request.Header.PageVersion = MPI2_SASIOUNITPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, sz, mem.config_page_sz));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
  out:
 	return r;
 }
@@ -1140,9 +932,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2ExpanderPage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1152,39 +942,15 @@
 	mpi_request.Header.PageVersion = MPI2_SASEXPANDER0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2ExpanderPage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1207,9 +973,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2ExpanderPage1_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1219,7 +983,7 @@
 	mpi_request.Header.PageVersion = MPI2_SASEXPANDER1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
@@ -1227,33 +991,9 @@
 	    cpu_to_le32(MPI2_SAS_EXPAND_PGAD_FORM_HNDL_PHY_NUM |
 	    (phy_number << MPI2_SAS_EXPAND_PGAD_PHYNUM_SHIFT) | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2ExpanderPage1_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1275,9 +1015,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2SasEnclosurePage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1287,39 +1025,15 @@
 	mpi_request.Header.PageVersion = MPI2_SASENCLOSURE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2SasEnclosurePage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1340,9 +1054,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2SasPhyPage0_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1352,40 +1064,16 @@
 	mpi_request.Header.PageVersion = MPI2_SASPHY0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress =
 	    cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2SasPhyPage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1406,9 +1094,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2SasPhyPage1_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1418,40 +1104,16 @@
 	mpi_request.Header.PageVersion = MPI2_SASPHY1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress =
 	    cpu_to_le32(MPI2_SAS_PHY_PGAD_FORM_PHY_NUMBER | phy_number);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply->ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply->ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2SasPhyPage1_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1474,9 +1136,7 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
-	memset(config_page, 0, sizeof(Mpi2RaidVolPage1_t));
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
@@ -1485,38 +1145,15 @@
 	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE1_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2RaidVolPage1_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1535,10 +1172,9 @@
     u8 *num_pds)
 {
 	Mpi2ConfigRequest_t mpi_request;
-	Mpi2RaidVolPage0_t *config_page;
+	Mpi2RaidVolPage0_t config_page;
 	Mpi2ConfigReply_t mpi_reply;
 	int r;
-	struct config_request mem;
 	u16 ioc_status;
 
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
@@ -1550,43 +1186,23 @@
 	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress =
 	    cpu_to_le32(MPI2_RAID_VOLUME_PGAD_FORM_HANDLE | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply.Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply.Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply.Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, &config_page,
+	    sizeof(Mpi2RaidVolPage0_t));
 	if (!r) {
 		ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
 		    MPI2_IOCSTATUS_MASK;
-		if (ioc_status == MPI2_IOCSTATUS_SUCCESS) {
-			config_page = mem.config_page;
-			*num_pds = config_page->NumPhysDisks;
-		}
+		if (ioc_status == MPI2_IOCSTATUS_SUCCESS)
+			*num_pds = config_page.NumPhysDisks;
 	}
 
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
  out:
 	return r;
 }
@@ -1610,10 +1226,8 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
-	memset(config_page, 0, sz);
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
 	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_VOLUME;
@@ -1621,37 +1235,14 @@
 	mpi_request.Header.PageVersion = MPI2_RAIDVOLPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | handle);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, sz, mem.config_page_sz));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page, sz);
  out:
 	return r;
 }
@@ -1674,10 +1265,8 @@
 {
 	Mpi2ConfigRequest_t mpi_request;
 	int r;
-	struct config_request mem;
 
 	memset(&mpi_request, 0, sizeof(Mpi2ConfigRequest_t));
-	memset(config_page, 0, sizeof(Mpi2RaidPhysDiskPage0_t));
 	mpi_request.Function = MPI2_FUNCTION_CONFIG;
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_HEADER;
 	mpi_request.Header.PageType = MPI2_CONFIG_PAGETYPE_RAID_PHYSDISK;
@@ -1685,38 +1274,15 @@
 	mpi_request.Header.PageVersion = MPI2_RAIDPHYSDISKPAGE0_PAGEVERSION;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress = cpu_to_le32(form | form_specific);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply->Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply->Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply->Header.PageType;
-	mpi_request.Header.PageLength = mpi_reply->Header.PageLength;
-	mem.config_page_sz = le16_to_cpu(mpi_reply->Header.PageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
 	r = _config_request(ioc, &mpi_request, mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
-	if (!r)
-		memcpy(config_page, mem.config_page,
-		    min_t(u16, mem.config_page_sz,
-		    sizeof(Mpi2RaidPhysDiskPage0_t)));
-
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    sizeof(*config_page));
  out:
 	return r;
 }
@@ -1734,11 +1300,10 @@
 mpt2sas_config_get_volume_handle(struct MPT2SAS_ADAPTER *ioc, u16 pd_handle,
     u16 *volume_handle)
 {
-	Mpi2RaidConfigurationPage0_t *config_page;
+	Mpi2RaidConfigurationPage0_t *config_page = NULL;
 	Mpi2ConfigRequest_t mpi_request;
 	Mpi2ConfigReply_t mpi_reply;
-	int r, i;
-	struct config_request mem;
+	int r, i, config_page_sz;
 	u16 ioc_status;
 
 	*volume_handle = 0;
@@ -1751,40 +1316,27 @@
 	mpi_request.Header.PageNumber = 0;
 	mpt2sas_base_build_zero_len_sge(ioc, &mpi_request.PageBufferSGE);
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, NULL, 0);
 	if (r)
 		goto out;
 
 	mpi_request.PageAddress =
 	    cpu_to_le32(MPI2_RAID_PGAD_FORM_ACTIVE_CONFIG);
 	mpi_request.Action = MPI2_CONFIG_ACTION_PAGE_READ_CURRENT;
-	mpi_request.Header.PageVersion = mpi_reply.Header.PageVersion;
-	mpi_request.Header.PageNumber = mpi_reply.Header.PageNumber;
-	mpi_request.Header.PageType = mpi_reply.Header.PageType;
-	mpi_request.ExtPageLength = mpi_reply.ExtPageLength;
-	mpi_request.ExtPageType = mpi_reply.ExtPageType;
-	mem.config_page_sz = le16_to_cpu(mpi_reply.ExtPageLength) * 4;
-	if (mem.config_page_sz > ioc->config_page_sz) {
-		r = _config_alloc_config_dma_memory(ioc, &mem);
-		if (r)
-			goto out;
-	} else {
-		mem.config_page_dma = ioc->config_page_dma;
-		mem.config_page = ioc->config_page;
-	}
-	ioc->base_add_sg_single(&mpi_request.PageBufferSGE,
-	    MPT2_CONFIG_COMMON_SGLFLAGS | mem.config_page_sz,
-	    mem.config_page_dma);
+	config_page_sz = (le16_to_cpu(mpi_reply.ExtPageLength) * 4);
+	config_page = kmalloc(config_page_sz, GFP_KERNEL);
+	if (!config_page) {
+		r = -1;
+		goto out;
+	}
 	r = _config_request(ioc, &mpi_request, &mpi_reply,
-	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT);
+	    MPT2_CONFIG_PAGE_DEFAULT_TIMEOUT, config_page,
+	    config_page_sz);
 	if (r)
 		goto out;
 
 	r = -1;
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
 	if (ioc_status != MPI2_IOCSTATUS_SUCCESS)
-		goto done;
-	config_page = mem.config_page;
+		goto out;
 	for (i = 0; i < config_page->NumElements; i++) {
 		if ((config_page->ConfigElement[i].ElementFlags &
 		    MPI2_RAIDCONFIG0_EFLAGS_MASK_ELEMENT_TYPE) !=
@@ -1795,15 +1347,11 @@
 			*volume_handle = le16_to_cpu(config_page->
 			    ConfigElement[i].VolDevHandle);
 			r = 0;
-			goto done;
+			goto out;
 		}
 	}
-
- done:
-	if (mem.config_page_sz > ioc->config_page_sz)
-		_config_free_config_dma_memory(ioc, &mem);
-
  out:
+	kfree(config_page);
 	return r;
 }
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 14e473d..c2a5101 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -1963,7 +1963,6 @@
 {
 	enum block_state state;
 	long ret = -EINVAL;
-	unsigned long flags;
 
 	state = (file->f_flags & O_NONBLOCK) ? NON_BLOCKING :
 	    BLOCKING;
@@ -1989,13 +1988,8 @@
 		    !ioc)
 			return -ENODEV;
 
-		spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-		if (ioc->shost_recovery) {
-			spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-			    flags);
+		if (ioc->shost_recovery)
 			return -EAGAIN;
-		}
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 		if (_IOC_SIZE(cmd) == sizeof(struct mpt2_ioctl_command)) {
 			uarg = arg;
@@ -2098,7 +2092,6 @@
 	struct mpt2_ioctl_command karg;
 	struct MPT2SAS_ADAPTER *ioc;
 	enum block_state state;
-	unsigned long flags;
 
 	if (_IOC_SIZE(cmd) != sizeof(struct mpt2_ioctl_command32))
 		return -EINVAL;
@@ -2113,13 +2106,8 @@
 	if (_ctl_verify_adapter(karg32.hdr.ioc_number, &ioc) == -1 || !ioc)
 		return -ENODEV;
 
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->shost_recovery) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock,
-		    flags);
+	if (ioc->shost_recovery)
 		return -EAGAIN;
-	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	memset(&karg, 0, sizeof(struct mpt2_ioctl_command));
 	karg.hdr.ioc_number = karg32.hdr.ioc_number;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index 2e9a444..774b345 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -103,7 +103,6 @@
 };
 
 
-#define MPT2SAS_RESCAN_AFTER_HOST_RESET (0xFFFF)
 /**
  * struct fw_event_work - firmware event struct
  * @list: link list framework
@@ -1502,7 +1501,13 @@
 			break;
 		case MPI2_RAID_VOL_TYPE_RAID1E:
 			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
-			r_level = "RAID1E";
+			if (ioc->manu_pg10.OEMIdentifier &&
+			    (ioc->manu_pg10.GenericFlags0 &
+			    MFG10_GF0_R10_DISPLAY) &&
+			    !(raid_device->num_pds % 2))
+				r_level = "RAID10";
+			else
+				r_level = "RAID1E";
 			break;
 		case MPI2_RAID_VOL_TYPE_RAID1:
 			qdepth = MPT2SAS_RAID_QUEUE_DEPTH;
@@ -1786,17 +1791,18 @@
 	u32 ioc_state;
 	unsigned long timeleft;
 	u8 VF_ID = 0;
-	unsigned long flags;
 
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED ||
-	    ioc->shost_recovery) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+	if (ioc->tm_cmds.status != MPT2_CMD_NOT_USED) {
+		printk(MPT2SAS_INFO_FMT "%s: tm_cmd busy!!!\n",
+		    __func__, ioc->name);
+		return;
+	}
+
+	if (ioc->shost_recovery) {
 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
 		return;
 	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	ioc_state = mpt2sas_base_get_iocstate(ioc, 0);
 	if (ioc_state & MPI2_DOORBELL_USED) {
@@ -1830,6 +1836,7 @@
 	mpi_request->TaskMID = cpu_to_le16(smid_task);
 	int_to_scsilun(lun, (struct scsi_lun *)mpi_request->LUN);
 	mpt2sas_scsih_set_tm_flag(ioc, handle);
+	init_completion(&ioc->tm_cmds.done);
 	mpt2sas_base_put_smid_hi_priority(ioc, smid, VF_ID);
 	timeleft = wait_for_completion_timeout(&ioc->tm_cmds.done, timeout*HZ);
 	mpt2sas_scsih_clear_tm_flag(ioc, handle);
@@ -2222,7 +2229,7 @@
 			    MPT2SAS_INFO_FMT "SDEV_RUNNING: "
 			    "handle(0x%04x)\n", ioc->name, handle));
 			sas_device_priv_data->block = 0;
-			scsi_device_set_state(sdev, SDEV_RUNNING);
+			scsi_internal_device_unblock(sdev);
 		}
 	}
 }
@@ -2251,7 +2258,7 @@
 			    MPT2SAS_INFO_FMT "SDEV_BLOCK: "
 			    "handle(0x%04x)\n", ioc->name, handle));
 			sas_device_priv_data->block = 1;
-			scsi_device_set_state(sdev, SDEV_BLOCK);
+			scsi_internal_device_block(sdev);
 		}
 	}
 }
@@ -2327,6 +2334,7 @@
 	u16 handle;
 	u16 reason_code;
 	u8 phy_number;
+	u8 link_rate;
 
 	for (i = 0; i < event_data->NumEntries; i++) {
 		handle = le16_to_cpu(event_data->PHY[i].AttachedDevHandle);
@@ -2337,6 +2345,11 @@
 		    MPI2_EVENT_SAS_TOPO_RC_MASK;
 		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_DELAY_NOT_RESPONDING)
 			_scsih_block_io_device(ioc, handle);
+		if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
+			link_rate = event_data->PHY[i].LinkRate >> 4;
+			if (link_rate >= MPI2_SAS_NEG_LINK_RATE_1_5)
+				_scsih_ublock_io_device(ioc, handle);
+		}
 	}
 }
 
@@ -2405,27 +2418,6 @@
 }
 
 /**
- * _scsih_queue_rescan - queue a topology rescan from user context
- * @ioc: per adapter object
- *
- * Return nothing.
- */
-static void
-_scsih_queue_rescan(struct MPT2SAS_ADAPTER *ioc)
-{
-	struct fw_event_work *fw_event;
-
-	if (ioc->wait_for_port_enable_to_complete)
-		return;
-	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
-	if (!fw_event)
-		return;
-	fw_event->event = MPT2SAS_RESCAN_AFTER_HOST_RESET;
-	fw_event->ioc = ioc;
-	_scsih_fw_event_add(ioc, fw_event);
-}
-
-/**
  * _scsih_flush_running_cmds - completing outstanding commands.
  * @ioc: per adapter object
  *
@@ -2456,46 +2448,6 @@
 }
 
 /**
- * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
- * @ioc: per adapter object
- * @reset_phase: phase
- *
- * The handler for doing any required cleanup or initialization.
- *
- * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
- * MPT2_IOC_DONE_RESET
- *
- * Return nothing.
- */
-void
-mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
-{
-	switch (reset_phase) {
-	case MPT2_IOC_PRE_RESET:
-		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
-		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
-		_scsih_fw_event_off(ioc);
-		break;
-	case MPT2_IOC_AFTER_RESET:
-		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
-		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
-		if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
-			ioc->tm_cmds.status |= MPT2_CMD_RESET;
-			mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
-			complete(&ioc->tm_cmds.done);
-		}
-		_scsih_fw_event_on(ioc);
-		_scsih_flush_running_cmds(ioc);
-		break;
-	case MPT2_IOC_DONE_RESET:
-		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
-		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
-		_scsih_queue_rescan(ioc);
-		break;
-	}
-}
-
-/**
  * _scsih_setup_eedp - setup MPI request for EEDP transfer
  * @scmd: pointer to scsi command object
  * @mpi_request: pointer to the SCSI_IO request message frame
@@ -2615,7 +2567,6 @@
 	Mpi2SCSIIORequest_t *mpi_request;
 	u32 mpi_control;
 	u16 smid;
-	unsigned long flags;
 
 	scmd->scsi_done = done;
 	sas_device_priv_data = scmd->device->hostdata;
@@ -2634,13 +2585,10 @@
 	}
 
 	/* see if we are busy with task management stuff */
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (sas_target_priv_data->tm_busy ||
-	    ioc->shost_recovery || ioc->ioc_link_reset_in_progress) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+	if (sas_target_priv_data->tm_busy)
+		return SCSI_MLQUEUE_DEVICE_BUSY;
+	else if (ioc->shost_recovery || ioc->ioc_link_reset_in_progress)
 		return SCSI_MLQUEUE_HOST_BUSY;
-	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	if (scmd->sc_data_direction == DMA_FROM_DEVICE)
 		mpi_control = MPI2_SCSIIO_CONTROL_READ;
@@ -3189,25 +3137,6 @@
 }
 
 /**
- * _scsih_link_change - process phy link changes
- * @ioc: per adapter object
- * @handle: phy handle
- * @attached_handle: valid for devices attached to link
- * @phy_number: phy number
- * @link_rate: new link rate
- * Context: user.
- *
- * Return nothing.
- */
-static void
-_scsih_link_change(struct MPT2SAS_ADAPTER *ioc, u16 handle, u16 attached_handle,
-   u8 phy_number, u8 link_rate)
-{
-	mpt2sas_transport_update_phy_link_change(ioc, handle, attached_handle,
-	    phy_number, link_rate);
-}
-
-/**
  * _scsih_sas_host_refresh - refreshing sas host object contents
  * @ioc: per adapter object
  * @update: update link information
@@ -3251,7 +3180,8 @@
 			    le16_to_cpu(sas_iounit_pg0->PhyData[i].
 				ControllerDevHandle);
 			if (update)
-				_scsih_link_change(ioc,
+				mpt2sas_transport_update_links(
+				    ioc,
 				    ioc->sas_hba.phy[i].handle,
 				    le16_to_cpu(sas_iounit_pg0->PhyData[i].
 				    AttachedDevHandle), i,
@@ -3436,6 +3366,9 @@
 	if (!handle)
 		return -1;
 
+	if (ioc->shost_recovery)
+		return -1;
+
 	if ((mpt2sas_config_get_expander_pg0(ioc, &mpi_reply, &expander_pg0,
 	    MPI2_SAS_EXPAND_PGAD_FORM_HNDL, handle))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
@@ -3572,6 +3505,9 @@
 	struct _sas_node *sas_expander;
 	unsigned long flags;
 
+	if (ioc->shost_recovery)
+		return;
+
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_expander = mpt2sas_scsih_expander_find_by_handle(ioc, handle);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -3743,6 +3679,8 @@
 		mutex_unlock(&ioc->tm_cmds.mutex);
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "issue target reset "
 		    "done: handle(0x%04x)\n", ioc->name, device_handle));
+		if (ioc->shost_recovery)
+			goto out;
 	}
 
 	/* SAS_IO_UNIT_CNTR - send REMOVE_DEVICE */
@@ -3765,6 +3703,9 @@
 	    le32_to_cpu(mpi_reply.IOCLogInfo)));
 
  out:
+
+	_scsih_ublock_io_device(ioc, handle);
+
 	mpt2sas_transport_port_remove(ioc, sas_device->sas_address,
 	    sas_device->parent_handle);
 
@@ -3908,6 +3849,8 @@
 			    "expander event\n", ioc->name));
 			return;
 		}
+		if (ioc->shost_recovery)
+			return;
 		if (event_data->PHY[i].PhyStatus &
 		    MPI2_EVENT_SAS_TOPO_PHYSTATUS_VACANT)
 			continue;
@@ -3923,9 +3866,10 @@
 		case MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED:
 			if (!parent_handle) {
 				if (phy_number < ioc->sas_hba.num_phys)
-					_scsih_link_change(ioc,
-					   ioc->sas_hba.phy[phy_number].handle,
-					   handle, phy_number, link_rate_);
+					mpt2sas_transport_update_links(
+					ioc,
+					ioc->sas_hba.phy[phy_number].handle,
+					handle, phy_number, link_rate_);
 			} else {
 				spin_lock_irqsave(&ioc->sas_node_lock, flags);
 				sas_expander =
@@ -3935,17 +3879,14 @@
 				    flags);
 				if (sas_expander) {
 					if (phy_number < sas_expander->num_phys)
-						_scsih_link_change(ioc,
-						   sas_expander->
-						   phy[phy_number].handle,
-						   handle, phy_number,
-						   link_rate_);
+						mpt2sas_transport_update_links(
+						ioc,
+						sas_expander->
+						phy[phy_number].handle,
+						handle, phy_number,
+						link_rate_);
 				}
 			}
-			if (reason_code == MPI2_EVENT_SAS_TOPO_RC_PHY_CHANGED) {
-				if (link_rate_ >= MPI2_SAS_NEG_LINK_RATE_1_5)
-					_scsih_ublock_io_device(ioc, handle);
-			}
 			if (reason_code == MPI2_EVENT_SAS_TOPO_RC_TARG_ADDED) {
 				if (link_rate_ < MPI2_SAS_NEG_LINK_RATE_1_5)
 					break;
@@ -4455,7 +4396,7 @@
 		return;
 	}
 
-	_scsih_link_change(ioc,
+	mpt2sas_transport_update_links(ioc,
 	    le16_to_cpu(sas_device_pg0.ParentDevHandle),
 	    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
 
@@ -4744,7 +4685,7 @@
 			return;
 		}
 
-		_scsih_link_change(ioc,
+		mpt2sas_transport_update_links(ioc,
 		    le16_to_cpu(sas_device_pg0.ParentDevHandle),
 		    handle, sas_device_pg0.PhyNum, MPI2_SAS_NEG_LINK_RATE_1_5);
 
@@ -5156,22 +5097,9 @@
 _scsih_remove_unresponding_devices(struct MPT2SAS_ADAPTER *ioc)
 {
 	struct _sas_device *sas_device, *sas_device_next;
-	struct _sas_node *sas_expander, *sas_expander_next;
+	struct _sas_node *sas_expander;
 	struct _raid_device *raid_device, *raid_device_next;
-	unsigned long flags;
 
-	_scsih_search_responding_sas_devices(ioc);
-	_scsih_search_responding_raid_devices(ioc);
-	_scsih_search_responding_expanders(ioc);
-
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	ioc->shost_recovery = 0;
-	if (ioc->shost->shost_state == SHOST_RECOVERY) {
-		printk(MPT2SAS_INFO_FMT "putting controller into "
-		    "SHOST_RUNNING\n", ioc->name);
-		scsi_host_set_state(ioc->shost, SHOST_RUNNING);
-	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	list_for_each_entry_safe(sas_device, sas_device_next,
 	    &ioc->sas_device_list, list) {
@@ -5207,16 +5135,63 @@
 		_scsih_raid_device_remove(ioc, raid_device);
 	}
 
-	list_for_each_entry_safe(sas_expander, sas_expander_next,
-	    &ioc->sas_expander_list, list) {
+ retry_expander_search:
+	sas_expander = NULL;
+	list_for_each_entry(sas_expander, &ioc->sas_expander_list, list) {
 		if (sas_expander->responding) {
 			sas_expander->responding = 0;
 			continue;
 		}
-		printk("\tremoving expander: handle(0x%04x), "
-		    " sas_addr(0x%016llx)\n", sas_expander->handle,
-		    (unsigned long long)sas_expander->sas_address);
 		_scsih_expander_remove(ioc, sas_expander->handle);
+		goto retry_expander_search;
+	}
+}
+
+/**
+ * mpt2sas_scsih_reset_handler - reset callback handler (for scsih)
+ * @ioc: per adapter object
+ * @reset_phase: phase
+ *
+ * The handler for doing any required cleanup or initialization.
+ *
+ * The reset phase can be MPT2_IOC_PRE_RESET, MPT2_IOC_AFTER_RESET,
+ * MPT2_IOC_DONE_RESET
+ *
+ * Return nothing.
+ */
+void
+mpt2sas_scsih_reset_handler(struct MPT2SAS_ADAPTER *ioc, int reset_phase)
+{
+	switch (reset_phase) {
+	case MPT2_IOC_PRE_RESET:
+		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+		    "MPT2_IOC_PRE_RESET\n", ioc->name, __func__));
+		_scsih_fw_event_off(ioc);
+		break;
+	case MPT2_IOC_AFTER_RESET:
+		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+		    "MPT2_IOC_AFTER_RESET\n", ioc->name, __func__));
+		if (ioc->tm_cmds.status & MPT2_CMD_PENDING) {
+			ioc->tm_cmds.status |= MPT2_CMD_RESET;
+			mpt2sas_base_free_smid(ioc, ioc->tm_cmds.smid);
+			complete(&ioc->tm_cmds.done);
+		}
+		_scsih_fw_event_on(ioc);
+		_scsih_flush_running_cmds(ioc);
+		break;
+	case MPT2_IOC_DONE_RESET:
+		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+		    "MPT2_IOC_DONE_RESET\n", ioc->name, __func__));
+		_scsih_sas_host_refresh(ioc, 0);
+		_scsih_search_responding_sas_devices(ioc);
+		_scsih_search_responding_raid_devices(ioc);
+		_scsih_search_responding_expanders(ioc);
+		break;
+	case MPT2_IOC_RUNNING:
+		dtmprintk(ioc, printk(MPT2SAS_DEBUG_FMT "%s: "
+		    "MPT2_IOC_RUNNING\n", ioc->name, __func__));
+		_scsih_remove_unresponding_devices(ioc);
+		break;
 	}
 }
 
@@ -5236,14 +5211,6 @@
 	unsigned long flags;
 	struct MPT2SAS_ADAPTER *ioc = fw_event->ioc;
 
-	/* This is invoked by calling _scsih_queue_rescan(). */
-	if (fw_event->event == MPT2SAS_RESCAN_AFTER_HOST_RESET) {
-		_scsih_fw_event_free(ioc, fw_event);
-		_scsih_sas_host_refresh(ioc, 1);
-		_scsih_remove_unresponding_devices(ioc);
-		return;
-	}
-
 	/* the queue is being flushed so ignore this event */
 	spin_lock_irqsave(&ioc->fw_event_lock, flags);
 	if (ioc->fw_events_off || ioc->remove_host) {
@@ -5253,13 +5220,10 @@
 	}
 	spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
 	if (ioc->shost_recovery) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 		_scsih_fw_event_requeue(ioc, fw_event, 1000);
 		return;
 	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	switch (fw_event->event) {
 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
@@ -5461,6 +5425,8 @@
 			if (!sas_device)
 				continue;
 			_scsih_remove_device(ioc, sas_device->handle);
+			if (ioc->shost_recovery)
+				return;
 			goto retry_device_search;
 		}
 	}
@@ -5482,6 +5448,8 @@
 			if (!expander_sibling)
 				continue;
 			_scsih_expander_remove(ioc, expander_sibling->handle);
+			if (ioc->shost_recovery)
+				return;
 			goto retry_expander_search;
 		}
 	}
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 686695b..742324a 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -140,11 +140,18 @@
 	u32 device_info;
 	u32 ioc_status;
 
+	if (ioc->shost_recovery) {
+		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+		    __func__, ioc->name);
+		return -EFAULT;
+	}
+
 	if ((mpt2sas_config_get_sas_device_pg0(ioc, &mpi_reply, &sas_device_pg0,
 	    MPI2_SAS_DEVICE_PGAD_FORM_HANDLE, handle))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
+
 		    ioc->name, __FILE__, __LINE__, __func__);
-		return -1;
+		return -ENXIO;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) &
@@ -153,7 +160,7 @@
 		printk(MPT2SAS_ERR_FMT "handle(0x%04x), ioc_status(0x%04x)"
 		    "\nfailure at %s:%d/%s()!\n", ioc->name, handle, ioc_status,
 		     __FILE__, __LINE__, __func__);
-		return -1;
+		return -EIO;
 	}
 
 	memset(identify, 0, sizeof(identify));
@@ -288,21 +295,17 @@
 	void *psge;
 	u32 sgl_flags;
 	u8 issue_reset = 0;
-	unsigned long flags;
 	void *data_out = NULL;
 	dma_addr_t data_out_dma;
 	u32 sz;
 	u64 *sas_address_le;
 	u16 wait_state_count;
 
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+	if (ioc->shost_recovery) {
 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
 		return -EFAULT;
 	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	mutex_lock(&ioc->transport_cmds.mutex);
 
@@ -789,7 +792,7 @@
 }
 
 /**
- * mpt2sas_transport_update_phy_link_change - refreshing phy link changes and attached devices
+ * mpt2sas_transport_update_links - refreshing phy link changes
  * @ioc: per adapter object
  * @handle: handle to sas_host or expander
  * @attached_handle: attached device handle
@@ -799,13 +802,19 @@
  * Returns nothing.
  */
 void
-mpt2sas_transport_update_phy_link_change(struct MPT2SAS_ADAPTER *ioc,
+mpt2sas_transport_update_links(struct MPT2SAS_ADAPTER *ioc,
     u16 handle, u16 attached_handle, u8 phy_number, u8 link_rate)
 {
 	unsigned long flags;
 	struct _sas_node *sas_node;
 	struct _sas_phy *mpt2sas_phy;
 
+	if (ioc->shost_recovery) {
+		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
+			__func__, ioc->name);
+		return;
+	}
+
 	spin_lock_irqsave(&ioc->sas_node_lock, flags);
 	sas_node = _transport_sas_node_find_by_handle(ioc, handle);
 	spin_unlock_irqrestore(&ioc->sas_node_lock, flags);
@@ -1025,7 +1034,6 @@
 	void *psge;
 	u32 sgl_flags;
 	u8 issue_reset = 0;
-	unsigned long flags;
 	dma_addr_t dma_addr_in = 0;
 	dma_addr_t dma_addr_out = 0;
 	u16 wait_state_count;
@@ -1045,14 +1053,11 @@
 		return -EINVAL;
 	}
 
-	spin_lock_irqsave(&ioc->ioc_reset_in_progress_lock, flags);
-	if (ioc->ioc_reset_in_progress) {
-		spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
+	if (ioc->shost_recovery) {
 		printk(MPT2SAS_INFO_FMT "%s: host reset in progress!\n",
 		    __func__, ioc->name);
 		return -EFAULT;
 	}
-	spin_unlock_irqrestore(&ioc->ioc_reset_in_progress_lock, flags);
 
 	rc = mutex_lock_interruptible(&ioc->transport_cmds.mutex);
 	if (rc)
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index 70b60ad..e32c344 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1713,7 +1713,7 @@
 
 	nsp_dbg(NSP_DEBUG_INIT, "in");
 
-	cfg_mem = kzalloc(sizeof(cfg_mem), GFP_KERNEL);
+	cfg_mem = kzalloc(sizeof(*cfg_mem), GFP_KERNEL);
 	if (!cfg_mem)
 		return -ENOMEM;
 	cfg_mem->data = data;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
new file mode 100644
index 0000000..4302f06
--- /dev/null
+++ b/drivers/scsi/pmcraid.c
@@ -0,0 +1,5604 @@
+/*
+ * pmcraid.c -- driver for PMC Sierra MaxRAID controller adapters
+ *
+ * Written By: PMC Sierra Corporation
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307,
+ * USA
+ *
+ */
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/blkdev.h>
+#include <linux/firmware.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/hdreg.h>
+#include <linux/version.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/processor.h>
+#include <linux/libata.h>
+#include <linux/mutex.h>
+#include <scsi/scsi.h>
+#include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
+#include <scsi/scsi_eh.h>
+#include <scsi/scsi_cmnd.h>
+#include <scsi/scsicam.h>
+
+#include "pmcraid.h"
+
+/*
+ *   Module configuration parameters
+ */
+static unsigned int pmcraid_debug_log;
+static unsigned int pmcraid_disable_aen;
+static unsigned int pmcraid_log_level = IOASC_LOG_LEVEL_MUST;
+
+/*
+ * Data structures to support multiple adapters by the LLD.
+ * pmcraid_adapter_count - count of configured adapters
+ */
+static atomic_t pmcraid_adapter_count = ATOMIC_INIT(0);
+
+/*
+ * Supporting user-level control interface through IOCTL commands.
+ * pmcraid_major - major number to use
+ * pmcraid_minor - minor number(s) to use
+ */
+static unsigned int pmcraid_major;
+static struct class *pmcraid_class;
+DECLARE_BITMAP(pmcraid_minor, PMCRAID_MAX_ADAPTERS);
+
+/*
+ * Module parameters
+ */
+MODULE_AUTHOR("PMC Sierra Corporation, anil_ravindranath@pmc-sierra.com");
+MODULE_DESCRIPTION("PMC Sierra MaxRAID Controller Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(PMCRAID_DRIVER_VERSION);
+
+module_param_named(log_level, pmcraid_log_level, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(log_level,
+		 "Enables firmware error code logging, default: 1 high-severity"
+		 " errors only, 2: all errors including high-severity errors,"
+		 " 0: disables logging");
+
+module_param_named(debug, pmcraid_debug_log, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(debug,
+		 "Enable driver verbose message logging. Set 1 to enable."
+		 " (default: 0)");
+
+module_param_named(disable_aen, pmcraid_disable_aen, uint, (S_IRUGO | S_IWUSR));
+MODULE_PARM_DESC(disable_aen,
+		 "Disable driver aen notifications to apps. Set 1 to disable."
+		 " (default: 0)");
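+
+/* Illustrative (hypothetical) usage of the parameters above: they can be
+ * passed at load time, e.g.
+ *   modprobe pmcraid debug=1 log_level=2 disable_aen=1
+ * and, since they are declared with S_IWUSR, may also be changed later
+ * through /sys/module/pmcraid/parameters/.
+ */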
+
+/* chip specific constants for PMC MaxRAID controllers (same for
+ * 0x5220 and 0x8010)
+ */
+static struct pmcraid_chip_details pmcraid_chip_cfg[] = {
+	{
+	 .ioastatus = 0x0,
+	 .ioarrin = 0x00040,
+	 .mailbox = 0x7FC30,
+	 .global_intr_mask = 0x00034,
+	 .ioa_host_intr = 0x0009C,
+	 .ioa_host_intr_clr = 0x000A0,
+	 .ioa_host_mask = 0x7FC28,
+	 .ioa_host_mask_clr = 0x7FC28,
+	 .host_ioa_intr = 0x00020,
+	 .host_ioa_intr_clr = 0x00020,
+	 .transop_timeout = 300
+	 }
+};
+
+/*
+ * PCI device ids supported by pmcraid driver
+ */
+static struct pci_device_id pmcraid_pci_table[] __devinitdata = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_PMC, PCI_DEVICE_ID_PMC_MAXRAID),
+	  0, 0, (kernel_ulong_t)&pmcraid_chip_cfg[0]
+	},
+	{}
+};
+
+MODULE_DEVICE_TABLE(pci, pmcraid_pci_table);
+
+
+
+/**
+ * pmcraid_slave_alloc - Prepare for commands to a device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is called by mid-layer prior to sending any command to the new
+ * device. Stores resource entry details of the device in scsi_device struct.
+ * Queuecommand uses the resource handle and other details to fill up IOARCB
+ * while sending commands to the device.
+ *
+ * Return value:
+ *	  0 on success / -ENXIO if device does not exist
+ */
+static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
+{
+	struct pmcraid_resource_entry *temp, *res = NULL;
+	struct pmcraid_instance *pinstance;
+	u8 target, bus, lun;
+	unsigned long lock_flags;
+	int rc = -ENXIO;
+	pinstance = shost_priv(scsi_dev->host);
+
+	/* Driver exposes VSET and GSCSI resources only; all other device types
+	 * are not exposed. Resource list is synchronized using resource lock
+	 * so any traversal or modifications to the list should be done inside
+	 * this lock
+	 */
+	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+	list_for_each_entry(temp, &pinstance->used_res_q, queue) {
+
+		/* do not expose VSETs with order-ids >= 240 */
+		if (RES_IS_VSET(temp->cfg_entry)) {
+			target = temp->cfg_entry.unique_flags1;
+			if (target >= PMCRAID_MAX_VSET_TARGETS)
+				continue;
+			bus = PMCRAID_VSET_BUS_ID;
+			lun = 0;
+		} else if (RES_IS_GSCSI(temp->cfg_entry)) {
+			target = RES_TARGET(temp->cfg_entry.resource_address);
+			bus = PMCRAID_PHYS_BUS_ID;
+			lun = RES_LUN(temp->cfg_entry.resource_address);
+		} else {
+			continue;
+		}
+
+		if (bus == scsi_dev->channel &&
+		    target == scsi_dev->id &&
+		    lun == scsi_dev->lun) {
+			res = temp;
+			break;
+		}
+	}
+
+	if (res) {
+		res->scsi_dev = scsi_dev;
+		scsi_dev->hostdata = res;
+		res->change_detected = 0;
+		atomic_set(&res->read_failures, 0);
+		atomic_set(&res->write_failures, 0);
+		rc = 0;
+	}
+	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+	return rc;
+}
+
+/**
+ * pmcraid_slave_configure - Configures a SCSI device
+ * @scsi_dev: scsi device struct
+ *
+ * This function is executed by the SCSI mid layer just after a device is first
+ * scanned (i.e. it has responded to an INQUIRY). For VSET resources, the
+ * timeout value (default 30s) will be over-written to a higher value (60s)
+ * and max_sectors value will be over-written to 512. It also sets queue depth
+ * to host->cmd_per_lun value
+ *
+ * Return value:
+ *	  0 on success
+ */
+static int pmcraid_slave_configure(struct scsi_device *scsi_dev)
+{
+	struct pmcraid_resource_entry *res = scsi_dev->hostdata;
+
+	if (!res)
+		return 0;
+
+	/* LLD exposes VSETs and Enclosure devices only */
+	if (RES_IS_GSCSI(res->cfg_entry) &&
+	    scsi_dev->type != TYPE_ENCLOSURE)
+		return -ENXIO;
+
+	pmcraid_info("configuring %x:%x:%x:%x\n",
+		     scsi_dev->host->unique_id,
+		     scsi_dev->channel,
+		     scsi_dev->id,
+		     scsi_dev->lun);
+
+	if (RES_IS_GSCSI(res->cfg_entry)) {
+		scsi_dev->allow_restart = 1;
+	} else if (RES_IS_VSET(res->cfg_entry)) {
+		scsi_dev->allow_restart = 1;
+		blk_queue_rq_timeout(scsi_dev->request_queue,
+				     PMCRAID_VSET_IO_TIMEOUT);
+		blk_queue_max_sectors(scsi_dev->request_queue,
+				      PMCRAID_VSET_MAX_SECTORS);
+	}
+
+	if (scsi_dev->tagged_supported &&
+	    (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
+		scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
+		scsi_adjust_queue_depth(scsi_dev, MSG_SIMPLE_TAG,
+					scsi_dev->host->cmd_per_lun);
+	} else {
+		scsi_adjust_queue_depth(scsi_dev, 0,
+					scsi_dev->host->cmd_per_lun);
+	}
+
+	return 0;
+}
+
+/**
+ * pmcraid_slave_destroy - Unconfigure a SCSI device before removing it
+ *
+ * @scsi_dev: scsi device struct
+ *
+ * This is called by mid-layer before removing a device. Pointer assignments
+ * done in pmcraid_slave_alloc will be reset to NULL here.
+ *
+ * Return value
+ *   none
+ */
+static void pmcraid_slave_destroy(struct scsi_device *scsi_dev)
+{
+	struct pmcraid_resource_entry *res;
+
+	res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
+
+	if (res)
+		res->scsi_dev = NULL;
+
+	scsi_dev->hostdata = NULL;
+}
+
+/**
+ * pmcraid_change_queue_depth - Change the device's queue depth
+ * @scsi_dev: scsi device struct
+ * @depth: depth to set
+ *
+ * Return value
+ * 	actual depth set
+ */
+static int pmcraid_change_queue_depth(struct scsi_device *scsi_dev, int depth)
+{
+	if (depth > PMCRAID_MAX_CMD_PER_LUN)
+		depth = PMCRAID_MAX_CMD_PER_LUN;
+
+	scsi_adjust_queue_depth(scsi_dev, scsi_get_tag_type(scsi_dev), depth);
+
+	return scsi_dev->queue_depth;
+}
+
+/**
+ * pmcraid_change_queue_type - Change the device's queue type
+ * @scsi_dev: scsi device struct
+ * @tag: type of tags to use
+ *
+ * Return value:
+ * 	actual queue type set
+ */
+static int pmcraid_change_queue_type(struct scsi_device *scsi_dev, int tag)
+{
+	struct pmcraid_resource_entry *res;
+
+	res = (struct pmcraid_resource_entry *)scsi_dev->hostdata;
+
+	if ((res) && scsi_dev->tagged_supported &&
+	    (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry))) {
+		scsi_set_tag_type(scsi_dev, tag);
+
+		if (tag)
+			scsi_activate_tcq(scsi_dev, scsi_dev->queue_depth);
+		else
+			scsi_deactivate_tcq(scsi_dev, scsi_dev->queue_depth);
+	} else
+		tag = 0;
+
+	return tag;
+}
+
+
+/**
+ * pmcraid_init_cmdblk - initializes a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be initialized
+ * @index: if >=0 first time initialization; otherwise reinitialization
+ *
+ * Return Value
+ *	 None
+ */
+void pmcraid_init_cmdblk(struct pmcraid_cmd *cmd, int index)
+{
+	struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
+	dma_addr_t dma_addr = cmd->ioa_cb_bus_addr;
+
+	if (index >= 0) {
+		/* first time initialization (called from  probe) */
+		u32 ioasa_offset =
+			offsetof(struct pmcraid_control_block, ioasa);
+
+		cmd->index = index;
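+		/* the command index is encoded in the response handle (shifted
+		 * left by two) so that the response path can map a completed
+		 * HRRQ entry back to its command block
+		 */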
+		ioarcb->response_handle = cpu_to_le32(index << 2);
+		ioarcb->ioarcb_bus_addr = cpu_to_le64(dma_addr);
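+		/* the IOASA is carved out of the same DMA-able control block,
+		 * addressed at ioasa_offset from the block's base address
+		 */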
+		ioarcb->ioasa_bus_addr = cpu_to_le64(dma_addr + ioasa_offset);
+		ioarcb->ioasa_len = cpu_to_le16(sizeof(struct pmcraid_ioasa));
+	} else {
+		/* re-initialization of various lengths, called once command is
+		 * processed by IOA
+		 */
+		memset(&cmd->ioa_cb->ioarcb.cdb, 0, PMCRAID_MAX_CDB_LEN);
+		ioarcb->request_flags0 = 0;
+		ioarcb->request_flags1 = 0;
+		ioarcb->cmd_timeout = 0;
+		ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+		ioarcb->ioadl_bus_addr = 0;
+		ioarcb->ioadl_length = 0;
+		ioarcb->data_transfer_length = 0;
+		ioarcb->add_cmd_param_length = 0;
+		ioarcb->add_cmd_param_offset = 0;
+		cmd->ioa_cb->ioasa.ioasc = 0;
+		cmd->ioa_cb->ioasa.residual_data_length = 0;
+		cmd->u.time_left = 0;
+	}
+
+	cmd->cmd_done = NULL;
+	cmd->scsi_cmd = NULL;
+	cmd->release = 0;
+	cmd->completion_req = 0;
+	cmd->dma_handle = 0;
+	init_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reinit_cmdblk - reinitialize a command block
+ *
+ * @cmd: pointer to struct pmcraid_cmd to be reinitialized
+ *
+ * Return Value
+ *	 None
+ */
+static void pmcraid_reinit_cmdblk(struct pmcraid_cmd *cmd)
+{
+	pmcraid_init_cmdblk(cmd, -1);
+}
+
+/**
+ * pmcraid_get_free_cmd - get a free cmd block from command block pool
+ * @pinstance: adapter instance structure
+ *
+ * Return Value:
+ *	returns pointer to cmd block or NULL if no blocks are available
+ */
+static struct pmcraid_cmd *pmcraid_get_free_cmd(
+	struct pmcraid_instance *pinstance
+)
+{
+	struct pmcraid_cmd *cmd = NULL;
+	unsigned long lock_flags;
+
+	/* free cmd block list is protected by free_pool_lock */
+	spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+
+	if (!list_empty(&pinstance->free_cmd_pool)) {
+		cmd = list_entry(pinstance->free_cmd_pool.next,
+				 struct pmcraid_cmd, free_list);
+		list_del(&cmd->free_list);
+	}
+	spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+
+	/* Initialize the command block before giving it to the caller */
+	if (cmd != NULL)
+		pmcraid_reinit_cmdblk(cmd);
+	return cmd;
+}
+
+/**
+ * pmcraid_return_cmd - return a completed command block back into free pool
+ * @cmd: pointer to the command block
+ *
+ * Return Value:
+ *	nothing
+ */
+void pmcraid_return_cmd(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	unsigned long lock_flags;
+
+	spin_lock_irqsave(&pinstance->free_pool_lock, lock_flags);
+	list_add_tail(&cmd->free_list, &pinstance->free_cmd_pool);
+	spin_unlock_irqrestore(&pinstance->free_pool_lock, lock_flags);
+}
+
+/**
+ * pmcraid_read_interrupts -  reads IOA interrupts
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ *	 interrupts read from IOA
+ */
+static u32 pmcraid_read_interrupts(struct pmcraid_instance *pinstance)
+{
+	return ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+}
+
+/**
+ * pmcraid_disable_interrupts - Masks and clears all specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intrs: interrupts to disable
+ *
+ * Return Value
+ *	 None
+ */
+static void pmcraid_disable_interrupts(
+	struct pmcraid_instance *pinstance,
+	u32 intrs
+)
+{
+	u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+	u32 nmask = gmask | GLOBAL_INTERRUPT_MASK;
+
+	iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+	iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_clr_reg);
+	iowrite32(intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
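+	/* read the mask register back to flush the posted MMIO writes */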
+	ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+}
+
+/**
+ * pmcraid_enable_interrupts - Enables specified interrupts
+ *
+ * @pinstance: pointer to per adapter instance structure
+ * @intr: interrupts to enable
+ *
+ * Return Value
+ *	 None
+ */
+static void pmcraid_enable_interrupts(
+	struct pmcraid_instance *pinstance,
+	u32 intrs
+)
+{
+	u32 gmask = ioread32(pinstance->int_regs.global_interrupt_mask_reg);
+	u32 nmask = gmask & (~GLOBAL_INTERRUPT_MASK);
+
+	iowrite32(nmask, pinstance->int_regs.global_interrupt_mask_reg);
+	iowrite32(~intrs, pinstance->int_regs.ioa_host_interrupt_mask_reg);
+	ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+
+	pmcraid_info("enabled interrupts global mask = %x intr_mask = %x\n",
+		ioread32(pinstance->int_regs.global_interrupt_mask_reg),
+		ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg));
+}
+
+/**
+ * pmcraid_reset_type - Determine the required reset type
+ * @pinstance: pointer to adapter instance structure
+ *
+ * IOA requires hard reset if any of the following conditions is true.
+ * 1. If HRRQ valid interrupt is not masked
+ * 2. IOA reset alert doorbell is set
+ * 3. If there are any error interrupts
+ */
+static void pmcraid_reset_type(struct pmcraid_instance *pinstance)
+{
+	u32 mask;
+	u32 intrs;
+	u32 alerts;
+
+	mask = ioread32(pinstance->int_regs.ioa_host_interrupt_mask_reg);
+	intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+	alerts = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+
+	if ((mask & INTRS_HRRQ_VALID) == 0 ||
+	    (alerts & DOORBELL_IOA_RESET_ALERT) ||
+	    (intrs & PMCRAID_ERROR_INTERRUPTS)) {
+		pmcraid_info("IOA requires hard reset\n");
+		pinstance->ioa_hard_reset = 1;
+	}
+
+	/* If unit check is active, trigger the dump */
+	if (intrs & INTRS_IOA_UNIT_CHECK)
+		pinstance->ioa_unit_check = 1;
+}
+
+/**
+ * pmcraid_bist_done - completion function for PCI BIST
+ * @cmd: pointer to reset command
+ * Return Value
+ * 	none
+ */
+
+static void pmcraid_ioa_reset(struct pmcraid_cmd *);
+
+static void pmcraid_bist_done(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	unsigned long lock_flags;
+	int rc;
+	u16 pci_reg;
+
+	rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+
+	/* If PCI config space can't be accessed wait for another two secs */
+	if ((rc != PCIBIOS_SUCCESSFUL || (!(pci_reg & PCI_COMMAND_MEMORY))) &&
+	    cmd->u.time_left > 0) {
+		pmcraid_info("BIST not complete, waiting another 2 secs\n");
+		cmd->timer.expires = jiffies + cmd->u.time_left;
+		cmd->u.time_left = 0;
+		cmd->timer.data = (unsigned long)cmd;
+		cmd->timer.function =
+			(void (*)(unsigned long))pmcraid_bist_done;
+		add_timer(&cmd->timer);
+	} else {
+		cmd->u.time_left = 0;
+		pmcraid_info("BIST is complete, proceeding with reset\n");
+		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+		pmcraid_ioa_reset(cmd);
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+	}
+}
+
+/**
+ * pmcraid_start_bist - starts BIST
+ * @cmd: pointer to reset cmd
+ * Return Value
+ *   none
+ */
+static void pmcraid_start_bist(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 doorbells, intrs;
+
+	/* proceed with bist and wait for 2 seconds */
+	iowrite32(DOORBELL_IOA_START_BIST,
+		pinstance->int_regs.host_ioa_interrupt_reg);
+	doorbells = ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+	intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+	pmcraid_info("doorbells after start bist: %x intrs: %x \n",
+		      doorbells, intrs);
+
+	cmd->u.time_left = msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+	cmd->timer.data = (unsigned long)cmd;
+	cmd->timer.expires = jiffies + msecs_to_jiffies(PMCRAID_BIST_TIMEOUT);
+	cmd->timer.function = (void (*)(unsigned long))pmcraid_bist_done;
+	add_timer(&cmd->timer);
+}
+
+/**
+ * pmcraid_reset_alert_done - completion routine for reset_alert
+ * @cmd: pointer to command block used in reset sequence
+ * Return value
+ *  None
+ */
+static void pmcraid_reset_alert_done(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 status = ioread32(pinstance->ioa_status);
+	unsigned long lock_flags;
+
+	/* if the critical operation in progress bit has been cleared or the
+	 * wait times out, invoke the reset engine to proceed with the hard
+	 * reset. If there is some more time left to wait, restart the timer
+	 */
+	if (((status & INTRS_CRITICAL_OP_IN_PROGRESS) == 0) ||
+	    cmd->u.time_left <= 0) {
+		pmcraid_info("critical op is reset proceeding with reset\n");
+		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+		pmcraid_ioa_reset(cmd);
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+	} else {
+		pmcraid_info("critical op is not yet reset waiting again\n");
+		/* restart timer if some more time is available to wait */
+		cmd->u.time_left -= PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+		cmd->timer.data = (unsigned long)cmd;
+		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+		cmd->timer.function =
+			(void (*)(unsigned long))pmcraid_reset_alert_done;
+		add_timer(&cmd->timer);
+	}
+}
+
+/**
+ * pmcraid_reset_alert - alerts IOA for a possible reset
+ * @cmd : command block to be used for reset sequence.
+ *
+ * Return Value
+ *	None. If the PCI config space is accessible, RESET_ALERT is written
+ *	to the IOA doorbell; otherwise a BIST is started directly.
+ */
+static void pmcraid_reset_alert(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 doorbells;
+	int rc;
+	u16 pci_reg;
+
+	/* If we are able to access IOA PCI config space, alert IOA that we are
+	 * going to reset it soon. This enables the IOA to preserve persistent error
+	 * data if any. In case memory space is not accessible, proceed with
+	 * BIST or slot_reset
+	 */
+	rc = pci_read_config_word(pinstance->pdev, PCI_COMMAND, &pci_reg);
+	if ((rc == PCIBIOS_SUCCESSFUL) && (pci_reg & PCI_COMMAND_MEMORY)) {
+
+		/* wait for IOA permission, i.e. until the CRITICAL_OPERATION
+		 * bit is reset. The IOA doesn't generate any interrupts when
+		 * the CRITICAL OPERATION bit is reset, so a timer is started
+		 * to wait for this bit to be reset.
+		 */
+		cmd->u.time_left = PMCRAID_RESET_TIMEOUT;
+		cmd->timer.data = (unsigned long)cmd;
+		cmd->timer.expires = jiffies + PMCRAID_CHECK_FOR_RESET_TIMEOUT;
+		cmd->timer.function =
+			(void (*)(unsigned long))pmcraid_reset_alert_done;
+		add_timer(&cmd->timer);
+
+		iowrite32(DOORBELL_IOA_RESET_ALERT,
+			pinstance->int_regs.host_ioa_interrupt_reg);
+		doorbells =
+			ioread32(pinstance->int_regs.host_ioa_interrupt_reg);
+		pmcraid_info("doorbells after reset alert: %x\n", doorbells);
+	} else {
+		pmcraid_info("PCI config is not accessible starting BIST\n");
+		pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
+		pmcraid_start_bist(cmd);
+	}
+}
+
+/**
+ * pmcraid_timeout_handler -  Timeout handler for internally generated ops
+ *
+ * @cmd : pointer to the command structure that timed out
+ *
+ * This function blocks host requests and initiates an adapter reset.
+ *
+ * Return value:
+ *   None
+ */
+static void pmcraid_timeout_handler(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	unsigned long lock_flags;
+
+	dev_err(&pinstance->pdev->dev,
+		"Adapter being reset due to command timeout.\n");
+
+	/* Command timeouts result in hard reset sequence. The command that got
+	 * timed out may be the one used as part of reset sequence. In this
+	 * case restart reset sequence using the same command block even if
+	 * reset is in progress. Otherwise fail this command and get a free
+	 * command block to restart the reset sequence.
+	 */
+	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+	if (!pinstance->ioa_reset_in_progress) {
+		pinstance->ioa_reset_attempts = 0;
+		cmd = pmcraid_get_free_cmd(pinstance);
+
+		/* If we are out of command blocks, just return; some other
+		 * command's timeout handler can do the reset job.
+		 */
+		if (cmd == NULL) {
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       lock_flags);
+			pmcraid_err("no free cmnd block for timeout handler\n");
+			return;
+		}
+
+		pinstance->reset_cmd = cmd;
+		pinstance->ioa_reset_in_progress = 1;
+	} else {
+		pmcraid_info("reset is already in progress\n");
+
+		if (pinstance->reset_cmd != cmd) {
+			/* This command should have been given to IOA, this
+			 * command will be completed by fail_outstanding_cmds
+			 * anyway
+			 */
+			pmcraid_err("cmd is pending but reset in progress\n");
+		}
+
+		/* If this command was being used as part of the reset
+		 * sequence, set cmd_done pointer to pmcraid_ioa_reset. This
+		 * causes fail_outstanding_commands not to return the command
+		 * block back to free pool
+		 */
+		if (cmd == pinstance->reset_cmd)
+			cmd->cmd_done = pmcraid_ioa_reset;
+
+	}
+
+	pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+	scsi_block_requests(pinstance->host);
+	pmcraid_reset_alert(cmd);
+	spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+}
+
+/**
+ * pmcraid_internal_done - completion routine for internally generated cmds
+ *
+ * @cmd: command that got response from IOA
+ *
+ * Return Value:
+ *	 none
+ */
+static void pmcraid_internal_done(struct pmcraid_cmd *cmd)
+{
+	pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+	/* Some of the internal commands are sent with callers blocking for the
+	 * response. This is indicated by the cmd->completion_req field. The
+	 * response path needs to wake up any waiters waiting for command
+	 * completion if this flag is set.
+	 */
+	if (cmd->completion_req) {
+		cmd->completion_req = 0;
+		complete(&cmd->wait_for_completion);
+	}
+
+	/* most of the internal commands are completed by the caller itself, so
+	 * there is no need to return the command block back to the free pool
+	 * until we are required to do so (e.g. once done with initialization).
+	 */
+	if (cmd->release) {
+		cmd->release = 0;
+		pmcraid_return_cmd(cmd);
+	}
+}
+
+/**
+ * pmcraid_reinit_cfgtable_done - done function for cfg table reinitialization
+ *
+ * @cmd: command that got response from IOA
+ *
+ * This routine is called after driver re-reads configuration table due to a
+ * lost CCN. It returns the command block back to free pool and schedules
+ * worker thread to add/delete devices into the system.
+ *
+ * Return Value:
+ *	 none
+ */
+static void pmcraid_reinit_cfgtable_done(struct pmcraid_cmd *cmd)
+{
+	pmcraid_info("response internal cmd CDB[0] = %x ioasc = %x\n",
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+	if (cmd->release) {
+		cmd->release = 0;
+		pmcraid_return_cmd(cmd);
+	}
+	pmcraid_info("scheduling worker for config table reinitialization\n");
+	schedule_work(&cmd->drv_inst->worker_q);
+}
+
+/**
+ * pmcraid_erp_done - Process completion of SCSI error response from device
+ * @cmd: pmcraid_command
+ *
+ * This function copies the sense buffer into the scsi_cmd struct and completes
+ * scsi_cmd by calling scsi_done function.
+ *
+ * Return value:
+ *  none
+ */
+static void pmcraid_erp_done(struct pmcraid_cmd *cmd)
+{
+	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+
+	if (PMCRAID_IOASC_SENSE_KEY(ioasc) > 0) {
+		scsi_cmd->result |= (DID_ERROR << 16);
+		pmcraid_err("command CDB[0] = %x failed with IOASC: 0x%08X\n",
+			     cmd->ioa_cb->ioarcb.cdb[0], ioasc);
+	}
+
+	/* if we had allocated sense buffers for request sense, copy the sense
+	 * data and release the buffers
+	 */
+	if (cmd->sense_buffer != NULL) {
+		memcpy(scsi_cmd->sense_buffer,
+		       cmd->sense_buffer,
+		       SCSI_SENSE_BUFFERSIZE);
+		pci_free_consistent(pinstance->pdev,
+				    SCSI_SENSE_BUFFERSIZE,
+				    cmd->sense_buffer, cmd->sense_buffer_dma);
+		cmd->sense_buffer = NULL;
+		cmd->sense_buffer_dma = 0;
+	}
+
+	scsi_dma_unmap(scsi_cmd);
+	pmcraid_return_cmd(cmd);
+	scsi_cmd->scsi_done(scsi_cmd);
+}
+
+/**
+ * pmcraid_fire_command - sends an IOA command to adapter
+ *
+ * This function adds the given block into pending command list
+ * and returns without waiting
+ *
+ * @cmd : command to be sent to the device
+ *
+ * Return Value
+ *	None
+ */
+static void _pmcraid_fire_command(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	unsigned long lock_flags;
+
+	/* Add this command block to pending cmd pool. We do this prior to
+	 * writing the IOARCB to ioarrin because the IOA might complete the
+	 * command by the time we are about to add it to the list. The response
+	 * handler (isr/tasklet) looks for the cmd block in the pending list.
+	 */
+	spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+	list_add_tail(&cmd->free_list, &pinstance->pending_cmd_pool);
+	spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
+	atomic_inc(&pinstance->outstanding_cmds);
+
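+	/* make sure the IOARCB contents and the pending-list insertion above
+	 * are visible before ringing ioarrin (hence the barrier below)
+	 */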
+	/* driver writes lower 32-bit value of IOARCB address only */
+	mb();
+	iowrite32(le32_to_cpu(cmd->ioa_cb->ioarcb.ioarcb_bus_addr),
+		  pinstance->ioarrin);
+}
+
+/**
+ * pmcraid_send_cmd - fires a command to IOA
+ *
+ * This function also sets up timeout function, and command completion
+ * function
+ *
+ * @cmd: pointer to the command block to be fired to IOA
+ * @cmd_done: command completion function, called once IOA responds
+ * @timeout: timeout to wait for this command completion
+ * @timeout_func: timeout handler
+ *
+ * Return value
+ *   none
+ */
+static void pmcraid_send_cmd(
+	struct pmcraid_cmd *cmd,
+	void (*cmd_done) (struct pmcraid_cmd *),
+	unsigned long timeout,
+	void (*timeout_func) (struct pmcraid_cmd *)
+)
+{
+	/* initialize done function */
+	cmd->cmd_done = cmd_done;
+
+	if (timeout_func) {
+		/* setup timeout handler */
+		cmd->timer.data = (unsigned long)cmd;
+		cmd->timer.expires = jiffies + timeout;
+		cmd->timer.function = (void (*)(unsigned long))timeout_func;
+		add_timer(&cmd->timer);
+	}
+
+	/* fire the command to IOA */
+	_pmcraid_fire_command(cmd);
+}
+
+/**
+ * pmcraid_ioa_shutdown - sends SHUTDOWN command to ioa
+ *
+ * @cmd: pointer to the command block used as part of reset sequence
+ *
+ * Return Value
+ *  None
+ */
+static void pmcraid_ioa_shutdown(struct pmcraid_cmd *cmd)
+{
+	pmcraid_info("response for Cancel CCN CDB[0] = %x ioasc = %x\n",
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+	/* Note that the same command block is reused to send the next command
+	 * of the reset sequence to the IOA, hence reinit the done function as
+	 * well as the timeout function.
+	 */
+	pmcraid_reinit_cmdblk(cmd);
+	cmd->ioa_cb->ioarcb.request_type = REQ_TYPE_IOACMD;
+	cmd->ioa_cb->ioarcb.resource_handle =
+		cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+	cmd->ioa_cb->ioarcb.cdb[0] = PMCRAID_IOA_SHUTDOWN;
+	cmd->ioa_cb->ioarcb.cdb[1] = PMCRAID_SHUTDOWN_NORMAL;
+
+	/* fire shutdown command to hardware. */
+	pmcraid_info("firing normal shutdown command (%d) to IOA\n",
+		     le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle));
+
+	pmcraid_send_cmd(cmd, pmcraid_ioa_reset,
+			 PMCRAID_SHUTDOWN_TIMEOUT,
+			 pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_identify_hrrq - registers host rrq buffers with IOA
+ * @cmd: pointer to command block to be used for identify hrrq
+ *
+ * Return Value
+ *	 none
+ */
+
+static void pmcraid_querycfg(struct pmcraid_cmd *);
+
+static void pmcraid_identify_hrrq(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	int index = 0;
+	__be64 hrrq_addr = cpu_to_be64(pinstance->hrrq_start_bus_addr[index]);
+	u32 hrrq_size = cpu_to_be32(sizeof(u32) * PMCRAID_MAX_CMD);
+
+	pmcraid_reinit_cmdblk(cmd);
+
+	/* Initialize ioarcb */
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+
+	/* initialize the hrrq number where IOA will respond to this command */
+	ioarcb->hrrq_id = index;
+	ioarcb->cdb[0] = PMCRAID_IDENTIFY_HRRQ;
+	ioarcb->cdb[1] = index;
+
+	/* IOA expects the 64-bit PCI address to be written in big-endian
+	 * format (i.e. cdb[2] = MSByte .. cdb[9] = LSByte).
+	 */
+	pmcraid_info("HRRQ_IDENTIFY with hrrq:ioarcb => %llx:%llx\n",
+		     hrrq_addr, ioarcb->ioarcb_bus_addr);
+
+	memcpy(&(ioarcb->cdb[2]), &hrrq_addr, sizeof(hrrq_addr));
+	memcpy(&(ioarcb->cdb[10]), &hrrq_size, sizeof(hrrq_size));
+
+	/* Subsequent commands require HRRQ identification to be successful.
+	 * Note that this gets called even during reset from SCSI mid-layer
+	 * or tasklet
+	 */
+	pmcraid_send_cmd(cmd, pmcraid_querycfg,
+			 PMCRAID_INTERNAL_TIMEOUT,
+			 pmcraid_timeout_handler);
+}
+
+static void pmcraid_process_ccn(struct pmcraid_cmd *cmd);
+static void pmcraid_process_ldn(struct pmcraid_cmd *cmd);
+
+/**
+ * pmcraid_send_hcam_cmd - send an initialized command block(HCAM) to IOA
+ *
+ * @cmd: initialized command block pointer
+ *
+ * Return Value
+ *   none
+ */
+static void pmcraid_send_hcam_cmd(struct pmcraid_cmd *cmd)
+{
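+	/* clear the 'ignore' flag so the response handler processes this HCAM */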
+	if (cmd->ioa_cb->ioarcb.cdb[1] == PMCRAID_HCAM_CODE_CONFIG_CHANGE)
+		atomic_set(&(cmd->drv_inst->ccn.ignore), 0);
+	else
+		atomic_set(&(cmd->drv_inst->ldn.ignore), 0);
+
+	pmcraid_send_cmd(cmd, cmd->cmd_done, 0, NULL);
+}
+
+/**
+ * pmcraid_init_hcam - initialize a command block for an HCAM to be sent to IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @type: HCAM type
+ *
+ * Return Value
+ *   pointer to initialized pmcraid_cmd structure or NULL
+ */
+static struct pmcraid_cmd *pmcraid_init_hcam
+(
+	struct pmcraid_instance *pinstance,
+	u8 type
+)
+{
+	struct pmcraid_cmd *cmd;
+	struct pmcraid_ioarcb *ioarcb;
+	struct pmcraid_ioadl_desc *ioadl;
+	struct pmcraid_hostrcb *hcam;
+	void (*cmd_done) (struct pmcraid_cmd *);
+	dma_addr_t dma;
+	int rcb_size;
+
+	cmd = pmcraid_get_free_cmd(pinstance);
+
+	if (!cmd) {
+		pmcraid_err("no free command blocks for hcam\n");
+		return cmd;
+	}
+
+	if (type == PMCRAID_HCAM_CODE_CONFIG_CHANGE) {
+		rcb_size = sizeof(struct pmcraid_hcam_ccn);
+		cmd_done = pmcraid_process_ccn;
+		dma = pinstance->ccn.baddr + PMCRAID_AEN_HDR_SIZE;
+		hcam = &pinstance->ccn;
+	} else {
+		rcb_size = sizeof(struct pmcraid_hcam_ldn);
+		cmd_done = pmcraid_process_ldn;
+		dma = pinstance->ldn.baddr + PMCRAID_AEN_HDR_SIZE;
+		hcam = &pinstance->ldn;
+	}
+
+	/* initialize command pointer used for HCAM registration */
+	hcam->cmd = cmd;
+
+	ioarcb = &cmd->ioa_cb->ioarcb;
+	ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+					offsetof(struct pmcraid_ioarcb,
+						add_data.u.ioadl[0]));
+	ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+	ioadl = ioarcb->add_data.u.ioadl;
+
+	/* Initialize ioarcb */
+	ioarcb->request_type = REQ_TYPE_HCAM;
+	ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+	ioarcb->cdb[0] = PMCRAID_HOST_CONTROLLED_ASYNC;
+	ioarcb->cdb[1] = type;
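+	/* cdb[7..8]: length of the HCAM response buffer, MSB first */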
+	ioarcb->cdb[7] = (rcb_size >> 8) & 0xFF;
+	ioarcb->cdb[8] = (rcb_size) & 0xFF;
+
+	ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
+
+	ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
+	ioadl[0].data_len = cpu_to_le32(rcb_size);
+	ioadl[0].address = cpu_to_le32(dma);
+
+	cmd->cmd_done = cmd_done;
+	return cmd;
+}
+
+/**
+ * pmcraid_send_hcam - Send an HCAM to IOA
+ * @pinstance: ioa config struct
+ * @type: HCAM type
+ *
+ * This function will send a Host Controlled Async command to IOA.
+ *
+ * Return value:
+ * 	none
+ */
+static void pmcraid_send_hcam(struct pmcraid_instance *pinstance, u8 type)
+{
+	struct pmcraid_cmd *cmd = pmcraid_init_hcam(pinstance, type);
+	pmcraid_send_hcam_cmd(cmd);
+}
+
+
+/**
+ * pmcraid_prepare_cancel_cmd - prepares a command block to abort another
+ *
+ * @cmd: pointer to cmd that is used as cancelling command
+ * @cmd_to_cancel: pointer to the command that needs to be cancelled
+ */
+static void pmcraid_prepare_cancel_cmd(
+	struct pmcraid_cmd *cmd,
+	struct pmcraid_cmd *cmd_to_cancel
+)
+{
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	__be64 ioarcb_addr = cmd_to_cancel->ioa_cb->ioarcb.ioarcb_bus_addr;
+
+	/* Get the resource handle to where the command to be aborted has been
+	 * sent.
+	 */
+	ioarcb->resource_handle = cmd_to_cancel->ioa_cb->ioarcb.resource_handle;
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+	ioarcb->cdb[0] = PMCRAID_ABORT_CMD;
+
+	/* IOARCB address of the command to be cancelled is given in
+	 * cdb[2]..cdb[9] in Big-Endian format. Note that length bits in
+	 * IOARCB address are not masked.
+	 */
+	ioarcb_addr = cpu_to_be64(ioarcb_addr);
+	memcpy(&(ioarcb->cdb[2]), &ioarcb_addr, sizeof(ioarcb_addr));
+}
+
+/**
+ * pmcraid_cancel_hcam - sends ABORT task to abort a given HCAM
+ *
+ * @cmd: command to be used as cancelling command
+ * @type: HCAM type
+ * @cmd_done: op done function for the cancelling command
+ */
+static void pmcraid_cancel_hcam(
+	struct pmcraid_cmd *cmd,
+	u8 type,
+	void (*cmd_done) (struct pmcraid_cmd *)
+)
+{
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_hostrcb  *hcam;
+
+	pinstance = cmd->drv_inst;
+	hcam =  (type == PMCRAID_HCAM_CODE_LOG_DATA) ?
+		&pinstance->ldn : &pinstance->ccn;
+
+	/* prepare for cancelling the previous hcam command. If the HCAM is
+	 * not currently pending with the IOA (hcam->cmd is NULL), there is
+	 * nothing to cancel
+	 */
+	if (hcam->cmd == NULL)
+		return;
+
+	pmcraid_prepare_cancel_cmd(cmd, hcam->cmd);
+
+	/* writing to IOARRIN must be protected by host_lock, as the mid-layer
+	 * may schedule queuecommand while we are doing this
+	 */
+	pmcraid_send_cmd(cmd, cmd_done,
+			 PMCRAID_INTERNAL_TIMEOUT,
+			 pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_cancel_ccn - cancel CCN HCAM already registered with IOA
+ *
+ * @cmd: command block to be used for cancelling the HCAM
+ */
+static void pmcraid_cancel_ccn(struct pmcraid_cmd *cmd)
+{
+	pmcraid_info("response for Cancel LDN CDB[0] = %x ioasc = %x\n",
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+
+	pmcraid_reinit_cmdblk(cmd);
+
+	pmcraid_cancel_hcam(cmd,
+			    PMCRAID_HCAM_CODE_CONFIG_CHANGE,
+			    pmcraid_ioa_shutdown);
+}
+
+/**
+ * pmcraid_cancel_ldn - cancel LDN HCAM already registered with IOA
+ *
+ * @cmd: command block to be used for cancelling the HCAM
+ */
+static void pmcraid_cancel_ldn(struct pmcraid_cmd *cmd)
+{
+	pmcraid_cancel_hcam(cmd,
+			    PMCRAID_HCAM_CODE_LOG_DATA,
+			    pmcraid_cancel_ccn);
+}
+
+/**
+ * pmcraid_expose_resource - check if the resource can be exposed to OS
+ *
+ * @cfgte: pointer to configuration table entry of the resource
+ *
+ * Return value:
+ * 	1 if the resource can be exposed to the mid-layer, 0 otherwise
+ */
+static int pmcraid_expose_resource(struct pmcraid_config_table_entry *cfgte)
+{
+	int retval = 0;
+
+	if (cfgte->resource_type == RES_TYPE_VSET)
+		retval = ((cfgte->unique_flags1 & 0xFF) < 0xFE);
+	else if (cfgte->resource_type == RES_TYPE_GSCSI)
+		retval = (RES_BUS(cfgte->resource_address) !=
+				PMCRAID_VIRTUAL_ENCL_BUS_ID);
+	return retval;
+}
+
+/* attributes supported by pmcraid_event_family */
+enum {
+	PMCRAID_AEN_ATTR_UNSPEC,
+	PMCRAID_AEN_ATTR_EVENT,
+	__PMCRAID_AEN_ATTR_MAX,
+};
+#define PMCRAID_AEN_ATTR_MAX (__PMCRAID_AEN_ATTR_MAX - 1)
+
+/* commands supported by pmcraid_event_family */
+enum {
+	PMCRAID_AEN_CMD_UNSPEC,
+	PMCRAID_AEN_CMD_EVENT,
+	__PMCRAID_AEN_CMD_MAX,
+};
+#define PMCRAID_AEN_CMD_MAX (__PMCRAID_AEN_CMD_MAX - 1)
+
+static struct genl_family pmcraid_event_family = {
+	.id = GENL_ID_GENERATE,
+	.name = "pmcraid",
+	.version = 1,
+	.maxattr = PMCRAID_AEN_ATTR_MAX
+};
+
+/**
+ * pmcraid_netlink_init - registers pmcraid_event_family
+ *
+ * Return value:
+ * 	0 if the pmcraid_event_family is successfully registered
+ * 	with netlink generic, non-zero otherwise
+ */
+static int pmcraid_netlink_init(void)
+{
+	int result;
+
+	result = genl_register_family(&pmcraid_event_family);
+
+	if (result)
+		return result;
+
+	pmcraid_info("registered NETLINK GENERIC group: %d\n",
+		     pmcraid_event_family.id);
+
+	return result;
+}
+
+/**
+ * pmcraid_netlink_release - unregisters pmcraid_event_family
+ *
+ * Return value:
+ * 	none
+ */
+static void pmcraid_netlink_release(void)
+{
+	genl_unregister_family(&pmcraid_event_family);
+}
+
+/**
+ * pmcraid_notify_aen - sends event msg to user space application
+ * @pinstance: pointer to adapter instance structure
+ * @type: HCAM type
+ *
+ * Return value:
+ *	0 if success, error value in case of any failure.
+ */
+static int pmcraid_notify_aen(struct pmcraid_instance *pinstance, u8 type)
+{
+	struct sk_buff *skb;
+	struct pmcraid_aen_msg *aen_msg;
+	void *msg_header;
+	int data_size, total_size;
+	int result;
+
+
+	if (type == PMCRAID_HCAM_CODE_LOG_DATA) {
+		aen_msg = pinstance->ldn.msg;
+		data_size = pinstance->ldn.hcam->data_len;
+	} else {
+		aen_msg = pinstance->ccn.msg;
+		data_size = pinstance->ccn.hcam->data_len;
+	}
+
+	data_size += sizeof(struct pmcraid_hcam_hdr);
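+	/* identify the adapter: SCSI host unique_id and char device minor */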
+	aen_msg->hostno = (pinstance->host->unique_id << 16 |
+			   MINOR(pinstance->cdev.dev));
+	aen_msg->length = data_size;
+	data_size += sizeof(*aen_msg);
+
+	total_size = nla_total_size(data_size);
+	skb = genlmsg_new(total_size, GFP_ATOMIC);
+
+
+	if (!skb) {
+		pmcraid_err("Failed to allocate aen data SKB of size: %x\n",
+			     total_size);
+		return -ENOMEM;
+	}
+
+	/* add the generic netlink message header */
+	msg_header = genlmsg_put(skb, 0, 0,
+				 &pmcraid_event_family, 0,
+				 PMCRAID_AEN_CMD_EVENT);
+	if (!msg_header) {
+		pmcraid_err("failed to copy command details\n");
+		nlmsg_free(skb);
+		return -ENOMEM;
+	}
+
+	result = nla_put(skb, PMCRAID_AEN_ATTR_EVENT, data_size, aen_msg);
+
+	if (result) {
+		pmcraid_err("failed to copy AEN attribute data\n");
+		nlmsg_free(skb);
+		return -EINVAL;
+	}
+
+	/* send generic netlink multicast message to notify applications */
+	result = genlmsg_end(skb, msg_header);
+
+	if (result < 0) {
+		pmcraid_err("genlmsg_end failed\n");
+		nlmsg_free(skb);
+		return result;
+	}
+
+	result =
+		genlmsg_multicast(skb, 0, pmcraid_event_family.id, GFP_ATOMIC);
+
+	/* If there are no listeners, genlmsg_multicast may return non-zero
+	 * value.
+	 */
+	if (result)
+		pmcraid_info("failed to send %s event message %x!\n",
+			type == PMCRAID_HCAM_CODE_LOG_DATA ? "LDN" : "CCN",
+			result);
+	return result;
+}
+
+/**
+ * pmcraid_handle_config_change - Handle a config change from the adapter
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value:
+ *  none
+ */
+static void pmcraid_handle_config_change(struct pmcraid_instance *pinstance)
+{
+	struct pmcraid_config_table_entry *cfg_entry;
+	struct pmcraid_hcam_ccn *ccn_hcam;
+	struct pmcraid_cmd *cmd;
+	struct pmcraid_cmd *cfgcmd;
+	struct pmcraid_resource_entry *res = NULL;
+	u32 new_entry = 1;
+	unsigned long lock_flags;
+	unsigned long host_lock_flags;
+	int rc;
+
+	ccn_hcam = (struct pmcraid_hcam_ccn *)pinstance->ccn.hcam;
+	cfg_entry = &ccn_hcam->cfg_entry;
+
+	pmcraid_info
+		("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
+		 pinstance->ccn.hcam->ilid,
+		 pinstance->ccn.hcam->op_code,
+		 pinstance->ccn.hcam->notification_type,
+		 pinstance->ccn.hcam->notification_lost,
+		 pinstance->ccn.hcam->flags,
+		 pinstance->host->unique_id,
+		 RES_IS_VSET(*cfg_entry) ? PMCRAID_VSET_BUS_ID :
+		 (RES_IS_GSCSI(*cfg_entry) ? PMCRAID_PHYS_BUS_ID :
+			RES_BUS(cfg_entry->resource_address)),
+		 RES_IS_VSET(*cfg_entry) ? cfg_entry->unique_flags1 :
+			RES_TARGET(cfg_entry->resource_address),
+		 RES_LUN(cfg_entry->resource_address));
+
+
+	/* If this HCAM indicates a lost notification, read the config table */
+	if (pinstance->ccn.hcam->notification_lost) {
+		cfgcmd = pmcraid_get_free_cmd(pinstance);
+		if (cfgcmd) {
+			pmcraid_info("lost CCN, reading config table\n");
+			pinstance->reinit_cfg_table = 1;
+			pmcraid_querycfg(cfgcmd);
+		} else {
+			pmcraid_err("lost CCN, no free cmd for querycfg\n");
+		}
+		goto out_notify_apps;
+	}
+
+	/* If this resource is not going to be added to mid-layer, just notify
+	 * applications and return
+	 */
+	if (!pmcraid_expose_resource(cfg_entry))
+		goto out_notify_apps;
+
+	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+	list_for_each_entry(res, &pinstance->used_res_q, queue) {
+		rc = memcmp(&res->cfg_entry.resource_address,
+			    &cfg_entry->resource_address,
+			    sizeof(cfg_entry->resource_address));
+		if (!rc) {
+			new_entry = 0;
+			break;
+		}
+	}
+
+	if (new_entry) {
+
+		/* If there are more resources than the driver can manage, do
+		 * not notify the applications about the CCN. Just ignore this
+		 * notification and re-register the same HCAM
+		 */
+		if (list_empty(&pinstance->free_res_q)) {
+			spin_unlock_irqrestore(&pinstance->resource_lock,
+						lock_flags);
+			pmcraid_err("too many resources attached\n");
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  host_lock_flags);
+			pmcraid_send_hcam(pinstance,
+					  PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       host_lock_flags);
+			return;
+		}
+
+		res = list_entry(pinstance->free_res_q.next,
+				 struct pmcraid_resource_entry, queue);
+
+		list_del(&res->queue);
+		res->scsi_dev = NULL;
+		res->reset_progress = 0;
+		list_add_tail(&res->queue, &pinstance->used_res_q);
+	}
+
+	memcpy(&res->cfg_entry, cfg_entry,
+		sizeof(struct pmcraid_config_table_entry));
+
+	if (pinstance->ccn.hcam->notification_type ==
+	    NOTIFICATION_TYPE_ENTRY_DELETED) {
+		if (res->scsi_dev) {
+			res->change_detected = RES_CHANGE_DEL;
+			res->cfg_entry.resource_handle =
+				PMCRAID_INVALID_RES_HANDLE;
+			schedule_work(&pinstance->worker_q);
+		} else {
+			/* This may be one of the non-exposed resources */
+			list_move_tail(&res->queue, &pinstance->free_res_q);
+		}
+	} else if (!res->scsi_dev) {
+		res->change_detected = RES_CHANGE_ADD;
+		schedule_work(&pinstance->worker_q);
+	}
+	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+
+out_notify_apps:
+
+	/* Notify configuration changes to registered applications.*/
+	if (!pmcraid_disable_aen)
+		pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+
+	cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+	if (cmd)
+		pmcraid_send_hcam_cmd(cmd);
+}
+
+/**
+ * pmcraid_get_error_info - return error information for an ioasc
+ * @ioasc: ioasc code
+ * Return Value
+ *	 pointer to the matching entry in pmcraid_ioasc_error_table or NULL
+ */
+static struct pmcraid_ioasc_error *pmcraid_get_error_info(u32 ioasc)
+{
+	int i;
+	for (i = 0; i < ARRAY_SIZE(pmcraid_ioasc_error_table); i++) {
+		if (pmcraid_ioasc_error_table[i].ioasc_code == ioasc)
+			return &pmcraid_ioasc_error_table[i];
+	}
+	return NULL;
+}
+
+/**
+ * pmcraid_ioasc_logger - log IOASC information based on user settings
+ * @ioasc: ioasc code
+ * @cmd: pointer to command that resulted in 'ioasc'
+ */
+void pmcraid_ioasc_logger(u32 ioasc, struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_ioasc_error *error_info = pmcraid_get_error_info(ioasc);
+
+	if (error_info == NULL ||
+		cmd->drv_inst->current_log_level < error_info->log_level)
+		return;
+
+	/* log the error string */
+	pmcraid_err("cmd [%d] for resource %x failed with %x(%s)\n",
+		cmd->ioa_cb->ioarcb.cdb[0],
+		cmd->ioa_cb->ioarcb.resource_handle,
+		le32_to_cpu(ioasc), error_info->error_string);
+}
+
+/**
+ * pmcraid_handle_error_log - Handle an error log notification from the IOA
+ *
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value:
+ *  none
+ */
+static void pmcraid_handle_error_log(struct pmcraid_instance *pinstance)
+{
+	struct pmcraid_hcam_ldn *hcam_ldn;
+	u32 ioasc;
+
+	hcam_ldn = (struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
+
+	pmcraid_info
+		("LDN(%x): %x type: %x lost: %x flags: %x overlay id: %x\n",
+		 pinstance->ldn.hcam->ilid,
+		 pinstance->ldn.hcam->op_code,
+		 pinstance->ldn.hcam->notification_type,
+		 pinstance->ldn.hcam->notification_lost,
+		 pinstance->ldn.hcam->flags,
+		 pinstance->ldn.hcam->overlay_id);
+
+	/* log only the errors, no need to log informational log entries */
+	if (pinstance->ldn.hcam->notification_type !=
+	    NOTIFICATION_TYPE_ERROR_LOG)
+		return;
+
+	if (pinstance->ldn.hcam->notification_lost ==
+	    HOSTRCB_NOTIFICATIONS_LOST)
+		dev_err(&pinstance->pdev->dev, "Error notifications lost\n");
+
+	ioasc = le32_to_cpu(hcam_ldn->error_log.fd_ioasc);
+
+	if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
+		ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER) {
+		dev_err(&pinstance->pdev->dev,
+			"UnitAttention due to IOA Bus Reset\n");
+		scsi_report_bus_reset(
+			pinstance->host,
+			RES_BUS(hcam_ldn->error_log.fd_ra));
+	}
+
+	return;
+}
+
+/**
+ * pmcraid_process_ccn - Op done function for a CCN.
+ * @cmd: pointer to command struct
+ *
+ * This function is the op done function for a configuration
+ * change notification
+ *
+ * Return value:
+ * none
+ */
+static void pmcraid_process_ccn(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+	unsigned long lock_flags;
+
+	pinstance->ccn.cmd = NULL;
+	pmcraid_return_cmd(cmd);
+
+	/* If driver initiated IOA reset happened while this hcam was pending
+	 * with IOA, or IOA bringdown sequence is in progress, no need to
+	 * re-register the hcam
+	 */
+	if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
+	    atomic_read(&pinstance->ccn.ignore) == 1) {
+		return;
+	} else if (ioasc) {
+		dev_err(&pinstance->pdev->dev,
+			"Host RCB (CCN) failed with IOASC: 0x%08X\n", ioasc);
+		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+		pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+	} else {
+		pmcraid_handle_config_change(pinstance);
+	}
+}
+
+/**
+ * pmcraid_process_ldn - op done function for an LDN
+ * @cmd: pointer to command block
+ *
+ * Return value
+ *   none
+ */
+static void pmcraid_initiate_reset(struct pmcraid_instance *);
+
+static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	struct pmcraid_hcam_ldn *ldn_hcam =
+			(struct pmcraid_hcam_ldn *)pinstance->ldn.hcam;
+	u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+	u32 fd_ioasc = le32_to_cpu(ldn_hcam->error_log.fd_ioasc);
+	unsigned long lock_flags;
+
+	/* return the command block back to freepool */
+	pinstance->ldn.cmd = NULL;
+	pmcraid_return_cmd(cmd);
+
+	/* If driver initiated IOA reset happened while this hcam was pending
+	 * with IOA, no need to re-register the hcam as reset engine will do it
+	 * once reset sequence is complete
+	 */
+	if (ioasc == PMCRAID_IOASC_IOA_WAS_RESET ||
+	    atomic_read(&pinstance->ccn.ignore) == 1) {
+		return;
+	} else if (!ioasc) {
+		pmcraid_handle_error_log(pinstance);
+		if (fd_ioasc == PMCRAID_IOASC_NR_IOA_RESET_REQUIRED) {
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  lock_flags);
+			pmcraid_initiate_reset(pinstance);
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       lock_flags);
+			return;
+		}
+	} else {
+		dev_err(&pinstance->pdev->dev,
+			"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
+	}
+	/* send netlink message for HCAM notification if enabled */
+	if (!pmcraid_disable_aen)
+		pmcraid_notify_aen(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+
+	cmd = pmcraid_init_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+	if (cmd)
+		pmcraid_send_hcam_cmd(cmd);
+}
+
+/**
+ * pmcraid_register_hcams - register HCAMs for CCN and LDN
+ *
+ * @pinstance: pointer per adapter instance structure
+ *
+ * Return Value
+ *   none
+ */
+static void pmcraid_register_hcams(struct pmcraid_instance *pinstance)
+{
+	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_CONFIG_CHANGE);
+	pmcraid_send_hcam(pinstance, PMCRAID_HCAM_CODE_LOG_DATA);
+}
+
+/**
+ * pmcraid_unregister_hcams - cancel HCAMs registered already
+ * @cmd: pointer to command used as part of reset sequence
+ */
+static void pmcraid_unregister_hcams(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+
+	/* During IOA bringdown, an HCAM may get fired and the tasklet would
+	 * proceed to handle its response even though that is not necessary.
+	 * To prevent this, set 'ignore' so that the bring-down sequence
+	 * doesn't re-send any more hcams
+	 */
+	atomic_set(&pinstance->ccn.ignore, 1);
+	atomic_set(&pinstance->ldn.ignore, 1);
+
+	/* If adapter reset was forced as part of runtime reset sequence,
+	 * start the reset sequence.
+	 */
+	if (pinstance->force_ioa_reset && !pinstance->ioa_bringdown) {
+		pinstance->force_ioa_reset = 0;
+		pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+		pmcraid_reset_alert(cmd);
+		return;
+	}
+
+	/* Driver tries to cancel HCAMs by sending ABORT TASK for each HCAM
+	 * one after the other. So CCN cancellation will be triggered by
+	 * pmcraid_cancel_ldn itself.
+	 */
+	pmcraid_cancel_ldn(cmd);
+}
+
+/**
+ * pmcraid_reset_enable_ioa - re-enable IOA after a hard reset
+ * @pinstance: pointer to adapter instance structure
+ * Return Value
+ *  1 if TRANSITION_TO_OPERATIONAL is active, otherwise 0
+ */
+static void pmcraid_reinit_buffers(struct pmcraid_instance *);
+
+static int pmcraid_reset_enable_ioa(struct pmcraid_instance *pinstance)
+{
+	u32 intrs;
+
+	pmcraid_reinit_buffers(pinstance);
+	intrs = pmcraid_read_interrupts(pinstance);
+
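+	/* re-enable interrupts; if transition-to-operational is already
+	 * pending, mask and clear that interrupt below
+	 */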
+	pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+	if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
+		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+			pinstance->int_regs.ioa_host_interrupt_mask_reg);
+		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+			pinstance->int_regs.ioa_host_interrupt_clr_reg);
+		return 1;
+	} else {
+		return 0;
+	}
+}
+
+/**
+ * pmcraid_soft_reset - performs a soft reset and makes IOA become ready
+ * @cmd : pointer to reset command block
+ *
+ * Return Value
+ *	none
+ */
+static void pmcraid_soft_reset(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u32 int_reg;
+	u32 doorbell;
+
+	/* There will be an interrupt when the Transition to Operational bit
+	 * is set, so the tasklet will execute the next reset task. If that
+	 * interrupt never arrives, the timeout handler re-initiates the reset
+	 */
+	cmd->cmd_done = pmcraid_ioa_reset;
+	cmd->timer.data = (unsigned long)cmd;
+	cmd->timer.expires = jiffies +
+			     msecs_to_jiffies(PMCRAID_TRANSOP_TIMEOUT);
+	cmd->timer.function = (void (*)(unsigned long))pmcraid_timeout_handler;
+
+	if (!timer_pending(&cmd->timer))
+		add_timer(&cmd->timer);
+
+	/* Enable destructive diagnostics on IOA if it is not yet in
+	 * operational state
+	 */
+	doorbell = DOORBELL_RUNTIME_RESET |
+		   DOORBELL_ENABLE_DESTRUCTIVE_DIAGS;
+
+	iowrite32(doorbell, pinstance->int_regs.host_ioa_interrupt_reg);
+	int_reg = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+	pmcraid_info("Waiting for IOA to become operational %x:%x\n",
+		     ioread32(pinstance->int_regs.host_ioa_interrupt_reg),
+		     int_reg);
+}
+
+/**
+ * pmcraid_get_dump - retrieves IOA dump in case of Unit Check interrupt
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ *	none
+ */
+static void pmcraid_get_dump(struct pmcraid_instance *pinstance)
+{
+	pmcraid_info("%s is not yet implemented\n", __func__);
+}
+
+/**
+ * pmcraid_fail_outstanding_cmds - Fails all outstanding ops.
+ * @pinstance: pointer to adapter instance structure
+ *
+ * This function fails all outstanding ops. If they are submitted to IOA
+ * already, it sends cancel all messages if IOA is still accepting IOARCBs,
+ * otherwise just completes the commands and returns the cmd blocks to free
+ * pool.
+ *
+ * Return value:
+ *	 none
+ */
+static void pmcraid_fail_outstanding_cmds(struct pmcraid_instance *pinstance)
+{
+	struct pmcraid_cmd *cmd, *temp;
+	unsigned long lock_flags;
+
+	/* pending command list is protected by pending_pool_lock. Its
+	 * traversal must be done while holding this lock
+	 */
+	spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+	list_for_each_entry_safe(cmd, temp, &pinstance->pending_cmd_pool,
+				 free_list) {
+		list_del(&cmd->free_list);
+		spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+					lock_flags);
+		cmd->ioa_cb->ioasa.ioasc =
+			cpu_to_le32(PMCRAID_IOASC_IOA_WAS_RESET);
+		cmd->ioa_cb->ioasa.ilid =
+			cpu_to_be32(PMCRAID_DRIVER_ILID);
+
+		/* In case the command timer is still running */
+		del_timer(&cmd->timer);
+
+		/* If this is an IO command, complete it by invoking scsi_done
+		 * function. If this is one of the internal commands other
+		 * than pmcraid_ioa_reset and HCAM commands invoke cmd_done to
+		 * complete it
+		 */
+		if (cmd->scsi_cmd) {
+
+			struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+			__le32 resp = cmd->ioa_cb->ioarcb.response_handle;
+
+			scsi_cmd->result |= DID_ERROR << 16;
+
+			scsi_dma_unmap(scsi_cmd);
+			pmcraid_return_cmd(cmd);
+
+
+			pmcraid_info("failing(%d) CDB[0] = %x result: %x\n",
+				     le32_to_cpu(resp) >> 2,
+				     cmd->ioa_cb->ioarcb.cdb[0],
+				     scsi_cmd->result);
+			scsi_cmd->scsi_done(scsi_cmd);
+		} else if (cmd->cmd_done == pmcraid_internal_done ||
+			   cmd->cmd_done == pmcraid_erp_done) {
+			cmd->cmd_done(cmd);
+		} else if (cmd->cmd_done != pmcraid_ioa_reset) {
+			pmcraid_return_cmd(cmd);
+		}
+
+		atomic_dec(&pinstance->outstanding_cmds);
+		spin_lock_irqsave(&pinstance->pending_pool_lock, lock_flags);
+	}
+
+	spin_unlock_irqrestore(&pinstance->pending_pool_lock, lock_flags);
+}
+
+/**
+ * pmcraid_ioa_reset - Implementation of IOA reset logic
+ *
+ * @cmd: pointer to the cmd block to be used for entire reset process
+ *
+ * This function executes most of the steps required for IOA reset. This gets
+ * called by user threads (modprobe/insmod/rmmod), timer, tasklet and the
+ * mid-layer's 'eh_' thread. Access to variables used for controlling the
+ * reset sequence is synchronized using the host lock. Various functions
+ * called during the reset process
+ * would make use of a single command block, pointer to which is also stored in
+ * adapter instance structure.
+ *
+ * Return Value
+ *	 None
+ */
+static void pmcraid_ioa_reset(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	u8 reset_complete = 0;
+
+	pinstance->ioa_reset_in_progress = 1;
+
+	if (pinstance->reset_cmd != cmd) {
+		pmcraid_err("reset is called with different command block\n");
+		pinstance->reset_cmd = cmd;
+	}
+
+	pmcraid_info("reset_engine: state = %d, command = %p\n",
+		      pinstance->ioa_state, cmd);
+
+	switch (pinstance->ioa_state) {
+
+	case IOA_STATE_DEAD:
+		/* If IOA is offline, whatever may be the reset reason, just
+		 * return. Callers might be waiting on the reset wait_q; wake
+		 * them up
+		 */
+		pmcraid_err("IOA is offline no reset is possible\n");
+		reset_complete = 1;
+		break;
+
+	case IOA_STATE_IN_BRINGDOWN:
+		/* we enter here once the ioa shutdown command is processed by
+		 * the IOA. Alert the IOA for a possible reset; if reset alert
+		 * fails, IOA goes through hard-reset
+		 */
+		pmcraid_disable_interrupts(pinstance, ~0);
+		pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+		pmcraid_reset_alert(cmd);
+		break;
+
+	case IOA_STATE_UNKNOWN:
+		/* We may be called during probe or resume. Some pre-processing
+		 * is required prior to the reset
+		 */
+		scsi_block_requests(pinstance->host);
+
+		/* If asked to reset while IOA was processing responses or
+		 * there are any error responses then IOA may require
+		 * hard-reset.
+		 */
+		if (pinstance->ioa_hard_reset == 0) {
+			if (ioread32(pinstance->ioa_status) &
+			    INTRS_TRANSITION_TO_OPERATIONAL) {
+				pmcraid_info("sticky bit set, bring-up\n");
+				pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+				pmcraid_reinit_cmdblk(cmd);
+				pmcraid_identify_hrrq(cmd);
+			} else {
+				pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
+				pmcraid_soft_reset(cmd);
+			}
+		} else {
+			/* Alert IOA of a possible reset and wait for critical
+			 * operation in progress bit to reset
+			 */
+			pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+			pmcraid_reset_alert(cmd);
+		}
+		break;
+
+	case IOA_STATE_IN_RESET_ALERT:
+		/* If the critical operation in progress bit is reset or the
+		 * wait times out, reset proceeds with starting BIST on the
+		 * IOA. The hard-reset state keeps a count of reset attempts;
+		 * once it exceeds the maximum, the reset engine marks the IOA
+		 * dead and returns
+		 */
+		pinstance->ioa_state = IOA_STATE_IN_HARD_RESET;
+		pmcraid_start_bist(cmd);
+		break;
+
+	case IOA_STATE_IN_HARD_RESET:
+		pinstance->ioa_reset_attempts++;
+
+		/* retry reset if we haven't reached maximum allowed limit */
+		if (pinstance->ioa_reset_attempts > PMCRAID_RESET_ATTEMPTS) {
+			pinstance->ioa_reset_attempts = 0;
+			pmcraid_err("IOA didn't respond marking it as dead\n");
+			pinstance->ioa_state = IOA_STATE_DEAD;
+			reset_complete = 1;
+			break;
+		}
+
+		/* Once either bist or pci reset is done, restore PCI config
+		 * space. If this fails, proceed with hard reset again
+		 */
+
+		if (pci_restore_state(pinstance->pdev)) {
+			pmcraid_info("config-space error resetting again\n");
+			pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+			pmcraid_reset_alert(cmd);
+			break;
+		}
+
+		/* fail all pending commands */
+		pmcraid_fail_outstanding_cmds(pinstance);
+
+		/* check if unit check is active, if so extract dump */
+		if (pinstance->ioa_unit_check) {
+			pmcraid_info("unit check is active\n");
+			pinstance->ioa_unit_check = 0;
+			pmcraid_get_dump(pinstance);
+			pinstance->ioa_reset_attempts--;
+			pinstance->ioa_state = IOA_STATE_IN_RESET_ALERT;
+			pmcraid_reset_alert(cmd);
+			break;
+		}
+
+		/* if the reset reason is to bring down the ioa, we are done
+		 * with the reset once pci config space is restored; complete
+		 * the reset
+		 */
+		if (pinstance->ioa_bringdown) {
+			pmcraid_info("bringing down the adapter\n");
+			pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+			pinstance->ioa_bringdown = 0;
+			pinstance->ioa_state = IOA_STATE_UNKNOWN;
+			reset_complete = 1;
+		} else {
+			/* bring up the IOA, so proceed with soft reset.
+			 * Reinitialize hrrq_buffers and their indices, and
+			 * also enable interrupts after a pci_restore_state
+			 */
+			if (pmcraid_reset_enable_ioa(pinstance)) {
+				pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+				pmcraid_info("bringing up the adapter\n");
+				pmcraid_reinit_cmdblk(cmd);
+				pmcraid_identify_hrrq(cmd);
+			} else {
+				pinstance->ioa_state = IOA_STATE_IN_SOFT_RESET;
+				pmcraid_soft_reset(cmd);
+			}
+		}
+		break;
+
+	case IOA_STATE_IN_SOFT_RESET:
+		/* TRANSITION TO OPERATIONAL is on so start initialization
+		 * sequence
+		 */
+		pmcraid_info("In softreset proceeding with bring-up\n");
+		pinstance->ioa_state = IOA_STATE_IN_BRINGUP;
+
+		/* Initialization commands start with HRRQ identification. From
+		 * now on tasklet completes most of the commands as IOA is up
+		 * and intrs are enabled
+		 */
+		pmcraid_identify_hrrq(cmd);
+		break;
+
+	case IOA_STATE_IN_BRINGUP:
+		/* we are done with bringing up of IOA, change the ioa_state to
+		 * operational and wake up any waiters
+		 */
+		pinstance->ioa_state = IOA_STATE_OPERATIONAL;
+		reset_complete = 1;
+		break;
+
+	case IOA_STATE_OPERATIONAL:
+	default:
+		/* When IOA is operational and a reset is requested, check for
+		 * the reset reason. If reset is to bring down IOA, unregister
+		 * HCAMs and initiate shutdown; if adapter reset is forced then
+		 * restart reset sequence again
+		 */
+		if (pinstance->ioa_shutdown_type == SHUTDOWN_NONE &&
+		    pinstance->force_ioa_reset == 0) {
+			reset_complete = 1;
+		} else {
+			if (pinstance->ioa_shutdown_type != SHUTDOWN_NONE)
+				pinstance->ioa_state = IOA_STATE_IN_BRINGDOWN;
+			pmcraid_reinit_cmdblk(cmd);
+			pmcraid_unregister_hcams(cmd);
+		}
+		break;
+	}
+
+	/* reset will be completed if ioa_state is either DEAD or UNKNOWN or
+	 * OPERATIONAL. Reset all control variables used during reset, wake up
+	 * any waiting threads and let the SCSI mid-layer send commands. Note
+	 * that host_lock must be held before invoking scsi_report_bus_reset.
+	 */
+	if (reset_complete) {
+		pinstance->ioa_reset_in_progress = 0;
+		pinstance->ioa_reset_attempts = 0;
+		pinstance->reset_cmd = NULL;
+		pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+		pinstance->ioa_bringdown = 0;
+		pmcraid_return_cmd(cmd);
+
+		/* If target state is to bring up the adapter, proceed with
+		 * hcam registration and resource exposure to mid-layer.
+		 */
+		if (pinstance->ioa_state == IOA_STATE_OPERATIONAL)
+			pmcraid_register_hcams(pinstance);
+
+		wake_up_all(&pinstance->reset_wait_q);
+	}
+
+	return;
+}
+
+/**
+ * pmcraid_initiate_reset - initiates reset sequence. This is called from
+ * ISR/tasklet during error interrupts including IOA unit check. If reset
+ * is already in progress, it just returns, otherwise initiates IOA reset
+ * to bring IOA up to operational state.
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ *	 none
+ */
+static void pmcraid_initiate_reset(struct pmcraid_instance *pinstance)
+{
+	struct pmcraid_cmd *cmd;
+
+	/* If the reset is already in progress, just return, otherwise start
+	 * reset sequence and return
+	 */
+	if (!pinstance->ioa_reset_in_progress) {
+		scsi_block_requests(pinstance->host);
+		cmd = pmcraid_get_free_cmd(pinstance);
+
+		if (cmd == NULL) {
+			pmcraid_err("no cmnd blocks for initiate_reset\n");
+			return;
+		}
+
+		pinstance->ioa_shutdown_type = SHUTDOWN_NONE;
+		pinstance->reset_cmd = cmd;
+		pinstance->force_ioa_reset = 1;
+		pmcraid_ioa_reset(cmd);
+	}
+}
+
+/**
+ * pmcraid_reset_reload - utility routine for doing IOA reset either to bringup
+ *			  or bringdown IOA
+ * @pinstance: pointer to adapter instance structure
+ * @shutdown_type: shutdown type to be used: NONE, NORMAL or ABBREV
+ * @target_state: expected target state after reset
+ *
+ * Note: This function initiates reset and waits for its completion. Hence this
+ * should not be called from isr/timer/tasklet functions (timeout handlers,
+ * error response handlers and interrupt handlers).
+ *
+ * Return Value
+ *	 1 in case ioa_state is not target_state, 0 otherwise.
+ */
+static int pmcraid_reset_reload(
+	struct pmcraid_instance *pinstance,
+	u8 shutdown_type,
+	u8 target_state
+)
+{
+	struct pmcraid_cmd *reset_cmd = NULL;
+	unsigned long lock_flags;
+	int reset = 1;
+
+	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+
+	if (pinstance->ioa_reset_in_progress) {
+		pmcraid_info("reset_reload: reset is already in progress\n");
+
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+		wait_event(pinstance->reset_wait_q,
+			   !pinstance->ioa_reset_in_progress);
+
+		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+
+		if (pinstance->ioa_state == IOA_STATE_DEAD) {
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       lock_flags);
+			pmcraid_info("reset_reload: IOA is dead\n");
+			return reset;
+		} else if (pinstance->ioa_state == target_state) {
+			reset = 0;
+		}
+	}
+
+	if (reset) {
+		pmcraid_info("reset_reload: proceeding with reset\n");
+		scsi_block_requests(pinstance->host);
+		reset_cmd = pmcraid_get_free_cmd(pinstance);
+
+		if (reset_cmd == NULL) {
+			pmcraid_err("no free cmnd for reset_reload\n");
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       lock_flags);
+			return reset;
+		}
+
+		if (shutdown_type == SHUTDOWN_NORMAL)
+			pinstance->ioa_bringdown = 1;
+
+		pinstance->ioa_shutdown_type = shutdown_type;
+		pinstance->reset_cmd = reset_cmd;
+		pinstance->force_ioa_reset = reset;
+		pmcraid_info("reset_reload: initiating reset\n");
+		pmcraid_ioa_reset(reset_cmd);
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+		pmcraid_info("reset_reload: waiting for reset to complete\n");
+		wait_event(pinstance->reset_wait_q,
+			   !pinstance->ioa_reset_in_progress);
+
+		pmcraid_info("reset_reload: reset is complete !! \n");
+		scsi_unblock_requests(pinstance->host);
+		if (pinstance->ioa_state == target_state)
+			reset = 0;
+	}
+
+	return reset;
+}
+
+/**
+ * pmcraid_reset_bringdown - wrapper over pmcraid_reset_reload to bringdown IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ *	 whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringdown(struct pmcraid_instance *pinstance)
+{
+	return pmcraid_reset_reload(pinstance,
+				    SHUTDOWN_NORMAL,
+				    IOA_STATE_UNKNOWN);
+}
+
+/**
+ * pmcraid_reset_bringup - wrapper over pmcraid_reset_reload to bring up IOA
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return Value
+ *	 whatever is returned from pmcraid_reset_reload
+ */
+static int pmcraid_reset_bringup(struct pmcraid_instance *pinstance)
+{
+	return pmcraid_reset_reload(pinstance,
+				    SHUTDOWN_NONE,
+				    IOA_STATE_OPERATIONAL);
+}
+
+/**
+ * pmcraid_request_sense - Send request sense to a device
+ * @cmd: pmcraid command struct
+ *
+ * This function sends a request sense to a device as a result of a check
+ * condition. This method re-uses the same command block that failed earlier.
+ */
+static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+	/* allocate DMAable memory for sense buffers */
+	cmd->sense_buffer = pci_alloc_consistent(cmd->drv_inst->pdev,
+						 SCSI_SENSE_BUFFERSIZE,
+						 &cmd->sense_buffer_dma);
+
+	if (cmd->sense_buffer == NULL) {
+		pmcraid_err
+			("couldn't allocate sense buffer for request sense\n");
+		pmcraid_erp_done(cmd);
+		return;
+	}
+
+	/* re-use the command block */
+	memset(&cmd->ioa_cb->ioasa, 0, sizeof(struct pmcraid_ioasa));
+	memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+	ioarcb->request_flags0 = (SYNC_COMPLETE |
+				  NO_LINK_DESCS |
+				  INHIBIT_UL_CHECK);
+	ioarcb->request_type = REQ_TYPE_SCSI;
+	ioarcb->cdb[0] = REQUEST_SENSE;
+	ioarcb->cdb[4] = SCSI_SENSE_BUFFERSIZE;
+
+	ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+					offsetof(struct pmcraid_ioarcb,
+						add_data.u.ioadl[0]));
+	ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+
+	ioarcb->data_transfer_length = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+
+	ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
+	ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
+	ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+
+	/* request sense might be called as part of error response processing,
+	 * which runs in tasklet context. It is possible that the mid-layer
+	 * might schedule queuecommand during this time, hence, writing to
+	 * IOARRIN must be protected by host_lock
+	 */
+	pmcraid_send_cmd(cmd, pmcraid_erp_done,
+			 PMCRAID_REQUEST_SENSE_TIMEOUT,
+			 pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_cancel_all - cancel all outstanding IOARCBs as part of error recovery
+ * @cmd: command that failed
+ * @sense: true if sense data is already available and request_sense is not needed
+ *
+ * This function sends a cancel all to a device to clear the queue.
+ */
+static void pmcraid_cancel_all(struct pmcraid_cmd *cmd, u32 sense)
+{
+	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
+	void (*cmd_done) (struct pmcraid_cmd *) = sense ? pmcraid_erp_done
+							: pmcraid_request_sense;
+
+	memset(ioarcb->cdb, 0, PMCRAID_MAX_CDB_LEN);
+	ioarcb->request_flags0 = SYNC_OVERRIDE;
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	ioarcb->cdb[0] = PMCRAID_CANCEL_ALL_REQUESTS;
+
+	if (RES_IS_GSCSI(res->cfg_entry))
+		ioarcb->cdb[1] = PMCRAID_SYNC_COMPLETE_AFTER_CANCEL;
+
+	ioarcb->ioadl_bus_addr = 0;
+	ioarcb->ioadl_length = 0;
+	ioarcb->data_transfer_length = 0;
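+	/* clear the length bits encoded in the low bits of the IOARCB address */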
+	ioarcb->ioarcb_bus_addr &= (~0x1FULL);
+
+	/* writing to IOARRIN must be protected by host_lock, as the mid-layer
+	 * may schedule queuecommand while we are doing this
+	 */
+	pmcraid_send_cmd(cmd, cmd_done,
+			 PMCRAID_REQUEST_SENSE_TIMEOUT,
+			 pmcraid_timeout_handler);
+}
+
+/**
+ * pmcraid_frame_auto_sense - frame sense data from the IOASC/IOASA information
+ *
+ * @cmd: pointer to failing command block
+ *
+ * Return value
+ *  none
+ */
+static void pmcraid_frame_auto_sense(struct pmcraid_cmd *cmd)
+{
+	u8 *sense_buf = cmd->scsi_cmd->sense_buffer;
+	struct pmcraid_resource_entry *res = cmd->scsi_cmd->device->hostdata;
+	struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
+	u32 ioasc = le32_to_cpu(ioasa->ioasc);
+	u32 failing_lba = 0;
+
+	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
+	cmd->scsi_cmd->result = SAM_STAT_CHECK_CONDITION;
+
+	if (RES_IS_VSET(res->cfg_entry) &&
+	    ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC &&
+	    ioasa->u.vset.failing_lba_hi != 0) {
+
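+		/* descriptor format sense data (0x72) with an information
+		 * descriptor carrying the 64-bit failing LBA
+		 */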
+		sense_buf[0] = 0x72;
+		sense_buf[1] = PMCRAID_IOASC_SENSE_KEY(ioasc);
+		sense_buf[2] = PMCRAID_IOASC_SENSE_CODE(ioasc);
+		sense_buf[3] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
+
+		sense_buf[7] = 12;
+		sense_buf[8] = 0;
+		sense_buf[9] = 0x0A;
+		sense_buf[10] = 0x80;
+
+		failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_hi);
+
+		sense_buf[12] = (failing_lba & 0xff000000) >> 24;
+		sense_buf[13] = (failing_lba & 0x00ff0000) >> 16;
+		sense_buf[14] = (failing_lba & 0x0000ff00) >> 8;
+		sense_buf[15] = failing_lba & 0x000000ff;
+
+		failing_lba = le32_to_cpu(ioasa->u.vset.failing_lba_lo);
+
+		sense_buf[16] = (failing_lba & 0xff000000) >> 24;
+		sense_buf[17] = (failing_lba & 0x00ff0000) >> 16;
+		sense_buf[18] = (failing_lba & 0x0000ff00) >> 8;
+		sense_buf[19] = failing_lba & 0x000000ff;
+	} else {
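+		/* fixed format sense data (0x70) */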
+		sense_buf[0] = 0x70;
+		sense_buf[2] = PMCRAID_IOASC_SENSE_KEY(ioasc);
+		sense_buf[12] = PMCRAID_IOASC_SENSE_CODE(ioasc);
+		sense_buf[13] = PMCRAID_IOASC_SENSE_QUAL(ioasc);
+
+		if (ioasc == PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC) {
+			if (RES_IS_VSET(res->cfg_entry))
+				failing_lba =
+					le32_to_cpu(ioasa->u.
+						 vset.failing_lba_lo);
+			sense_buf[0] |= 0x80;
+			sense_buf[3] = (failing_lba >> 24) & 0xff;
+			sense_buf[4] = (failing_lba >> 16) & 0xff;
+			sense_buf[5] = (failing_lba >> 8) & 0xff;
+			sense_buf[6] = failing_lba & 0xff;
+		}
+
+		sense_buf[7] = 6; /* additional length */
+	}
+}
+
+/**
+ * pmcraid_error_handler - Error response handlers for a SCSI op
+ * @cmd: pointer to pmcraid_cmd that has failed
+ *
+ * This function determines whether or not to initiate ERP on the affected
+ * device. This is called from a tasklet, which doesn't hold any locks.
+ *
+ * Return value:
+ *	 0 if the caller can complete the request; otherwise 1, in which case
+ *	 the error handler itself completes the request and returns the
+ *	 command block back to the free-pool
+ */
+static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
+{
+	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+	struct pmcraid_resource_entry *res = scsi_cmd->device->hostdata;
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	struct pmcraid_ioasa *ioasa = &cmd->ioa_cb->ioasa;
+	u32 ioasc = le32_to_cpu(ioasa->ioasc);
+	u32 masked_ioasc = ioasc & PMCRAID_IOASC_SENSE_MASK;
+	u32 sense_copied = 0;
+
+	if (!res) {
+		pmcraid_info("resource pointer is NULL\n");
+		return 0;
+	}
+
+	/* If this was a SCSI read/write command keep count of errors */
+	if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
+		atomic_inc(&res->read_failures);
+	else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
+		atomic_inc(&res->write_failures);
+
+	if (!RES_IS_GSCSI(res->cfg_entry) &&
+		masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
+		pmcraid_frame_auto_sense(cmd);
+	}
+
+	/* Log IOASC/IOASA information based on user settings */
+	pmcraid_ioasc_logger(ioasc, cmd);
+
+	switch (masked_ioasc) {
+
+	case PMCRAID_IOASC_AC_TERMINATED_BY_HOST:
+		scsi_cmd->result |= (DID_ABORT << 16);
+		break;
+
+	case PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE:
+	case PMCRAID_IOASC_HW_CANNOT_COMMUNICATE:
+		scsi_cmd->result |= (DID_NO_CONNECT << 16);
+		break;
+
+	case PMCRAID_IOASC_NR_SYNC_REQUIRED:
+		res->sync_reqd = 1;
+		scsi_cmd->result |= (DID_IMM_RETRY << 16);
+		break;
+
+	case PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC:
+		scsi_cmd->result |= (DID_PASSTHROUGH << 16);
+		break;
+
+	case PMCRAID_IOASC_UA_BUS_WAS_RESET:
+	case PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER:
+		if (!res->reset_progress)
+			scsi_report_bus_reset(pinstance->host,
+					      scsi_cmd->device->channel);
+		scsi_cmd->result |= (DID_ERROR << 16);
+		break;
+
+	case PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR:
+		scsi_cmd->result |= PMCRAID_IOASC_SENSE_STATUS(ioasc);
+		res->sync_reqd = 1;
+
+		/* if check condition is not active, return with the error;
+		 * otherwise get/frame the sense buffer
+		 */
+		if (PMCRAID_IOASC_SENSE_STATUS(ioasc) !=
+		    SAM_STAT_CHECK_CONDITION &&
+		    PMCRAID_IOASC_SENSE_STATUS(ioasc) != SAM_STAT_ACA_ACTIVE)
+			return 0;
+
+		/* If we have auto sense data as part of IOASA pass it to
+		 * mid-layer
+		 */
+		if (ioasa->auto_sense_length != 0) {
+			short sense_len = ioasa->auto_sense_length;
+			int data_size = min_t(u16, le16_to_cpu(sense_len),
+					      SCSI_SENSE_BUFFERSIZE);
+
+			memcpy(scsi_cmd->sense_buffer,
+			       ioasa->sense_data,
+			       data_size);
+			sense_copied = 1;
+		}
+
+		if (RES_IS_GSCSI(res->cfg_entry)) {
+			pmcraid_cancel_all(cmd, sense_copied);
+		} else if (sense_copied) {
+			pmcraid_erp_done(cmd);
+			return 0;
+		} else  {
+			pmcraid_request_sense(cmd);
+		}
+
+		return 1;
+
+	case PMCRAID_IOASC_NR_INIT_CMD_REQUIRED:
+		break;
+
+	default:
+		if (PMCRAID_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
+			scsi_cmd->result |= (DID_ERROR << 16);
+		break;
+	}
+	return 0;
+}
+
+/**
+ * pmcraid_reset_device - device reset handler functions
+ *
+ * @scsi_cmd: scsi command struct
+ * @timeout: command timeout to be used for the reset sequence
+ * @modifier: reset modifier indicating the reset sequence to be performed
+ *
+ * This function issues a device reset to the affected device.
+ * A LUN reset will be sent to the device first. If that does
+ * not work, a target reset will be sent.
+ *
+ * Return value:
+ *	SUCCESS / FAILED
+ */
+static int pmcraid_reset_device(
+	struct scsi_cmnd *scsi_cmd,
+	unsigned long timeout,
+	u8 modifier
+)
+{
+	struct pmcraid_cmd *cmd;
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_resource_entry *res;
+	struct pmcraid_ioarcb *ioarcb;
+	unsigned long lock_flags;
+	u32 ioasc;
+
+	pinstance =
+		(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+	res = scsi_cmd->device->hostdata;
+
+	if (!res) {
+		pmcraid_err("reset_device: NULL resource pointer\n");
+		return FAILED;
+	}
+
+	/* If adapter is currently going through reset/reload, return failed.
+	 * This will force the mid-layer to call _eh_bus/host reset, which
+	 * will then go to sleep and wait for the reset to complete
+	 */
+	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+	if (pinstance->ioa_reset_in_progress ||
+	    pinstance->ioa_state == IOA_STATE_DEAD) {
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+		return FAILED;
+	}
+
+	res->reset_progress = 1;
+	pmcraid_info("Resetting %s resource with addr %x\n",
+		     ((modifier & RESET_DEVICE_LUN) ? "LUN" :
+		     ((modifier & RESET_DEVICE_TARGET) ? "TARGET" : "BUS")),
+		     le32_to_cpu(res->cfg_entry.resource_address));
+
+	/* get a free cmd block */
+	cmd = pmcraid_get_free_cmd(pinstance);
+
+	if (cmd == NULL) {
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+		pmcraid_err("%s: no cmd blocks are available\n", __func__);
+		return FAILED;
+	}
+
+	ioarcb = &cmd->ioa_cb->ioarcb;
+	ioarcb->resource_handle = res->cfg_entry.resource_handle;
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	ioarcb->cdb[0] = PMCRAID_RESET_DEVICE;
+
+	/* Initialize reset modifier bits */
+	if (modifier)
+		modifier = ENABLE_RESET_MODIFIER | modifier;
+
+	ioarcb->cdb[1] = modifier;
+
+	init_completion(&cmd->wait_for_completion);
+	cmd->completion_req = 1;
+
+	pmcraid_info("cmd(CDB[0] = %x) for %x with index = %d\n",
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle),
+		     le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2);
+
+	pmcraid_send_cmd(cmd,
+			 pmcraid_internal_done,
+			 timeout,
+			 pmcraid_timeout_handler);
+
+	spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+	/* RESET_DEVICE command completes after all pending IOARCBs are
+	 * completed. Once this command is completed, pmcraid_internal_done
+	 * will wake up the 'completion' queue.
+	 */
+	wait_for_completion(&cmd->wait_for_completion);
+
+	/* complete the command here itself and return the command block
+	 * to free list
+	 */
+	pmcraid_return_cmd(cmd);
+	res->reset_progress = 0;
+	ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+
+	/* set the return value based on the returned ioasc */
+	return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
+}
+
+/**
+ * _pmcraid_io_done - helper for pmcraid_io_done function
+ *
+ * @cmd: pointer to pmcraid command struct
+ * @reslen: residual data length to be set in the ioasa
+ * @ioasc: ioasc either returned by IOA or set by driver itself.
+ *
+ * This function is invoked by pmcraid_io_done to complete mid-layer
+ * scsi ops.
+ *
+ * Return value:
+ *	  0 if caller is required to return it to free_pool. Returns 1 if
+ *	  caller need not worry about freeing command block as error handler
+ *	  will take care of that.
+ */
+
+static int _pmcraid_io_done(struct pmcraid_cmd *cmd, int reslen, int ioasc)
+{
+	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+	int rc = 0;
+
+	scsi_set_resid(scsi_cmd, reslen);
+
+	pmcraid_info("response(%d) CDB[0] = %x ioasc:result: %x:%x\n",
+		le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+		cmd->ioa_cb->ioarcb.cdb[0],
+		ioasc, scsi_cmd->result);
+
+	if (PMCRAID_IOASC_SENSE_KEY(ioasc) != 0)
+		rc = pmcraid_error_handler(cmd);
+
+	if (rc == 0) {
+		scsi_dma_unmap(scsi_cmd);
+		scsi_cmd->scsi_done(scsi_cmd);
+	}
+
+	return rc;
+}
+
+/**
+ * pmcraid_io_done - SCSI completion function
+ *
+ * @cmd: pointer to pmcraid command struct
+ *
+ * This function is invoked by the tasklet/mid-layer error handler to complete
+ * the SCSI ops sent from the mid-layer.
+ *
+ * Return value
+ *	  none
+ */
+
+static void pmcraid_io_done(struct pmcraid_cmd *cmd)
+{
+	u32 ioasc = le32_to_cpu(cmd->ioa_cb->ioasa.ioasc);
+	u32 reslen = le32_to_cpu(cmd->ioa_cb->ioasa.residual_data_length);
+
+	if (_pmcraid_io_done(cmd, reslen, ioasc) == 0)
+		pmcraid_return_cmd(cmd);
+}
+
+/**
+ * pmcraid_abort_cmd - Aborts a single IOARCB already submitted to IOA
+ *
+ * @cmd: command block of the command to be aborted
+ *
+ * Return Value:
+ *	 returns pointer to command structure used as cancelling cmd
+ */
+static struct pmcraid_cmd *pmcraid_abort_cmd(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_cmd *cancel_cmd;
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_resource_entry *res;
+
+	pinstance = (struct pmcraid_instance *)cmd->drv_inst;
+	res = cmd->scsi_cmd->device->hostdata;
+
+	cancel_cmd = pmcraid_get_free_cmd(pinstance);
+
+	if (cancel_cmd == NULL) {
+		pmcraid_err("%s: no cmd blocks are available\n", __func__);
+		return NULL;
+	}
+
+	pmcraid_prepare_cancel_cmd(cancel_cmd, cmd);
+
+	pmcraid_info("aborting command CDB[0]= %x with index = %d\n",
+		cmd->ioa_cb->ioarcb.cdb[0],
+		cmd->ioa_cb->ioarcb.response_handle >> 2);
+
+	init_completion(&cancel_cmd->wait_for_completion);
+	cancel_cmd->completion_req = 1;
+
+	pmcraid_info("command (%d) CDB[0] = %x for %x\n",
+		le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.response_handle) >> 2,
+		cmd->ioa_cb->ioarcb.cdb[0],
+		le32_to_cpu(cancel_cmd->ioa_cb->ioarcb.resource_handle));
+
+	pmcraid_send_cmd(cancel_cmd,
+			 pmcraid_internal_done,
+			 PMCRAID_INTERNAL_TIMEOUT,
+			 pmcraid_timeout_handler);
+	return cancel_cmd;
+}
+
+/**
+ * pmcraid_abort_complete - Waits for ABORT TASK completion
+ *
+ * @cancel_cmd: command block use as cancelling command
+ *
+ * Return Value:
+ *	 returns SUCCESS if ABORT TASK has good completion
+ *	 otherwise FAILED
+ */
+static int pmcraid_abort_complete(struct pmcraid_cmd *cancel_cmd)
+{
+	struct pmcraid_resource_entry *res;
+	u32 ioasc;
+
+	wait_for_completion(&cancel_cmd->wait_for_completion);
+	res = cancel_cmd->u.res;
+	cancel_cmd->u.res = NULL;
+	ioasc = le32_to_cpu(cancel_cmd->ioa_cb->ioasa.ioasc);
+
+	/* If the abort task is not timed out we will get a Good completion
+	 * as sense_key, otherwise we may get one of the following responses
+	 * due to a subsequent bus reset or device reset. In case IOASC is
+	 * NR_SYNC_REQUIRED, set sync_reqd flag for the corresponding resource
+	 */
+	if (ioasc == PMCRAID_IOASC_UA_BUS_WAS_RESET ||
+	    ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED) {
+		if (ioasc == PMCRAID_IOASC_NR_SYNC_REQUIRED)
+			res->sync_reqd = 1;
+		ioasc = 0;
+	}
+
+	/* complete the command here itself */
+	pmcraid_return_cmd(cancel_cmd);
+	return PMCRAID_IOASC_SENSE_KEY(ioasc) ? FAILED : SUCCESS;
+}
+
+/**
+ * pmcraid_eh_abort_handler - entry point for aborting a single task on errors
+ *
+ * @scsi_cmd:   scsi command struct given by mid-layer. When this is called
+ *		mid-layer ensures that no other commands are queued. This
+ *		never gets called under interrupt, but from a separate eh thread.
+ *
+ * Return value:
+ *	 SUCCESS / FAILED
+ */
+static int pmcraid_eh_abort_handler(struct scsi_cmnd *scsi_cmd)
+{
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_cmd *cmd;
+	struct pmcraid_resource_entry *res;
+	unsigned long host_lock_flags;
+	unsigned long pending_lock_flags;
+	struct pmcraid_cmd *cancel_cmd = NULL;
+	int cmd_found = 0;
+	int rc = FAILED;
+
+	pinstance =
+		(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+
+	dev_err(&pinstance->pdev->dev,
+		"I/O command timed out, aborting it.\n");
+
+	res = scsi_cmd->device->hostdata;
+
+	if (res == NULL)
+		return rc;
+
+	/* If we are currently going through reset/reload, return failed.
+	 * This will force the mid-layer to eventually call
+	 * pmcraid_eh_host_reset which will then go to sleep and wait for the
+	 * reset to complete
+	 */
+	spin_lock_irqsave(pinstance->host->host_lock, host_lock_flags);
+
+	if (pinstance->ioa_reset_in_progress ||
+	    pinstance->ioa_state == IOA_STATE_DEAD) {
+		spin_unlock_irqrestore(pinstance->host->host_lock,
+				       host_lock_flags);
+		return rc;
+	}
+
+	/* loop over pending cmd list to find cmd corresponding to this
+	 * scsi_cmd. Note that this command might not have been completed
+	 * already. locking: all pending commands are protected with
+	 * pending_pool_lock.
+	 */
+	spin_lock_irqsave(&pinstance->pending_pool_lock, pending_lock_flags);
+	list_for_each_entry(cmd, &pinstance->pending_cmd_pool, free_list) {
+
+		if (cmd->scsi_cmd == scsi_cmd) {
+			cmd_found = 1;
+			break;
+		}
+	}
+
+	spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+				pending_lock_flags);
+
+	/* If the command to be aborted was given to IOA and still pending with
+	 * it, send ABORT_TASK to abort this and wait for its completion
+	 */
+	if (cmd_found)
+		cancel_cmd = pmcraid_abort_cmd(cmd);
+
+	spin_unlock_irqrestore(pinstance->host->host_lock,
+			       host_lock_flags);
+
+	if (cancel_cmd) {
+		cancel_cmd->u.res = cmd->scsi_cmd->device->hostdata;
+		rc = pmcraid_abort_complete(cancel_cmd);
+	}
+
+	return cmd_found ? rc : SUCCESS;
+}
+
+/**
+ * pmcraid_eh_xxxx_reset_handler - bus/target/device reset handler callbacks
+ *
+ * @scmd: pointer to scsi_cmd that was sent to the resource to be reset.
+ *
+ * All these routines invoke pmcraid_reset_device with appropriate parameters.
+ * Since these are called from mid-layer EH thread, no other IO will be queued
+ * to the resource being reset. However, control path (IOCTL) may be active so
+ * it is necessary to synchronize IOARRIN writes which pmcraid_reset_device
+ * takes care by locking/unlocking host_lock.
+ *
+ * Return value
+ * 	SUCCESS or FAILED
+ */
+static int pmcraid_eh_device_reset_handler(struct scsi_cmnd *scmd)
+{
+	pmcraid_err("Doing device reset due to an I/O command timeout.\n");
+	return pmcraid_reset_device(scmd,
+				    PMCRAID_INTERNAL_TIMEOUT,
+				    RESET_DEVICE_LUN);
+}
+
+static int pmcraid_eh_bus_reset_handler(struct scsi_cmnd *scmd)
+{
+	pmcraid_err("Doing bus reset due to an I/O command timeout.\n");
+	return pmcraid_reset_device(scmd,
+				    PMCRAID_RESET_BUS_TIMEOUT,
+				    RESET_DEVICE_BUS);
+}
+
+static int pmcraid_eh_target_reset_handler(struct scsi_cmnd *scmd)
+{
+	pmcraid_err("Doing target reset due to an I/O command timeout.\n");
+	return pmcraid_reset_device(scmd,
+				    PMCRAID_INTERNAL_TIMEOUT,
+				    RESET_DEVICE_TARGET);
+}
+
+/**
+ * pmcraid_eh_host_reset_handler - adapter reset handler callback
+ *
+ * @scmd: pointer to scsi_cmd that was sent to a resource of adapter
+ *
+ * Initiates adapter reset to bring it up to operational state
+ *
+ * Return value
+ * 	SUCCESS or FAILED
+ */
+static int pmcraid_eh_host_reset_handler(struct scsi_cmnd *scmd)
+{
+	unsigned long interval = 10000; /* 10 seconds interval */
+	int waits = jiffies_to_msecs(PMCRAID_RESET_HOST_TIMEOUT) / interval;
+	struct pmcraid_instance *pinstance =
+		(struct pmcraid_instance *)(scmd->device->host->hostdata);
+
+
+	/* wait for an additional 150 seconds in case the firmware comes up
+	 * and completes all the pending commands except the two HCAMs
+	 * (CCN and LDN).
+	 */
+	while (waits--) {
+		if (atomic_read(&pinstance->outstanding_cmds) <=
+		    PMCRAID_MAX_HCAM_CMD)
+			return SUCCESS;
+		msleep(interval);
+	}
+
+	dev_err(&pinstance->pdev->dev,
+		"Adapter being reset due to an I/O command timeout.\n");
+	return pmcraid_reset_bringup(pinstance) == 0 ? SUCCESS : FAILED;
+}
+
+/**
+ * pmcraid_task_attributes - Translate SPI Q-Tags to task attributes
+ * @scsi_cmd:   scsi command struct
+ *
+ * Return value
+ *	  the task attribute (tag type) or 0 if the task is not tagged
+ */
+static u8 pmcraid_task_attributes(struct scsi_cmnd *scsi_cmd)
+{
+	char tag[2];
+	u8 rc = 0;
+
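+	/* scsi_populate_tag_msg fills tag[] with the two-byte SPI tag
+	 * message; tag[0] is the message type used here to pick the
+	 * corresponding task attribute.
+	 */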
+	if (scsi_populate_tag_msg(scsi_cmd, tag)) {
+		switch (tag[0]) {
+		case MSG_SIMPLE_TAG:
+			rc = TASK_TAG_SIMPLE;
+			break;
+		case MSG_HEAD_TAG:
+			rc = TASK_TAG_QUEUE_HEAD;
+			break;
+		case MSG_ORDERED_TAG:
+			rc = TASK_TAG_ORDERED;
+			break;
+		}
+	}
+
+	return rc;
+}
+
+
+/**
+ * pmcraid_init_ioadls - initializes IOADL related fields in IOARCB
+ * @cmd: pmcraid command struct
+ * @sgcount: count of scatter-gather elements
+ *
+ * Return value
+ *   returns a pointer to the pmcraid_ioadl_desc array, set to point to
+ *   either the internal or the external IOADLs
+ */
+struct pmcraid_ioadl_desc *
+pmcraid_init_ioadls(struct pmcraid_cmd *cmd, int sgcount)
+{
+	struct pmcraid_ioadl_desc *ioadl;
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	int ioadl_count = 0;
+
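+	/* additional command parameters share the IOARCB's add_data area with
+	 * the embedded IOADLs; each IOADL slot is 16 bytes, so compute how
+	 * many slots the parameters already occupy.
+	 */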
+	if (ioarcb->add_cmd_param_length)
+		ioadl_count = DIV_ROUND_UP(ioarcb->add_cmd_param_length, 16);
+	ioarcb->ioadl_length =
+		sizeof(struct pmcraid_ioadl_desc) * sgcount;
+
+	if ((sgcount + ioadl_count) > (ARRAY_SIZE(ioarcb->add_data.u.ioadl))) {
+		/* external ioadls start at offset 0x80 from the control_block
+		 * structure, reusing 24 out of the 27 ioadls that are part of
+		 * the IOARCB. It is necessary to indicate to the firmware
+		 * that the ioadls used by the driver are external to the
+		 * IOARCB.
+		 */
+		ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+		ioarcb->ioadl_bus_addr =
+			cpu_to_le64((cmd->ioa_cb_bus_addr) +
+				offsetof(struct pmcraid_ioarcb,
+					add_data.u.ioadl[3]));
+		ioadl = &ioarcb->add_data.u.ioadl[3];
+	} else {
+		ioarcb->ioadl_bus_addr =
+			cpu_to_le64((cmd->ioa_cb_bus_addr) +
+				offsetof(struct pmcraid_ioarcb,
+					add_data.u.ioadl[ioadl_count]));
+
+		ioadl = &ioarcb->add_data.u.ioadl[ioadl_count];
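+		/* the low bits of ioarcb_bus_addr tell the IOA how much
+		 * additional data (the embedded IOADLs) to fetch along with
+		 * the IOARCB; this appears to be encoded in units of eight
+		 * 16-byte descriptors (128 bytes).
+		 */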
+		ioarcb->ioarcb_bus_addr |=
+				DIV_ROUND_CLOSEST(sgcount + ioadl_count, 8);
+	}
+
+	return ioadl;
+}
+
+/**
+ * pmcraid_build_ioadl - Build a scatter/gather list and map the buffer
+ * @pinstance: pointer to adapter instance structure
+ * @cmd: pmcraid command struct
+ *
+ * This function is invoked by queuecommand entry point while sending a command
+ * to firmware. This builds ioadl descriptors and sets up ioarcb fields.
+ *
+ * Return value:
+ * 	0 on success or -1 on failure
+ */
+static int pmcraid_build_ioadl(
+	struct pmcraid_instance *pinstance,
+	struct pmcraid_cmd *cmd
+)
+{
+	int i, nseg;
+	struct scatterlist *sglist;
+
+	struct scsi_cmnd *scsi_cmd = cmd->scsi_cmd;
+	struct pmcraid_ioarcb *ioarcb = &(cmd->ioa_cb->ioarcb);
+	struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+	u32 length = scsi_bufflen(scsi_cmd);
+
+	if (!length)
+		return 0;
+
+	nseg = scsi_dma_map(scsi_cmd);
+
+	if (nseg < 0) {
+		dev_err(&pinstance->pdev->dev, "scsi_dma_map failed!\n");
+		return -1;
+	} else if (nseg > PMCRAID_MAX_IOADLS) {
+		scsi_dma_unmap(scsi_cmd);
+		dev_err(&pinstance->pdev->dev,
+			"sg count is (%d) more than allowed!\n", nseg);
+		return -1;
+	}
+
+	/* Initialize IOARCB data transfer length fields */
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE)
+		ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+
+	ioarcb->request_flags0 |= NO_LINK_DESCS;
+	ioarcb->data_transfer_length = cpu_to_le32(length);
+	ioadl = pmcraid_init_ioadls(cmd, nseg);
+
+	/* Initialize IOADL descriptor addresses */
+	scsi_for_each_sg(scsi_cmd, sglist, nseg, i) {
+		ioadl[i].data_len = cpu_to_le32(sg_dma_len(sglist));
+		ioadl[i].address = cpu_to_le64(sg_dma_address(sglist));
+		ioadl[i].flags = 0;
+	}
+	/* setup last descriptor */
+	ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+
+	return 0;
+}
+
+/**
+ * pmcraid_free_sglist - Frees an allocated SG buffer list
+ * @sglist: scatter/gather list pointer
+ *
+ * Frees DMA'able memory previously allocated with pmcraid_alloc_sglist
+ *
+ * Return value:
+ * 	none
+ */
+static void pmcraid_free_sglist(struct pmcraid_sglist *sglist)
+{
+	int i;
+
+	for (i = 0; i < sglist->num_sg; i++)
+		__free_pages(sg_page(&(sglist->scatterlist[i])),
+			     sglist->order);
+
+	kfree(sglist);
+}
+
+/**
+ * pmcraid_alloc_sglist - Allocates memory for a SG list
+ * @buflen: buffer length
+ *
+ * Allocates a DMA'able buffer in chunks and assembles a scatter/gather
+ * list.
+ *
+ * Return value
+ * 	pointer to sglist / NULL on failure
+ */
+static struct pmcraid_sglist *pmcraid_alloc_sglist(int buflen)
+{
+	struct pmcraid_sglist *sglist;
+	struct scatterlist *scatterlist;
+	struct page *page;
+	int num_elem, i, j;
+	int sg_size;
+	int order;
+	int bsize_elem;
+
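+	/* size each element so the whole buffer fits in at most
+	 * PMCRAID_MAX_IOADLS scatter-gather entries; each entry is backed by
+	 * a higher-order allocation of 2^order pages.
+	 */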
+	sg_size = buflen / (PMCRAID_MAX_IOADLS - 1);
+	order = (sg_size > 0) ? get_order(sg_size) : 0;
+	bsize_elem = PAGE_SIZE * (1 << order);
+
+	/* Determine the actual number of sg entries needed */
+	if (buflen % bsize_elem)
+		num_elem = (buflen / bsize_elem) + 1;
+	else
+		num_elem = buflen / bsize_elem;
+
+	/* Allocate a scatter/gather list for the DMA */
+	sglist = kzalloc(sizeof(struct pmcraid_sglist) +
+			 (sizeof(struct scatterlist) * (num_elem - 1)),
+			 GFP_KERNEL);
+
+	if (sglist == NULL)
+		return NULL;
+
+	scatterlist = sglist->scatterlist;
+	sg_init_table(scatterlist, num_elem);
+	sglist->order = order;
+	sglist->num_sg = num_elem;
+	sg_size = buflen;
+
+	for (i = 0; i < num_elem; i++) {
+		page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
+		if (!page) {
+			for (j = i - 1; j >= 0; j--)
+				__free_pages(sg_page(&scatterlist[j]), order);
+			kfree(sglist);
+			return NULL;
+		}
+
+		sg_set_page(&scatterlist[i], page,
+			sg_size < bsize_elem ? sg_size : bsize_elem, 0);
+		sg_size -= bsize_elem;
+	}
+
+	return sglist;
+}
+
+/**
+ * pmcraid_copy_sglist - Copy user buffer to kernel buffer's SG list
+ * @sglist: scatter/gather list pointer
+ * @buffer: buffer pointer
+ * @len: buffer length
+ * @direction: data transfer direction
+ *
+ * Copy a user buffer into a buffer allocated by pmcraid_alloc_sglist
+ *
+ * Return value:
+ * 0 on success / other on failure
+ */
+static int pmcraid_copy_sglist(
+	struct pmcraid_sglist *sglist,
+	unsigned long buffer,
+	u32 len,
+	int direction
+)
+{
+	struct scatterlist *scatterlist;
+	void *kaddr;
+	int bsize_elem;
+	int i;
+	int rc = 0;
+
+	/* Determine the actual number of bytes per element */
+	bsize_elem = PAGE_SIZE * (1 << sglist->order);
+
+	scatterlist = sglist->scatterlist;
+
+	for (i = 0; i < (len / bsize_elem); i++, buffer += bsize_elem) {
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
+		if (direction == DMA_TO_DEVICE)
+			rc = __copy_from_user(kaddr,
+					      (void *)buffer,
+					      bsize_elem);
+		else
+			rc = __copy_to_user((void *)buffer, kaddr, bsize_elem);
+
+		kunmap(page);
+
+		if (rc) {
+			pmcraid_err("failed to copy user data into sg list\n");
+			return -EFAULT;
+		}
+
+		scatterlist[i].length = bsize_elem;
+	}
+
+	if (len % bsize_elem) {
+		struct page *page = sg_page(&scatterlist[i]);
+
+		kaddr = kmap(page);
+
+		if (direction == DMA_TO_DEVICE)
+			rc = __copy_from_user(kaddr,
+					      (void *)buffer,
+					      len % bsize_elem);
+		else
+			rc = __copy_to_user((void *)buffer,
+					    kaddr,
+					    len % bsize_elem);
+
+		kunmap(page);
+
+		scatterlist[i].length = len % bsize_elem;
+	}
+
+	if (rc) {
+		pmcraid_err("failed to copy user data into sg list\n");
+		rc = -EFAULT;
+	}
+
+	return rc;
+}
+
+/**
+ * pmcraid_queuecommand - Queue a mid-layer request
+ * @scsi_cmd: scsi command struct
+ * @done: done function
+ *
+ * This function queues a request generated by the mid-layer. The mid-layer
+ * calls this routine with host->lock held. Some of the functions called by
+ * queuecommand use the cmd block queue locks (free_pool_lock and
+ * pending_pool_lock).
+ *
+ * Return value:
+ *	  0 on success
+ *	  SCSI_MLQUEUE_DEVICE_BUSY if device is busy
+ *	  SCSI_MLQUEUE_HOST_BUSY if host is busy
+ */
+static int pmcraid_queuecommand(
+	struct scsi_cmnd *scsi_cmd,
+	void (*done) (struct scsi_cmnd *)
+)
+{
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_resource_entry *res;
+	struct pmcraid_ioarcb *ioarcb;
+	struct pmcraid_cmd *cmd;
+	int rc = 0;
+
+	pinstance =
+		(struct pmcraid_instance *)scsi_cmd->device->host->hostdata;
+
+	scsi_cmd->scsi_done = done;
+	res = scsi_cmd->device->hostdata;
+	scsi_cmd->result = (DID_OK << 16);
+
+	/* if the adapter is marked as dead, set the result to DID_NO_CONNECT
+	 * and complete the command
+	 */
+	if (pinstance->ioa_state == IOA_STATE_DEAD) {
+		pmcraid_info("IOA is dead, but queuecommand is scheduled\n");
+		scsi_cmd->result = (DID_NO_CONNECT << 16);
+		scsi_cmd->scsi_done(scsi_cmd);
+		return 0;
+	}
+
+	/* If IOA reset is in progress, can't queue the commands */
+	if (pinstance->ioa_reset_in_progress)
+		return SCSI_MLQUEUE_HOST_BUSY;
+
+	/* initialize the command and IOARCB to be sent to IOA */
+	cmd = pmcraid_get_free_cmd(pinstance);
+
+	if (cmd == NULL) {
+		pmcraid_err("free command block is not available\n");
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	cmd->scsi_cmd = scsi_cmd;
+	ioarcb = &(cmd->ioa_cb->ioarcb);
+	memcpy(ioarcb->cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
+	ioarcb->resource_handle = res->cfg_entry.resource_handle;
+	ioarcb->request_type = REQ_TYPE_SCSI;
+
+	cmd->cmd_done = pmcraid_io_done;
+
+	if (RES_IS_GSCSI(res->cfg_entry) || RES_IS_VSET(res->cfg_entry)) {
+		if (scsi_cmd->underflow == 0)
+			ioarcb->request_flags0 |= INHIBIT_UL_CHECK;
+
+		if (res->sync_reqd) {
+			ioarcb->request_flags0 |= SYNC_COMPLETE;
+			res->sync_reqd = 0;
+		}
+
+		ioarcb->request_flags0 |= NO_LINK_DESCS;
+		ioarcb->request_flags1 |= pmcraid_task_attributes(scsi_cmd);
+
+		if (RES_IS_GSCSI(res->cfg_entry))
+			ioarcb->request_flags1 |= DELAY_AFTER_RESET;
+	}
+
+	rc = pmcraid_build_ioadl(pinstance, cmd);
+
+	pmcraid_info("command (%d) CDB[0] = %x for %x:%x:%x:%x\n",
+		     le32_to_cpu(ioarcb->response_handle) >> 2,
+		     scsi_cmd->cmnd[0], pinstance->host->unique_id,
+		     RES_IS_VSET(res->cfg_entry) ? PMCRAID_VSET_BUS_ID :
+			PMCRAID_PHYS_BUS_ID,
+		     RES_IS_VSET(res->cfg_entry) ?
+			res->cfg_entry.unique_flags1 :
+			RES_TARGET(res->cfg_entry.resource_address),
+		     RES_LUN(res->cfg_entry.resource_address));
+
+	if (likely(rc == 0)) {
+		_pmcraid_fire_command(cmd);
+	} else {
+		pmcraid_err("queuecommand could not build ioadl\n");
+		pmcraid_return_cmd(cmd);
+		rc = SCSI_MLQUEUE_HOST_BUSY;
+	}
+
+	return rc;
+}
+
+/**
+ * pmcraid_open -char node "open" entry, allowed only users with admin access
+ */
+static int pmcraid_chr_open(struct inode *inode, struct file *filep)
+{
+	struct pmcraid_instance *pinstance;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EACCES;
+
+	/* Populate the adapter instance pointer for use by ioctl */
+	pinstance = container_of(inode->i_cdev, struct pmcraid_instance, cdev);
+	filep->private_data = pinstance;
+
+	return 0;
+}
+
+/**
+ * pmcraid_chr_release - char node "release" entry point
+ */
+static int pmcraid_chr_release(struct inode *inode, struct file *filep)
+{
+	struct pmcraid_instance *pinstance =
+		((struct pmcraid_instance *)filep->private_data);
+
+	filep->private_data = NULL;
+	fasync_helper(-1, filep, 0, &pinstance->aen_queue);
+
+	return 0;
+}
+
+/**
+ * pmcraid_chr_fasync - Async notifier registration from applications
+ *
+ * This function adds the calling process to the adapter's async notification
+ * queue. When an event occurs, SIGIO will be sent to all processes in this
+ * queue.
+ */
+static int pmcraid_chr_fasync(int fd, struct file *filep, int mode)
+{
+	struct pmcraid_instance *pinstance;
+	int rc;
+
+	pinstance = (struct pmcraid_instance *)filep->private_data;
+	mutex_lock(&pinstance->aen_queue_lock);
+	rc = fasync_helper(fd, filep, mode, &pinstance->aen_queue);
+	mutex_unlock(&pinstance->aen_queue_lock);
+
+	return rc;
+}
+
+
+/**
+ * pmcraid_build_passthrough_ioadls - builds SG elements for passthrough
+ * commands sent over IOCTL interface
+ *
+ * @cmd       : pointer to struct pmcraid_cmd
+ * @buflen    : length of the request buffer
+ * @direction : data transfer direction
+ *
+ * Return value
+ *  0 on success, non-zero error code on failure
+ */
+static int pmcraid_build_passthrough_ioadls(
+	struct pmcraid_cmd *cmd,
+	int buflen,
+	int direction
+)
+{
+	struct pmcraid_sglist *sglist = NULL;
+	struct scatterlist *sg = NULL;
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	struct pmcraid_ioadl_desc *ioadl;
+	int i;
+
+	sglist = pmcraid_alloc_sglist(buflen);
+
+	if (!sglist) {
+		pmcraid_err("can't allocate memory for passthrough SGls\n");
+		return -ENOMEM;
+	}
+
+	sglist->num_dma_sg = pci_map_sg(cmd->drv_inst->pdev,
+					sglist->scatterlist,
+					sglist->num_sg, direction);
+
+	if (!sglist->num_dma_sg || sglist->num_dma_sg > PMCRAID_MAX_IOADLS) {
+		dev_err(&cmd->drv_inst->pdev->dev,
+			"Failed to map passthrough buffer!\n");
+		pmcraid_free_sglist(sglist);
+		return -EIO;
+	}
+
+	cmd->sglist = sglist;
+	ioarcb->request_flags0 |= NO_LINK_DESCS;
+
+	ioadl = pmcraid_init_ioadls(cmd, sglist->num_dma_sg);
+
+	/* Initialize IOADL descriptor addresses */
+	for_each_sg(sglist->scatterlist, sg, sglist->num_dma_sg, i) {
+		ioadl[i].data_len = cpu_to_le32(sg_dma_len(sg));
+		ioadl[i].address = cpu_to_le64(sg_dma_address(sg));
+		ioadl[i].flags = 0;
+	}
+
+	/* setup the last descriptor */
+	ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+
+	return 0;
+}
+
+
+/**
+ * pmcraid_release_passthrough_ioadls - release passthrough ioadls
+ *
+ * @cmd: pointer to struct pmcraid_cmd for which ioadls were allocated
+ * @buflen: size of the request buffer
+ * @direction: data transfer direction
+ *
+ * Return value
+ *  none
+ */
+static void pmcraid_release_passthrough_ioadls(
+	struct pmcraid_cmd *cmd,
+	int buflen,
+	int direction
+)
+{
+	struct pmcraid_sglist *sglist = cmd->sglist;
+
+	if (buflen > 0) {
+		pci_unmap_sg(cmd->drv_inst->pdev,
+			     sglist->scatterlist,
+			     sglist->num_sg,
+			     direction);
+		pmcraid_free_sglist(sglist);
+		cmd->sglist = NULL;
+	}
+}
+
+/**
+ * pmcraid_ioctl_passthrough - handling passthrough IOCTL commands
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @ioctl_cmd: passthrough ioctl command code
+ * @buflen: length of the request buffer
+ * @arg: pointer to pmcraid_passthrough_buffer user buffer
+ *
+ * Return value
+ *  0 on success, non-zero error code on failure
+ */
+static long pmcraid_ioctl_passthrough(
+	struct pmcraid_instance *pinstance,
+	unsigned int ioctl_cmd,
+	unsigned int buflen,
+	unsigned long arg
+)
+{
+	struct pmcraid_passthrough_ioctl_buffer *buffer;
+	struct pmcraid_ioarcb *ioarcb;
+	struct pmcraid_cmd *cmd;
+	struct pmcraid_cmd *cancel_cmd;
+	unsigned long request_buffer;
+	unsigned long request_offset;
+	unsigned long lock_flags;
+	int request_size;
+	int buffer_size;
+	u8 access, direction;
+	int rc = 0;
+
+	/* If IOA reset is in progress, wait 10 secs for reset to complete */
+	if (pinstance->ioa_reset_in_progress) {
+		rc = wait_event_interruptible_timeout(
+				pinstance->reset_wait_q,
+				!pinstance->ioa_reset_in_progress,
+				msecs_to_jiffies(10000));
+
+		if (!rc)
+			return -ETIMEDOUT;
+		else if (rc < 0)
+			return -ERESTARTSYS;
+	}
+
+	/* If adapter is not in operational state, return error */
+	if (pinstance->ioa_state != IOA_STATE_OPERATIONAL) {
+		pmcraid_err("IOA is not operational\n");
+		return -ENOTTY;
+	}
+
+	buffer_size = sizeof(struct pmcraid_passthrough_ioctl_buffer);
+	buffer = kmalloc(buffer_size, GFP_KERNEL);
+
+	if (!buffer) {
+		pmcraid_err("no memory for passthrough buffer\n");
+		return -ENOMEM;
+	}
+
+	request_offset =
+	    offsetof(struct pmcraid_passthrough_ioctl_buffer, request_buffer);
+
+	request_buffer = arg + request_offset;
+
+	rc = __copy_from_user(buffer,
+			     (struct pmcraid_passthrough_ioctl_buffer *) arg,
+			     sizeof(struct pmcraid_passthrough_ioctl_buffer));
+	if (rc) {
+		pmcraid_err("ioctl: can't copy passthrough buffer\n");
+		rc = -EFAULT;
+		goto out_free_buffer;
+	}
+
+	request_size = buffer->ioarcb.data_transfer_length;
+
+	if (buffer->ioarcb.request_flags0 & TRANSFER_DIR_WRITE) {
+		access = VERIFY_READ;
+		direction = DMA_TO_DEVICE;
+	} else {
+		access = VERIFY_WRITE;
+		direction = DMA_FROM_DEVICE;
+	}
+
+	if (request_size > 0) {
+		rc = access_ok(access, arg, request_offset + request_size);
+
+		if (!rc) {
+			rc = -EFAULT;
+			goto out_free_buffer;
+		}
+	}
+
+	/* reject requests whose additional command parameters would not fit
+	 * in the space reserved in the IOARCB (this bounds the memcpy below)
+	 */
+	if (buffer->ioarcb.add_cmd_param_length > PMCRAID_ADD_CMD_PARAM_LEN) {
+		rc = -EINVAL;
+		goto out_free_buffer;
+	}
+
+	cmd = pmcraid_get_free_cmd(pinstance);
+
+	if (!cmd) {
+		pmcraid_err("free command block is not available\n");
+		rc = -ENOMEM;
+		goto out_free_buffer;
+	}
+
+	cmd->scsi_cmd = NULL;
+	ioarcb = &(cmd->ioa_cb->ioarcb);
+
+	/* Copy the user-provided IOARCB into the command, field by field */
+	ioarcb->resource_handle = buffer->ioarcb.resource_handle;
+	ioarcb->data_transfer_length = buffer->ioarcb.data_transfer_length;
+	ioarcb->cmd_timeout = buffer->ioarcb.cmd_timeout;
+	ioarcb->request_type = buffer->ioarcb.request_type;
+	ioarcb->request_flags0 = buffer->ioarcb.request_flags0;
+	ioarcb->request_flags1 = buffer->ioarcb.request_flags1;
+	memcpy(ioarcb->cdb, buffer->ioarcb.cdb, PMCRAID_MAX_CDB_LEN);
+
+	if (buffer->ioarcb.add_cmd_param_length) {
+		ioarcb->add_cmd_param_length =
+			buffer->ioarcb.add_cmd_param_length;
+		ioarcb->add_cmd_param_offset =
+			buffer->ioarcb.add_cmd_param_offset;
+		memcpy(ioarcb->add_data.u.add_cmd_params,
+			buffer->ioarcb.add_data.u.add_cmd_params,
+			buffer->ioarcb.add_cmd_param_length);
+	}
+
+	if (request_size) {
+		rc = pmcraid_build_passthrough_ioadls(cmd,
+						      request_size,
+						      direction);
+		if (rc) {
+			pmcraid_err("couldn't build passthrough ioadls\n");
+			goto out_free_buffer;
+		}
+	}
+
+	/* If data is being written into the device, copy the data from user
+	 * buffers
+	 */
+	if (direction == DMA_TO_DEVICE && request_size > 0) {
+		rc = pmcraid_copy_sglist(cmd->sglist,
+					 request_buffer,
+					 request_size,
+					 direction);
+		if (rc) {
+			pmcraid_err("failed to copy user buffer\n");
+			goto out_free_sglist;
+		}
+	}
+
+	/* passthrough ioctl is a blocking command, so put the caller to sleep
+	 * until the command completes or times out. Note that a timeout value
+	 * of 0 means an indefinite (blocking) wait.
+	 */
+	cmd->cmd_done = pmcraid_internal_done;
+	init_completion(&cmd->wait_for_completion);
+	cmd->completion_req = 1;
+
+	pmcraid_info("command(%d) (CDB[0] = %x) for %x\n",
+		     le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+		     cmd->ioa_cb->ioarcb.cdb[0],
+		     le32_to_cpu(cmd->ioa_cb->ioarcb.resource_handle));
+
+	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+	_pmcraid_fire_command(cmd);
+	spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+	/* If a command timeout is specified, wait for at most that long;
+	 * otherwise wait indefinitely. If the command times out, it will be
+	 * aborted.
+	 */
+	if (buffer->ioarcb.cmd_timeout == 0) {
+		wait_for_completion(&cmd->wait_for_completion);
+	} else if (!wait_for_completion_timeout(
+			&cmd->wait_for_completion,
+			msecs_to_jiffies(buffer->ioarcb.cmd_timeout * 1000))) {
+
+		pmcraid_info("aborting cmd %d (CDB[0] = %x) due to timeout\n",
+			le32_to_cpu(cmd->ioa_cb->ioarcb.response_handle) >> 2,
+			cmd->ioa_cb->ioarcb.cdb[0]);
+
+		rc = -ETIMEDOUT;
+		spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+		cancel_cmd = pmcraid_abort_cmd(cmd);
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+		if (cancel_cmd) {
+			wait_for_completion(&cancel_cmd->wait_for_completion);
+			pmcraid_return_cmd(cancel_cmd);
+		}
+
+		goto out_free_sglist;
+	}
+
+	/* If the command failed for any reason, copy entire IOASA buffer and
+	 * return IOCTL success. If copying IOASA to user-buffer fails, return
+	 * EFAULT
+	 */
+	if (le32_to_cpu(cmd->ioa_cb->ioasa.ioasc)) {
+
+		void *ioasa =
+		    (void *)(arg +
+		    offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+
+		pmcraid_info("command failed with %x\n",
+			     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
+		if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
+				 sizeof(struct pmcraid_ioasa))) {
+			pmcraid_err("failed to copy ioasa buffer to user\n");
+			rc = -EFAULT;
+		}
+	}
+	/* If the data transfer was from device, copy the data onto user
+	 * buffers
+	 */
+	else if (direction == DMA_FROM_DEVICE && request_size > 0) {
+		rc = pmcraid_copy_sglist(cmd->sglist,
+					 request_buffer,
+					 request_size,
+					 direction);
+		if (rc) {
+			pmcraid_err("failed to copy user buffer\n");
+			rc = -EFAULT;
+		}
+	}
+
+out_free_sglist:
+	pmcraid_release_passthrough_ioadls(cmd, request_size, direction);
+	pmcraid_return_cmd(cmd);
+
+out_free_buffer:
+	kfree(buffer);
+
+	return rc;
+}
+
+
+
+
+/**
+ * pmcraid_ioctl_driver - ioctl handler for commands handled by driver itself
+ *
+ * @pinstance: pointer to adapter instance structure
+ * @cmd: ioctl command passed in
+ * @buflen: length of user_buffer
+ * @user_buffer: user buffer pointer
+ *
+ * Return Value
+ *   0 in case of success, otherwise appropriate error code
+ */
+static long pmcraid_ioctl_driver(
+	struct pmcraid_instance *pinstance,
+	unsigned int cmd,
+	unsigned int buflen,
+	void __user *user_buffer
+)
+{
+	int rc = -ENOSYS;
+
+	if (!access_ok(VERIFY_READ, user_buffer, _IOC_SIZE(cmd))) {
+		pmcraid_err("ioctl_driver: access fault in request buffer\n");
+		return -EFAULT;
+	}
+
+	switch (cmd) {
+	case PMCRAID_IOCTL_RESET_ADAPTER:
+		pmcraid_reset_bringup(pinstance);
+		rc = 0;
+		break;
+
+	default:
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * pmcraid_check_ioctl_buffer - check for proper access to user buffer
+ *
+ * @cmd: ioctl command
+ * @arg: user buffer
+ * @hdr: pointer to kernel memory for pmcraid_ioctl_header
+ *
+ * Return Value
+ *	negative error code if there are access issues, otherwise zero.
+ *	Upon success, returns ioctl header copied out of user buffer.
+ */
+
+static int pmcraid_check_ioctl_buffer(
+	int cmd,
+	void __user *arg,
+	struct pmcraid_ioctl_header *hdr
+)
+{
+	int rc = 0;
+	int access = VERIFY_READ;
+
+	if (copy_from_user(hdr, arg, sizeof(struct pmcraid_ioctl_header))) {
+		pmcraid_err("couldn't copy ioctl header from user buffer\n");
+		return -EFAULT;
+	}
+
+	/* check for valid driver signature */
+	rc = memcmp(hdr->signature,
+		    PMCRAID_IOCTL_SIGNATURE,
+		    sizeof(hdr->signature));
+	if (rc) {
+		pmcraid_err("signature verification failed\n");
+		return -EINVAL;
+	}
+
+	/* buffer length can't be negative */
+	if (hdr->buffer_length < 0) {
+		pmcraid_err("ioctl: invalid buffer length specified\n");
+		return -EINVAL;
+	}
+
+	/* check for appropriate buffer access */
+	if ((_IOC_DIR(cmd) & _IOC_READ) == _IOC_READ)
+		access = VERIFY_WRITE;
+
+	rc = access_ok(access,
+		       (arg + sizeof(struct pmcraid_ioctl_header)),
+		       hdr->buffer_length);
+	if (!rc) {
+		pmcraid_err("access failed for user buffer of size %d\n",
+			     hdr->buffer_length);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/**
+ *  pmcraid_ioctl - char node ioctl entry point
+ */
+static long pmcraid_chr_ioctl(
+	struct file *filep,
+	unsigned int cmd,
+	unsigned long arg
+)
+{
+	struct pmcraid_instance *pinstance = NULL;
+	struct pmcraid_ioctl_header *hdr = NULL;
+	int retval = -ENOTTY;
+
+	hdr = kmalloc(sizeof(struct pmcraid_ioctl_header), GFP_KERNEL);
+
+	if (!hdr) {
+		pmcraid_err("failed to allocate memory for ioctl header\n");
+		return -ENOMEM;
+	}
+
+	retval = pmcraid_check_ioctl_buffer(cmd, (void __user *)arg, hdr);
+
+	if (retval) {
+		pmcraid_info("chr_ioctl: header check failed\n");
+		kfree(hdr);
+		return retval;
+	}
+
+	pinstance = (struct pmcraid_instance *)filep->private_data;
+
+	if (!pinstance) {
+		pmcraid_info("adapter instance is not found\n");
+		kfree(hdr);
+		return -ENOTTY;
+	}
+
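+	/* the ioctl 'type' field selects between commands forwarded to the
+	 * IOA (passthrough) and commands handled by the driver itself.
+	 */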
+	switch (_IOC_TYPE(cmd)) {
+
+	case PMCRAID_PASSTHROUGH_IOCTL:
+		/* If ioctl code is to download microcode, we need to block
+		 * mid-layer requests.
+		 */
+		if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
+			scsi_block_requests(pinstance->host);
+
+		retval = pmcraid_ioctl_passthrough(pinstance,
+						   cmd,
+						   hdr->buffer_length,
+						   arg);
+
+		if (cmd == PMCRAID_IOCTL_DOWNLOAD_MICROCODE)
+			scsi_unblock_requests(pinstance->host);
+		break;
+
+	case PMCRAID_DRIVER_IOCTL:
+		arg += sizeof(struct pmcraid_ioctl_header);
+		retval = pmcraid_ioctl_driver(pinstance,
+					      cmd,
+					      hdr->buffer_length,
+					      (void __user *)arg);
+		break;
+
+	default:
+		retval = -ENOTTY;
+		break;
+	}
+
+	kfree(hdr);
+
+	return retval;
+}
+
+/**
+ * File operations structure for management interface
+ */
+static const struct file_operations pmcraid_fops = {
+	.owner = THIS_MODULE,
+	.open = pmcraid_chr_open,
+	.release = pmcraid_chr_release,
+	.fasync = pmcraid_chr_fasync,
+	.unlocked_ioctl = pmcraid_chr_ioctl,
+#ifdef CONFIG_COMPAT
+	.compat_ioctl = pmcraid_chr_ioctl,
+#endif
+};
+
+
+
+
+/**
+ * pmcraid_show_log_level - Display adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ *  number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_log_level(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pmcraid_instance *pinstance =
+		(struct pmcraid_instance *)shost->hostdata;
+	return snprintf(buf, PAGE_SIZE, "%d\n", pinstance->current_log_level);
+}
+
+/**
+ * pmcraid_store_log_level - Change the adapter's error logging level
+ * @dev: class device struct
+ * @buf: buffer
+ * @count: not used
+ *
+ * Return value:
+ *  number of bytes used from the buffer on success, -EINVAL on failure
+ */
+static ssize_t pmcraid_store_log_level(
+	struct device *dev,
+	struct device_attribute *attr,
+	const char *buf,
+	size_t count
+)
+{
+	struct Scsi_Host *shost;
+	struct pmcraid_instance *pinstance;
+	unsigned long val;
+
+	if (strict_strtoul(buf, 10, &val))
+		return -EINVAL;
+	/* log-level should be from 0 to 2 */
+	if (val > 2)
+		return -EINVAL;
+
+	shost = class_to_shost(dev);
+	pinstance = (struct pmcraid_instance *)shost->hostdata;
+	pinstance->current_log_level = val;
+
+	return strlen(buf);
+}
+
+static struct device_attribute pmcraid_log_level_attr = {
+	.attr = {
+		 .name = "log_level",
+		 .mode = S_IRUGO | S_IWUSR,
+		 },
+	.show = pmcraid_show_log_level,
+	.store = pmcraid_store_log_level,
+};
+
+/**
+ * pmcraid_show_drv_version - Display driver version
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ *  number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_drv_version(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf
+)
+{
+	return snprintf(buf, PAGE_SIZE, "version: %s, build date: %s\n",
+			PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
+}
+
+static struct device_attribute pmcraid_driver_version_attr = {
+	.attr = {
+		 .name = "drv_version",
+		 .mode = S_IRUGO,
+		 },
+	.show = pmcraid_show_drv_version,
+};
+
+/**
+ * pmcraid_show_adapter_id - Display the driver-assigned adapter id
+ * @dev: class device struct
+ * @buf: buffer
+ *
+ * Return value:
+ *  number of bytes printed to buffer
+ */
+static ssize_t pmcraid_show_adapter_id(
+	struct device *dev,
+	struct device_attribute *attr,
+	char *buf
+)
+{
+	struct Scsi_Host *shost = class_to_shost(dev);
+	struct pmcraid_instance *pinstance =
+		(struct pmcraid_instance *)shost->hostdata;
+	u32 adapter_id = (pinstance->pdev->bus->number << 8) |
+		pinstance->pdev->devfn;
+	u32 aen_group = pmcraid_event_family.id;
+
+	return snprintf(buf, PAGE_SIZE,
+			"adapter id: %d\nminor: %d\naen group: %d\n",
+			adapter_id, MINOR(pinstance->cdev.dev), aen_group);
+}
+
+static struct device_attribute pmcraid_adapter_id_attr = {
+	.attr = {
+		 .name = "adapter_id",
+		 .mode = S_IRUGO | S_IWUSR,
+		 },
+	.show = pmcraid_show_adapter_id,
+};
+
+static struct device_attribute *pmcraid_host_attrs[] = {
+	&pmcraid_log_level_attr,
+	&pmcraid_driver_version_attr,
+	&pmcraid_adapter_id_attr,
+	NULL,
+};
+
+
+/* host template structure for pmcraid driver */
+static struct scsi_host_template pmcraid_host_template = {
+	.module = THIS_MODULE,
+	.name = PMCRAID_DRIVER_NAME,
+	.queuecommand = pmcraid_queuecommand,
+	.eh_abort_handler = pmcraid_eh_abort_handler,
+	.eh_bus_reset_handler = pmcraid_eh_bus_reset_handler,
+	.eh_target_reset_handler = pmcraid_eh_target_reset_handler,
+	.eh_device_reset_handler = pmcraid_eh_device_reset_handler,
+	.eh_host_reset_handler = pmcraid_eh_host_reset_handler,
+
+	.slave_alloc = pmcraid_slave_alloc,
+	.slave_configure = pmcraid_slave_configure,
+	.slave_destroy = pmcraid_slave_destroy,
+	.change_queue_depth = pmcraid_change_queue_depth,
+	.change_queue_type  = pmcraid_change_queue_type,
+	.can_queue = PMCRAID_MAX_IO_CMD,
+	.this_id = -1,
+	.sg_tablesize = PMCRAID_MAX_IOADLS,
+	.max_sectors = PMCRAID_IOA_MAX_SECTORS,
+	.cmd_per_lun = PMCRAID_MAX_CMD_PER_LUN,
+	.use_clustering = ENABLE_CLUSTERING,
+	.shost_attrs = pmcraid_host_attrs,
+	.proc_name = PMCRAID_DRIVER_NAME
+};
+
+/**
+ * pmcraid_isr_common - Common interrupt handler routine
+ *
+ * @pinstance: pointer to adapter instance
+ * @intrs: active interrupts (contents of ioa_host_interrupt register)
+ * @hrrq_id: Host RRQ index
+ *
+ * Return Value
+ *	none
+ */
+static void pmcraid_isr_common(
+	struct pmcraid_instance *pinstance,
+	u32 intrs,
+	int hrrq_id
+)
+{
+	u32 intrs_clear =
+		(intrs & INTRS_CRITICAL_OP_IN_PROGRESS) ? intrs
+							: INTRS_HRRQ_VALID;
+	iowrite32(intrs_clear,
+		  pinstance->int_regs.ioa_host_interrupt_clr_reg);
+	intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+
+	/* hrrq valid bit was set, schedule tasklet to handle the response */
+	if (intrs_clear == INTRS_HRRQ_VALID)
+		tasklet_schedule(&(pinstance->isr_tasklet[hrrq_id]));
+}
+
+/**
+ * pmcraid_isr  - implements interrupt handling routine
+ *
+ * @irq: interrupt vector number
+ * @dev_id: pointer hrrq_vector
+ *
+ * Return Value
+ *	 IRQ_HANDLED if interrupt is handled or IRQ_NONE if ignored
+ */
+static irqreturn_t pmcraid_isr(int irq, void *dev_id)
+{
+	struct pmcraid_isr_param *hrrq_vector;
+	struct pmcraid_instance *pinstance;
+	unsigned long lock_flags;
+	u32 intrs;
+
+	/* In legacy interrupt mode, where interrupts are shared across ISRs,
+	 * the current interrupt may not be from the IOA
+	 */
+	if (!dev_id) {
+		printk(KERN_INFO "%s(): NULL host pointer\n", __func__);
+		return IRQ_NONE;
+	}
+
+	hrrq_vector = (struct pmcraid_isr_param *)dev_id;
+	pinstance = hrrq_vector->drv_inst;
+
+	/* Acquire the lock (currently host_lock) while processing interrupts.
+	 * This interval is small as most of the response processing is done by
+	 * tasklet without the lock.
+	 */
+	spin_lock_irqsave(pinstance->host->host_lock, lock_flags);
+	intrs = pmcraid_read_interrupts(pinstance);
+
+	if (unlikely((intrs & PMCRAID_PCI_INTERRUPTS) == 0)) {
+		spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+		return IRQ_NONE;
+	}
+
+	/* Any error interrupt, including unit_check, initiates an IOA reset.
+	 * In case of a unit check, indicate to the reset sequence that the
+	 * IOA unit checked, so it can prepare for a dump during the reset
+	 * sequence.
+	 */
+	if (intrs & PMCRAID_ERROR_INTERRUPTS) {
+
+		if (intrs & INTRS_IOA_UNIT_CHECK)
+			pinstance->ioa_unit_check = 1;
+
+		iowrite32(intrs,
+			  pinstance->int_regs.ioa_host_interrupt_clr_reg);
+		pmcraid_err("ISR: error interrupts: %x initiating reset\n",
+			    intrs);
+		intrs = ioread32(pinstance->int_regs.ioa_host_interrupt_reg);
+		pmcraid_initiate_reset(pinstance);
+	} else {
+		pmcraid_isr_common(pinstance, intrs, hrrq_vector->hrrq_id);
+	}
+
+	spin_unlock_irqrestore(pinstance->host->host_lock, lock_flags);
+
+	return IRQ_HANDLED;
+}
+
+
+/**
+ * pmcraid_worker_function -  worker thread function
+ *
+ * @workp: pointer to struct work queue
+ *
+ * Return Value
+ *	 None
+ */
+
+static void pmcraid_worker_function(struct work_struct *workp)
+{
+	struct pmcraid_instance *pinstance;
+	struct pmcraid_resource_entry *res;
+	struct pmcraid_resource_entry *temp;
+	struct scsi_device *sdev;
+	unsigned long lock_flags;
+	unsigned long host_lock_flags;
+	u8 bus, target, lun;
+
+	pinstance = container_of(workp, struct pmcraid_instance, worker_q);
+	/* add resources only after host is added into system */
+	if (!atomic_read(&pinstance->expose_resources))
+		return;
+
+	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+	list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue) {
+
+		if (res->change_detected == RES_CHANGE_DEL && res->scsi_dev) {
+			sdev = res->scsi_dev;
+
+			/* host_lock must be held before calling
+			 * scsi_device_get
+			 */
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  host_lock_flags);
+			if (!scsi_device_get(sdev)) {
+				spin_unlock_irqrestore(
+						pinstance->host->host_lock,
+						host_lock_flags);
+				pmcraid_info("deleting %x from midlayer\n",
+					     res->cfg_entry.resource_address);
+				list_move_tail(&res->queue,
+						&pinstance->free_res_q);
+				spin_unlock_irqrestore(
+					&pinstance->resource_lock,
+					lock_flags);
+				scsi_remove_device(sdev);
+				scsi_device_put(sdev);
+				spin_lock_irqsave(&pinstance->resource_lock,
+						   lock_flags);
+				res->change_detected = 0;
+			} else {
+				spin_unlock_irqrestore(
+						pinstance->host->host_lock,
+						host_lock_flags);
+			}
+		}
+	}
+
+	list_for_each_entry(res, &pinstance->used_res_q, queue) {
+
+		if (res->change_detected == RES_CHANGE_ADD) {
+
+			if (!pmcraid_expose_resource(&res->cfg_entry))
+				continue;
+
+			if (RES_IS_VSET(res->cfg_entry)) {
+				bus = PMCRAID_VSET_BUS_ID;
+				target = res->cfg_entry.unique_flags1;
+				lun = PMCRAID_VSET_LUN_ID;
+			} else {
+				bus = PMCRAID_PHYS_BUS_ID;
+				target =
+				     RES_TARGET(
+					res->cfg_entry.resource_address);
+				lun = RES_LUN(res->cfg_entry.resource_address);
+			}
+
+			res->change_detected = 0;
+			spin_unlock_irqrestore(&pinstance->resource_lock,
+						lock_flags);
+			scsi_add_device(pinstance->host, bus, target, lun);
+			spin_lock_irqsave(&pinstance->resource_lock,
+					   lock_flags);
+		}
+	}
+
+	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+}
+
+/**
+ * pmcraid_tasklet_function - Tasklet function
+ *
+ * @instance: pointer to msix param structure
+ *
+ * Return Value
+ *	None
+ */
+void pmcraid_tasklet_function(unsigned long instance)
+{
+	struct pmcraid_isr_param *hrrq_vector;
+	struct pmcraid_instance *pinstance;
+	unsigned long hrrq_lock_flags;
+	unsigned long pending_lock_flags;
+	unsigned long host_lock_flags;
+	spinlock_t *lockp; /* hrrq buffer lock */
+	int id;
+	u32 intrs;
+	__le32 resp;
+
+	hrrq_vector = (struct pmcraid_isr_param *)instance;
+	pinstance = hrrq_vector->drv_inst;
+	id = hrrq_vector->hrrq_id;
+	lockp = &(pinstance->hrrq_lock[id]);
+	intrs = pmcraid_read_interrupts(pinstance);
+
+	/* If the interrupt was raised as part of IOA initialization, clear
+	 * and mask it. Delete the timer and wake up the reset engine to
+	 * proceed with the reset sequence.
+	 */
+	if (intrs & INTRS_TRANSITION_TO_OPERATIONAL) {
+		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+			pinstance->int_regs.ioa_host_interrupt_mask_reg);
+		iowrite32(INTRS_TRANSITION_TO_OPERATIONAL,
+			pinstance->int_regs.ioa_host_interrupt_clr_reg);
+
+		if (pinstance->reset_cmd != NULL) {
+			del_timer(&pinstance->reset_cmd->timer);
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  host_lock_flags);
+			pinstance->reset_cmd->cmd_done(pinstance->reset_cmd);
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       host_lock_flags);
+		}
+		return;
+	}
+
+	/* loop through each of the commands responded by IOA. Each HRRQ buf is
+	 * protected by its own lock. Traversals must be done within this lock
+	 * as there may be multiple tasklets running on multiple CPUs. Note
+	 * that the lock is held just for picking up the response handle and
+	 * manipulating hrrq_curr/toggle_bit values.
+	 */
+	spin_lock_irqsave(lockp, hrrq_lock_flags);
+
+	resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
+
+	while ((resp & HRRQ_TOGGLE_BIT) ==
+		pinstance->host_toggle_bit[id]) {
+
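+		/* the low bits of the response handle carry the toggle/valid
+		 * flags; the remaining bits (resp >> 2) index the completed
+		 * command block.
+		 */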
+		int cmd_index = resp >> 2;
+		struct pmcraid_cmd *cmd = NULL;
+
+		if (cmd_index < PMCRAID_MAX_CMD) {
+			cmd = pinstance->cmd_list[cmd_index];
+		} else {
+			/* In case of invalid response handle, initiate IOA
+			 * reset sequence.
+			 */
+			spin_unlock_irqrestore(lockp, hrrq_lock_flags);
+
+			pmcraid_err("Invalid response %d initiating reset\n",
+				    cmd_index);
+
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  host_lock_flags);
+			pmcraid_initiate_reset(pinstance);
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       host_lock_flags);
+
+			spin_lock_irqsave(lockp, hrrq_lock_flags);
+			break;
+		}
+
+		if (pinstance->hrrq_curr[id] < pinstance->hrrq_end[id]) {
+			pinstance->hrrq_curr[id]++;
+		} else {
+			pinstance->hrrq_curr[id] = pinstance->hrrq_start[id];
+			pinstance->host_toggle_bit[id] ^= 1u;
+		}
+
+		spin_unlock_irqrestore(lockp, hrrq_lock_flags);
+
+		spin_lock_irqsave(&pinstance->pending_pool_lock,
+				   pending_lock_flags);
+		list_del(&cmd->free_list);
+		spin_unlock_irqrestore(&pinstance->pending_pool_lock,
+					pending_lock_flags);
+		del_timer(&cmd->timer);
+		atomic_dec(&pinstance->outstanding_cmds);
+
+		if (cmd->cmd_done == pmcraid_ioa_reset) {
+			spin_lock_irqsave(pinstance->host->host_lock,
+					  host_lock_flags);
+			cmd->cmd_done(cmd);
+			spin_unlock_irqrestore(pinstance->host->host_lock,
+					       host_lock_flags);
+		} else if (cmd->cmd_done != NULL) {
+			cmd->cmd_done(cmd);
+		}
+		/* loop until we are done with all responses */
+		spin_lock_irqsave(lockp, hrrq_lock_flags);
+		resp = le32_to_cpu(*(pinstance->hrrq_curr[id]));
+	}
+
+	spin_unlock_irqrestore(lockp, hrrq_lock_flags);
+}
+
+/**
+ * pmcraid_unregister_interrupt_handler - de-register interrupts handlers
+ * @pinstance: pointer to adapter instance structure
+ *
+ * This routine un-registers the registered interrupt handler and
+ * also frees the irqs/vectors.
+ *
+ * Return Value
+ *	None
+ */
+static
+void pmcraid_unregister_interrupt_handler(struct pmcraid_instance *pinstance)
+{
+	free_irq(pinstance->pdev->irq, &(pinstance->hrrq_vector[0]));
+}
+
+/**
+ * pmcraid_register_interrupt_handler - registers interrupt handler
+ * @pinstance: pointer to per-adapter instance structure
+ *
+ * Return Value
+ *	0 on success, non-zero error code otherwise.
+ */
+static int
+pmcraid_register_interrupt_handler(struct pmcraid_instance *pinstance)
+{
+	struct pci_dev *pdev = pinstance->pdev;
+
+	pinstance->hrrq_vector[0].hrrq_id = 0;
+	pinstance->hrrq_vector[0].drv_inst = pinstance;
+	pinstance->hrrq_vector[0].vector = 0;
+	pinstance->num_hrrq = 1;
+	return request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
+			   PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
+}
+
+/**
+ * pmcraid_release_cmd_blocks - release buffers allocated for command blocks
+ * @pinstance: per adapter instance structure pointer
+ * @max_index: number of buffer blocks to release
+ *
+ * Return Value
+ *  None
+ */
+static void
+pmcraid_release_cmd_blocks(struct pmcraid_instance *pinstance, int max_index)
+{
+	int i;
+	for (i = 0; i < max_index; i++) {
+		kmem_cache_free(pinstance->cmd_cachep, pinstance->cmd_list[i]);
+		pinstance->cmd_list[i] = NULL;
+	}
+	kmem_cache_destroy(pinstance->cmd_cachep);
+	pinstance->cmd_cachep = NULL;
+}
+
+/**
+ * pmcraid_release_control_blocks - releases buffers allocated for control blocks
+ * @pinstance: pointer to per adapter instance structure
+ * @max_index: number of buffers (from 0 onwards) to release
+ *
+ * This function assumes that the command blocks to which these control
+ * blocks are linked have not yet been released.
+ *
+ * Return Value
+ *	 None
+ */
+static void
+pmcraid_release_control_blocks(
+	struct pmcraid_instance *pinstance,
+	int max_index
+)
+{
+	int i;
+
+	if (pinstance->control_pool == NULL)
+		return;
+
+	for (i = 0; i < max_index; i++) {
+		pci_pool_free(pinstance->control_pool,
+			      pinstance->cmd_list[i]->ioa_cb,
+			      pinstance->cmd_list[i]->ioa_cb_bus_addr);
+		pinstance->cmd_list[i]->ioa_cb = NULL;
+		pinstance->cmd_list[i]->ioa_cb_bus_addr = 0;
+	}
+	pci_pool_destroy(pinstance->control_pool);
+	pinstance->control_pool = NULL;
+}
+
+/**
+ * pmcraid_allocate_cmd_blocks - allocate memory for cmd block structures
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Allocates memory for command blocks using kernel slab allocator.
+ *
+ * Return Value
+ *	0 in case of success; -ENOMEM in case of failure
+ */
+static int __devinit
+pmcraid_allocate_cmd_blocks(struct pmcraid_instance *pinstance)
+{
+	int i;
+
+	sprintf(pinstance->cmd_pool_name, "pmcraid_cmd_pool_%d",
+		pinstance->host->unique_id);
+
+
+	pinstance->cmd_cachep = kmem_cache_create(
+					pinstance->cmd_pool_name,
+					sizeof(struct pmcraid_cmd), 0,
+					SLAB_HWCACHE_ALIGN, NULL);
+	if (!pinstance->cmd_cachep)
+		return -ENOMEM;
+
+	for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+		pinstance->cmd_list[i] =
+			kmem_cache_alloc(pinstance->cmd_cachep, GFP_KERNEL);
+		if (!pinstance->cmd_list[i]) {
+			pmcraid_release_cmd_blocks(pinstance, i);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+/**
+ * pmcraid_allocate_control_blocks - allocates memory control blocks
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * This function allocates PCI memory for DMAable buffers like IOARCB, IOADLs
+ * and IOASAs. This is called after command blocks are already allocated.
+ *
+ * Return Value
+ *  0 in case it can allocate all control blocks, otherwise -ENOMEM
+ */
+static int __devinit
+pmcraid_allocate_control_blocks(struct pmcraid_instance *pinstance)
+{
+	int i;
+
+	sprintf(pinstance->ctl_pool_name, "pmcraid_control_pool_%d",
+		pinstance->host->unique_id);
+
+	pinstance->control_pool =
+		pci_pool_create(pinstance->ctl_pool_name,
+				pinstance->pdev,
+				sizeof(struct pmcraid_control_block),
+				PMCRAID_IOARCB_ALIGNMENT, 0);
+
+	if (!pinstance->control_pool)
+		return -ENOMEM;
+
+	for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+		pinstance->cmd_list[i]->ioa_cb =
+			pci_pool_alloc(
+				pinstance->control_pool,
+				GFP_KERNEL,
+				&(pinstance->cmd_list[i]->ioa_cb_bus_addr));
+
+		if (!pinstance->cmd_list[i]->ioa_cb) {
+			pmcraid_release_control_blocks(pinstance, i);
+			return -ENOMEM;
+		}
+		memset(pinstance->cmd_list[i]->ioa_cb, 0,
+			sizeof(struct pmcraid_control_block));
+	}
+	return 0;
+}
+
+/**
+ * pmcraid_release_host_rrqs - release memory allocated for hrrq buffer(s)
+ * @pinstance: pointer to per adapter instance structure
+ * @maxindex: size of hrrq buffer pointer array
+ *
+ * Return Value
+ *	None
+ */
+static void
+pmcraid_release_host_rrqs(struct pmcraid_instance *pinstance, int maxindex)
+{
+	int i;
+	for (i = 0; i < maxindex; i++) {
+
+		pci_free_consistent(pinstance->pdev,
+				    HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD,
+				    pinstance->hrrq_start[i],
+				    pinstance->hrrq_start_bus_addr[i]);
+
+		/* reset pointers and toggle bit to zeros */
+		pinstance->hrrq_start[i] = NULL;
+		pinstance->hrrq_start_bus_addr[i] = 0;
+		pinstance->host_toggle_bit[i] = 0;
+	}
+}
+
+/**
+ * pmcraid_allocate_host_rrqs - Allocate and initialize host RRQ buffers
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value
+ *	0 if hrrq buffers are allocated, -ENOMEM otherwise.
+ */
+static int __devinit
+pmcraid_allocate_host_rrqs(struct pmcraid_instance *pinstance)
+{
+	int i;
+	int buf_count = PMCRAID_MAX_CMD / pinstance->num_hrrq;
+
+	for (i = 0; i < pinstance->num_hrrq; i++) {
+		int buffer_size = HRRQ_ENTRY_SIZE * buf_count;
+
+		pinstance->hrrq_start[i] =
+			pci_alloc_consistent(
+					pinstance->pdev,
+					buffer_size,
+					&(pinstance->hrrq_start_bus_addr[i]));
+
+		if (pinstance->hrrq_start[i] == 0) {
+			pmcraid_err("could not allocate host rrq: %d\n", i);
+			pmcraid_release_host_rrqs(pinstance, i);
+			return -ENOMEM;
+		}
+
+		memset(pinstance->hrrq_start[i], 0, buffer_size);
+		pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
+		pinstance->hrrq_end[i] =
+			pinstance->hrrq_start[i] + buf_count - 1;
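+		/* the host expects the first pass of responses to have the
+		 * toggle bit set; it flips this expectation each time the
+		 * queue wraps (see pmcraid_tasklet_function).
+		 */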
+		pinstance->host_toggle_bit[i] = 1;
+		spin_lock_init(&pinstance->hrrq_lock[i]);
+	}
+	return 0;
+}
+
+/**
+ * pmcraid_release_hcams - release HCAM buffers
+ *
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return value
+ *  none
+ */
+static void pmcraid_release_hcams(struct pmcraid_instance *pinstance)
+{
+	if (pinstance->ccn.msg != NULL) {
+		pci_free_consistent(pinstance->pdev,
+				    PMCRAID_AEN_HDR_SIZE +
+				    sizeof(struct pmcraid_hcam_ccn),
+				    pinstance->ccn.msg,
+				    pinstance->ccn.baddr);
+
+		pinstance->ccn.msg = NULL;
+		pinstance->ccn.hcam = NULL;
+		pinstance->ccn.baddr = 0;
+	}
+
+	if (pinstance->ldn.msg != NULL) {
+		pci_free_consistent(pinstance->pdev,
+				    PMCRAID_AEN_HDR_SIZE +
+				    sizeof(struct pmcraid_hcam_ldn),
+				    pinstance->ldn.msg,
+				    pinstance->ldn.baddr);
+
+		pinstance->ldn.msg = NULL;
+		pinstance->ldn.hcam = NULL;
+		pinstance->ldn.baddr = 0;
+	}
+}
+
+/**
+ * pmcraid_allocate_hcams - allocates HCAM buffers
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return Value:
+ *   0 in case of successful allocation, non-zero otherwise
+ */
+static int pmcraid_allocate_hcams(struct pmcraid_instance *pinstance)
+{
+	pinstance->ccn.msg = pci_alloc_consistent(
+					pinstance->pdev,
+					PMCRAID_AEN_HDR_SIZE +
+					sizeof(struct pmcraid_hcam_ccn),
+					&(pinstance->ccn.baddr));
+
+	pinstance->ldn.msg = pci_alloc_consistent(
+					pinstance->pdev,
+					PMCRAID_AEN_HDR_SIZE +
+					sizeof(struct pmcraid_hcam_ldn),
+					&(pinstance->ldn.baddr));
+
+	if (pinstance->ldn.msg == NULL || pinstance->ccn.msg == NULL) {
+		pmcraid_release_hcams(pinstance);
+	} else {
+		pinstance->ccn.hcam =
+			(void *)pinstance->ccn.msg + PMCRAID_AEN_HDR_SIZE;
+		pinstance->ldn.hcam =
+			(void *)pinstance->ldn.msg + PMCRAID_AEN_HDR_SIZE;
+
+		atomic_set(&pinstance->ccn.ignore, 0);
+		atomic_set(&pinstance->ldn.ignore, 0);
+	}
+
+	return (pinstance->ldn.msg == NULL) ? -ENOMEM : 0;
+}
+
+/**
+ * pmcraid_release_config_buffers - release config.table buffers
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return Value
+ *	 none
+ */
+static void pmcraid_release_config_buffers(struct pmcraid_instance *pinstance)
+{
+	if (pinstance->cfg_table != NULL &&
+	    pinstance->cfg_table_bus_addr != 0) {
+		pci_free_consistent(pinstance->pdev,
+				    sizeof(struct pmcraid_config_table),
+				    pinstance->cfg_table,
+				    pinstance->cfg_table_bus_addr);
+		pinstance->cfg_table = NULL;
+		pinstance->cfg_table_bus_addr = 0;
+	}
+
+	if (pinstance->res_entries != NULL) {
+		int i;
+
+		for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
+			list_del(&pinstance->res_entries[i].queue);
+		kfree(pinstance->res_entries);
+		pinstance->res_entries = NULL;
+	}
+
+	pmcraid_release_hcams(pinstance);
+}
+
+/**
+ * pmcraid_allocate_config_buffers - allocates DMAable memory for config table
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * Return Value
+ *	0 for successful allocation, -ENOMEM for any failure
+ */
+static int __devinit
+pmcraid_allocate_config_buffers(struct pmcraid_instance *pinstance)
+{
+	int i;
+
+	pinstance->res_entries =
+			kzalloc(sizeof(struct pmcraid_resource_entry) *
+				PMCRAID_MAX_RESOURCES, GFP_KERNEL);
+
+	if (NULL == pinstance->res_entries) {
+		pmcraid_err("failed to allocate memory for resource table\n");
+		return -ENOMEM;
+	}
+
+	for (i = 0; i < PMCRAID_MAX_RESOURCES; i++)
+		list_add_tail(&pinstance->res_entries[i].queue,
+			      &pinstance->free_res_q);
+
+	pinstance->cfg_table =
+		pci_alloc_consistent(pinstance->pdev,
+				     sizeof(struct pmcraid_config_table),
+				     &pinstance->cfg_table_bus_addr);
+
+	if (NULL == pinstance->cfg_table) {
+		pmcraid_err("couldn't alloc DMA memory for config table\n");
+		pmcraid_release_config_buffers(pinstance);
+		return -ENOMEM;
+	}
+
+	if (pmcraid_allocate_hcams(pinstance)) {
+		pmcraid_err("could not alloc DMA memory for HCAMS\n");
+		pmcraid_release_config_buffers(pinstance);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+/**
+ * pmcraid_init_tasklets - registers tasklets for response handling
+ *
+ * @pinstance: pointer adapter instance structure
+ *
+ * Return value
+ *	none
+ */
+static void pmcraid_init_tasklets(struct pmcraid_instance *pinstance)
+{
+	int i;
+	for (i = 0; i < pinstance->num_hrrq; i++)
+		tasklet_init(&pinstance->isr_tasklet[i],
+			     pmcraid_tasklet_function,
+			     (unsigned long)&pinstance->hrrq_vector[i]);
+}
+
+/**
+ * pmcraid_kill_tasklets - destroys tasklets registered for response handling
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ *	none
+ */
+static void pmcraid_kill_tasklets(struct pmcraid_instance *pinstance)
+{
+	int i;
+	for (i = 0; i < pinstance->num_hrrq; i++)
+		tasklet_kill(&pinstance->isr_tasklet[i]);
+}
+
+/**
+ * pmcraid_init_buffers - allocates memory and initializes various structures
+ * @pinstance: pointer to per adapter instance structure
+ *
+ * This routine pre-allocates memory based on the type of block as below:
+ * cmdblocks(PMCRAID_MAX_CMD): kernel memory using kernel's slab_allocator,
+ * IOARCBs(PMCRAID_MAX_CMD)  : DMAable memory, using pci pool allocator
+ * config-table entries      : DMAable memory using pci_alloc_consistent
+ * HostRRQs                  : DMAable memory, using pci_alloc_consistent
+ *
+ * Return Value
+ *	 0 in case all of the blocks are allocated, -ENOMEM otherwise.
+ */
+static int __devinit pmcraid_init_buffers(struct pmcraid_instance *pinstance)
+{
+	int i;
+
+	if (pmcraid_allocate_host_rrqs(pinstance)) {
+		pmcraid_err("couldn't allocate memory for %d host rrqs\n",
+			     pinstance->num_hrrq);
+		return -ENOMEM;
+	}
+
+	if (pmcraid_allocate_config_buffers(pinstance)) {
+		pmcraid_err("couldn't allocate memory for config buffers\n");
+		pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+		return -ENOMEM;
+	}
+
+	if (pmcraid_allocate_cmd_blocks(pinstance)) {
+		pmcraid_err("couldn't allocate memory for cmd blocks\n");
+		pmcraid_release_config_buffers(pinstance);
+		pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+		return -ENOMEM;
+	}
+
+	if (pmcraid_allocate_control_blocks(pinstance)) {
+		pmcraid_err("couldn't allocate memory for control blocks\n");
+		pmcraid_release_config_buffers(pinstance);
+		pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
+		pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+		return -ENOMEM;
+	}
+
+	/* Initialize all the command blocks and add them to free pool. No
+	 * need to lock (free_pool_lock) as this is done in initialization
+	 * itself
+	 */
+	for (i = 0; i < PMCRAID_MAX_CMD; i++) {
+		struct pmcraid_cmd *cmdp = pinstance->cmd_list[i];
+		pmcraid_init_cmdblk(cmdp, i);
+		cmdp->drv_inst = pinstance;
+		list_add_tail(&cmdp->free_list, &pinstance->free_cmd_pool);
+	}
+
+	return 0;
+}
+
+/**
+ * pmcraid_reinit_buffers - resets various buffer pointers
+ * @pinstance: pointer to adapter instance
+ * Return value
+ * 	none
+ */
+static void pmcraid_reinit_buffers(struct pmcraid_instance *pinstance)
+{
+	int i;
+	int buffer_size = HRRQ_ENTRY_SIZE * PMCRAID_MAX_CMD;
+
+	for (i = 0; i < pinstance->num_hrrq; i++) {
+		memset(pinstance->hrrq_start[i], 0, buffer_size);
+		pinstance->hrrq_curr[i] = pinstance->hrrq_start[i];
+		pinstance->hrrq_end[i] =
+			pinstance->hrrq_start[i] + PMCRAID_MAX_CMD - 1;
+		pinstance->host_toggle_bit[i] = 1;
+	}
+}
+
+/**
+ * pmcraid_init_instance - initialize per instance data structure
+ * @pdev: pointer to pci device structure
+ * @host: pointer to Scsi_Host structure
+ * @mapped_pci_addr: memory mapped IOA configuration registers
+ *
+ * Return Value
+ *	 0 on success, non-zero in case of any failure
+ */
+static int __devinit pmcraid_init_instance(
+	struct pci_dev *pdev,
+	struct Scsi_Host *host,
+	void __iomem *mapped_pci_addr
+)
+{
+	struct pmcraid_instance *pinstance =
+		(struct pmcraid_instance *)host->hostdata;
+
+	pinstance->host = host;
+	pinstance->pdev = pdev;
+
+	/* Initialize register addresses */
+	pinstance->mapped_dma_addr = mapped_pci_addr;
+
+	/* Initialize chip-specific details */
+	{
+		struct pmcraid_chip_details *chip_cfg = pinstance->chip_cfg;
+		struct pmcraid_interrupts *pint_regs = &pinstance->int_regs;
+
+		pinstance->ioarrin = mapped_pci_addr + chip_cfg->ioarrin;
+
+		pint_regs->ioa_host_interrupt_reg =
+			mapped_pci_addr + chip_cfg->ioa_host_intr;
+		pint_regs->ioa_host_interrupt_clr_reg =
+			mapped_pci_addr + chip_cfg->ioa_host_intr_clr;
+		pint_regs->host_ioa_interrupt_reg =
+			mapped_pci_addr + chip_cfg->host_ioa_intr;
+		pint_regs->host_ioa_interrupt_clr_reg =
+			mapped_pci_addr + chip_cfg->host_ioa_intr_clr;
+
+		/* Current version of firmware exposes interrupt mask set
+		 * and mask clr registers through memory mapped bar0.
+		 */
+		pinstance->mailbox = mapped_pci_addr + chip_cfg->mailbox;
+		pinstance->ioa_status = mapped_pci_addr + chip_cfg->ioastatus;
+		pint_regs->ioa_host_interrupt_mask_reg =
+			mapped_pci_addr + chip_cfg->ioa_host_mask;
+		pint_regs->ioa_host_interrupt_mask_clr_reg =
+			mapped_pci_addr + chip_cfg->ioa_host_mask_clr;
+		pint_regs->global_interrupt_mask_reg =
+			mapped_pci_addr + chip_cfg->global_intr_mask;
+	};
+
+	pinstance->ioa_reset_attempts = 0;
+	init_waitqueue_head(&pinstance->reset_wait_q);
+
+	atomic_set(&pinstance->outstanding_cmds, 0);
+	atomic_set(&pinstance->expose_resources, 0);
+
+	INIT_LIST_HEAD(&pinstance->free_res_q);
+	INIT_LIST_HEAD(&pinstance->used_res_q);
+	INIT_LIST_HEAD(&pinstance->free_cmd_pool);
+	INIT_LIST_HEAD(&pinstance->pending_cmd_pool);
+
+	spin_lock_init(&pinstance->free_pool_lock);
+	spin_lock_init(&pinstance->pending_pool_lock);
+	spin_lock_init(&pinstance->resource_lock);
+	mutex_init(&pinstance->aen_queue_lock);
+
+	/* Work-queue (Shared) for deferred processing error handling */
+	INIT_WORK(&pinstance->worker_q, pmcraid_worker_function);
+
+	/* Initialize the default log_level */
+	pinstance->current_log_level = pmcraid_log_level;
+
+	/* Setup variables required for reset engine */
+	pinstance->ioa_state = IOA_STATE_UNKNOWN;
+	pinstance->reset_cmd = NULL;
+	return 0;
+}
+
+/**
+ * pmcraid_release_buffers - release per-adapter buffers allocated
+ *
+ * @pinstance: pointer to adapter soft state
+ *
+ * Return Value
+ * 	none
+ */
+static void pmcraid_release_buffers(struct pmcraid_instance *pinstance)
+{
+	pmcraid_release_config_buffers(pinstance);
+	pmcraid_release_control_blocks(pinstance, PMCRAID_MAX_CMD);
+	pmcraid_release_cmd_blocks(pinstance, PMCRAID_MAX_CMD);
+	pmcraid_release_host_rrqs(pinstance, pinstance->num_hrrq);
+
+}
+
+/**
+ * pmcraid_shutdown - shutdown adapter controller.
+ * @pdev: pci device struct
+ *
+ * Issues an adapter shutdown to the card and waits for its completion
+ *
+ * Return value
+ *	  none
+ */
+static void pmcraid_shutdown(struct pci_dev *pdev)
+{
+	struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+	pmcraid_reset_bringdown(pinstance);
+}
+
+
+/**
+ * pmcraid_get_minor - returns unused minor number from minor number bitmap
+ */
+static unsigned short pmcraid_get_minor(void)
+{
+	int minor;
+
+	minor = find_first_zero_bit(pmcraid_minor, sizeof(pmcraid_minor));
+	__set_bit(minor, pmcraid_minor);
+	return minor;
+}
+
+/**
+ * pmcraid_release_minor - releases given minor back to minor number bitmap
+ */
+static void pmcraid_release_minor(unsigned short minor)
+{
+	__clear_bit(minor, pmcraid_minor);
+}
+
+/**
+ * pmcraid_setup_chrdev - allocates a minor number and registers a char device
+ *
+ * @pinstance: pointer to adapter instance for which to register device
+ *
+ * Return value
+ *	0 in case of success, otherwise non-zero
+ */
+static int pmcraid_setup_chrdev(struct pmcraid_instance *pinstance)
+{
+	int minor;
+	int error;
+
+	minor = pmcraid_get_minor();
+	cdev_init(&pinstance->cdev, &pmcraid_fops);
+	pinstance->cdev.owner = THIS_MODULE;
+
+	error = cdev_add(&pinstance->cdev, MKDEV(pmcraid_major, minor), 1);
+
+	if (error)
+		pmcraid_release_minor(minor);
+	else
+		device_create(pmcraid_class, NULL, MKDEV(pmcraid_major, minor),
+			      NULL, "pmcsas%u", minor);
+	return error;
+}
+
+/**
+ * pmcraid_release_chrdev - unregisters per-adapter management interface
+ *
+ * @pinstance: pointer to adapter instance structure
+ *
+ * Return value
+ *  none
+ */
+static void pmcraid_release_chrdev(struct pmcraid_instance *pinstance)
+{
+	pmcraid_release_minor(MINOR(pinstance->cdev.dev));
+	device_destroy(pmcraid_class,
+		       MKDEV(pmcraid_major, MINOR(pinstance->cdev.dev)));
+	cdev_del(&pinstance->cdev);
+}
+
+/**
+ * pmcraid_remove - IOA hot plug remove entry point
+ * @pdev: pci device struct
+ *
+ * Return value
+ *	  none
+ */
+static void __devexit pmcraid_remove(struct pci_dev *pdev)
+{
+	struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+
+	/* remove the management interface (/dev file) for this device */
+	pmcraid_release_chrdev(pinstance);
+
+	/* remove host template from scsi midlayer */
+	scsi_remove_host(pinstance->host);
+
+	/* block requests from mid-layer */
+	scsi_block_requests(pinstance->host);
+
+	/* initiate adapter shutdown */
+	pmcraid_shutdown(pdev);
+
+	pmcraid_disable_interrupts(pinstance, ~0);
+	flush_scheduled_work();
+
+	pmcraid_kill_tasklets(pinstance);
+	pmcraid_unregister_interrupt_handler(pinstance);
+	pmcraid_release_buffers(pinstance);
+	iounmap(pinstance->mapped_dma_addr);
+	pci_release_regions(pdev);
+	scsi_host_put(pinstance->host);
+	pci_disable_device(pdev);
+
+	return;
+}
+
+#ifdef CONFIG_PM
+/**
+ * pmcraid_suspend - driver suspend entry point for power management
+ * @pdev:   PCI device structure
+ * @state:  PCI power state to suspend routine
+ *
+ * Return Value - 0 always
+ */
+static int pmcraid_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+
+	pmcraid_shutdown(pdev);
+	pmcraid_disable_interrupts(pinstance, ~0);
+	pmcraid_kill_tasklets(pinstance);
+	pci_set_drvdata(pinstance->pdev, pinstance);
+	pmcraid_unregister_interrupt_handler(pinstance);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+
+	return 0;
+}
+
+/**
+ * pmcraid_resume - driver resume entry point for PCI power management
+ * @pdev: PCI device structure
+ *
+ * Return Value - 0 in case of success. Error code in case of any failure
+ */
+static int pmcraid_resume(struct pci_dev *pdev)
+{
+	struct pmcraid_instance *pinstance = pci_get_drvdata(pdev);
+	struct Scsi_Host *host = pinstance->host;
+	int rc;
+	int hrrqs;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+
+	rc = pci_enable_device(pdev);
+
+	if (rc) {
+		pmcraid_err("pmcraid: Enable device failed\n");
+		return rc;
+	}
+
+	pci_set_master(pdev);
+
+	if ((sizeof(dma_addr_t) == 4) ||
+	     pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+	if (rc == 0)
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+
+	if (rc != 0) {
+		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
+		goto disable_device;
+	}
+
+	atomic_set(&pinstance->outstanding_cmds, 0);
+	hrrqs = pinstance->num_hrrq;
+	rc = pmcraid_register_interrupt_handler(pinstance);
+
+	if (rc) {
+		pmcraid_err("resume: couldn't register interrupt handlers\n");
+		rc = -ENODEV;
+		goto release_host;
+	}
+
+	pmcraid_init_tasklets(pinstance);
+	pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+	/* Start with hard reset sequence which brings up IOA to operational
+	 * state as well as completes the reset sequence.
+	 */
+	pinstance->ioa_hard_reset = 1;
+
+	/* Start IOA firmware initialization and bring card to Operational
+	 * state.
+	 */
+	if (pmcraid_reset_bringup(pinstance)) {
+		pmcraid_err("couldn't initialize IOA\n");
+		rc = -ENODEV;
+		goto release_tasklets;
+	}
+
+	return 0;
+
+release_tasklets:
+	pmcraid_kill_tasklets(pinstance);
+	pmcraid_unregister_interrupt_handler(pinstance);
+
+release_host:
+	scsi_host_put(host);
+
+disable_device:
+	pci_disable_device(pdev);
+
+	return rc;
+}
+
+#else
+
+#define pmcraid_suspend NULL
+#define pmcraid_resume  NULL
+
+#endif /* CONFIG_PM */
+
+/**
+ * pmcraid_complete_ioa_reset - Called by either timer or tasklet during
+ * 	                        completion of the ioa reset
+ * @cmd: pointer to reset command block
+ */
+static void pmcraid_complete_ioa_reset(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	unsigned long flags;
+
+	spin_lock_irqsave(pinstance->host->host_lock, flags);
+	pmcraid_ioa_reset(cmd);
+	spin_unlock_irqrestore(pinstance->host->host_lock, flags);
+	scsi_unblock_requests(pinstance->host);
+	schedule_work(&pinstance->worker_q);
+}
+
+/**
+ * pmcraid_set_supported_devs - sends SET SUPPORTED DEVICES to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ *  none
+ */
+static void pmcraid_set_supported_devs(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	void (*cmd_done) (struct pmcraid_cmd *) = pmcraid_complete_ioa_reset;
+
+	pmcraid_reinit_cmdblk(cmd);
+
+	ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	ioarcb->cdb[0] = PMCRAID_SET_SUPPORTED_DEVICES;
+	ioarcb->cdb[1] = ALL_DEVICES_SUPPORTED;
+
+	/* If this was called as part of resource table reinitialization due to
+	 * lost CCN, it is enough to return the command block back to free pool
+	 * as part of set_supported_devs completion function.
+	 */
+	if (cmd->drv_inst->reinit_cfg_table) {
+		cmd->drv_inst->reinit_cfg_table = 0;
+		cmd->release = 1;
+		cmd_done = pmcraid_reinit_cfgtable_done;
+	}
+
+	/* we will be done with the reset sequence after set supported devices,
+	 * so set up the done function to return the command block back to the
+	 * free pool
+	 */
+	pmcraid_send_cmd(cmd,
+			 cmd_done,
+			 PMCRAID_SET_SUP_DEV_TIMEOUT,
+			 pmcraid_timeout_handler);
+	return;
+}
+
+/**
+ * pmcraid_init_res_table - Initialize the resource table
+ * @cmd:  pointer to pmcraid command struct
+ *
+ * This function looks through the existing resource table, comparing
+ * it with the config table. This function will take care of old/new
+ * devices and schedule adding/removing them from the mid-layer
+ * as appropriate.
+ *
+ * Return value
+ *	 None
+ */
+static void pmcraid_init_res_table(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	struct pmcraid_resource_entry *res, *temp;
+	struct pmcraid_config_table_entry *cfgte;
+	unsigned long lock_flags;
+	int found, rc, i;
+	LIST_HEAD(old_res);
+
+	if (pinstance->cfg_table->flags & MICROCODE_UPDATE_REQUIRED)
+		dev_err(&pinstance->pdev->dev, "Require microcode download\n");
+
+	/* resource list is protected by pinstance->resource_lock.
+	 * init_res_table can be called from probe (user-thread) or runtime
+	 * reset (timer/tasklet)
+	 */
+	spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
+
+	list_for_each_entry_safe(res, temp, &pinstance->used_res_q, queue)
+		list_move_tail(&res->queue, &old_res);
+
+	for (i = 0; i < pinstance->cfg_table->num_entries; i++) {
+		cfgte = &pinstance->cfg_table->entries[i];
+
+		if (!pmcraid_expose_resource(cfgte))
+			continue;
+
+		found = 0;
+
+		/* If this entry was already detected and initialized */
+		list_for_each_entry_safe(res, temp, &old_res, queue) {
+
+			rc = memcmp(&res->cfg_entry.resource_address,
+				    &cfgte->resource_address,
+				    sizeof(cfgte->resource_address));
+			if (!rc) {
+				list_move_tail(&res->queue,
+						&pinstance->used_res_q);
+				found = 1;
+				break;
+			}
+		}
+
+		/* If this is a new entry, initialize it and add it to the queue */
+		if (!found) {
+
+			if (list_empty(&pinstance->free_res_q)) {
+				dev_err(&pinstance->pdev->dev,
+					"Too many devices attached\n");
+				break;
+			}
+
+			found = 1;
+			res = list_entry(pinstance->free_res_q.next,
+					 struct pmcraid_resource_entry, queue);
+
+			res->scsi_dev = NULL;
+			res->change_detected = RES_CHANGE_ADD;
+			res->reset_progress = 0;
+			list_move_tail(&res->queue, &pinstance->used_res_q);
+		}
+
+		/* copy new configuration table entry details into driver
+		 * maintained resource entry
+		 */
+		if (found) {
+			memcpy(&res->cfg_entry, cfgte,
+				sizeof(struct pmcraid_config_table_entry));
+			pmcraid_info("New res type:%x, vset:%x, addr:%x:\n",
+				 res->cfg_entry.resource_type,
+				 res->cfg_entry.unique_flags1,
+				 le32_to_cpu(res->cfg_entry.resource_address));
+		}
+	}
+
+	/* Detect any deleted entries, mark them for deletion from mid-layer */
+	list_for_each_entry_safe(res, temp, &old_res, queue) {
+
+		if (res->scsi_dev) {
+			res->change_detected = RES_CHANGE_DEL;
+			res->cfg_entry.resource_handle =
+				PMCRAID_INVALID_RES_HANDLE;
+			list_move_tail(&res->queue, &pinstance->used_res_q);
+		} else {
+			list_move_tail(&res->queue, &pinstance->free_res_q);
+		}
+	}
+
+	/* release the resource list lock */
+	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
+	pmcraid_set_supported_devs(cmd);
+}
+
+/**
+ * pmcraid_querycfg - Send a Query IOA Config to the adapter.
+ * @cmd: pointer pmcraid_cmd struct
+ *
+ * This function sends a Query IOA Configuration command to the adapter to
+ * retrieve the IOA configuration table.
+ *
+ * Return value:
+ *	none
+ */
+static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	int cfg_table_size = cpu_to_be32(sizeof(struct pmcraid_config_table));
+
+	ioarcb->request_type = REQ_TYPE_IOACMD;
+	ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+
+	ioarcb->cdb[0] = PMCRAID_QUERY_IOA_CONFIG;
+
+	/* firmware requires 4-byte length field, specified in big-endian format */
+	memcpy(&(ioarcb->cdb[10]), &cfg_table_size, sizeof(cfg_table_size));
+
+	/* Since the entire config table can be described by a single IOADL,
+	 * it can be part of the IOARCB itself
+	 */
+	ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+					offsetof(struct pmcraid_ioarcb,
+						add_data.u.ioadl[0]));
+	ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+	ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+	ioarcb->request_flags0 |= NO_LINK_DESCS;
+	ioarcb->data_transfer_length =
+		cpu_to_le32(sizeof(struct pmcraid_config_table));
+
+	ioadl = &(ioarcb->add_data.u.ioadl[0]);
+	ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+	ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
+	ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
+
+	pmcraid_send_cmd(cmd, pmcraid_init_res_table,
+			 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+}
+
+
+/**
+ * pmcraid_probe - PCI probe entry point for PMC MaxRAID controller driver
+ * @pdev: pointer to pci device structure
+ * @dev_id: pointer to device ids structure
+ *
+ * Return Value
+ *	returns 0 if the device is claimed and successfully configured.
+ *	returns non-zero error code in case of any failure
+ */
+static int __devinit pmcraid_probe(
+	struct pci_dev *pdev,
+	const struct pci_device_id *dev_id
+)
+{
+	struct pmcraid_instance *pinstance;
+	struct Scsi_Host *host;
+	void __iomem *mapped_pci_addr;
+	int rc = PCIBIOS_SUCCESSFUL;
+
+	if (atomic_read(&pmcraid_adapter_count) >= PMCRAID_MAX_ADAPTERS) {
+		pmcraid_err
+			("maximum number(%d) of supported adapters reached\n",
+			 atomic_read(&pmcraid_adapter_count));
+		return -ENOMEM;
+	}
+
+	atomic_inc(&pmcraid_adapter_count);
+	rc = pci_enable_device(pdev);
+
+	if (rc) {
+		dev_err(&pdev->dev, "Cannot enable adapter\n");
+		atomic_dec(&pmcraid_adapter_count);
+		return rc;
+	}
+
+	dev_info(&pdev->dev,
+		"Found new IOA(%x:%x), Total IOA count: %d\n",
+		 pdev->vendor, pdev->device,
+		 atomic_read(&pmcraid_adapter_count));
+
+	rc = pci_request_regions(pdev, PMCRAID_DRIVER_NAME);
+
+	if (rc < 0) {
+		dev_err(&pdev->dev,
+			"Couldn't register memory range of registers\n");
+		goto out_disable_device;
+	}
+
+	mapped_pci_addr = pci_iomap(pdev, 0, 0);
+
+	if (!mapped_pci_addr) {
+		dev_err(&pdev->dev, "Couldn't map PCI registers memory\n");
+		rc = -ENOMEM;
+		goto out_release_regions;
+	}
+
+	pci_set_master(pdev);
+
+	/* Firmware requires the system bus address of IOARCB to be within
+	 * 32-bit addressable range though it has 64-bit IOARRIN register.
+	 * However, firmware supports 64-bit streaming DMA buffers, whereas
+	 * coherent buffers are to be 32-bit. Since pci_alloc_consistent always
+	 * returns memory within 4GB (if not, change this logic), coherent
+	 * buffers are within firmware acceptable address ranges.
+	 */
+	if ((sizeof(dma_addr_t) == 4) ||
+	    pci_set_dma_mask(pdev, DMA_BIT_MASK(64)))
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
+	/* firmware expects 32-bit DMA addresses for IOARRIN register; set 32
+	 * bit mask for pci_alloc_consistent to return addresses within 4GB
+	 */
+	if (rc == 0)
+		rc = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
+
+	if (rc != 0) {
+		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
+		goto cleanup_nomem;
+	}
+
+	host = scsi_host_alloc(&pmcraid_host_template,
+				sizeof(struct pmcraid_instance));
+
+	if (!host) {
+		dev_err(&pdev->dev, "scsi_host_alloc failed!\n");
+		rc = -ENOMEM;
+		goto cleanup_nomem;
+	}
+
+	host->max_id = PMCRAID_MAX_NUM_TARGETS_PER_BUS;
+	host->max_lun = PMCRAID_MAX_NUM_LUNS_PER_TARGET;
+	host->unique_id = host->host_no;
+	host->max_channel = PMCRAID_MAX_BUS_TO_SCAN;
+	host->max_cmd_len = PMCRAID_MAX_CDB_LEN;
+
+	/* zero out entire instance structure */
+	pinstance = (struct pmcraid_instance *)host->hostdata;
+	memset(pinstance, 0, sizeof(*pinstance));
+
+	pinstance->chip_cfg =
+		(struct pmcraid_chip_details *)(dev_id->driver_data);
+
+	rc = pmcraid_init_instance(pdev, host, mapped_pci_addr);
+
+	if (rc < 0) {
+		dev_err(&pdev->dev, "failed to initialize adapter instance\n");
+		goto out_scsi_host_put;
+	}
+
+	pci_set_drvdata(pdev, pinstance);
+
+	/* Save PCI config-space for use following the reset */
+	rc = pci_save_state(pinstance->pdev);
+
+	if (rc != 0) {
+		dev_err(&pdev->dev, "Failed to save PCI config space\n");
+		goto out_scsi_host_put;
+	}
+
+	pmcraid_disable_interrupts(pinstance, ~0);
+
+	rc = pmcraid_register_interrupt_handler(pinstance);
+
+	if (rc) {
+		pmcraid_err("couldn't register interrupt handler\n");
+		goto out_scsi_host_put;
+	}
+
+	pmcraid_init_tasklets(pinstance);
+
+	/* allocate various buffers used by LLD */
+	rc = pmcraid_init_buffers(pinstance);
+
+	if (rc) {
+		pmcraid_err("couldn't allocate memory blocks\n");
+		goto out_unregister_isr;
+	}
+
+	/* check the reset type required */
+	pmcraid_reset_type(pinstance);
+
+	pmcraid_enable_interrupts(pinstance, PMCRAID_PCI_INTERRUPTS);
+
+	/* Start IOA firmware initialization and bring card to Operational
+	 * state.
+	 */
+	pmcraid_info("starting IOA initialization sequence\n");
+	if (pmcraid_reset_bringup(pinstance)) {
+		pmcraid_err("couldn't initialize IOA\n");
+		rc = 1;
+		goto out_release_bufs;
+	}
+
+	/* Add adapter instance into mid-layer list */
+	rc = scsi_add_host(pinstance->host, &pdev->dev);
+	if (rc != 0) {
+		pmcraid_err("couldn't add host into mid-layer: %d\n", rc);
+		goto out_release_bufs;
+	}
+
+	scsi_scan_host(pinstance->host);
+
+	rc = pmcraid_setup_chrdev(pinstance);
+
+	if (rc != 0) {
+		pmcraid_err("couldn't create mgmt interface, error: %x\n",
+			     rc);
+		goto out_remove_host;
+	}
+
+	/* Schedule worker thread to handle CCNs and take care of adding
+	 * devices to and removing them from the OS
+	 */
+	atomic_set(&pinstance->expose_resources, 1);
+	schedule_work(&pinstance->worker_q);
+	return rc;
+
+out_remove_host:
+	scsi_remove_host(host);
+
+out_release_bufs:
+	pmcraid_release_buffers(pinstance);
+
+out_unregister_isr:
+	pmcraid_kill_tasklets(pinstance);
+	pmcraid_unregister_interrupt_handler(pinstance);
+
+out_scsi_host_put:
+	scsi_host_put(host);
+
+cleanup_nomem:
+	iounmap(mapped_pci_addr);
+
+out_release_regions:
+	pci_release_regions(pdev);
+
+out_disable_device:
+	atomic_dec(&pmcraid_adapter_count);
+	pci_set_drvdata(pdev, NULL);
+	pci_disable_device(pdev);
+	return -ENODEV;
+}
+
+/*
+ * PCI driver structure of pmcraid driver
+ */
+static struct pci_driver pmcraid_driver = {
+	.name = PMCRAID_DRIVER_NAME,
+	.id_table = pmcraid_pci_table,
+	.probe = pmcraid_probe,
+	.remove = pmcraid_remove,
+	.suspend = pmcraid_suspend,
+	.resume = pmcraid_resume,
+	.shutdown = pmcraid_shutdown
+};
+
+
+/**
+ * pmcraid_init - module load entry point
+ */
+static int __init pmcraid_init(void)
+{
+	dev_t dev;
+	int error;
+
+	pmcraid_info("%s Device Driver version: %s %s\n",
+			 PMCRAID_DRIVER_NAME,
+			 PMCRAID_DRIVER_VERSION, PMCRAID_DRIVER_DATE);
+
+	error = alloc_chrdev_region(&dev, 0,
+				    PMCRAID_MAX_ADAPTERS,
+				    PMCRAID_DEVFILE);
+
+	if (error) {
+		pmcraid_err("failed to get a major number for adapters\n");
+		goto out_init;
+	}
+
+	pmcraid_major = MAJOR(dev);
+	pmcraid_class = class_create(THIS_MODULE, PMCRAID_DEVFILE);
+
+	if (IS_ERR(pmcraid_class)) {
+		error = PTR_ERR(pmcraid_class);
+		pmcraid_err("failed to register with sysfs, error = %x\n",
+			    error);
+		goto out_unreg_chrdev;
+	}
+
+
+	error = pmcraid_netlink_init();
+
+	if (error)
+		goto out_unreg_chrdev;
+
+	error = pci_register_driver(&pmcraid_driver);
+
+	if (error == 0)
+		goto out_init;
+
+	pmcraid_err("failed to register pmcraid driver, error = %x\n",
+		     error);
+	class_destroy(pmcraid_class);
+	pmcraid_netlink_release();
+
+out_unreg_chrdev:
+	unregister_chrdev_region(MKDEV(pmcraid_major, 0), PMCRAID_MAX_ADAPTERS);
+out_init:
+	return error;
+}
+
+/**
+ * pmcraid_exit - module unload entry point
+ */
+static void __exit pmcraid_exit(void)
+{
+	pmcraid_netlink_release();
+	class_destroy(pmcraid_class);
+	unregister_chrdev_region(MKDEV(pmcraid_major, 0),
+				 PMCRAID_MAX_ADAPTERS);
+	pci_unregister_driver(&pmcraid_driver);
+}
+
+module_init(pmcraid_init);
+module_exit(pmcraid_exit);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
new file mode 100644
index 0000000..614b3a7
--- /dev/null
+++ b/drivers/scsi/pmcraid.h
@@ -0,0 +1,1029 @@
+/*
+ * pmcraid.h -- PMC Sierra MaxRAID controller driver header file
+ *
+ * Copyright (C) 2008, 2009 PMC Sierra Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#ifndef _PMCRAID_H
+#define _PMCRAID_H
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/completion.h>
+#include <linux/list.h>
+#include <scsi/scsi.h>
+#include <linux/kref.h>
+#include <scsi/scsi_cmnd.h>
+#include <linux/cdev.h>
+#include <net/netlink.h>
+#include <net/genetlink.h>
+#include <linux/connector.h>
+/*
+ * Driver name   : string representing the driver name
+ * Device file   : /dev file to be used for management interfaces
+ * Driver version: version string in major_version.minor_version.patch format
+ * Driver date   : date information in "Mon dd yyyy" format
+ */
+#define PMCRAID_DRIVER_NAME       	"PMC MaxRAID"
+#define PMCRAID_DEVFILE			"pmcsas"
+#define PMCRAID_DRIVER_VERSION    	"1.0.2"
+#define PMCRAID_DRIVER_DATE       	__DATE__
+
+/* Maximum number of adapters supported by current version of the driver */
+#define PMCRAID_MAX_ADAPTERS		1024
+
+/* Bit definitions as per firmware, bit position [0][1][2].....[31] */
+#define PMC_BIT8(n)          (1 << (7-n))
+#define PMC_BIT16(n)         (1 << (15-n))
+#define PMC_BIT32(n)         (1 << (31-n))
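+
+/* Illustrative expansions only (not additional definitions): with this
+ * MSB-first bit numbering,
+ *	PMC_BIT8(0)   == 0x80
+ *	PMC_BIT32(0)  == 0x80000000
+ *	PMC_BIT32(31) == 0x00000001
+ */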
+
+/* PMC PCI vendor ID and device ID values */
+#define PCI_VENDOR_ID_PMC			0x11F8
+#define PCI_DEVICE_ID_PMC_MAXRAID		0x5220
+
+/*
+ * MAX_CMD          : maximum commands that can be outstanding with IOA
+ * MAX_IO_CMD       : command blocks available for IO commands
+ * MAX_HCAM_CMD     : command blocks available for HCAMs
+ * MAX_INTERNAL_CMD : command blocks available for internal commands like reset
+ */
+#define PMCRAID_MAX_CMD				1024
+#define PMCRAID_MAX_IO_CMD			1020
+#define PMCRAID_MAX_HCAM_CMD			2
+#define PMCRAID_MAX_INTERNAL_CMD		2
+
+/* MAX_IOADLS       : max number of scatter-gather lists supported by IOA
+ * IOADLS_INTERNAL  : number of ioadls included as part of IOARCB.
+ * IOADLS_EXTERNAL  : number of ioadls allocated external to IOARCB
+ */
+#define PMCRAID_IOADLS_INTERNAL			 27
+#define PMCRAID_IOADLS_EXTERNAL			 37
+#define PMCRAID_MAX_IOADLS			 PMCRAID_IOADLS_INTERNAL
+
+/* HRRQ_ENTRY_SIZE  : size of a single HRRQ entry
+ * IOARCB_ALIGNMENT : alignment required for IOARCB
+ * IOADL_ALIGNMENT  : alignment requirement for IOADLs
+ * MSIX_VECTORS     : number of MSIX vectors supported
+ */
+#define HRRQ_ENTRY_SIZE                          sizeof(__le32)
+#define PMCRAID_IOARCB_ALIGNMENT                 32
+#define PMCRAID_IOADL_ALIGNMENT                  16
+#define PMCRAID_IOASA_ALIGNMENT                  4
+#define PMCRAID_NUM_MSIX_VECTORS                 1
+
+/* various other limits */
+#define PMCRAID_VENDOR_ID_LEN           	 8
+#define PMCRAID_PRODUCT_ID_LEN            	 16
+#define PMCRAID_SERIAL_NUM_LEN          	 8
+#define PMCRAID_LUN_LEN			         8
+#define PMCRAID_MAX_CDB_LEN                      16
+#define PMCRAID_DEVICE_ID_LEN			 8
+#define PMCRAID_SENSE_DATA_LEN			 256
+#define PMCRAID_ADD_CMD_PARAM_LEN		 48
+
+#define PMCRAID_MAX_BUS_TO_SCAN                  1
+#define PMCRAID_MAX_NUM_TARGETS_PER_BUS          256
+#define PMCRAID_MAX_NUM_LUNS_PER_TARGET          8
+
+/* IOA bus/target/lun number of IOA resources */
+#define PMCRAID_IOA_BUS_ID                       0xfe
+#define PMCRAID_IOA_TARGET_ID                    0xff
+#define PMCRAID_IOA_LUN_ID                       0xff
+#define PMCRAID_VSET_BUS_ID                      0x1
+#define PMCRAID_VSET_LUN_ID                      0x0
+#define PMCRAID_PHYS_BUS_ID                      0x0
+#define PMCRAID_VIRTUAL_ENCL_BUS_ID              0x8
+#define PMCRAID_MAX_VSET_TARGETS                 240
+#define PMCRAID_MAX_VSET_LUNS_PER_TARGET         8
+
+#define PMCRAID_IOA_MAX_SECTORS                  32767
+#define PMCRAID_VSET_MAX_SECTORS                 512
+#define PMCRAID_MAX_CMD_PER_LUN                  254
+
+/* Number of configuration table entries (resources) */
+#define PMCRAID_MAX_NUM_OF_VSETS                 240
+
+/* Todo : Check max limit for Phase 1 */
+#define PMCRAID_MAX_NUM_OF_PHY_DEVS              256
+
+/* MAX_NUM_OF_DEVS includes 1 FP, 1 Dummy Enclosure device */
+#define PMCRAID_MAX_NUM_OF_DEVS                        \
+    (PMCRAID_MAX_NUM_OF_VSETS + PMCRAID_MAX_NUM_OF_PHY_DEVS + 2)
+
+#define PMCRAID_MAX_RESOURCES                    PMCRAID_MAX_NUM_OF_DEVS
+
+/* Adapter Commands used by driver */
+#define PMCRAID_QUERY_RESOURCE_STATE             0xC2
+#define PMCRAID_RESET_DEVICE                     0xC3
+/* options to select reset target */
+#define ENABLE_RESET_MODIFIER                    0x80
+#define RESET_DEVICE_LUN                         0x40
+#define RESET_DEVICE_TARGET                      0x20
+#define RESET_DEVICE_BUS                         0x10
+
+#define PMCRAID_IDENTIFY_HRRQ                    0xC4
+#define PMCRAID_QUERY_IOA_CONFIG                 0xC5
+#define PMCRAID_QUERY_CMD_STATUS		 0xCB
+#define PMCRAID_ABORT_CMD                        0xC7
+
+/* CANCEL ALL command, provides option for setting SYNC_COMPLETE
+ * on the target resources for which commands got cancelled
+ */
+#define PMCRAID_CANCEL_ALL_REQUESTS		 0xCE
+#define PMCRAID_SYNC_COMPLETE_AFTER_CANCEL       PMC_BIT8(0)
+
+/* HCAM command and types of HCAM supported by IOA */
+#define PMCRAID_HOST_CONTROLLED_ASYNC            0xCF
+#define PMCRAID_HCAM_CODE_CONFIG_CHANGE          0x01
+#define PMCRAID_HCAM_CODE_LOG_DATA               0x02
+
+/* IOA shutdown command and various shutdown types */
+#define PMCRAID_IOA_SHUTDOWN                     0xF7
+#define PMCRAID_SHUTDOWN_NORMAL                  0x00
+#define PMCRAID_SHUTDOWN_PREPARE_FOR_NORMAL      0x40
+#define PMCRAID_SHUTDOWN_NONE                    0x100
+#define PMCRAID_SHUTDOWN_ABBREV                  0x80
+
+/* SET SUPPORTED DEVICES command and the option to select all the
+ * devices to be supported
+ */
+#define PMCRAID_SET_SUPPORTED_DEVICES            0xFB
+#define ALL_DEVICES_SUPPORTED                    PMC_BIT8(0)
+
+/* This option is used with SCSI WRITE_BUFFER command */
+#define PMCRAID_WR_BUF_DOWNLOAD_AND_SAVE         0x05
+
+/* IOASC Codes used by driver */
+#define PMCRAID_IOASC_SENSE_MASK                 0xFFFFFF00
+#define PMCRAID_IOASC_SENSE_KEY(ioasc)           ((ioasc) >> 24)
+#define PMCRAID_IOASC_SENSE_CODE(ioasc)          (((ioasc) & 0x00ff0000) >> 16)
+#define PMCRAID_IOASC_SENSE_QUAL(ioasc)          (((ioasc) & 0x0000ff00) >> 8)
+#define PMCRAID_IOASC_SENSE_STATUS(ioasc)        ((ioasc) & 0x000000ff)
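+
+/* For illustration only: decoding the unit-attention IOASC 0x06290000
+ * ("bus was reset") with the macros above gives
+ *	PMCRAID_IOASC_SENSE_KEY(0x06290000)  == 0x06
+ *	PMCRAID_IOASC_SENSE_CODE(0x06290000) == 0x29
+ *	PMCRAID_IOASC_SENSE_QUAL(0x06290000) == 0x00
+ * i.e. sense key, ASC and ASCQ occupy the top three bytes.
+ */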
+
+#define PMCRAID_IOASC_GOOD_COMPLETION			0x00000000
+#define PMCRAID_IOASC_NR_INIT_CMD_REQUIRED		0x02040200
+#define PMCRAID_IOASC_NR_IOA_RESET_REQUIRED		0x02048000
+#define PMCRAID_IOASC_NR_SYNC_REQUIRED			0x023F0000
+#define PMCRAID_IOASC_ME_READ_ERROR_NO_REALLOC		0x03110C00
+#define PMCRAID_IOASC_HW_CANNOT_COMMUNICATE		0x04050000
+#define PMCRAID_IOASC_HW_DEVICE_TIMEOUT			0x04080100
+#define PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR	0x04448500
+#define PMCRAID_IOASC_HW_IOA_RESET_REQUIRED		0x04448600
+#define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE        0x05250000
+#define PMCRAID_IOASC_AC_TERMINATED_BY_HOST		0x0B5A0000
+#define PMCRAID_IOASC_UA_BUS_WAS_RESET           	0x06290000
+#define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER  	0x06298000
+
+/* Driver defined IOASCs */
+#define PMCRAID_IOASC_IOA_WAS_RESET              	0x10000001
+#define PMCRAID_IOASC_PCI_ACCESS_ERROR           	0x10000002
+
+/* Various timeout values used by the driver. Plain numeric values are in
+ * milliseconds, while HZ-based values are in jiffies. If any of these are
+ * chip specific, move them to pmcraid_chip_details structure.
+ */
+#define PMCRAID_PCI_DEASSERT_TIMEOUT		2000
+#define PMCRAID_BIST_TIMEOUT			2000
+#define PMCRAID_AENWAIT_TIMEOUT			5000
+#define PMCRAID_TRANSOP_TIMEOUT			60000
+
+#define PMCRAID_RESET_TIMEOUT			(2 * HZ)
+#define PMCRAID_CHECK_FOR_RESET_TIMEOUT		((HZ / 10))
+#define PMCRAID_VSET_IO_TIMEOUT			(60 * HZ)
+#define PMCRAID_INTERNAL_TIMEOUT		(60 * HZ)
+#define PMCRAID_SHUTDOWN_TIMEOUT		(150 * HZ)
+#define PMCRAID_RESET_BUS_TIMEOUT		(60 * HZ)
+#define PMCRAID_RESET_HOST_TIMEOUT		(150 * HZ)
+#define PMCRAID_REQUEST_SENSE_TIMEOUT		(30 * HZ)
+#define PMCRAID_SET_SUP_DEV_TIMEOUT		(2 * 60 * HZ)
+
+/* structure to represent a scatter-gather element (IOADL descriptor) */
+struct pmcraid_ioadl_desc {
+	__le64 address;
+	__le32 data_len;
+	__u8  reserved[3];
+	__u8  flags;
+} __attribute__((packed, aligned(PMCRAID_IOADL_ALIGNMENT)));
+
+/* pmcraid_ioadl_desc.flags values */
+#define IOADL_FLAGS_CHAINED      PMC_BIT8(0)
+#define IOADL_FLAGS_LAST_DESC    PMC_BIT8(1)
+#define IOADL_FLAGS_READ_LAST    PMC_BIT8(1)
+#define IOADL_FLAGS_WRITE_LAST   PMC_BIT8(1)
+
+
+/* additional IOARCB data which can be CDB or additional request parameters
+ * or list of IOADLs. Firmware supports a maximum of 512 bytes for the IOARCB,
+ * hence the number of internal IOADLs is limited to 27. If more than 27 are
+ * needed, they are used in chained form.
+ */
+struct pmcraid_ioarcb_add_data {
+	union {
+		struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_INTERNAL];
+		__u8 add_cmd_params[PMCRAID_ADD_CMD_PARAM_LEN];
+	} u;
+};
+
+/*
+ * IOA Request Control Block
+ */
+struct pmcraid_ioarcb {
+	__le64 ioarcb_bus_addr;
+	__le32 resource_handle;
+	__le32 response_handle;
+	__le64 ioadl_bus_addr;
+	__le32 ioadl_length;
+	__le32 data_transfer_length;
+	__le64 ioasa_bus_addr;
+	__le16 ioasa_len;
+	__le16 cmd_timeout;
+	__le16 add_cmd_param_offset;
+	__le16 add_cmd_param_length;
+	__le32 reserved1[2];
+	__le32 reserved2;
+	__u8  request_type;
+	__u8  request_flags0;
+	__u8  request_flags1;
+	__u8  hrrq_id;
+	__u8  cdb[PMCRAID_MAX_CDB_LEN];
+	struct pmcraid_ioarcb_add_data add_data;
+} __attribute__((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
+
+/* well known resource handle values */
+#define PMCRAID_IOA_RES_HANDLE        0xffffffff
+#define PMCRAID_INVALID_RES_HANDLE    0
+
+/* pmcraid_ioarcb.request_type values */
+#define REQ_TYPE_SCSI                 0x00
+#define REQ_TYPE_IOACMD               0x01
+#define REQ_TYPE_HCAM                 0x02
+
+/* pmcraid_ioarcb.request_flags0 values */
+#define TRANSFER_DIR_WRITE            PMC_BIT8(0)
+#define INHIBIT_UL_CHECK              PMC_BIT8(2)
+#define SYNC_OVERRIDE                 PMC_BIT8(3)
+#define SYNC_COMPLETE                 PMC_BIT8(4)
+#define NO_LINK_DESCS                 PMC_BIT8(5)
+
+/* pmcraid_ioarcb.request_flags1 values */
+#define DELAY_AFTER_RESET             PMC_BIT8(0)
+#define TASK_TAG_SIMPLE               0x10
+#define TASK_TAG_ORDERED              0x20
+#define TASK_TAG_QUEUE_HEAD           0x30
+
+/* toggle bit offset in response handle */
+#define HRRQ_TOGGLE_BIT               0x01
+#define HRRQ_RESPONSE_BIT             0x02
+
+/* IOA Status Area */
+struct pmcraid_ioasa_vset {
+	__le32 failing_lba_hi;
+	__le32 failing_lba_lo;
+	__le32 reserved;
+} __attribute__((packed, aligned(4)));
+
+struct pmcraid_ioasa {
+	__le32 ioasc;
+	__le16 returned_status_length;
+	__le16 available_status_length;
+	__le32 residual_data_length;
+	__le32 ilid;
+	__le32 fd_ioasc;
+	__le32 fd_res_address;
+	__le32 fd_res_handle;
+	__le32 reserved;
+
+	/* resource specific sense information */
+	union {
+		struct pmcraid_ioasa_vset vset;
+	} u;
+
+	/* IOA autosense data */
+	__le16 auto_sense_length;
+	__le16 error_data_length;
+	__u8  sense_data[PMCRAID_SENSE_DATA_LEN];
+} __attribute__((packed, aligned(4)));
+
+#define PMCRAID_DRIVER_ILID           0xffffffff
+
+/* Config Table Entry per Resource */
+struct pmcraid_config_table_entry {
+	__u8  resource_type;
+	__u8  bus_protocol;
+	__le16 array_id;
+	__u8  common_flags0;
+	__u8  common_flags1;
+	__u8  unique_flags0;
+	__u8  unique_flags1;	/*also used as vset target_id */
+	__le32 resource_handle;
+	__le32 resource_address;
+	__u8  device_id[PMCRAID_DEVICE_ID_LEN];
+	__u8  lun[PMCRAID_LUN_LEN];
+} __attribute__((packed, aligned(4)));
+
+/* resource types (config_table_entry.resource_type values) */
+#define RES_TYPE_AF_DASD     0x00
+#define RES_TYPE_GSCSI       0x01
+#define RES_TYPE_VSET        0x02
+#define RES_TYPE_IOA_FP      0xFF
+
+#define RES_IS_IOA(res)      ((res).resource_type == RES_TYPE_IOA_FP)
+#define RES_IS_GSCSI(res)    ((res).resource_type == RES_TYPE_GSCSI)
+#define RES_IS_VSET(res)     ((res).resource_type == RES_TYPE_VSET)
+#define RES_IS_AFDASD(res)   ((res).resource_type == RES_TYPE_AF_DASD)
+
+/* bus_protocol values used by driver */
+#define RES_TYPE_VENCLOSURE  0x8
+
+/* config_table_entry.common_flags0 */
+#define MULTIPATH_RESOURCE   PMC_BIT32(0)
+
+/* unique_flags1 */
+#define IMPORT_MODE_MANUAL   PMC_BIT8(0)
+
+/* well known resource handle values */
+#define RES_HANDLE_IOA       0xFFFFFFFF
+#define RES_HANDLE_NONE      0x00000000
+
+/* well known resource address values */
+#define RES_ADDRESS_IOAFP    0xFEFFFFFF
+#define RES_ADDRESS_INVALID  0xFFFFFFFF
+
+/* BUS/TARGET/LUN values from resource_address */
+#define RES_BUS(res_addr)    (le32_to_cpu(res_addr) & 0xFF)
+#define RES_TARGET(res_addr) ((le32_to_cpu(res_addr) >> 16) & 0xFF)
+#define RES_LUN(res_addr)    0x0
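+
+/* For illustration only: if the CPU-order value of resource_address is
+ * 0x00AB00CD, then RES_BUS() yields 0xCD and RES_TARGET() yields 0xAB;
+ * RES_LUN() always evaluates to 0 for these resources.
+ */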
+
+/* configuration table structure */
+struct pmcraid_config_table {
+	__le16 num_entries;
+	__u8  table_format;
+	__u8  reserved1;
+	__u8  flags;
+	__u8  reserved2[11];
+	struct pmcraid_config_table_entry entries[PMCRAID_MAX_RESOURCES];
+} __attribute__((packed, aligned(4)));
+
+/* config_table.flags value */
+#define MICROCODE_UPDATE_REQUIRED		PMC_BIT32(0)
+
+/*
+ * HCAM format
+ */
+#define PMCRAID_HOSTRCB_LDNSIZE 		4056
+
+/* Error log notification format */
+struct pmcraid_hostrcb_error {
+	__le32 fd_ioasc;
+	__le32 fd_ra;
+	__le32 fd_rh;
+	__le32 prc;
+	union {
+		__u8 data[PMCRAID_HOSTRCB_LDNSIZE];
+	} u;
+} __attribute__ ((packed, aligned(4)));
+
+struct pmcraid_hcam_hdr {
+	__u8  op_code;
+	__u8  notification_type;
+	__u8  notification_lost;
+	__u8  flags;
+	__u8  overlay_id;
+	__u8  reserved1[3];
+	__le32 ilid;
+	__le32 timestamp1;
+	__le32 timestamp2;
+	__le32 data_len;
+} __attribute__((packed, aligned(4)));
+
+#define PMCRAID_AEN_GROUP	0x3
+
+struct pmcraid_hcam_ccn {
+	struct pmcraid_hcam_hdr header;
+	struct pmcraid_config_table_entry cfg_entry;
+} __attribute__((packed, aligned(4)));
+
+struct pmcraid_hcam_ldn {
+	struct pmcraid_hcam_hdr header;
+	struct pmcraid_hostrcb_error error_log;
+} __attribute__((packed, aligned(4)));
+
+/* pmcraid_hcam.op_code values */
+#define HOSTRCB_TYPE_CCN			0xE1
+#define HOSTRCB_TYPE_LDN			0xE2
+
+/* pmcraid_hcam.notification_type values */
+#define NOTIFICATION_TYPE_ENTRY_CHANGED		0x0
+#define NOTIFICATION_TYPE_ENTRY_NEW		0x1
+#define NOTIFICATION_TYPE_ENTRY_DELETED		0x2
+#define NOTIFICATION_TYPE_ERROR_LOG		0x10
+#define NOTIFICATION_TYPE_INFORMATION_LOG	0x11
+
+#define HOSTRCB_NOTIFICATIONS_LOST		PMC_BIT8(0)
+
+/* pmcraid_hcam.flags values */
+#define HOSTRCB_INTERNAL_OP_ERROR		PMC_BIT8(0)
+#define HOSTRCB_ERROR_RESPONSE_SENT		PMC_BIT8(1)
+
+/* pmcraid_hcam.overlay_id values */
+#define HOSTRCB_OVERLAY_ID_08			0x08
+#define HOSTRCB_OVERLAY_ID_09			0x09
+#define HOSTRCB_OVERLAY_ID_11			0x11
+#define HOSTRCB_OVERLAY_ID_12			0x12
+#define HOSTRCB_OVERLAY_ID_13			0x13
+#define HOSTRCB_OVERLAY_ID_14			0x14
+#define HOSTRCB_OVERLAY_ID_16			0x16
+#define HOSTRCB_OVERLAY_ID_17			0x17
+#define HOSTRCB_OVERLAY_ID_20			0x20
+#define HOSTRCB_OVERLAY_ID_FF			0xFF
+
+/* Implementation specific card details */
+struct pmcraid_chip_details {
+	/* hardware register offsets */
+	unsigned long  ioastatus;
+	unsigned long  ioarrin;
+	unsigned long  mailbox;
+	unsigned long  global_intr_mask;
+	unsigned long  ioa_host_intr;
+	unsigned long  ioa_host_intr_clr;
+	unsigned long  ioa_host_mask;
+	unsigned long  ioa_host_mask_clr;
+	unsigned long  host_ioa_intr;
+	unsigned long  host_ioa_intr_clr;
+
+	/* timeout used during transitional to operational state */
+	unsigned long transop_timeout;
+};
+
+/* IOA to HOST doorbells (interrupts) */
+#define INTRS_TRANSITION_TO_OPERATIONAL		PMC_BIT32(0)
+#define INTRS_IOARCB_TRANSFER_FAILED		PMC_BIT32(3)
+#define INTRS_IOA_UNIT_CHECK			PMC_BIT32(4)
+#define INTRS_NO_HRRQ_FOR_CMD_RESPONSE		PMC_BIT32(5)
+#define INTRS_CRITICAL_OP_IN_PROGRESS		PMC_BIT32(6)
+#define INTRS_IO_DEBUG_ACK			PMC_BIT32(7)
+#define INTRS_IOARRIN_LOST			PMC_BIT32(27)
+#define INTRS_SYSTEM_BUS_MMIO_ERROR		PMC_BIT32(28)
+#define INTRS_IOA_PROCESSOR_ERROR		PMC_BIT32(29)
+#define INTRS_HRRQ_VALID			PMC_BIT32(30)
+#define INTRS_OPERATIONAL_STATUS		PMC_BIT32(0)
+
+/* Host to IOA Doorbells */
+#define DOORBELL_RUNTIME_RESET			PMC_BIT32(1)
+#define DOORBELL_IOA_RESET_ALERT		PMC_BIT32(7)
+#define DOORBELL_IOA_DEBUG_ALERT		PMC_BIT32(9)
+#define DOORBELL_ENABLE_DESTRUCTIVE_DIAGS	PMC_BIT32(8)
+#define DOORBELL_IOA_START_BIST			PMC_BIT32(23)
+#define DOORBELL_RESET_IOA			PMC_BIT32(31)
+
+/* Global interrupt mask register value */
+#define GLOBAL_INTERRUPT_MASK			0x4ULL
+
+#define PMCRAID_ERROR_INTERRUPTS	(INTRS_IOARCB_TRANSFER_FAILED | \
+					 INTRS_IOA_UNIT_CHECK | \
+					 INTRS_NO_HRRQ_FOR_CMD_RESPONSE | \
+					 INTRS_IOARRIN_LOST | \
+					 INTRS_SYSTEM_BUS_MMIO_ERROR | \
+					 INTRS_IOA_PROCESSOR_ERROR)
+
+#define PMCRAID_PCI_INTERRUPTS		(PMCRAID_ERROR_INTERRUPTS | \
+					 INTRS_HRRQ_VALID | \
+					 INTRS_CRITICAL_OP_IN_PROGRESS |\
+					 INTRS_TRANSITION_TO_OPERATIONAL)
+
+/* control_block, associated with each of the commands, contains the IOARCB,
+ * the IOADLs and memory for the IOASA. An additional 3 * 16 bytes are
+ * allocated in order to support additional request parameters (of max size
+ * 48 bytes) for any command.
+ */
+struct pmcraid_control_block {
+	struct pmcraid_ioarcb ioarcb;
+	struct pmcraid_ioadl_desc ioadl[PMCRAID_IOADLS_EXTERNAL + 3];
+	struct pmcraid_ioasa ioasa;
+} __attribute__ ((packed, aligned(PMCRAID_IOARCB_ALIGNMENT)));
+
+/* pmcraid_sglist - Scatter-gather list allocated for passthrough ioctls
+ */
+struct pmcraid_sglist {
+	u32 order;
+	u32 num_sg;
+	u32 num_dma_sg;
+	u32 buffer_len;
+	struct scatterlist scatterlist[1];
+};
+
+/* pmcraid_cmd - LLD representation of SCSI command */
+struct pmcraid_cmd {
+
+	/* Pointer and bus address of DMA-able control block for this command */
+	struct pmcraid_control_block *ioa_cb;
+	dma_addr_t ioa_cb_bus_addr;
+
+	/* sense buffer for REQUEST SENSE command if firmware is not sending
+	 * auto sense data
+	 */
+	dma_addr_t sense_buffer_dma;
+	dma_addr_t dma_handle;
+	u8 *sense_buffer;
+
+	/* pointer to mid layer structure of SCSI commands */
+	struct scsi_cmnd *scsi_cmd;
+
+	struct list_head free_list;
+	struct completion wait_for_completion;
+	struct timer_list timer;	/* needed for internal commands */
+	u32 timeout;			/* current timeout value */
+	u32 index;			/* index into the command list */
+	u8 completion_req;		/* for handling internal commands */
+	u8 release;			/* for handling completions */
+
+	void (*cmd_done) (struct pmcraid_cmd *);
+	struct pmcraid_instance *drv_inst;
+
+	struct pmcraid_sglist *sglist; /* used for passthrough IOCTLs */
+
+	/* scratch used during reset sequence */
+	union {
+		unsigned long time_left;
+		struct pmcraid_resource_entry *res;
+	} u;
+};
+
+/*
+ * Interrupt registers of IOA
+ */
+struct pmcraid_interrupts {
+	void __iomem *ioa_host_interrupt_reg;
+	void __iomem *ioa_host_interrupt_clr_reg;
+	void __iomem *ioa_host_interrupt_mask_reg;
+	void __iomem *ioa_host_interrupt_mask_clr_reg;
+	void __iomem *global_interrupt_mask_reg;
+	void __iomem *host_ioa_interrupt_reg;
+	void __iomem *host_ioa_interrupt_clr_reg;
+};
+
+/* ISR parameters; LLD allocates one for each interrupt vector (one per MSI-X vector if enabled) */
+struct pmcraid_isr_param {
+	u8 hrrq_id;			/* hrrq entry index */
+	u16 vector;			/* allocated msi-x vector */
+	struct pmcraid_instance *drv_inst;
+};
+
+/* AEN message header sent as part of event data to applications */
+struct pmcraid_aen_msg {
+	u32 hostno;
+	u32 length;
+	u8  reserved[8];
+	u8  data[0];
+};
+
+struct pmcraid_hostrcb {
+	struct pmcraid_instance *drv_inst;
+	struct pmcraid_aen_msg *msg;
+	struct pmcraid_hcam_hdr *hcam;	/* pointer to hcam buffer */
+	struct pmcraid_cmd  *cmd;       /* pointer to command block used */
+	dma_addr_t baddr;		/* system address of hcam buffer */
+	atomic_t ignore;		/* process HCAM response ? */
+};
+
+#define PMCRAID_AEN_HDR_SIZE	sizeof(struct pmcraid_aen_msg)
+
+
+
+/*
+ * Per adapter structure maintained by LLD
+ */
+struct pmcraid_instance {
+	/* Array of allowed-to-be-exposed resources, initialized from
+	 * Configuration Table, later updated with CCNs
+	 */
+	struct pmcraid_resource_entry *res_entries;
+
+	struct list_head free_res_q;	/* res_entries lists for easy lookup */
+	struct list_head used_res_q;	/* List of to be exposed resources */
+	spinlock_t resource_lock;	/* spinlock to protect resource list */
+
+	void __iomem *mapped_dma_addr;
+	void __iomem *ioa_status;	/* iomapped IOA status register */
+	void __iomem *mailbox;		/* iomapped mailbox register */
+	void __iomem *ioarrin;		/* iomapped IOARRIN register */
+
+	struct pmcraid_interrupts int_regs;
+	struct pmcraid_chip_details *chip_cfg;
+
+	/* HostRCBs needed for HCAM */
+	struct pmcraid_hostrcb ldn;
+	struct pmcraid_hostrcb ccn;
+
+
+	/* Bus address of start of HRRQ */
+	dma_addr_t hrrq_start_bus_addr[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Pointer to 1st entry of HRRQ */
+	__be32 *hrrq_start[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Pointer to last entry of HRRQ */
+	__be32 *hrrq_end[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Pointer to the current entry of HRRQ */
+	__be32 *hrrq_curr[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Lock for HRRQ access */
+	spinlock_t hrrq_lock[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Expected toggle bit at host */
+	u8 host_toggle_bit[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Number of Reset IOA retries. IOA marked dead if threshold exceeded */
+	u8 ioa_reset_attempts;
+#define PMCRAID_RESET_ATTEMPTS 3
+
+	/* Wait queue for threads waiting for Reset IOA completion */
+	wait_queue_head_t reset_wait_q;
+	struct pmcraid_cmd *reset_cmd;
+
+	/* structures for supporting SIGIO based AEN. */
+	struct fasync_struct *aen_queue;
+	struct mutex aen_queue_lock;	/* lock for aen subscribers list */
+	struct cdev cdev;
+
+	struct Scsi_Host *host;	/* mid layer interface structure handle */
+	struct pci_dev *pdev;	/* PCI device structure handle */
+
+	u8  current_log_level;	/* default level for logging IOASC errors */
+
+	u8  num_hrrq;		/* Number of interrupt vectors allocated */
+	dev_t dev;		/* Major-Minor numbers for Char device */
+
+	/* Used as ISR handler argument */
+	struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* configuration table */
+	struct pmcraid_config_table *cfg_table;
+	dma_addr_t cfg_table_bus_addr;
+
+	/* structures related to command blocks */
+	struct kmem_cache *cmd_cachep;		/* cache for cmd blocks */
+	struct pci_pool *control_pool;		/* pool for control blocks */
+	char   cmd_pool_name[64];		/* name of cmd cache */
+	char   ctl_pool_name[64];		/* name of control cache */
+
+	struct pmcraid_cmd *cmd_list[PMCRAID_MAX_CMD];
+
+	struct list_head free_cmd_pool;
+	struct list_head pending_cmd_pool;
+	spinlock_t free_pool_lock;	 	/* free pool lock */
+	spinlock_t pending_pool_lock;	 	/* pending pool lock */
+
+	/* Number of I/O commands pending with firmware */
+	atomic_t outstanding_cmds;
+
+	/* should resources be added to / deleted from mid-layer now? */
+	atomic_t expose_resources;
+
+	/* Tasklet to handle deferred processing */
+	struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
+
+	/* Work-queue (Shared) for deferred reset processing */
+	struct work_struct worker_q;
+
+
+	u32 ioa_state:4;	/* For IOA Reset sequence FSM */
+#define IOA_STATE_OPERATIONAL       0x0
+#define IOA_STATE_UNKNOWN           0x1
+#define IOA_STATE_DEAD              0x2
+#define IOA_STATE_IN_SOFT_RESET     0x3
+#define IOA_STATE_IN_HARD_RESET     0x4
+#define IOA_STATE_IN_RESET_ALERT    0x5
+#define IOA_STATE_IN_BRINGDOWN      0x6
+#define IOA_STATE_IN_BRINGUP        0x7
+
+	u32 ioa_reset_in_progress:1; /* true if IOA reset is in progress */
+	u32 ioa_hard_reset:1;	/* TRUE if Hard Reset is needed */
+	u32 ioa_unit_check:1;	/* Indicates Unit Check condition */
+	u32 ioa_bringdown:1;	/* whether IOA needs to be brought down */
+	u32 force_ioa_reset:1;  /* force adapter reset ? */
+	u32 reinit_cfg_table:1; /* reinit config table due to lost CCN */
+	u32 ioa_shutdown_type:2;/* shutdown type used during reset */
+#define SHUTDOWN_NONE               0x0
+#define SHUTDOWN_NORMAL             0x1
+#define SHUTDOWN_ABBREV             0x2
+
+};
+
+/* LLD maintained resource entry structure */
+struct pmcraid_resource_entry {
+	struct list_head queue;	/* link to "to be exposed" resources */
+	struct pmcraid_config_table_entry cfg_entry;
+	struct scsi_device *scsi_dev;	/* Link scsi_device structure */
+	atomic_t read_failures;		/* count of failed READ commands */
+	atomic_t write_failures;	/* count of failed WRITE commands */
+
+	/* To indicate add/delete/modify during CCN */
+	u8 change_detected;
+#define RES_CHANGE_ADD          0x1	/* add this to mid-layer */
+#define RES_CHANGE_DEL          0x2	/* remove this from mid-layer */
+
+	u8 reset_progress;      /* Device is resetting */
+
+	/*
+	 * When IOA asks for sync (i.e. IOASC = Not Ready, Sync Required), this
+	 * flag will be set, mid layer will be asked to retry. In the next
+	 * attempt, this flag will be checked in queuecommand() to set
+	 * SYNC_COMPLETE flag in IOARCB (flag_0).
+	 */
+	u8 sync_reqd;
+
+	/* target indicates the mapped target_id assigned to this resource if
+	 * this is a VSET resource. For non-VSET resources this will be unused
+	 * or zero
+	 */
+	u8 target;
+};
+
+/* Data structures used in IOASC error code logging */
+struct pmcraid_ioasc_error {
+	u32 ioasc_code;		/* IOASC code */
+	u8 log_level;		/* default log level assignment. */
+	char *error_string;
+};
+
+/* Initial log_level assignments for various IOASCs */
+#define IOASC_LOG_LEVEL_NONE	    0x0 /* no logging */
+#define IOASC_LOG_LEVEL_MUST        0x1	/* must log: all high-severity errors */
+#define IOASC_LOG_LEVEL_HARD        0x2	/* optional: low-severity errors */
+
+/* Error information maintained by LLD. LLD initializes the pmcraid_error_table
+ * statically.
+ */
+static struct pmcraid_ioasc_error pmcraid_ioasc_error_table[] = {
+	{0x01180600, IOASC_LOG_LEVEL_MUST,
+	 "Recovered Error, soft media error, sector reassignment suggested"},
+	{0x015D0000, IOASC_LOG_LEVEL_MUST,
+	 "Recovered Error, failure prediction threshold exceeded"},
+	{0x015D9200, IOASC_LOG_LEVEL_MUST,
+	 "Recovered Error, soft Cache Card Battery error threshold"},
+	{0x02048000, IOASC_LOG_LEVEL_MUST,
+	 "Not Ready, IOA Reset Required"},
+	{0x02408500, IOASC_LOG_LEVEL_MUST,
+	 "Not Ready, IOA microcode download required"},
+	{0x03110B00, IOASC_LOG_LEVEL_MUST,
+	 "Medium Error, data unreadable, reassignment suggested"},
+	{0x03110C00, IOASC_LOG_LEVEL_MUST,
+	 "Medium Error, data unreadable do not reassign"},
+	{0x03310000, IOASC_LOG_LEVEL_MUST,
+	 "Medium Error, media corrupted"},
+	{0x04050000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA can't communicate with device"},
+	{0x04080000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, device bus error"},
+	{0x04080000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, device bus is not functioning"},
+	{0x04118000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA reserved area data check"},
+	{0x04118100, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA reserved area invalid data pattern"},
+	{0x04118200, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA reserved area LRC error"},
+	{0x04320000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, reassignment space exhausted"},
+	{0x04330000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, data transfer underlength error"},
+	{0x04330000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, data transfer overlength error"},
+	{0x04418000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, PCI bus error"},
+	{0x04440000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, device error"},
+	{0x04448300, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, undefined device response"},
+	{0x04448400, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA microcode error"},
+	{0x04448600, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, IOA reset required"},
+	{0x04449200, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, hard Cache Feature Card Battery error"},
+	{0x0444A000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, failed device altered"},
+	{0x0444A200, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, data check after reassignment"},
+	{0x0444A300, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, LRC error after reassignment"},
+	{0x044A0000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, device bus error (msg/cmd phase)"},
+	{0x04670400, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, new device can't be used"},
+	{0x04678000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, invalid multiadapter configuration"},
+	{0x04678100, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, incorrect connection between enclosures"},
+	{0x04678200, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, connections exceed IOA design limits"},
+	{0x04678300, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, incorrect multipath connection"},
+	{0x04679000, IOASC_LOG_LEVEL_MUST,
+	 "Hardware Error, command to LUN failed"},
+	{0x064C8000, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, cache exists for missing/failed device"},
+	{0x06670100, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, incompatible exposed mode device"},
+	{0x06670600, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, attachment of logical unit failed"},
+	{0x06678000, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, cables exceed connective design limit"},
+	{0x06678300, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, incomplete multipath connection between " \
+	 "IOA and enclosure"},
+	{0x06678400, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, incomplete multipath connection between " \
+	 "device and enclosure"},
+	{0x06678500, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, incomplete multipath connection between " \
+	 "IOA and remote IOA"},
+	{0x06678600, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, missing remote IOA"},
+	{0x06679100, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, enclosure doesn't support required multipath " \
+	 "function"},
+	{0x06698200, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, corrupt array parity detected on device"},
+	{0x066B0200, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, array exposed"},
+	{0x066B8200, IOASC_LOG_LEVEL_HARD,
+	 "Unit Attention, exposed array is still protected"},
+	{0x066B9200, IOASC_LOG_LEVEL_MUST,
+	 "Unit Attention, Multipath redundancy level got worse"},
+	{0x07270000, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, device is read/write protected by IOA"},
+	{0x07278000, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, IOA doesn't support device attribute"},
+	{0x07278100, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, NVRAM mirroring prohibited"},
+	{0x07278400, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, array is short 2 or more devices"},
+	{0x07278600, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, exposed array is short a required device"},
+	{0x07278700, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, array members not at required addresses"},
+	{0x07278800, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, exposed mode device resource address conflict"},
+	{0x07278900, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, incorrect resource address of exposed mode device"},
+	{0x07278A00, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, Array is missing a device and parity is out of sync"},
+	{0x07278B00, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, maximum number of arrays already exist"},
+	{0x07278C00, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, cannot locate cache data for device"},
+	{0x07278D00, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, cache data exists for a changed device"},
+	{0x07279100, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, detection of a device requiring format"},
+	{0x07279200, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, IOA exceeds maximum number of devices"},
+	{0x07279600, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, missing array, volume set is not functional"},
+	{0x07279700, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, single device for a volume set"},
+	{0x07279800, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, missing multiple devices for a volume set"},
+	{0x07279900, IOASC_LOG_LEVEL_HARD,
+	 "Data Protect, maximum number of volume sets already exist"},
+	{0x07279A00, IOASC_LOG_LEVEL_MUST,
+	 "Data Protect, other volume set problem"},
+};
+
+/* macros to help in debugging */
+#define pmcraid_err(...)  \
+	printk(KERN_ERR "MaxRAID: "__VA_ARGS__)
+
+#define pmcraid_info(...) \
+	if (pmcraid_debug_log) \
+		printk(KERN_INFO "MaxRAID: "__VA_ARGS__)
+
+/* check if given command is a SCSI READ or SCSI WRITE command */
+#define SCSI_READ_CMD           0x1	/* any of SCSI READ commands */
+#define SCSI_WRITE_CMD          0x2	/* any of SCSI WRITE commands */
+#define SCSI_CMD_TYPE(opcode) \
+({  u8 op = opcode; u8 __type = 0;\
+	if (op == READ_6 || op == READ_10 || op == READ_12 || op == READ_16)\
+		__type = SCSI_READ_CMD;\
+	else if (op == WRITE_6 || op == WRITE_10 || op == WRITE_12 || \
+		 op == WRITE_16)\
+		__type = SCSI_WRITE_CMD;\
+	__type;\
+})
+
+#define IS_SCSI_READ_WRITE(opcode) \
+({	u8 __type = SCSI_CMD_TYPE(opcode); \
+	(__type == SCSI_READ_CMD || __type == SCSI_WRITE_CMD) ? 1 : 0;\
+})
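+
+/* For illustration only (opcodes come from <scsi/scsi.h>):
+ *	SCSI_CMD_TYPE(READ_10)              evaluates to SCSI_READ_CMD
+ *	SCSI_CMD_TYPE(WRITE_16)             evaluates to SCSI_WRITE_CMD
+ *	IS_SCSI_READ_WRITE(TEST_UNIT_READY) evaluates to 0
+ */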
+
+
+/*
+ * pmcraid_ioctl_header - definition of header structure that precedes all the
+ * buffers given as ioctl arguments.
+ *
+ * .signature           : always ASCII string, "PMCRAID"
+ * .reserved            : not used
+ * .buffer_length       : length of the buffer following the header
+ */
+struct pmcraid_ioctl_header {
+	u8  signature[8];
+	u32 reserved;
+	u32 buffer_length;
+};
+
+#define PMCRAID_IOCTL_SIGNATURE      "PMCRAID"
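+
+/* Minimal usage sketch, for illustration only (field contents beyond the
+ * signature and length are driver-specific):
+ *	struct pmcraid_ioctl_header hdr;
+ *	memcpy(hdr.signature, PMCRAID_IOCTL_SIGNATURE, sizeof(hdr.signature));
+ *	hdr.reserved = 0;
+ *	hdr.buffer_length = <length of the buffer that follows this header>;
+ */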
+
+
+/*
+ * pmcraid_event_details - defines AEN details that apps can retrieve from LLD
+ *
+ * .rcb_ccn - complete RCB of CCN
+ * .rcb_ldn - complete RCB of LDN
+ */
+struct pmcraid_event_details {
+	struct pmcraid_hcam_ccn rcb_ccn;
+	struct pmcraid_hcam_ldn rcb_ldn;
+};
+
+/*
+ * pmcraid_driver_ioctl_buffer - structure passed as argument to most of the
+ * PMC driver handled ioctls.
+ */
+struct pmcraid_driver_ioctl_buffer {
+	struct pmcraid_ioctl_header ioctl_header;
+	struct pmcraid_event_details event_details;
+};
+
+/*
+ * pmcraid_passthrough_ioctl_buffer - structure given as argument to
+ * passthrough (or firmware-handled) IOCTL commands. Note that the ioarcb
+ * requires 32-byte alignment, so it is necessary to pack this structure to
+ * avoid any holes between ioctl_header and the passthrough buffer
+ *
+ * .ioctl_header  : ioctl header
+ * .ioarcb        : filled-up ioarcb buffer, driver always reads this buffer
+ * .ioasa         : buffer for ioasa, driver fills this with IOASA from firmware
+ * .request_buffer: The I/O buffer (flat), driver reads/writes to this based on
+ *                  the transfer directions passed in ioarcb.flags0. Contents
+ *                  of this buffer are valid only when ioarcb.data_transfer_len
+ *                  is not zero.
+ */
+struct pmcraid_passthrough_ioctl_buffer {
+	struct pmcraid_ioctl_header ioctl_header;
+	struct pmcraid_ioarcb ioarcb;
+	struct pmcraid_ioasa  ioasa;
+	u8  request_buffer[1];
+} __attribute__ ((packed));
+
+/*
+ * keys to differentiate between driver handled IOCTLs and passthrough
+ * IOCTLs passed to the IOA. The driver determines the ioctl type using the
+ * _IOC_TYPE macro
+ */
+#define PMCRAID_DRIVER_IOCTL         'D'
+#define PMCRAID_PASSTHROUGH_IOCTL    'F'
+
+#define DRV_IOCTL(n, size) \
+    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
+
+#define FMW_IOCTL(n, size) \
+    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
+
+/*
+ * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
+ * This is to facilitate applications avoiding unnecessary memory allocations.
+ * For example, most driver-handled ioctls do not require the ioarcb or ioasa.
+ */
+#define _ARGSIZE(arg) (sizeof(struct pmcraid_ioctl_header) + sizeof(arg))
+
+/* Driver handled IOCTL command definitions */
+
+#define PMCRAID_IOCTL_RESET_ADAPTER          \
+	DRV_IOCTL(5, sizeof(struct pmcraid_ioctl_header))
+
+/* passthrough/firmware handled commands */
+#define PMCRAID_IOCTL_PASSTHROUGH_COMMAND         \
+	FMW_IOCTL(1, sizeof(struct pmcraid_passthrough_ioctl_buffer))
+
+#define PMCRAID_IOCTL_DOWNLOAD_MICROCODE     \
+	FMW_IOCTL(2, sizeof(struct pmcraid_passthrough_ioctl_buffer))
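+
+/* For illustration only: _IOC_TYPE() recovers the key defined above, e.g.
+ *	_IOC_TYPE(PMCRAID_IOCTL_RESET_ADAPTER)       == PMCRAID_DRIVER_IOCTL
+ *	_IOC_TYPE(PMCRAID_IOCTL_PASSTHROUGH_COMMAND) == PMCRAID_PASSTHROUGH_IOCTL
+ */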
+
+
+#endif /* _PMCRAID_H */
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 0f87962..fbcb82a 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1670,7 +1670,7 @@
 
 	qla24xx_vport_disable(fc_vport, disable);
 
-	if (ql2xmultique_tag) {
+	if (ha->flags.cpu_affinity_enabled) {
 		req = ha->req_q_map[1];
 		goto vport_queue;
 	} else if (ql2xmaxqueues == 1 || !ha->npiv_info)
@@ -1736,6 +1736,11 @@
 
 	qla24xx_deallocate_vp_id(vha);
 
+	mutex_lock(&ha->vport_lock);
+	ha->cur_vport_count--;
+	clear_bit(vha->vp_idx, ha->vp_idx_map);
+	mutex_unlock(&ha->vport_lock);
+
 	if (vha->timer_active) {
 		qla2x00_vp_stop_timer(vha);
 		DEBUG15(printk ("scsi(%ld): timer for the vport[%d] = %p "
@@ -1743,7 +1748,7 @@
 		    vha->host_no, vha->vp_idx, vha));
         }
 
-	if (vha->req->id && !ql2xmultique_tag) {
+	if (vha->req->id && !ha->flags.cpu_affinity_enabled) {
 		if (qla25xx_delete_req_que(vha, vha->req) != QLA_SUCCESS)
 			qla_printk(KERN_WARNING, ha,
 				"Queue delete failed.\n");
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index 00aa48d..2150618 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -189,6 +189,7 @@
  */
 typedef struct srb {
 	struct fc_port *fcport;
+	uint32_t handle;
 
 	struct scsi_cmnd *cmd;		/* Linux SCSI command pkt */
 
@@ -196,6 +197,8 @@
 
 	uint32_t request_sense_length;
 	uint8_t *request_sense_ptr;
+
+	void *ctx;
 } srb_t;
 
 /*
@@ -204,6 +207,28 @@
 #define SRB_DMA_VALID		BIT_0	/* Command sent to ISP */
 
 /*
+ * SRB extensions.
+ */
+struct srb_ctx {
+#define SRB_LOGIN_CMD	1
+#define SRB_LOGOUT_CMD	2
+	uint16_t type;
+	struct timer_list timer;
+
+	void (*free)(srb_t *sp);
+	void (*timeout)(srb_t *sp);
+};
+
+struct srb_logio {
+	struct srb_ctx ctx;
+
+#define SRB_LOGIN_RETRIED	BIT_0
+#define SRB_LOGIN_COND_PLOGI	BIT_1
+#define SRB_LOGIN_SKIP_PRLI	BIT_2
+	uint16_t flags;
+};
+
+/*
  * ISP I/O Register Set structure definitions.
  */
 struct device_reg_2xxx {
@@ -1482,7 +1507,7 @@
 		uint8_t domain;
 		uint8_t area;
 		uint8_t al_pa;
-#elif __LITTLE_ENDIAN
+#elif defined(__LITTLE_ENDIAN)
 		uint8_t al_pa;
 		uint8_t area;
 		uint8_t domain;
@@ -1565,6 +1590,7 @@
 #define FCF_FABRIC_DEVICE	BIT_0
 #define FCF_LOGIN_NEEDED	BIT_1
 #define FCF_TAPE_PRESENT	BIT_2
+#define FCF_FCP2_DEVICE		BIT_3
 
 /* No loop ID flag. */
 #define FC_NO_LOOP_ID		0x1000
@@ -2093,6 +2119,10 @@
 enum qla_work_type {
 	QLA_EVT_AEN,
 	QLA_EVT_IDC_ACK,
+	QLA_EVT_ASYNC_LOGIN,
+	QLA_EVT_ASYNC_LOGIN_DONE,
+	QLA_EVT_ASYNC_LOGOUT,
+	QLA_EVT_ASYNC_LOGOUT_DONE,
 };
 
 
@@ -2111,6 +2141,11 @@
 #define QLA_IDC_ACK_REGS	7
 			uint16_t mb[QLA_IDC_ACK_REGS];
 		} idc_ack;
+		struct {
+			struct fc_port *fcport;
+#define QLA_LOGIO_LOGIN_RETRIED	BIT_0
+			u16 data[2];
+		} logio;
 	} u;
 };
 
@@ -2224,6 +2259,7 @@
 		uint32_t	chip_reset_done		:1;
 		uint32_t	port0			:1;
 		uint32_t	running_gold_fw		:1;
+		uint32_t	cpu_affinity_enabled	:1;
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -2350,6 +2386,7 @@
 				(ha)->flags.msix_enabled)
 #define IS_FAC_REQUIRED(ha)	(IS_QLA81XX(ha))
 #define IS_NOCACHE_VPD_TYPE(ha)	(IS_QLA81XX(ha))
+#define IS_ALOGIO_CAPABLE(ha)	(IS_QLA23XX(ha) || IS_FWI2_CAPABLE(ha))
 
 #define IS_IIDMA_CAPABLE(ha)    ((ha)->device_type & DT_IIDMA)
 #define IS_FWI2_CAPABLE(ha)     ((ha)->device_type & DT_FWI2)
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index dfde2dd..66a8da5 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -1126,7 +1126,7 @@
 	uint16_t id;
 	uint16_t reserved_4;
 	uint16_t hopct;
-	uint8_t reserved_5;
+	uint8_t reserved_5[2];
 };
 
 #define VP_RPT_ID_IOCB_TYPE	0x32	/* Report ID Acquisition entry. */
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 65b12d8..f3d1d1a 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -52,6 +52,14 @@
 
 extern void qla84xx_put_chip(struct scsi_qla_host *);
 
+extern int qla2x00_async_login(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_async_logout(struct scsi_qla_host *, fc_port_t *);
+extern int qla2x00_async_login_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_async_logout_done(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+
 /*
  * Global Data in qla_os.c source file.
  */
@@ -76,6 +84,15 @@
 extern int qla2x00_post_aen_work(struct scsi_qla_host *, enum
     fc_host_event_code, u32);
 extern int qla2x00_post_idc_ack_work(struct scsi_qla_host *, uint16_t *);
+extern int qla2x00_post_async_login_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_login_done_work(struct scsi_qla_host *,
+    fc_port_t *, uint16_t *);
+extern int qla2x00_post_async_logout_work(struct scsi_qla_host *, fc_port_t *,
+    uint16_t *);
+extern int qla2x00_post_async_logout_done_work(struct scsi_qla_host *,
+    fc_port_t *, uint16_t *);
+
 extern int qla81xx_restart_mpi_firmware(scsi_qla_host_t *);
 
 extern void qla2x00_abort_fcport_cmds(fc_port_t *);
@@ -83,6 +100,8 @@
 	struct qla_hw_data *);
 extern void qla2x00_free_host(struct scsi_qla_host *);
 extern void qla2x00_relogin(struct scsi_qla_host *);
+extern void qla2x00_do_work(struct scsi_qla_host *);
+
 /*
  * Global Functions in qla_mid.c source file.
  */
@@ -135,6 +154,7 @@
 						uint16_t, uint16_t, uint8_t);
 int __qla2x00_marker(struct scsi_qla_host *, struct req_que *, struct rsp_que *,
 						uint16_t, uint16_t, uint8_t);
+extern int qla2x00_start_sp(srb_t *);
 
 /*
  * Global Function Prototypes in qla_mbx.c source file.
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 917534b..4647015 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -1674,6 +1674,10 @@
 qla2x00_fdmi_register(scsi_qla_host_t *vha)
 {
 	int rval;
+	struct qla_hw_data *ha = vha->hw;
+
+	if (IS_QLA2100(ha) || IS_QLA2200(ha))
+		return QLA_FUNCTION_FAILED;
 
 	rval = qla2x00_mgmt_svr_login(vha);
 	if (rval)
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index f2ce8e3..9e3eaac 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -40,6 +40,210 @@
 static int qla84xx_init_chip(scsi_qla_host_t *);
 static int qla25xx_init_queues(struct qla_hw_data *);
 
+/* SRB Extensions ---------------------------------------------------------- */
+
+static void
+qla2x00_ctx_sp_timeout(unsigned long __data)
+{
+	srb_t *sp = (srb_t *)__data;
+	struct srb_ctx *ctx;
+	fc_port_t *fcport = sp->fcport;
+	struct qla_hw_data *ha = fcport->vha->hw;
+	struct req_que *req;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	req = ha->req_q_map[0];
+	req->outstanding_cmds[sp->handle] = NULL;
+	ctx = sp->ctx;
+	ctx->timeout(sp);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+
+	ctx->free(sp);
+}
+
+static void
+qla2x00_ctx_sp_free(srb_t *sp)
+{
+	struct srb_ctx *ctx = sp->ctx;
+
+	kfree(ctx);
+	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
+}
+
+inline srb_t *
+qla2x00_get_ctx_sp(scsi_qla_host_t *vha, fc_port_t *fcport, size_t size,
+    unsigned long tmo)
+{
+	srb_t *sp;
+	struct qla_hw_data *ha = vha->hw;
+	struct srb_ctx *ctx;
+
+	sp = mempool_alloc(ha->srb_mempool, GFP_KERNEL);
+	if (!sp)
+		goto done;
+	ctx = kzalloc(size, GFP_KERNEL);
+	if (!ctx) {
+		mempool_free(sp, ha->srb_mempool);
+		goto done;
+	}
+
+	memset(sp, 0, sizeof(*sp));
+	sp->fcport = fcport;
+	sp->ctx = ctx;
+	ctx->free = qla2x00_ctx_sp_free;
+
+	init_timer(&ctx->timer);
+	if (!tmo)
+		goto done;
+	ctx->timer.expires = jiffies + tmo * HZ;
+	ctx->timer.data = (unsigned long)sp;
+	ctx->timer.function = qla2x00_ctx_sp_timeout;
+	add_timer(&ctx->timer);
+done:
+	return sp;
+}
+
+/* Asynchronous Login/Logout Routines -------------------------------------- */
+
+#define ELS_TMO_2_RATOV(ha) ((ha)->r_a_tov / 10 * 2)
+
+static void
+qla2x00_async_logio_timeout(srb_t *sp)
+{
+	fc_port_t *fcport = sp->fcport;
+	struct srb_logio *lio = sp->ctx;
+
+	DEBUG2(printk(KERN_WARNING
+	    "scsi(%ld:%x): Async-%s timeout.\n",
+	    fcport->vha->host_no, sp->handle,
+	    lio->ctx.type == SRB_LOGIN_CMD ? "login": "logout"));
+
+	if (lio->ctx.type == SRB_LOGIN_CMD)
+		qla2x00_post_async_logout_work(fcport->vha, fcport, NULL);
+}
+
+int
+qla2x00_async_login(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_logio *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+	    ELS_TMO_2_RATOV(ha) + 2);
+	if (!sp)
+		goto done;
+
+	lio = sp->ctx;
+	lio->ctx.type = SRB_LOGIN_CMD;
+	lio->ctx.timeout = qla2x00_async_logio_timeout;
+	lio->flags |= SRB_LOGIN_COND_PLOGI;
+	if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+		lio->flags |= SRB_LOGIN_RETRIED;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	DEBUG2(printk(KERN_DEBUG
+	    "scsi(%ld:%x): Async-login - loop-id=%x portid=%02x%02x%02x "
+	    "retries=%d.\n", fcport->vha->host_no, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa,
+	    fcport->login_retry));
+	return rval;
+
+done_free_sp:
+	del_timer_sync(&lio->ctx.timer);
+	lio->ctx.free(sp);
+done:
+	return rval;
+}
+
+int
+qla2x00_async_logout(struct scsi_qla_host *vha, fc_port_t *fcport)
+{
+	struct qla_hw_data *ha = vha->hw;
+	srb_t *sp;
+	struct srb_logio *lio;
+	int rval;
+
+	rval = QLA_FUNCTION_FAILED;
+	sp = qla2x00_get_ctx_sp(vha, fcport, sizeof(struct srb_logio),
+	    ELS_TMO_2_RATOV(ha) + 2);
+	if (!sp)
+		goto done;
+
+	lio = sp->ctx;
+	lio->ctx.type = SRB_LOGOUT_CMD;
+	lio->ctx.timeout = qla2x00_async_logio_timeout;
+	rval = qla2x00_start_sp(sp);
+	if (rval != QLA_SUCCESS)
+		goto done_free_sp;
+
+	DEBUG2(printk(KERN_DEBUG
+	    "scsi(%ld:%x): Async-logout - loop-id=%x portid=%02x%02x%02x.\n",
+	    fcport->vha->host_no, sp->handle, fcport->loop_id,
+	    fcport->d_id.b.domain, fcport->d_id.b.area, fcport->d_id.b.al_pa));
+	return rval;
+
+done_free_sp:
+	del_timer_sync(&lio->ctx.timer);
+	lio->ctx.free(sp);
+done:
+	return rval;
+}
+
+int
+qla2x00_async_login_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	int rval;
+	uint8_t opts = 0;
+
+	switch (data[0]) {
+	case MBS_COMMAND_COMPLETE:
+		if (fcport->flags & FCF_TAPE_PRESENT)
+			opts |= BIT_1;
+		rval = qla2x00_get_port_database(vha, fcport, opts);
+		if (rval != QLA_SUCCESS)
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
+		else
+			qla2x00_update_fcport(vha, fcport);
+		break;
+	case MBS_COMMAND_ERROR:
+		if (data[1] & QLA_LOGIO_LOGIN_RETRIED)
+			set_bit(RELOGIN_NEEDED, &vha->dpc_flags);
+		else
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
+		break;
+	case MBS_PORT_ID_USED:
+		fcport->loop_id = data[1];
+		qla2x00_post_async_login_work(vha, fcport, NULL);
+		break;
+	case MBS_LOOP_ID_USED:
+		fcport->loop_id++;
+		rval = qla2x00_find_new_loop_id(vha, fcport);
+		if (rval != QLA_SUCCESS) {
+			qla2x00_mark_device_lost(vha, fcport, 1, 0);
+			break;
+		}
+		qla2x00_post_async_login_work(vha, fcport, NULL);
+		break;
+	}
+	return QLA_SUCCESS;
+}
+
+int
+qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
+    uint16_t *data)
+{
+	qla2x00_mark_device_lost(vha, fcport, 1, 0);
+	return QLA_SUCCESS;
+}
+
 /****************************************************************************/
 /*                QLogic ISP2x00 Hardware Support Functions.                */
 /****************************************************************************/
@@ -987,7 +1191,6 @@
 				    ha->phy_version);
 				if (rval != QLA_SUCCESS)
 					goto failed;
-
 				ha->flags.npiv_supported = 0;
 				if (IS_QLA2XXX_MIDTYPE(ha) &&
 					 (ha->fw_attributes & BIT_2)) {
@@ -1591,7 +1794,8 @@
 	char *st, *en;
 	uint16_t index;
 	struct qla_hw_data *ha = vha->hw;
-	int use_tbl = !IS_QLA25XX(ha) && !IS_QLA81XX(ha);
+	int use_tbl = !IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha) &&
+	    !IS_QLA81XX(ha);
 
 	if (memcmp(model, BINZERO, len) != 0) {
 		strncpy(ha->model_number, model, len);
@@ -1978,7 +2182,7 @@
 	struct fc_rport *rport;
 
 	spin_lock_irq(fcport->vha->host->host_lock);
-	rport = fcport->drport;
+	rport = fcport->drport ? fcport->drport: fcport->rport;
 	fcport->drport = NULL;
 	spin_unlock_irq(fcport->vha->host->host_lock);
 	if (rport)
@@ -2345,8 +2549,7 @@
 	struct fc_rport *rport;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (fcport->drport)
-		qla2x00_rport_del(fcport);
+	qla2x00_rport_del(fcport);
 
 	rport_ids.node_name = wwn_to_u64(fcport->node_name);
 	rport_ids.port_name = wwn_to_u64(fcport->port_name);
@@ -3039,6 +3242,12 @@
 	rval = QLA_SUCCESS;
 	retry = 0;
 
+	if (IS_ALOGIO_CAPABLE(ha)) {
+		rval = qla2x00_post_async_login_work(vha, fcport, NULL);
+		if (!rval)
+			return rval;
+	}
+
 	rval = qla2x00_fabric_login(vha, fcport, next_loopid);
 	if (rval == QLA_SUCCESS) {
 		/* Send an ADISC to tape devices.*/
@@ -3133,7 +3342,7 @@
 			} else {
 				fcport->port_type = FCT_TARGET;
 				if (mb[1] & BIT_1) {
-					fcport->flags |= FCF_TAPE_PRESENT;
+					fcport->flags |= FCF_FCP2_DEVICE;
 				}
 			}
 
@@ -3244,7 +3453,7 @@
 	struct req_que *req;
 	struct rsp_que *rsp;
 
-	if (ql2xmultique_tag)
+	if (vha->hw->flags.cpu_affinity_enabled)
 		req = vha->hw->req_q_map[0];
 	else
 		req = vha->req;
@@ -3286,15 +3495,17 @@
 }
 
 void
-qla2x00_update_fcports(scsi_qla_host_t *vha)
+qla2x00_update_fcports(scsi_qla_host_t *base_vha)
 {
 	fc_port_t *fcport;
+	struct scsi_qla_host *tvp, *vha;
 
 	/* Go with deferred removal of rport references. */
-	list_for_each_entry(fcport, &vha->vp_fcports, list)
-		if (fcport && fcport->drport &&
-		    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
-			qla2x00_rport_del(fcport);
+	list_for_each_entry_safe(vha, tvp, &base_vha->hw->vp_list, list)
+		list_for_each_entry(fcport, &vha->vp_fcports, list)
+			if (fcport && fcport->drport &&
+			    atomic_read(&fcport->state) != FCS_UNCONFIGURED)
+				qla2x00_rport_del(fcport);
 }
 
 /*
@@ -3331,8 +3542,6 @@
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			qla2x00_mark_all_devices_lost(vha, 0);
-			list_for_each_entry_safe(vp, tvp, &ha->vp_list, list)
-			       qla2x00_mark_all_devices_lost(vp, 0);
 		} else {
 			if (!atomic_read(&vha->loop_down_timer))
 				atomic_set(&vha->loop_down_timer,
@@ -4264,7 +4473,7 @@
 		return -EINVAL;
 
 	rval = qla2x00_fw_ready(base_vha);
-	if (ql2xmultique_tag)
+	if (ha->flags.cpu_affinity_enabled)
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 13396be..c5ccac0 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -350,6 +350,7 @@
 	/* Build command packet */
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
@@ -778,6 +779,7 @@
 	/* Build command packet. */
 	req->current_outstanding_cmd = handle;
 	req->outstanding_cmds[handle] = sp;
+	sp->handle = handle;
 	sp->cmd->host_scribble = (unsigned char *)(unsigned long)handle;
 	req->cnt -= req_cnt;
 
@@ -852,9 +854,211 @@
 	struct qla_hw_data *ha = sp->fcport->vha->hw;
 	int affinity = cmd->request->cpu;
 
-	if (ql2xmultique_tag && affinity >= 0 &&
+	if (ha->flags.cpu_affinity_enabled && affinity >= 0 &&
 		affinity < ha->max_rsp_queues - 1)
 		*rsp = ha->rsp_q_map[affinity + 1];
 	 else
 		*rsp = ha->rsp_q_map[0];
 }
+
+/* Generic Control-SRB manipulation functions. */
+
+static void *
+qla2x00_alloc_iocbs(srb_t *sp)
+{
+	scsi_qla_host_t	*vha = sp->fcport->vha;
+	struct qla_hw_data *ha = vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	uint32_t index, handle;
+	request_t *pkt;
+	uint16_t cnt, req_cnt;
+
+	pkt = NULL;
+	req_cnt = 1;
+
+	/* Check for room in outstanding command list. */
+	handle = req->current_outstanding_cmd;
+	for (index = 1; index < MAX_OUTSTANDING_COMMANDS; index++) {
+		handle++;
+		if (handle == MAX_OUTSTANDING_COMMANDS)
+			handle = 1;
+		if (!req->outstanding_cmds[handle])
+			break;
+	}
+	if (index == MAX_OUTSTANDING_COMMANDS)
+		goto queuing_error;
+
+	/* Check for room on request queue. */
+	if (req->cnt < req_cnt) {
+		if (ha->mqenable)
+			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
+		else if (IS_FWI2_CAPABLE(ha))
+			cnt = RD_REG_DWORD(&reg->isp24.req_q_out);
+		else
+			cnt = qla2x00_debounce_register(
+			    ISP_REQ_Q_OUT(ha, &reg->isp));
+
+		if  (req->ring_index < cnt)
+			req->cnt = cnt - req->ring_index;
+		else
+			req->cnt = req->length -
+			    (req->ring_index - cnt);
+	}
+	if (req->cnt < req_cnt)
+		goto queuing_error;
+
+	/* Prep packet */
+	req->current_outstanding_cmd = handle;
+	req->outstanding_cmds[handle] = sp;
+	req->cnt -= req_cnt;
+
+	pkt = req->ring_ptr;
+	memset(pkt, 0, REQUEST_ENTRY_SIZE);
+	pkt->entry_count = req_cnt;
+	pkt->handle = handle;
+	sp->handle = handle;
+
+queuing_error:
+	return pkt;
+}
+
+static void
+qla2x00_start_iocbs(srb_t *sp)
+{
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	struct req_que *req = ha->req_q_map[0];
+	device_reg_t __iomem *reg = ISP_QUE_REG(ha, req->id);
+	struct device_reg_2xxx __iomem *ioreg = &ha->iobase->isp;
+
+	/* Adjust ring index. */
+	req->ring_index++;
+	if (req->ring_index == req->length) {
+		req->ring_index = 0;
+		req->ring_ptr = req->ring;
+	} else
+		req->ring_ptr++;
+
+	/* Set chip new ring index. */
+	if (ha->mqenable) {
+		WRT_REG_DWORD(&reg->isp25mq.req_q_in, req->ring_index);
+		RD_REG_DWORD(&ioreg->hccr);
+	} else if (IS_FWI2_CAPABLE(ha)) {
+		WRT_REG_DWORD(&reg->isp24.req_q_in, req->ring_index);
+		RD_REG_DWORD_RELAXED(&reg->isp24.req_q_in);
+	} else {
+		WRT_REG_WORD(ISP_REQ_Q_IN(ha, &reg->isp), req->ring_index);
+		RD_REG_WORD_RELAXED(ISP_REQ_Q_IN(ha, &reg->isp));
+	}
+}
+
+static void
+qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	struct srb_logio *lio = sp->ctx;
+
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags = cpu_to_le16(LCF_COMMAND_PLOGI);
+	if (lio->flags & SRB_LOGIN_COND_PLOGI)
+		logio->control_flags |= cpu_to_le16(LCF_COND_PLOGI);
+	if (lio->flags & SRB_LOGIN_SKIP_PRLI)
+		logio->control_flags |= cpu_to_le16(LCF_SKIP_PRLI);
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->fcport->vp_idx;
+}
+
+static void
+qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	struct srb_logio *lio = sp->ctx;
+	uint16_t opts;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+	mbx->mb0 = cpu_to_le16(MBC_LOGIN_FABRIC_PORT);
+	opts = lio->flags & SRB_LOGIN_COND_PLOGI ? BIT_0: 0;
+	opts |= lio->flags & SRB_LOGIN_SKIP_PRLI ? BIT_1: 0;
+	if (HAS_EXTENDED_IDS(ha)) {
+		mbx->mb1 = cpu_to_le16(sp->fcport->loop_id);
+		mbx->mb10 = cpu_to_le16(opts);
+	} else {
+		mbx->mb1 = cpu_to_le16((sp->fcport->loop_id << 8) | opts);
+	}
+	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+	    sp->fcport->d_id.b.al_pa);
+	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+}
+
+static void
+qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
+{
+	logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
+	logio->control_flags =
+	    cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
+	logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
+	logio->port_id[0] = sp->fcport->d_id.b.al_pa;
+	logio->port_id[1] = sp->fcport->d_id.b.area;
+	logio->port_id[2] = sp->fcport->d_id.b.domain;
+	logio->vp_index = sp->fcport->vp_idx;
+}
+
+static void
+qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
+{
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+
+	mbx->entry_type = MBX_IOCB_TYPE;
+	SET_TARGET_ID(ha, mbx->loop_id, sp->fcport->loop_id);
+	mbx->mb0 = cpu_to_le16(MBC_LOGOUT_FABRIC_PORT);
+	mbx->mb1 = HAS_EXTENDED_IDS(ha) ?
+	    cpu_to_le16(sp->fcport->loop_id):
+	    cpu_to_le16(sp->fcport->loop_id << 8);
+	mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
+	mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
+	    sp->fcport->d_id.b.al_pa);
+	mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx);
+	/* Implicit: mbx->mbx10 = 0. */
+}
+
+int
+qla2x00_start_sp(srb_t *sp)
+{
+	int rval;
+	struct qla_hw_data *ha = sp->fcport->vha->hw;
+	void *pkt;
+	struct srb_ctx *ctx = sp->ctx;
+	unsigned long flags;
+
+	rval = QLA_FUNCTION_FAILED;
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	pkt = qla2x00_alloc_iocbs(sp);
+	if (!pkt)
+		goto done;
+
+	rval = QLA_SUCCESS;
+	switch (ctx->type) {
+	case SRB_LOGIN_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_login_iocb(sp, pkt):
+		    qla2x00_login_iocb(sp, pkt);
+		break;
+	case SRB_LOGOUT_CMD:
+		IS_FWI2_CAPABLE(ha) ?
+		    qla24xx_logout_iocb(sp, pkt):
+		    qla2x00_logout_iocb(sp, pkt);
+		break;
+	default:
+		break;
+	}
+
+	wmb();
+	qla2x00_start_iocbs(sp);
+done:
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	return rval;
+}
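
A side note on the ring-space check in qla2x00_alloc_iocbs() above: it is
plain circular-buffer arithmetic. The driver re-reads the chip's consumer
index and, depending on whether its producer index (ring_index) has wrapped
past it, the free room is either the direct gap between the two or the rest
of the ring. Restated as a standalone helper (illustration only, with a
worked example: a 2048-entry ring with ring_index 2000 and consumer index 16
leaves 2048 - (2000 - 16) = 64 free entries):

	#include <stdint.h>

	/* Illustration only: free-entry count on the request ring, mirroring
	 * the arithmetic in qla2x00_alloc_iocbs(). 'in' is the driver's
	 * ring_index, 'out' the consumer index read back from the chip. */
	static inline uint16_t req_ring_space(uint16_t in, uint16_t out,
					      uint16_t length)
	{
		return (in < out) ? out - in : length - (in - out);
	}
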
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 245e7af..b20a716 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -598,9 +598,54 @@
 		break;
 
 	case MBA_PORT_UPDATE:		/* Port database update */
-		/* Only handle SCNs for our Vport index. */
-		if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
+		/*
+		 * Handle only global and vn-port update events
+		 *
+		 * Relevant inputs:
+		 * mb[1] = N_Port handle of changed port
+		 * OR 0xffff for global event
+		 * mb[2] = New login state
+		 * 7 = Port logged out
+		 * mb[3] = LSB is vp_idx, 0xff = all vps
+		 *
+		 * Skip processing if:
+		 *       Event is global, vp_idx is NOT all vps,
+		 *           vp_idx does not match
+		 *       Event is not global, vp_idx does not match
+		 */
+		if ((mb[1] == 0xffff && (mb[3] & 0xff) != 0xff)
+			|| (mb[1] != 0xffff)) {
+			if (vha->vp_idx != (mb[3] & 0xff))
+				break;
+		}
+
+		/* Global event -- port logout or port unavailable. */
+		if (mb[1] == 0xffff && mb[2] == 0x7) {
+			DEBUG2(printk("scsi(%ld): Asynchronous PORT UPDATE.\n",
+			    vha->host_no));
+			DEBUG(printk(KERN_INFO
+			    "scsi(%ld): Port unavailable %04x %04x %04x.\n",
+			    vha->host_no, mb[1], mb[2], mb[3]));
+
+			if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
+				atomic_set(&vha->loop_state, LOOP_DOWN);
+				atomic_set(&vha->loop_down_timer,
+				    LOOP_DOWN_TIME);
+				vha->device_flags |= DFLG_NO_CABLE;
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+
+			if (vha->vp_idx) {
+				atomic_set(&vha->vp_state, VP_FAILED);
+				fc_vport_set_state(vha->fc_vport,
+				    FC_VPORT_FAILED);
+				qla2x00_mark_all_devices_lost(vha, 1);
+			}
+
+			vha->flags.management_server_logged_in = 0;
+			ha->link_data_rate = PORT_SPEED_UNKNOWN;
 			break;
+		}
 
 		/*
 		 * If PORT UPDATE is global (received LIP_OCCURRED/LIP_RESET
@@ -640,8 +685,9 @@
 		if (vha->vp_idx && test_bit(VP_SCR_NEEDED, &vha->vp_flags))
 			break;
 		/* Only handle SCNs for our Vport index. */
-		if (vha->vp_idx && vha->vp_idx != (mb[3] & 0xff))
+		if (ha->flags.npiv_supported && vha->vp_idx != (mb[3] & 0xff))
 			break;
+
 		DEBUG2(printk("scsi(%ld): Asynchronous RSCR UPDATE.\n",
 		    vha->host_no));
 		DEBUG(printk(KERN_INFO
@@ -874,6 +920,249 @@
 	}
 }
 
+static srb_t *
+qla2x00_get_sp_from_handle(scsi_qla_host_t *vha, const char *func,
+    struct req_que *req, void *iocb)
+{
+	struct qla_hw_data *ha = vha->hw;
+	sts_entry_t *pkt = iocb;
+	srb_t *sp = NULL;
+	uint16_t index;
+
+	index = LSW(pkt->handle);
+	if (index >= MAX_OUTSTANDING_COMMANDS) {
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Invalid completion handle (%x).\n", func, index);
+		set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		goto done;
+	}
+	sp = req->outstanding_cmds[index];
+	if (!sp) {
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Invalid completion handle (%x) -- timed-out.\n", func,
+		    index);
+		return sp;
+	}
+	if (sp->handle != index) {
+		qla_printk(KERN_WARNING, ha,
+		    "%s: SRB handle (%x) mismatch %x.\n", func, sp->handle,
+		    index);
+		return NULL;
+	}
+	req->outstanding_cmds[index] = NULL;
+done:
+	return sp;
+}
+
+static void
+qla2x00_mbx_iocb_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct mbx_entry *mbx)
+{
+	const char func[] = "MBX-IOCB";
+	const char *type;
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_logio *lio;
+	uint16_t data[2];
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, mbx);
+	if (!sp)
+		return;
+
+	type = NULL;
+	lio = sp->ctx;
+	switch (lio->ctx.type) {
+	case SRB_LOGIN_CMD:
+		type = "login";
+		break;
+	case SRB_LOGOUT_CMD:
+		type = "logout";
+		break;
+	default:
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+		    lio->ctx.type);
+		return;
+	}
+
+	del_timer(&lio->ctx.timer);
+	fcport = sp->fcport;
+
+	data[0] = data[1] = 0;
+	if (mbx->entry_status) {
+		DEBUG2(printk(KERN_WARNING
+		    "scsi(%ld:%x): Async-%s error entry - entry-status=%x "
+		    "status=%x state-flag=%x status-flags=%x.\n",
+		    fcport->vha->host_no, sp->handle, type,
+		    mbx->entry_status, le16_to_cpu(mbx->status),
+		    le16_to_cpu(mbx->state_flags),
+		    le16_to_cpu(mbx->status_flags)));
+		DEBUG2(qla2x00_dump_buffer((uint8_t *)mbx, sizeof(*mbx)));
+
+		data[0] = MBS_COMMAND_ERROR;
+		data[1] = lio->flags & SRB_LOGIN_RETRIED ?
+		    QLA_LOGIO_LOGIN_RETRIED: 0;
+		goto done_post_logio_done_work;
+	}
+
+	if (!mbx->status && le16_to_cpu(mbx->mb0) == MBS_COMMAND_COMPLETE) {
+		DEBUG2(printk(KERN_DEBUG
+		    "scsi(%ld:%x): Async-%s complete - mbx1=%x.\n",
+		    fcport->vha->host_no, sp->handle, type,
+		    le16_to_cpu(mbx->mb1)));
+
+		data[0] = MBS_COMMAND_COMPLETE;
+		if (lio->ctx.type == SRB_LOGIN_CMD && le16_to_cpu(mbx->mb1) & BIT_1)
+			fcport->flags |= FCF_FCP2_DEVICE;
+
+		goto done_post_logio_done_work;
+	}
+
+	data[0] = le16_to_cpu(mbx->mb0);
+	switch (data[0]) {
+	case MBS_PORT_ID_USED:
+		data[1] = le16_to_cpu(mbx->mb1);
+		break;
+	case MBS_LOOP_ID_USED:
+		break;
+	default:
+		data[0] = MBS_COMMAND_ERROR;
+		data[1] = lio->flags & SRB_LOGIN_RETRIED ?
+		    QLA_LOGIO_LOGIN_RETRIED: 0;
+		break;
+	}
+
+	DEBUG2(printk(KERN_WARNING
+	    "scsi(%ld:%x): Async-%s failed - status=%x mb0=%x mb1=%x mb2=%x "
+	    "mb6=%x mb7=%x.\n",
+	    fcport->vha->host_no, sp->handle, type, le16_to_cpu(mbx->status),
+	    le16_to_cpu(mbx->mb0), le16_to_cpu(mbx->mb1),
+	    le16_to_cpu(mbx->mb2), le16_to_cpu(mbx->mb6),
+	    le16_to_cpu(mbx->mb7)));
+
+done_post_logio_done_work:
+	lio->ctx.type == SRB_LOGIN_CMD ?
+	    qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
+	    qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
+
+	lio->ctx.free(sp);
+}
+
+static void
+qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
+    struct logio_entry_24xx *logio)
+{
+	const char func[] = "LOGIO-IOCB";
+	const char *type;
+	struct qla_hw_data *ha = vha->hw;
+	fc_port_t *fcport;
+	srb_t *sp;
+	struct srb_logio *lio;
+	uint16_t data[2];
+	uint32_t iop[2];
+
+	sp = qla2x00_get_sp_from_handle(vha, func, req, logio);
+	if (!sp)
+		return;
+
+	type = NULL;
+	lio = sp->ctx;
+	switch (lio->ctx.type) {
+	case SRB_LOGIN_CMD:
+		type = "login";
+		break;
+	case SRB_LOGOUT_CMD:
+		type = "logout";
+		break;
+	default:
+		qla_printk(KERN_WARNING, ha,
+		    "%s: Unrecognized SRB: (%p) type=%d.\n", func, sp,
+		    lio->ctx.type);
+		return;
+	}
+
+	del_timer(&lio->ctx.timer);
+	fcport = sp->fcport;
+
+	data[0] = data[1] = 0;
+	if (logio->entry_status) {
+		DEBUG2(printk(KERN_WARNING
+		    "scsi(%ld:%x): Async-%s error entry - entry-status=%x.\n",
+		    fcport->vha->host_no, sp->handle, type,
+		    logio->entry_status));
+		DEBUG2(qla2x00_dump_buffer((uint8_t *)logio, sizeof(*logio)));
+
+		data[0] = MBS_COMMAND_ERROR;
+		data[1] = lio->flags & SRB_LOGIN_RETRIED ?
+		    QLA_LOGIO_LOGIN_RETRIED: 0;
+		goto done_post_logio_done_work;
+	}
+
+	if (le16_to_cpu(logio->comp_status) == CS_COMPLETE) {
+		DEBUG2(printk(KERN_DEBUG
+		    "scsi(%ld:%x): Async-%s complete - iop0=%x.\n",
+		    fcport->vha->host_no, sp->handle, type,
+		    le32_to_cpu(logio->io_parameter[0])));
+
+		data[0] = MBS_COMMAND_COMPLETE;
+		if (lio->ctx.type == SRB_LOGOUT_CMD)
+			goto done_post_logio_done_work;
+
+		iop[0] = le32_to_cpu(logio->io_parameter[0]);
+		if (iop[0] & BIT_4) {
+			fcport->port_type = FCT_TARGET;
+			if (iop[0] & BIT_8)
+				fcport->flags |= FCF_FCP2_DEVICE;
+		}
+		if (iop[0] & BIT_5)
+			fcport->port_type = FCT_INITIATOR;
+		if (logio->io_parameter[7] || logio->io_parameter[8])
+			fcport->supported_classes |= FC_COS_CLASS2;
+		if (logio->io_parameter[9] || logio->io_parameter[10])
+			fcport->supported_classes |= FC_COS_CLASS3;
+
+		goto done_post_logio_done_work;
+	}
+
+	iop[0] = le32_to_cpu(logio->io_parameter[0]);
+	iop[1] = le32_to_cpu(logio->io_parameter[1]);
+	switch (iop[0]) {
+	case LSC_SCODE_PORTID_USED:
+		data[0] = MBS_PORT_ID_USED;
+		data[1] = LSW(iop[1]);
+		break;
+	case LSC_SCODE_NPORT_USED:
+		data[0] = MBS_LOOP_ID_USED;
+		break;
+	case LSC_SCODE_CMD_FAILED:
+		if ((iop[1] & 0xff) == 0x05) {
+			data[0] = MBS_NOT_LOGGED_IN;
+			break;
+		}
+		/* Fall through. */
+	default:
+		data[0] = MBS_COMMAND_ERROR;
+		data[1] = lio->flags & SRB_LOGIN_RETRIED ?
+		    QLA_LOGIO_LOGIN_RETRIED: 0;
+		break;
+	}
+
+	DEBUG2(printk(KERN_WARNING
+	    "scsi(%ld:%x): Async-%s failed - comp=%x iop0=%x iop1=%x.\n",
+	    fcport->vha->host_no, sp->handle, type,
+	    le16_to_cpu(logio->comp_status),
+	    le32_to_cpu(logio->io_parameter[0]),
+	    le32_to_cpu(logio->io_parameter[1])));
+
+done_post_logio_done_work:
+	lio->ctx.type == SRB_LOGIN_CMD ?
+	    qla2x00_post_async_login_done_work(fcport->vha, fcport, data):
+	    qla2x00_post_async_logout_done_work(fcport->vha, fcport, data);
+
+	lio->ctx.free(sp);
+}
+
 /**
  * qla2x00_process_response_queue() - Process response queue entries.
  * @ha: SCSI driver HA context
@@ -935,6 +1224,9 @@
 		case STATUS_CONT_TYPE:
 			qla2x00_status_cont_entry(rsp, (sts_cont_entry_t *)pkt);
 			break;
+		case MBX_IOCB_TYPE:
+			qla2x00_mbx_iocb_entry(vha, rsp->req,
+			    (struct mbx_entry *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -1223,6 +1515,7 @@
 					cp->device->id, cp->device->lun, resid,
 					scsi_bufflen(cp)));
 
+				scsi_set_resid(cp, resid);
 				cp->result = DID_ERROR << 16;
 				break;
 			}
@@ -1544,6 +1837,10 @@
 			qla24xx_report_id_acquisition(vha,
 			    (struct vp_rpt_id_entry_24xx *)pkt);
 			break;
+		case LOGINOUT_PORT_IOCB_TYPE:
+			qla24xx_logio_entry(vha, rsp->req,
+			    (struct logio_entry_24xx *)pkt);
+			break;
 		default:
 			/* Type Not Supported. */
 			DEBUG4(printk(KERN_WARNING
@@ -1723,8 +2020,10 @@
 
 	vha = qla25xx_get_host(rsp);
 	qla24xx_process_response_queue(vha, rsp);
-	WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
-
+	if (!ha->mqenable) {
+		WRT_REG_DWORD(&reg->hccr, HCCRX_CLR_RISC_INT);
+		RD_REG_DWORD_RELAXED(&reg->hccr);
+	}
 	spin_unlock_irq(&ha->hardware_lock);
 
 	return IRQ_HANDLED;
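
The MBA_PORT_UPDATE comment block above reduces to a simple relevance rule: a
global event (mb[1] == 0xffff) addressed to all vports (low byte of mb[3] ==
0xff) is always processed, and any other combination is processed only by the
vport whose index matches mb[3]. As a standalone predicate (illustration only,
not part of the patch):

	#include <stdbool.h>
	#include <stdint.h>

	/* Illustration only: restates the MBA_PORT_UPDATE skip conditions. */
	static bool port_update_is_for_us(uint16_t vp_idx, uint16_t mb1,
					  uint16_t mb3)
	{
		uint8_t target = mb3 & 0xff;

		if (mb1 == 0xffff && target == 0xff)
			return true;		/* global event, all vports */
		return vp_idx == target;	/* otherwise index must match */
	}
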
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index fe69f30..b6202fe 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -1507,7 +1507,7 @@
 
 	DEBUG11(printk("%s(%ld): entered.\n", __func__, vha->host_no));
 
-	if (ql2xmultique_tag)
+	if (ha->flags.cpu_affinity_enabled)
 		req = ha->req_q_map[0];
 	else
 		req = vha->req;
@@ -2324,7 +2324,7 @@
 	vha = fcport->vha;
 	ha = vha->hw;
 	req = vha->req;
-	if (ql2xmultique_tag)
+	if (ha->flags.cpu_affinity_enabled)
 		rsp = ha->rsp_q_map[tag + 1];
 	else
 		rsp = req->rsp;
@@ -2746,7 +2746,8 @@
 	if (rptid_entry->format == 0) {
 		DEBUG15(printk("%s:format 0 : scsi(%ld) number of VPs setup %d,"
 			" number of VPs acquired %d\n", __func__, vha->host_no,
-			MSB(rptid_entry->vp_count), LSB(rptid_entry->vp_count)));
+			MSB(le16_to_cpu(rptid_entry->vp_count)),
+			LSB(le16_to_cpu(rptid_entry->vp_count))));
 		DEBUG15(printk("%s primary port id %02x%02x%02x\n", __func__,
 			rptid_entry->port_id[2], rptid_entry->port_id[1],
 			rptid_entry->port_id[0]));
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index cd78c50..42b799a 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -42,7 +42,6 @@
 
 	set_bit(vp_id, ha->vp_idx_map);
 	ha->num_vhosts++;
-	ha->cur_vport_count++;
 	vha->vp_idx = vp_id;
 	list_add_tail(&vha->list, &ha->vp_list);
 	mutex_unlock(&ha->vport_lock);
@@ -58,7 +57,6 @@
 	mutex_lock(&ha->vport_lock);
 	vp_id = vha->vp_idx;
 	ha->num_vhosts--;
-	ha->cur_vport_count--;
 	clear_bit(vp_id, ha->vp_idx_map);
 	list_del(&vha->list);
 	mutex_unlock(&ha->vport_lock);
@@ -235,7 +233,11 @@
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
 	}
 
-	/* To exclusively reset vport, we need to log it out first.*/
+	/*
+	 * To exclusively reset the vport, we need to log it out first.  Note:
+	 * this control_vp can fail if an ISP reset has already been issued;
+	 * that is expected, as the vport would already be logged out by it.
+	 */
 	if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags))
 		qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL);
 
@@ -247,18 +249,11 @@
 static int
 qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
 {
-	struct qla_hw_data *ha = vha->hw;
-	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	qla2x00_do_work(vha);
 
 	if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) {
 		/* VP acquired. complete port configuration */
-		if (atomic_read(&base_vha->loop_state) == LOOP_READY) {
-			qla24xx_configure_vp(vha);
-		} else {
-			set_bit(VP_IDX_ACQUIRED, &vha->vp_flags);
-			set_bit(VP_DPC_NEEDED, &base_vha->dpc_flags);
-		}
-
+		qla24xx_configure_vp(vha);
 		return 0;
 	}
 
@@ -309,6 +304,9 @@
 
 	clear_bit(VP_DPC_NEEDED, &vha->dpc_flags);
 
+	if (!(ha->current_topology & ISP_CFG_F))
+		return;
+
 	list_for_each_entry_safe(vp, tvp, &ha->vp_list, list) {
 		if (vp->vp_idx)
 			ret = qla2x00_do_dpc_vp(vp);
@@ -413,6 +411,11 @@
 
 	vha->flags.init_done = 1;
 
+	mutex_lock(&ha->vport_lock);
+	set_bit(vha->vp_idx, ha->vp_idx_map);
+	ha->cur_vport_count++;
+	mutex_unlock(&ha->vport_lock);
+
 	return vha;
 
 create_vhost_failed:
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f0396e7..b79fca7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -287,9 +287,12 @@
 	int ques, req, ret;
 	struct qla_hw_data *ha = vha->hw;
 
+	if (!(ha->fw_attributes & BIT_6)) {
+		qla_printk(KERN_INFO, ha,
+			"Firmware is not multi-queue capable\n");
+		goto fail;
+	}
 	if (ql2xmultique_tag) {
-		/* CPU affinity mode */
-		ha->wq = create_workqueue("qla2xxx_wq");
 		/* create a request queue for IO */
 		options |= BIT_7;
 		req = qla25xx_create_req_que(ha, options, 0, 0, -1,
@@ -299,6 +302,7 @@
 				"Can't create request queue\n");
 			goto fail;
 		}
+		ha->wq = create_workqueue("qla2xxx_wq");
 		vha->req = ha->req_q_map[req];
 		options |= BIT_1;
 		for (ques = 1; ques < ha->max_rsp_queues; ques++) {
@@ -309,6 +313,8 @@
 				goto fail2;
 			}
 		}
+		ha->flags.cpu_affinity_enabled = 1;
+
 		DEBUG2(qla_printk(KERN_INFO, ha,
 			"CPU affinity mode enabled, no. of response"
 			" queues:%d, no. of request queues:%d\n",
@@ -317,8 +323,13 @@
 	return 0;
 fail2:
 	qla25xx_delete_queues(vha);
+	destroy_workqueue(ha->wq);
+	ha->wq = NULL;
 fail:
 	ha->mqenable = 0;
+	kfree(ha->req_q_map);
+	kfree(ha->rsp_q_map);
+	ha->max_req_queues = ha->max_rsp_queues = 1;
 	return 1;
 }
 
@@ -462,6 +473,7 @@
 	sp->flags = 0;
 	CMD_SP(cmd) = (void *)sp;
 	cmd->scsi_done = done;
+	sp->ctx = NULL;
 
 	return sp;
 }
@@ -556,11 +568,8 @@
 	unsigned long wait_iter = ABORT_WAIT_ITER;
 	int ret = QLA_SUCCESS;
 
-	while (CMD_SP(cmd)) {
+	while (CMD_SP(cmd) && wait_iter--) {
 		msleep(ABORT_POLLING_PERIOD);
-
-		if (--wait_iter)
-			break;
 	}
 	if (CMD_SP(cmd))
 		ret = QLA_FUNCTION_FAILED;
@@ -698,6 +707,8 @@
 			continue;
 		if (sp->fcport != fcport)
 			continue;
+		if (sp->ctx)
+			continue;
 
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		if (ha->isp_ops->abort_command(sp)) {
@@ -783,7 +794,8 @@
 
 		if (sp == NULL)
 			continue;
-
+		if (sp->ctx)
+			continue;
 		if (sp->cmd != cmd)
 			continue;
 
@@ -848,7 +860,8 @@
 		sp = req->outstanding_cmds[cnt];
 		if (!sp)
 			continue;
-
+		if (sp->ctx)
+			continue;
 		if (vha->vp_idx != sp->fcport->vha->vp_idx)
 			continue;
 		match = 0;
@@ -1106,8 +1119,7 @@
 	struct fc_port *fcport;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (ha->flags.enable_lip_full_login && !vha->vp_idx &&
-	    !IS_QLA81XX(ha)) {
+	if (ha->flags.enable_lip_full_login && !IS_QLA81XX(ha)) {
 		ret = qla2x00_full_login_lip(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -1120,7 +1132,7 @@
 		qla2x00_wait_for_loop_ready(vha);
 	}
 
-	if (ha->flags.enable_lip_reset && !vha->vp_idx) {
+	if (ha->flags.enable_lip_reset) {
 		ret = qla2x00_lip_reset(vha);
 		if (ret != QLA_SUCCESS) {
 			DEBUG2_3(printk("%s(%ld): failed: "
@@ -1154,6 +1166,7 @@
 	int que, cnt;
 	unsigned long flags;
 	srb_t *sp;
+	struct srb_ctx *ctx;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 
@@ -1166,8 +1179,14 @@
 			sp = req->outstanding_cmds[cnt];
 			if (sp) {
 				req->outstanding_cmds[cnt] = NULL;
-				sp->cmd->result = res;
-				qla2x00_sp_compl(ha, sp);
+				if (!sp->ctx) {
+					sp->cmd->result = res;
+					qla2x00_sp_compl(ha, sp);
+				} else {
+					ctx = sp->ctx;
+					del_timer_sync(&ctx->timer);
+					ctx->free(sp);
+				}
 			}
 		}
 	}
@@ -1193,6 +1212,7 @@
 	scsi_qla_host_t *vha = shost_priv(sdev->host);
 	struct qla_hw_data *ha = vha->hw;
 	struct fc_rport *rport = starget_to_rport(sdev->sdev_target);
+	fc_port_t *fcport = *(fc_port_t **)rport->dd_data;
 	struct req_que *req = vha->req;
 
 	if (sdev->tagged_supported)
@@ -1201,6 +1221,8 @@
 		scsi_deactivate_tcq(sdev, req->max_q_depth);
 
 	rport->dev_loss_tmo = ha->port_down_retry_count;
+	if (sdev->type == TYPE_TAPE)
+		fcport->flags |= FCF_TAPE_PRESENT;
 
 	return 0;
 }
@@ -1923,6 +1945,7 @@
 	if (ret)
 		goto probe_init_failed;
 	/* Alloc arrays of request and response ring ptrs */
+que_init:
 	if (!qla2x00_alloc_queues(ha)) {
 		qla_printk(KERN_WARNING, ha,
 		"[ERROR] Failed to allocate memory for queue"
@@ -1959,11 +1982,14 @@
 		goto probe_failed;
 	}
 
-	if (ha->mqenable)
-		if (qla25xx_setup_mode(base_vha))
+	if (ha->mqenable) {
+		if (qla25xx_setup_mode(base_vha)) {
 			qla_printk(KERN_WARNING, ha,
 				"Can't create queues, falling back to single"
 				" queue mode\n");
+			goto que_init;
+		}
+	}
 
 	if (ha->flags.running_gold_fw)
 		goto skip_dpc;
@@ -2155,17 +2181,19 @@
     int defer)
 {
 	struct fc_rport *rport;
+	scsi_qla_host_t *base_vha;
 
 	if (!fcport->rport)
 		return;
 
 	rport = fcport->rport;
 	if (defer) {
+		base_vha = pci_get_drvdata(vha->hw->pdev);
 		spin_lock_irq(vha->host->host_lock);
 		fcport->drport = rport;
 		spin_unlock_irq(vha->host->host_lock);
-		set_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags);
-		qla2xxx_wake_dpc(vha);
+		set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
+		qla2xxx_wake_dpc(base_vha);
 	} else
 		fc_remote_port_delete(rport);
 }
@@ -2237,8 +2265,9 @@
 	fc_port_t *fcport;
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
-		if (vha->vp_idx != fcport->vp_idx)
+		if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx)
 			continue;
+
 		/*
 		 * No point in marking the device as lost, if the device is
 		 * already DEAD.
@@ -2246,10 +2275,12 @@
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
-			atomic_set(&fcport->state, FCS_DEVICE_LOST);
-			qla2x00_schedule_rport_del(vha, fcport, defer);
-		} else
-			atomic_set(&fcport->state, FCS_DEVICE_LOST);
+			if (defer)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+			else if (vha->vp_idx == fcport->vp_idx)
+				qla2x00_schedule_rport_del(vha, fcport, defer);
+		}
+		atomic_set(&fcport->state, FCS_DEVICE_LOST);
 	}
 }
 
@@ -2598,7 +2629,31 @@
 	return qla2x00_post_work(vha, e);
 }
 
-static void
+#define qla2x00_post_async_work(name, type)	\
+int qla2x00_post_async_##name##_work(		\
+    struct scsi_qla_host *vha,			\
+    fc_port_t *fcport, uint16_t *data)		\
+{						\
+	struct qla_work_evt *e;			\
+						\
+	e = qla2x00_alloc_work(vha, type);	\
+	if (!e)					\
+		return QLA_FUNCTION_FAILED;	\
+						\
+	e->u.logio.fcport = fcport;		\
+	if (data) {				\
+		e->u.logio.data[0] = data[0];	\
+		e->u.logio.data[1] = data[1];	\
+	}					\
+	return qla2x00_post_work(vha, e);	\
+}
+
+qla2x00_post_async_work(login, QLA_EVT_ASYNC_LOGIN);
+qla2x00_post_async_work(login_done, QLA_EVT_ASYNC_LOGIN_DONE);
+qla2x00_post_async_work(logout, QLA_EVT_ASYNC_LOGOUT);
+qla2x00_post_async_work(logout_done, QLA_EVT_ASYNC_LOGOUT_DONE);
+
+void
 qla2x00_do_work(struct scsi_qla_host *vha)
 {
 	struct qla_work_evt *e, *tmp;
@@ -2620,6 +2675,21 @@
 		case QLA_EVT_IDC_ACK:
 			qla81xx_idc_ack(vha, e->u.idc_ack.mb);
 			break;
+		case QLA_EVT_ASYNC_LOGIN:
+			qla2x00_async_login(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_LOGIN_DONE:
+			qla2x00_async_login_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT:
+			qla2x00_async_logout(vha, e->u.logio.fcport);
+			break;
+		case QLA_EVT_ASYNC_LOGOUT_DONE:
+			qla2x00_async_logout_done(vha, e->u.logio.fcport,
+			    e->u.logio.data);
+			break;
 		}
 		if (e->flags & QLA_EVT_FLAG_FREE)
 			kfree(e);
@@ -2635,6 +2705,7 @@
 	int status;
 	uint16_t        next_loopid = 0;
 	struct qla_hw_data *ha = vha->hw;
+	uint16_t data[2];
 
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 	/*
@@ -2644,6 +2715,7 @@
 		if (atomic_read(&fcport->state) !=
 			FCS_ONLINE && fcport->login_retry) {
 
+			fcport->login_retry--;
 			if (fcport->flags & FCF_FABRIC_DEVICE) {
 				if (fcport->flags & FCF_TAPE_PRESENT)
 					ha->isp_ops->fabric_logout(vha,
@@ -2652,13 +2724,22 @@
 							fcport->d_id.b.area,
 							fcport->d_id.b.al_pa);
 
-				status = qla2x00_fabric_login(vha, fcport,
-							&next_loopid);
+				if (IS_ALOGIO_CAPABLE(ha)) {
+					data[0] = 0;
+					data[1] = QLA_LOGIO_LOGIN_RETRIED;
+					status = qla2x00_post_async_login_work(
+					    vha, fcport, data);
+					if (status == QLA_SUCCESS)
+						continue;
+					/* Attempt a retry. */
+					status = 1;
+				} else
+					status = qla2x00_fabric_login(vha,
+					    fcport, &next_loopid);
 			} else
 				status = qla2x00_local_device_login(vha,
 								fcport);
 
-			fcport->login_retry--;
 			if (status == QLA_SUCCESS) {
 				fcport->old_loop_id = fcport->loop_id;
 
@@ -2831,6 +2912,9 @@
 	 */
 	ha->dpc_active = 0;
 
+	/* Cleanup any residual CTX SRBs. */
+	qla2x00_abort_all_cmds(base_vha, DID_NO_CONNECT << 16);
+
 	return 0;
 }
 
@@ -2971,6 +3055,8 @@
 					sp = req->outstanding_cmds[index];
 					if (!sp)
 						continue;
+					if (sp->ctx)
+						continue;
 					sfcp = sp->fcport;
 					if (!(sfcp->flags & FCF_TAPE_PRESENT))
 						continue;
@@ -2987,8 +3073,7 @@
 
 		/* if the loop has been down for 4 minutes, reinit adapter */
 		if (atomic_dec_and_test(&vha->loop_down_timer) != 0) {
-			if (!(vha->device_flags & DFLG_NO_CABLE) &&
-			    !vha->vp_idx) {
+			if (!(vha->device_flags & DFLG_NO_CABLE)) {
 				DEBUG(printk("scsi(%ld): Loop down - "
 				    "aborting ISP.\n",
 				    vha->host_no));
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 8436970..ac107a2 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.03.01-k4"
+#define QLA2XXX_VERSION      "8.03.01-k6"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	3
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 8025ee1..c196d55 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -227,11 +227,11 @@
 	case SCS_DATA_UNDERRUN:
 	case SCS_DATA_OVERRUN:
 		if ((sts_entry->iscsiFlags & ISCSI_FLAG_RESIDUAL_OVER) ||
-			(sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
-			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun, "
-				      "residual = 0x%x\n", ha->host_no,
+		     (sts_entry->completionStatus == SCS_DATA_OVERRUN)) {
+			DEBUG2(printk("scsi%ld:%d:%d:%d: %s: " "Data overrun\n",
+				      ha->host_no,
 				      cmd->device->channel, cmd->device->id,
-				      cmd->device->lun, __func__, residual));
+				      cmd->device->lun, __func__));
 
 			cmd->result = DID_ERROR << 16;
 			break;
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 2de5f3a..b6e0307 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -994,7 +994,7 @@
 	 * all the existing users tried this hard.
 	 */
 	result = scsi_execute_req(sdev, cmd, DMA_FROM_DEVICE, buffer,
-				  len + 4, NULL, 30 * HZ, 3, NULL);
+				  len, NULL, 30 * HZ, 3, NULL);
 	if (result)
 		return result;
 
@@ -1021,13 +1021,14 @@
 {
 	int i, result;
 	unsigned int len;
-	unsigned char *buf = kmalloc(259, GFP_KERNEL);
+	const unsigned int init_vpd_len = 255;
+	unsigned char *buf = kmalloc(init_vpd_len, GFP_KERNEL);
 
 	if (!buf)
 		return NULL;
 
 	/* Ask for all the pages supported by this device */
-	result = scsi_vpd_inquiry(sdev, buf, 0, 255);
+	result = scsi_vpd_inquiry(sdev, buf, 0, init_vpd_len);
 	if (result)
 		goto fail;
 
@@ -1050,12 +1051,12 @@
 	 * Some pages are longer than 255 bytes.  The actual length of
 	 * the page is returned in the header.
 	 */
-	len = (buf[2] << 8) | buf[3];
-	if (len <= 255)
+	len = ((buf[2] << 8) | buf[3]) + 4;
+	if (len <= init_vpd_len)
 		return buf;
 
 	kfree(buf);
-	buf = kmalloc(len + 4, GFP_KERNEL);
+	buf = kmalloc(len, GFP_KERNEL);
 	result = scsi_vpd_inquiry(sdev, buf, page, len);
 	if (result)
 		goto fail;
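
The VPD rework above hinges on how a page reports its size: bytes 2-3 of the
VPD header hold the page length excluding the 4-byte header itself, so the
buffer that must be allocated and requested is that value plus 4. A small
restatement with a worked example (illustration only, not part of the patch):

	#include <stdint.h>

	/* Illustration only: total transfer length for a SCSI VPD page.
	 * hdr points at the 4-byte VPD page header. */
	static inline unsigned int vpd_total_len(const uint8_t *hdr)
	{
		/* e.g. hdr[2] = 0x01, hdr[3] = 0x20 -> 0x120 + 4 = 292 bytes */
		return ((hdr[2] << 8) | hdr[3]) + 4;
	}
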
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a168935..877204d 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -382,9 +382,13 @@
 		 * who knows?  FIXME(eric)
 		 */
 		return SUCCESS;
+	case RESERVATION_CONFLICT:
+		/*
+		 * let issuer deal with this, it could be just fine
+		 */
+		return SUCCESS;
 	case BUSY:
 	case QUEUE_FULL:
-	case RESERVATION_CONFLICT:
 	default:
 		return FAILED;
 	}
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index f3c4089..662024d 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -896,6 +896,7 @@
 			scsi_print_result(cmd);
 			if (driver_byte(result) & DRIVER_SENSE)
 				scsi_print_sense("", cmd);
+			scsi_print_command(cmd);
 		}
 		blk_end_request_all(req, -EIO);
 		scsi_next_command(cmd);
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 292c02f..b98885d 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -291,7 +291,7 @@
 #define FC_STARGET_NUM_ATTRS 	3
 #define FC_RPORT_NUM_ATTRS	10
 #define FC_VPORT_NUM_ATTRS	9
-#define FC_HOST_NUM_ATTRS	21
+#define FC_HOST_NUM_ATTRS	22
 
 struct fc_internal {
 	struct scsi_transport_template t;
@@ -3432,7 +3432,7 @@
 
 /**
  * fc_bsg_softirq_done - softirq done routine for destroying the bsg requests
- * @req:        BSG request that holds the job to be destroyed
+ * @rq:        BSG request that holds the job to be destroyed
  */
 static void fc_bsg_softirq_done(struct request *rq)
 {
diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
index b47240c..ad897df 100644
--- a/drivers/scsi/scsi_transport_iscsi.c
+++ b/drivers/scsi/scsi_transport_iscsi.c
@@ -36,6 +36,38 @@
 
 #define ISCSI_TRANSPORT_VERSION "2.0-870"
 
+static int dbg_session;
+module_param_named(debug_session, dbg_session, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_session,
+		 "Turn on debugging for sessions in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+static int dbg_conn;
+module_param_named(debug_conn, dbg_conn, int,
+		   S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(debug_conn,
+		 "Turn on debugging for connections in scsi_transport_iscsi "
+		 "module. Set to 1 to turn on, and zero to turn off. Default "
+		 "is off.");
+
+#define ISCSI_DBG_TRANS_SESSION(_session, dbg_fmt, arg...)		\
+	do {								\
+		if (dbg_session)					\
+			iscsi_cls_session_printk(KERN_INFO, _session,	\
+						 "%s: " dbg_fmt,	\
+						 __func__, ##arg);	\
+	} while (0)
+
+#define ISCSI_DBG_TRANS_CONN(_conn, dbg_fmt, arg...)			\
+	do {								\
+		if (dbg_conn)						\
+			iscsi_cls_conn_printk(KERN_INFO, _conn,		\
+					      "%s: " dbg_fmt,		\
+					      __func__, ##arg);	\
+	} while (0)
+
 struct iscsi_internal {
 	struct scsi_transport_template t;
 	struct iscsi_transport *iscsi_transport;
@@ -377,6 +409,7 @@
 
 	shost = iscsi_session_to_shost(session);
 	scsi_host_put(shost);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session release\n");
 	kfree(session);
 }
 
@@ -441,6 +474,9 @@
 		return 0;
 
 	session = iscsi_dev_to_session(dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Scanning session\n");
+
 	shost = iscsi_session_to_shost(session);
 	ihost = shost->shost_data;
 
@@ -448,8 +484,7 @@
 	spin_lock_irqsave(&session->lock, flags);
 	if (session->state != ISCSI_SESSION_LOGGED_IN) {
 		spin_unlock_irqrestore(&session->lock, flags);
-		mutex_unlock(&ihost->mutex);
-		return 0;
+		goto user_scan_exit;
 	}
 	id = session->target_id;
 	spin_unlock_irqrestore(&session->lock, flags);
@@ -462,7 +497,10 @@
 			scsi_scan_target(&session->dev, 0, id,
 					 scan_data->lun, 1);
 	}
+
+user_scan_exit:
 	mutex_unlock(&ihost->mutex);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session scan\n");
 	return 0;
 }
 
@@ -522,7 +560,9 @@
 	if (session->transport->session_recovery_timedout)
 		session->transport->session_recovery_timedout(session);
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking SCSI target\n");
 	scsi_target_unblock(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking SCSI target\n");
 }
 
 static void __iscsi_unblock_session(struct work_struct *work)
@@ -534,6 +574,7 @@
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unblocking session\n");
 	/*
 	 * The recovery and unblock work get run from the same workqueue,
 	 * so try to cancel it if it was going to run after this unblock.
@@ -553,6 +594,7 @@
 		if (scsi_queue_work(shost, &session->scan_work))
 			atomic_inc(&ihost->nr_scans);
 	}
+	ISCSI_DBG_TRANS_SESSION(session, "Completed unblocking session\n");
 }
 
 /**
@@ -579,10 +621,12 @@
 				     block_work);
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Blocking session\n");
 	spin_lock_irqsave(&session->lock, flags);
 	session->state = ISCSI_SESSION_FAILED;
 	spin_unlock_irqrestore(&session->lock, flags);
 	scsi_target_block(&session->dev);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed SCSI target blocking\n");
 	queue_delayed_work(iscsi_eh_timer_workq, &session->recovery_work,
 			   session->recovery_tmo * HZ);
 }
@@ -602,6 +646,8 @@
 	struct iscsi_cls_host *ihost = shost->shost_data;
 	unsigned long flags;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Unbinding session\n");
+
 	/* Prevent new scans and make sure scanning is not in progress */
 	mutex_lock(&ihost->mutex);
 	spin_lock_irqsave(&session->lock, flags);
@@ -616,6 +662,7 @@
 
 	scsi_remove_target(&session->dev);
 	iscsi_session_event(session, ISCSI_KEVENT_UNBIND_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed target removal\n");
 }
 
 struct iscsi_cls_session *
@@ -647,6 +694,8 @@
 	device_initialize(&session->dev);
 	if (dd_size)
 		session->dd_data = &session[1];
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session allocation\n");
 	return session;
 }
 EXPORT_SYMBOL_GPL(iscsi_alloc_session);
@@ -712,6 +761,7 @@
 	spin_unlock_irqrestore(&sesslock, flags);
 
 	iscsi_session_event(session, ISCSI_KEVENT_CREATE_SESSION);
+	ISCSI_DBG_TRANS_SESSION(session, "Completed session adding\n");
 	return 0;
 
 release_host:
@@ -752,6 +802,7 @@
 	struct iscsi_cls_conn *conn = iscsi_dev_to_conn(dev);
 	struct device *parent = conn->dev.parent;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Releasing conn\n");
 	kfree(conn);
 	put_device(parent);
 }
@@ -774,6 +825,8 @@
 	unsigned long flags;
 	int err;
 
+	ISCSI_DBG_TRANS_SESSION(session, "Removing session\n");
+
 	spin_lock_irqsave(&sesslock, flags);
 	list_del(&session->sess_list);
 	spin_unlock_irqrestore(&sesslock, flags);
@@ -807,12 +860,15 @@
 					 "for session. Error %d.\n", err);
 
 	transport_unregister_device(&session->dev);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session removal\n");
 	device_del(&session->dev);
 }
 EXPORT_SYMBOL_GPL(iscsi_remove_session);
 
 void iscsi_free_session(struct iscsi_cls_session *session)
 {
+	ISCSI_DBG_TRANS_SESSION(session, "Freeing session\n");
 	iscsi_session_event(session, ISCSI_KEVENT_DESTROY_SESSION);
 	put_device(&session->dev);
 }
@@ -828,6 +884,7 @@
 int iscsi_destroy_session(struct iscsi_cls_session *session)
 {
 	iscsi_remove_session(session);
+	ISCSI_DBG_TRANS_SESSION(session, "Completing session destruction\n");
 	iscsi_free_session(session);
 	return 0;
 }
@@ -885,6 +942,8 @@
 	list_add(&conn->conn_list, &connlist);
 	conn->active = 1;
 	spin_unlock_irqrestore(&connlock, flags);
+
+	ISCSI_DBG_TRANS_CONN(conn, "Completed conn creation\n");
 	return conn;
 
 release_parent_ref:
@@ -912,6 +971,7 @@
 	spin_unlock_irqrestore(&connlock, flags);
 
 	transport_unregister_device(&conn->dev);
+	ISCSI_DBG_TRANS_CONN(conn, "Completing conn destruction\n");
 	device_unregister(&conn->dev);
 	return 0;
 }
@@ -1200,6 +1260,9 @@
 					 "Cannot notify userspace of session "
 					 "event %u. Check iscsi daemon\n",
 					 event);
+
+	ISCSI_DBG_TRANS_SESSION(session, "Completed handling event %d rc %d\n",
+				event, rc);
 	return rc;
 }
 EXPORT_SYMBOL_GPL(iscsi_session_event);
@@ -1221,6 +1284,8 @@
 	shost = iscsi_session_to_shost(session);
 	ev->r.c_session_ret.host_no = shost->host_no;
 	ev->r.c_session_ret.sid = session->sid;
+	ISCSI_DBG_TRANS_SESSION(session,
+				"Completed creating transport session\n");
 	return 0;
 }
 
@@ -1246,6 +1311,8 @@
 
 	ev->r.c_conn_ret.sid = session->sid;
 	ev->r.c_conn_ret.cid = conn->cid;
+
+	ISCSI_DBG_TRANS_CONN(conn, "Completed creating transport conn\n");
 	return 0;
 }
 
@@ -1258,8 +1325,10 @@
 	if (!conn)
 		return -EINVAL;
 
+	ISCSI_DBG_TRANS_CONN(conn, "Destroying transport conn\n");
 	if (transport->destroy_conn)
 		transport->destroy_conn(conn);
+
 	return 0;
 }
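
The ISCSI_DBG_TRANS_SESSION()/ISCSI_DBG_TRANS_CONN() macros introduced above
gate their output on the debug_session and debug_conn module parameters, which
are declared writable and so can typically be toggled at runtime through the
module's parameters directory in sysfs. Additional call sites would follow the
pattern already used in this patch; a minimal sketch, assuming valid session
and conn class objects in scope:

	/* Sketch only: 'session' and 'conn' are assumed to be valid
	 * iscsi_cls_session / iscsi_cls_conn pointers. */
	ISCSI_DBG_TRANS_SESSION(session, "Recovery timeout set to %d secs\n",
				session->recovery_tmo);
	ISCSI_DBG_TRANS_CONN(conn, "Bound conn %u to session\n", conn->cid);
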
 
diff --git a/drivers/scsi/scsi_transport_sas.c b/drivers/scsi/scsi_transport_sas.c
index 0895d3c..fd47cb1 100644
--- a/drivers/scsi/scsi_transport_sas.c
+++ b/drivers/scsi/scsi_transport_sas.c
@@ -1692,10 +1692,6 @@
 	i->f = ft;
 
 	count = 0;
-	SETUP_PORT_ATTRIBUTE(num_phys);
-	i->host_attrs[count] = NULL;
-
-	count = 0;
 	SETUP_PHY_ATTRIBUTE(initiator_port_protocols);
 	SETUP_PHY_ATTRIBUTE(target_port_protocols);
 	SETUP_PHY_ATTRIBUTE(device_type);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index b7b9fec..a89c421 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2021,6 +2021,7 @@
 
 	sd_printk(KERN_NOTICE, sdkp, "Attached SCSI %sdisk\n",
 		  sdp->removable ? "removable " : "");
+	put_device(&sdkp->dev);
 }
 
 /**
@@ -2106,6 +2107,7 @@
 
 	get_device(&sdp->sdev_gendev);
 
+	get_device(&sdkp->dev);	/* prevent release before async_schedule */
 	async_schedule(sd_probe_async, sdkp);
 
 	return 0;
diff --git a/drivers/scsi/ses.c b/drivers/scsi/ses.c
index 4f618f4..55b034b 100644
--- a/drivers/scsi/ses.c
+++ b/drivers/scsi/ses.c
@@ -347,6 +347,97 @@
 	return 0;
 }
 
+#define INIT_ALLOC_SIZE 32
+
+static void ses_enclosure_data_process(struct enclosure_device *edev,
+				       struct scsi_device *sdev,
+				       int create)
+{
+	u32 result;
+	unsigned char *buf = NULL, *type_ptr, *desc_ptr, *addl_desc_ptr = NULL;
+	int i, j, page7_len, len, components;
+	struct ses_device *ses_dev = edev->scratch;
+	int types = ses_dev->page1[10];
+	unsigned char *hdr_buf = kzalloc(INIT_ALLOC_SIZE, GFP_KERNEL);
+
+	if (!hdr_buf)
+		goto simple_populate;
+
+	/* re-read page 10 */
+	if (ses_dev->page10)
+		ses_recv_diag(sdev, 10, ses_dev->page10, ses_dev->page10_len);
+	/* Page 7 for the descriptors is optional */
+	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
+	if (result)
+		goto simple_populate;
+
+	page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
+	/* add 1 for trailing '\0' we'll use */
+	buf = kzalloc(len + 1, GFP_KERNEL);
+	if (!buf)
+		goto simple_populate;
+	result = ses_recv_diag(sdev, 7, buf, len);
+	if (result) {
+ simple_populate:
+		kfree(buf);
+		buf = NULL;
+		desc_ptr = NULL;
+		len = 0;
+		page7_len = 0;
+	} else {
+		desc_ptr = buf + 8;
+		len = (desc_ptr[2] << 8) + desc_ptr[3];
+		/* skip past overall descriptor */
+		desc_ptr += len + 4;
+		if (ses_dev->page10)
+			addl_desc_ptr = ses_dev->page10 + 8;
+	}
+	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
+	components = 0;
+	for (i = 0; i < types; i++, type_ptr += 4) {
+		for (j = 0; j < type_ptr[1]; j++) {
+			char *name = NULL;
+			struct enclosure_component *ecomp;
+
+			if (desc_ptr) {
+				if (desc_ptr >= buf + page7_len) {
+					desc_ptr = NULL;
+				} else {
+					len = (desc_ptr[2] << 8) + desc_ptr[3];
+					desc_ptr += 4;
+					/* Add trailing zero - pushes into
+					 * reserved space */
+					desc_ptr[len] = '\0';
+					name = desc_ptr;
+				}
+			}
+			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
+			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
+
+				if (create)
+					ecomp =	enclosure_component_register(edev,
+									     components++,
+									     type_ptr[0],
+									     name);
+				else
+					ecomp = &edev->component[components++];
+
+				if (!IS_ERR(ecomp) && addl_desc_ptr)
+					ses_process_descriptor(ecomp,
+							       addl_desc_ptr);
+			}
+			if (desc_ptr)
+				desc_ptr += len;
+
+			if (addl_desc_ptr)
+				addl_desc_ptr += addl_desc_ptr[1] + 2;
+
+		}
+	}
+	kfree(buf);
+	kfree(hdr_buf);
+}
+
 static void ses_match_to_enclosure(struct enclosure_device *edev,
 				   struct scsi_device *sdev)
 {
@@ -361,6 +452,8 @@
 	if (!buf)
 		return;
 
+	ses_enclosure_data_process(edev, to_scsi_device(edev->edev.parent), 0);
+
 	vpd_len = ((buf[2] << 8) | buf[3]) + 4;
 
 	desc = buf + 4;
@@ -395,28 +488,26 @@
 	kfree(buf);
 }
 
-#define INIT_ALLOC_SIZE 32
-
 static int ses_intf_add(struct device *cdev,
 			struct class_interface *intf)
 {
 	struct scsi_device *sdev = to_scsi_device(cdev->parent);
 	struct scsi_device *tmp_sdev;
-	unsigned char *buf = NULL, *hdr_buf, *type_ptr, *desc_ptr = NULL,
-		*addl_desc_ptr = NULL;
+	unsigned char *buf = NULL, *hdr_buf, *type_ptr;
 	struct ses_device *ses_dev;
 	u32 result;
-	int i, j, types, len, page7_len = 0, components = 0;
+	int i, types, len, components = 0;
 	int err = -ENOMEM;
 	struct enclosure_device *edev;
 	struct ses_component *scomp = NULL;
 
 	if (!scsi_device_enclosure(sdev)) {
 		/* not an enclosure, but might be in one */
-		edev = enclosure_find(&sdev->host->shost_gendev);
-		if (edev) {
+		struct enclosure_device *prev = NULL;
+
+		while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
 			ses_match_to_enclosure(edev, sdev);
-			put_device(&edev->edev);
+			prev = edev;
 		}
 		return -ENODEV;
 	}
@@ -500,6 +591,7 @@
 		ses_dev->page10_len = len;
 		buf = NULL;
 	}
+	kfree(hdr_buf);
 
 	scomp = kzalloc(sizeof(struct ses_component) * components, GFP_KERNEL);
 	if (!scomp)
@@ -516,72 +608,7 @@
 	for (i = 0; i < components; i++)
 		edev->component[i].scratch = scomp + i;
 
-	/* Page 7 for the descriptors is optional */
-	result = ses_recv_diag(sdev, 7, hdr_buf, INIT_ALLOC_SIZE);
-	if (result)
-		goto simple_populate;
-
-	page7_len = len = (hdr_buf[2] << 8) + hdr_buf[3] + 4;
-	/* add 1 for trailing '\0' we'll use */
-	buf = kzalloc(len + 1, GFP_KERNEL);
-	if (!buf)
-		goto simple_populate;
-	result = ses_recv_diag(sdev, 7, buf, len);
-	if (result) {
- simple_populate:
-		kfree(buf);
-		buf = NULL;
-		desc_ptr = NULL;
-		addl_desc_ptr = NULL;
-	} else {
-		desc_ptr = buf + 8;
-		len = (desc_ptr[2] << 8) + desc_ptr[3];
-		/* skip past overall descriptor */
-		desc_ptr += len + 4;
-		if (ses_dev->page10)
-			addl_desc_ptr = ses_dev->page10 + 8;
-	}
-	type_ptr = ses_dev->page1 + 12 + ses_dev->page1[11];
-	components = 0;
-	for (i = 0; i < types; i++, type_ptr += 4) {
-		for (j = 0; j < type_ptr[1]; j++) {
-			char *name = NULL;
-			struct enclosure_component *ecomp;
-
-			if (desc_ptr) {
-				if (desc_ptr >= buf + page7_len) {
-					desc_ptr = NULL;
-				} else {
-					len = (desc_ptr[2] << 8) + desc_ptr[3];
-					desc_ptr += 4;
-					/* Add trailing zero - pushes into
-					 * reserved space */
-					desc_ptr[len] = '\0';
-					name = desc_ptr;
-				}
-			}
-			if (type_ptr[0] == ENCLOSURE_COMPONENT_DEVICE ||
-			    type_ptr[0] == ENCLOSURE_COMPONENT_ARRAY_DEVICE) {
-
-				ecomp =	enclosure_component_register(edev,
-							     components++,
-							     type_ptr[0],
-							     name);
-
-				if (!IS_ERR(ecomp) && addl_desc_ptr)
-					ses_process_descriptor(ecomp,
-							       addl_desc_ptr);
-			}
-			if (desc_ptr)
-				desc_ptr += len;
-
-			if (addl_desc_ptr)
-				addl_desc_ptr += addl_desc_ptr[1] + 2;
-
-		}
-	}
-	kfree(buf);
-	kfree(hdr_buf);
+	ses_enclosure_data_process(edev, sdev, 1);
 
 	/* see if there are any devices matching before
 	 * we found the enclosure */
@@ -615,17 +642,26 @@
 	return 0;
 }
 
-static void ses_intf_remove(struct device *cdev,
-			    struct class_interface *intf)
+static void ses_intf_remove_component(struct scsi_device *sdev)
 {
-	struct scsi_device *sdev = to_scsi_device(cdev->parent);
+	struct enclosure_device *edev, *prev = NULL;
+
+	while ((edev = enclosure_find(&sdev->host->shost_gendev, prev)) != NULL) {
+		prev = edev;
+		if (!enclosure_remove_device(edev, &sdev->sdev_gendev))
+			break;
+	}
+	if (edev)
+		put_device(&edev->edev);
+}
+
+static void ses_intf_remove_enclosure(struct scsi_device *sdev)
+{
 	struct enclosure_device *edev;
 	struct ses_device *ses_dev;
 
-	if (!scsi_device_enclosure(sdev))
-		return;
-
-	edev = enclosure_find(cdev->parent);
+	/*  exact match to this enclosure */
+	edev = enclosure_find(&sdev->sdev_gendev, NULL);
 	if (!edev)
 		return;
 
@@ -643,6 +679,17 @@
 	enclosure_unregister(edev);
 }
 
+static void ses_intf_remove(struct device *cdev,
+			    struct class_interface *intf)
+{
+	struct scsi_device *sdev = to_scsi_device(cdev->parent);
+
+	if (!scsi_device_enclosure(sdev))
+		ses_intf_remove_component(sdev);
+	else
+		ses_intf_remove_enclosure(sdev);
+}
+
 static struct class_interface ses_interface = {
 	.add_dev	= ses_intf_add,
 	.remove_dev	= ses_intf_remove,
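
The reworked ses code above walks every enclosure attached to a host with the new two-argument enclosure_find(), passing the previous result back in as a cursor; ses_intf_remove_component() shows that a caller breaking out of the loop early still holds a reference on the enclosure it stopped at and must put_device() it. A condensed sketch of the full-walk idiom (the function name and the per-enclosure action are hypothetical):

    #include <linux/device.h>
    #include <linux/enclosure.h>

    /* Visit every enclosure whose device ancestry includes "parent". */
    static void example_for_each_enclosure(struct device *parent)
    {
            struct enclosure_device *edev, *prev = NULL;

            while ((edev = enclosure_find(parent, prev)) != NULL) {
                    /* ... act on edev ... */
                    prev = edev;                /* cursor for the next lookup */
            }
    }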
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 9230402..4968c4c 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1811,7 +1811,7 @@
 	return 0;
 out:
 	for (i = 0; i < k; i++)
-		__free_pages(schp->pages[k], order);
+		__free_pages(schp->pages[i], order);
 
 	if (--order >= 0)
 		goto retry;
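
The sg.c hunk above is a one-character fix: when an allocation fails part-way through, the error path must free the pages that were actually obtained (index i), not repeatedly pass the failing slot k. A simplified, hypothetical version of that allocate/undo/retry-with-smaller-order pattern:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Try to fill "pages" with nr blocks of the given order; on failure,
     * free what was obtained and retry with a smaller order. */
    static int example_alloc_pages(struct page **pages, int nr, int order,
                                   gfp_t gfp)
    {
            int i, k;

    retry:
            for (k = 0; k < nr; k++) {
                    pages[k] = alloc_pages(gfp, order);
                    if (!pages[k])
                            goto out;
            }
            return order;
    out:
            for (i = 0; i < k; i++)
                    __free_pages(pages[i], order);  /* free only what we got */
            if (--order >= 0)
                    goto retry;
            return -ENOMEM;
    }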
diff --git a/drivers/scsi/stex.c b/drivers/scsi/stex.c
index 8d2a95c..09fa886 100644
--- a/drivers/scsi/stex.c
+++ b/drivers/scsi/stex.c
@@ -55,6 +55,7 @@
 	OIS	= 0x30,	/* MU_OUTBOUND_INTERRUPT_STATUS */
 	OIM	= 0x3c,	/* MU_OUTBOUND_INTERRUPT_MASK */
 
+	YIOA_STATUS				= 0x00,
 	YH2I_INT				= 0x20,
 	YINT_EN					= 0x34,
 	YI2H_INT				= 0x9c,
@@ -108,6 +109,10 @@
 
 	SS_HEAD_HANDSHAKE			= 0x80,
 
+	SS_H2I_INT_RESET			= 0x100,
+
+	SS_MU_OPERATIONAL			= 0x80000000,
+
 	STEX_CDB_LENGTH				= 16,
 	STATUS_VAR_LEN				= 128,
 
@@ -884,7 +889,7 @@
 		tag = (u16)value;
 		if (unlikely(tag >= hba->host->can_queue)) {
 			printk(KERN_WARNING DRV_NAME
-					"(%s): invalid tag\n", pci_name(hba->pdev));
+				"(%s): invalid tag\n", pci_name(hba->pdev));
 			continue;
 		}
 
@@ -1040,16 +1045,27 @@
 	void __iomem *base = hba->mmio_base;
 	struct st_msg_header *msg_h;
 	struct handshake_frame *h;
-	__le32 *scratch = hba->scratch;
+	__le32 *scratch;
 	u32 data;
 	unsigned long before;
 	int ret = 0;
 
-	h = (struct handshake_frame *)(hba->alloc_rq(hba));
-	msg_h = (struct st_msg_header *)h - 1;
+	before = jiffies;
+	while ((readl(base + YIOA_STATUS) & SS_MU_OPERATIONAL) == 0) {
+		if (time_after(jiffies, before + MU_MAX_DELAY * HZ)) {
+			printk(KERN_ERR DRV_NAME
+				"(%s): firmware not operational\n",
+				pci_name(hba->pdev));
+			return -1;
+		}
+		msleep(1);
+	}
+
+	msg_h = (struct st_msg_header *)hba->dma_mem;
 	msg_h->handle = cpu_to_le64(hba->dma_handle);
 	msg_h->flag = SS_HEAD_HANDSHAKE;
 
+	h = (struct handshake_frame *)(msg_h + 1);
 	h->rb_phy = cpu_to_le64(hba->dma_handle);
 	h->req_sz = cpu_to_le16(hba->rq_size);
 	h->req_cnt = cpu_to_le16(hba->rq_count+1);
@@ -1205,6 +1221,13 @@
 			hba->pdev->saved_config_space[i]);
 }
 
+static void stex_ss_reset(struct st_hba *hba)
+{
+	writel(SS_H2I_INT_RESET, hba->mmio_base + YH2I_INT);
+	readl(hba->mmio_base + YH2I_INT);
+	ssleep(5);
+}
+
 static int stex_reset(struct scsi_cmnd *cmd)
 {
 	struct st_hba *hba;
@@ -1221,6 +1244,8 @@
 
 	if (hba->cardtype == st_shasta)
 		stex_hard_reset(hba);
+	else if (hba->cardtype == st_yel)
+		stex_ss_reset(hba);
 
 	if (hba->cardtype != st_yosemite) {
 		if (stex_handshake(hba)) {
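
The new handshake code above first polls YIOA_STATUS until the firmware reports SS_MU_OPERATIONAL, bounded by MU_MAX_DELAY seconds. A generic sketch of that poll-with-timeout pattern (function and parameter names are placeholders):

    #include <linux/delay.h>
    #include <linux/errno.h>
    #include <linux/io.h>
    #include <linux/jiffies.h>
    #include <linux/types.h>

    /* Poll a memory-mapped status register until "ready" is set or a
     * jiffies-based timeout expires. */
    static int example_wait_ready(void __iomem *reg, u32 ready,
                                  unsigned long timeout_secs)
    {
            unsigned long before = jiffies;

            while ((readl(reg) & ready) == 0) {
                    if (time_after(jiffies, before + timeout_secs * HZ))
                            return -ETIMEDOUT;
                    msleep(1);
            }
            return 0;
    }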
diff --git a/include/linux/enclosure.h b/include/linux/enclosure.h
index 4332442..90d1c21 100644
--- a/include/linux/enclosure.h
+++ b/include/linux/enclosure.h
@@ -122,8 +122,9 @@
 				 enum enclosure_component_type, const char *);
 int enclosure_add_device(struct enclosure_device *enclosure, int component,
 			 struct device *dev);
-int enclosure_remove_device(struct enclosure_device *enclosure, int component);
-struct enclosure_device *enclosure_find(struct device *dev);
+int enclosure_remove_device(struct enclosure_device *, struct device *);
+struct enclosure_device *enclosure_find(struct device *dev,
+					struct enclosure_device *start);
 int enclosure_for_each_device(int (*fn)(struct enclosure_device *, void *),
 			      void *data);
 
diff --git a/include/scsi/fc/fc_gs.h b/include/scsi/fc/fc_gs.h
index ffab027..324dd0e 100644
--- a/include/scsi/fc/fc_gs.h
+++ b/include/scsi/fc/fc_gs.h
@@ -87,6 +87,7 @@
 	FC_FS_EXP_PNAM =	0x02,	/* port name not registered */
 	FC_FS_EXP_NNAM =	0x03,	/* node name not registered */
 	FC_FS_EXP_COS =		0x04,	/* class of service not registered */
+	FC_FS_EXP_FTNR =	0x07,	/* FC-4 types not registered */
 	/* definitions not complete */
 };
 
diff --git a/include/scsi/fc_encode.h b/include/scsi/fc_encode.h
index a0ff61c..27dad70 100644
--- a/include/scsi/fc_encode.h
+++ b/include/scsi/fc_encode.h
@@ -32,6 +32,7 @@
 		struct fc_ns_gid_ft gid;
 		struct fc_ns_rn_id  rn;
 		struct fc_ns_rft rft;
+		struct fc_ns_fid fid;
 	} payload;
 };
 
@@ -57,6 +58,23 @@
 }
 
 /**
+ * fc_adisc_fill() - Fill in adisc request frame
+ * @lport: local port.
+ * @fp: fc frame where payload will be placed.
+ */
+static inline void fc_adisc_fill(struct fc_lport *lport, struct fc_frame *fp)
+{
+	struct fc_els_adisc *adisc;
+
+	adisc = fc_frame_payload_get(fp, sizeof(*adisc));
+	memset(adisc, 0, sizeof(*adisc));
+	adisc->adisc_cmd = ELS_ADISC;
+	put_unaligned_be64(lport->wwpn, &adisc->adisc_wwpn);
+	put_unaligned_be64(lport->wwnn, &adisc->adisc_wwnn);
+	hton24(adisc->adisc_port_id, fc_host_port_id(lport->host));
+}
+
+/**
  * fc_ct_hdr_fill- fills ct header and reset ct payload
  * returns pointer to ct request.
  */
@@ -77,10 +95,17 @@
 }
 
 /**
- * fc_ct_fill - Fill in a name service request frame
+ * fc_ct_fill() - Fill in a name service request frame
+ * @lport: local port.
+ * @fc_id: FC_ID of non-destination rport for GPN_ID and similar inquiries.
+ * @fp: frame to contain payload.
+ * @op: CT opcode.
+ * @r_ctl: pointer to FC header R_CTL.
+ * @fh_type: pointer to FC-4 type.
  */
-static inline int fc_ct_fill(struct fc_lport *lport, struct fc_frame *fp,
-		      unsigned int op, enum fc_rctl *r_ctl, u32 *did,
+static inline int fc_ct_fill(struct fc_lport *lport,
+		      u32 fc_id, struct fc_frame *fp,
+		      unsigned int op, enum fc_rctl *r_ctl,
 		      enum fc_fh_type *fh_type)
 {
 	struct fc_ct_req *ct;
@@ -91,6 +116,11 @@
 		ct->payload.gid.fn_fc4_type = FC_TYPE_FCP;
 		break;
 
+	case FC_NS_GPN_ID:
+		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_fid));
+		hton24(ct->payload.fid.fp_fid, fc_id);
+		break;
+
 	case FC_NS_RFT_ID:
 		ct = fc_ct_hdr_fill(fp, op, sizeof(struct fc_ns_rft));
 		hton24(ct->payload.rft.fid.fp_fid,
@@ -110,7 +140,6 @@
 		return -EINVAL;
 	}
 	*r_ctl = FC_RCTL_DD_UNSOL_CTL;
-	*did = FC_FID_DIR_SERV;
 	*fh_type = FC_TYPE_CT;
 	return 0;
 }
@@ -249,51 +278,42 @@
 /**
  * fc_els_fill - Fill in an ELS  request frame
  */
-static inline int fc_els_fill(struct fc_lport *lport, struct fc_rport *rport,
+static inline int fc_els_fill(struct fc_lport *lport,
+		       u32 did,
 		       struct fc_frame *fp, unsigned int op,
-		       enum fc_rctl *r_ctl, u32 *did, enum fc_fh_type *fh_type)
+		       enum fc_rctl *r_ctl, enum fc_fh_type *fh_type)
 {
 	switch (op) {
+	case ELS_ADISC:
+		fc_adisc_fill(lport, fp);
+		break;
+
 	case ELS_PLOGI:
 		fc_plogi_fill(lport, fp, ELS_PLOGI);
-		*did = rport->port_id;
 		break;
 
 	case ELS_FLOGI:
 		fc_flogi_fill(lport, fp);
-		*did = FC_FID_FLOGI;
 		break;
 
 	case ELS_LOGO:
 		fc_logo_fill(lport, fp);
-		*did = FC_FID_FLOGI;
-		/*
-		 * if rport is valid then it
-		 * is port logo, therefore
-		 * set did to rport id.
-		 */
-		if (rport)
-			*did = rport->port_id;
 		break;
 
 	case ELS_RTV:
 		fc_rtv_fill(lport, fp);
-		*did = rport->port_id;
 		break;
 
 	case ELS_REC:
 		fc_rec_fill(lport, fp);
-		*did = rport->port_id;
 		break;
 
 	case ELS_PRLI:
 		fc_prli_fill(lport, fp);
-		*did = rport->port_id;
 		break;
 
 	case ELS_SCR:
 		fc_scr_fill(lport, fp);
-		*did = FC_FID_FCTRL;
 		break;
 
 	default:
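
With the reworked prototypes above, fc_ct_fill() and fc_els_fill() no longer choose the destination ID themselves; the caller supplies the ID and later builds the FC header from it. A hedged sketch of a dispatch helper in that style (the helper name and the opcode-range test are assumptions, loosely modelled on how libfc's ELS/CT send path distinguishes the two families):

    #include <scsi/fc/fc_els.h>
    #include <scsi/libfc.h>
    #include <scsi/fc_encode.h>

    static int example_fill_request(struct fc_lport *lport, u32 did,
                                    struct fc_frame *fp, unsigned int op,
                                    enum fc_rctl *r_ctl,
                                    enum fc_fh_type *fh_type)
    {
            /* ELS opcodes occupy the ELS_LS_RJT..ELS_AUTH_ELS range;
             * name-server (CT) opcodes fall outside it. */
            if (op >= ELS_LS_RJT && op <= ELS_AUTH_ELS)
                    return fc_els_fill(lport, did, fp, op, r_ctl, fh_type);
            return fc_ct_fill(lport, did, fp, op, r_ctl, fh_type);
    }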
diff --git a/include/scsi/fc_frame.h b/include/scsi/fc_frame.h
index 5951105..c35d238 100644
--- a/include/scsi/fc_frame.h
+++ b/include/scsi/fc_frame.h
@@ -37,13 +37,6 @@
 #define	FC_FRAME_HEADROOM	32	/* headroom for VLAN + FCoE headers */
 #define	FC_FRAME_TAILROOM	8	/* trailer space for FCoE */
 
-/*
- * Information about an individual fibre channel frame received or to be sent.
- * The buffer may be in up to 4 additional non-contiguous sections,
- * but the linear section must hold the frame header.
- */
-#define FC_FRAME_SG_LEN		4	/* scatter/gather list maximum length */
-
 #define fp_skb(fp)	(&((fp)->skb))
 #define fr_hdr(fp)	((fp)->skb.data)
 #define fr_len(fp)	((fp)->skb.len)
diff --git a/include/scsi/iscsi_if.h b/include/scsi/iscsi_if.h
index 4426f00..d67dda2 100644
--- a/include/scsi/iscsi_if.h
+++ b/include/scsi/iscsi_if.h
@@ -262,6 +262,7 @@
 	ISCSI_ERR_NO_SCSI_CMD		= ISCSI_ERR_BASE + 17,
 	ISCSI_ERR_INVALID_HOST		= ISCSI_ERR_BASE + 18,
 	ISCSI_ERR_XMIT_FAILED		= ISCSI_ERR_BASE + 19,
+	ISCSI_ERR_TCP_CONN_CLOSE	= ISCSI_ERR_BASE + 20,
 };
 
 /*
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index b92584a..65dc9aa 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -51,55 +51,49 @@
 		do {						\
 			CMD;					\
 		} while (0);					\
-} while (0);
+} while (0)
 
 #define FC_LIBFC_DBG(fmt, args...)					\
 	FC_CHECK_LOGGING(FC_LIBFC_LOGGING,				\
-			 printk(KERN_INFO "libfc: " fmt, ##args);)
+			 printk(KERN_INFO "libfc: " fmt, ##args))
 
 #define FC_LPORT_DBG(lport, fmt, args...)				\
 	FC_CHECK_LOGGING(FC_LPORT_LOGGING,				\
-			 printk(KERN_INFO "lport: %6x: " fmt,		\
-				fc_host_port_id(lport->host), ##args);)
+			 printk(KERN_INFO "host%u: lport %6x: " fmt,	\
+				(lport)->host->host_no,			\
+				fc_host_port_id((lport)->host), ##args))
 
 #define FC_DISC_DBG(disc, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_DISC_LOGGING,				\
-			 printk(KERN_INFO "disc: %6x: " fmt,		\
-				fc_host_port_id(disc->lport->host),	\
-				##args);)
+			 printk(KERN_INFO "host%u: disc: " fmt,		\
+				(disc)->lport->host->host_no,		\
+				##args))
 
-#define FC_RPORT_DBG(rport, fmt, args...)				\
-do {									\
-	struct fc_rport_libfc_priv *rdata = rport->dd_data;		\
-	struct fc_lport *lport = rdata->local_port;			\
+#define FC_RPORT_ID_DBG(lport, port_id, fmt, args...)			\
 	FC_CHECK_LOGGING(FC_RPORT_LOGGING,				\
-			 printk(KERN_INFO "rport: %6x: %6x: " fmt,	\
-				fc_host_port_id(lport->host),		\
-				rport->port_id, ##args);)		\
-} while (0);
+			 printk(KERN_INFO "host%u: rport %6x: " fmt,	\
+				(lport)->host->host_no,			\
+				(port_id), ##args))
+
+#define FC_RPORT_DBG(rdata, fmt, args...)				\
+	FC_RPORT_ID_DBG((rdata)->local_port, (rdata)->ids.port_id, fmt, ##args)
 
 #define FC_FCP_DBG(pkt, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_FCP_LOGGING,				\
-			 printk(KERN_INFO "fcp: %6x: %6x: " fmt,	\
-				fc_host_port_id(pkt->lp->host),		\
-				pkt->rport->port_id, ##args);)
-
-#define FC_EM_DBG(em, fmt, args...)					\
-	FC_CHECK_LOGGING(FC_EM_LOGGING,					\
-			 printk(KERN_INFO "em: %6x: " fmt,		\
-				fc_host_port_id(em->lp->host),		\
-				##args);)
+			 printk(KERN_INFO "host%u: fcp: %6x: " fmt,	\
+				(pkt)->lp->host->host_no,		\
+				pkt->rport->port_id, ##args))
 
 #define FC_EXCH_DBG(exch, fmt, args...)					\
 	FC_CHECK_LOGGING(FC_EXCH_LOGGING,				\
-			 printk(KERN_INFO "exch: %6x: %4x: " fmt,	\
-				fc_host_port_id(exch->lp->host),	\
-				exch->xid, ##args);)
+			 printk(KERN_INFO "host%u: xid %4x: " fmt,	\
+				(exch)->lp->host->host_no,		\
+				exch->xid, ##args))
 
 #define FC_SCSI_DBG(lport, fmt, args...)				\
 	FC_CHECK_LOGGING(FC_SCSI_LOGGING,                               \
-			 printk(KERN_INFO "scsi: %6x: " fmt,		\
-				fc_host_port_id(lport->host), ##args);)
+			 printk(KERN_INFO "host%u: scsi: " fmt,		\
+				(lport)->host->host_no,	##args))
 
 /*
  * libfc error codes
@@ -125,7 +119,7 @@
  * FC HBA status
  */
 enum fc_lport_state {
-	LPORT_ST_NONE = 0,
+	LPORT_ST_DISABLED = 0,
 	LPORT_ST_FLOGI,
 	LPORT_ST_DNS,
 	LPORT_ST_RPN_ID,
@@ -143,59 +137,52 @@
 };
 
 enum fc_rport_state {
-	RPORT_ST_NONE = 0,
 	RPORT_ST_INIT,		/* initialized */
 	RPORT_ST_PLOGI,		/* waiting for PLOGI completion */
 	RPORT_ST_PRLI,		/* waiting for PRLI completion */
 	RPORT_ST_RTV,		/* waiting for RTV completion */
 	RPORT_ST_READY,		/* ready for use */
 	RPORT_ST_LOGO,		/* port logout sent */
-};
-
-enum fc_rport_trans_state {
-	FC_PORTSTATE_ROGUE,
-	FC_PORTSTATE_REAL,
+	RPORT_ST_ADISC,		/* Discover Address sent */
+	RPORT_ST_DELETE,	/* port being deleted */
 };
 
 /**
  * struct fc_disc_port - temporary discovery port to hold rport identifiers
- * @lp: Fibre Channel host port instance
- * @peers: node for list management during discovery and RSCN processing
- * @ids: identifiers structure to pass to fc_remote_port_add()
- * @rport_work: work struct for starting the rport state machine
+ * @lp:         Fibre Channel host port instance
+ * @peers:      Node for list management during discovery and RSCN processing
+ * @rport_work: Work struct for starting the rport state machine
+ * @port_id:    Port ID of the discovered port
  */
 struct fc_disc_port {
 	struct fc_lport             *lp;
 	struct list_head            peers;
-	struct fc_rport_identifiers ids;
 	struct work_struct	    rport_work;
+	u32                         port_id;
 };
 
 enum fc_rport_event {
 	RPORT_EV_NONE = 0,
-	RPORT_EV_CREATED,
+	RPORT_EV_READY,
 	RPORT_EV_FAILED,
 	RPORT_EV_STOP,
 	RPORT_EV_LOGO
 };
 
+struct fc_rport_priv;
+
 struct fc_rport_operations {
-	void (*event_callback)(struct fc_lport *, struct fc_rport *,
+	void (*event_callback)(struct fc_lport *, struct fc_rport_priv *,
 			       enum fc_rport_event);
 };
 
 /**
  * struct fc_rport_libfc_priv - libfc internal information about a remote port
  * @local_port: Fibre Channel host port instance
- * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
+ * @rp_state: indicates READY for I/O or DELETE when blocked.
  * @flags: REC and RETRY supported flags
- * @max_seq: maximum number of concurrent sequences
- * @retries: retry count in current state
  * @e_d_tov: error detect timeout value (in msec)
  * @r_a_tov: resource allocation timeout value (in msec)
- * @rp_mutex: mutex protects rport
- * @retry_work:
- * @event_callback: Callback for rport READY, FAILED or LOGO
  */
 struct fc_rport_libfc_priv {
 	struct fc_lport		   *local_port;
@@ -203,32 +190,50 @@
 	u16			   flags;
 	#define FC_RP_FLAGS_REC_SUPPORTED	(1 << 0)
 	#define FC_RP_FLAGS_RETRY		(1 << 1)
+	unsigned int	           e_d_tov;
+	unsigned int	           r_a_tov;
+};
+
+/**
+ * struct fc_rport_priv - libfc rport and discovery info about a remote port
+ * @local_port: Fibre Channel host port instance
+ * @rport: transport remote port
+ * @kref: reference counter
+ * @rp_state: state tracks progress of PLOGI, PRLI, and RTV exchanges
+ * @ids: remote port identifiers and roles
+ * @flags: REC and RETRY supported flags
+ * @max_seq: maximum number of concurrent sequences
+ * @disc_id: discovery identifier
+ * @maxframe_size: maximum frame size
+ * @retries: retry count in current state
+ * @e_d_tov: error detect timeout value (in msec)
+ * @r_a_tov: resource allocation timeout value (in msec)
+ * @rp_mutex: mutex protects rport
+ * @retry_work:
+ * @event_callback: Callback for rport READY, FAILED or LOGO
+ */
+struct fc_rport_priv {
+	struct fc_lport		   *local_port;
+	struct fc_rport		   *rport;
+	struct kref		   kref;
+	enum fc_rport_state        rp_state;
+	struct fc_rport_identifiers ids;
+	u16			   flags;
 	u16		           max_seq;
+	u16			   disc_id;
+	u16			   maxframe_size;
 	unsigned int	           retries;
 	unsigned int	           e_d_tov;
 	unsigned int	           r_a_tov;
-	enum fc_rport_trans_state  trans_state;
 	struct mutex               rp_mutex;
 	struct delayed_work	   retry_work;
 	enum fc_rport_event        event;
 	struct fc_rport_operations *ops;
 	struct list_head           peers;
 	struct work_struct         event_work;
+	u32			   supported_classes;
 };
 
-#define PRIV_TO_RPORT(x)						\
-	(struct fc_rport *)((void *)x - sizeof(struct fc_rport));
-#define RPORT_TO_PRIV(x)						\
-	(struct fc_rport_libfc_priv *)((void *)x + sizeof(struct fc_rport));
-
-struct fc_rport *fc_rport_rogue_create(struct fc_disc_port *);
-
-static inline void fc_rport_set_name(struct fc_rport *rport, u64 wwpn, u64 wwnn)
-{
-	rport->node_name = wwnn;
-	rport->port_name = wwpn;
-}
-
 /*
  * fcoe stats structure
  */
@@ -344,6 +349,8 @@
  */
 
 struct fc_exch_mgr;
+struct fc_exch_mgr_anchor;
+extern u16	fc_cpu_mask;	/* cpu mask for possible cpus */
 
 /*
  * Sequence.
@@ -368,6 +375,7 @@
  */
 struct fc_exch {
 	struct fc_exch_mgr *em;		/* exchange manager */
+	struct fc_exch_pool *pool;	/* per cpu exches pool */
 	u32		state;		/* internal driver state */
 	u16		xid;		/* our exchange ID */
 	struct list_head	ex_list;	/* free or busy list linkage */
@@ -415,7 +423,7 @@
 	 * STATUS: OPTIONAL
 	 */
 	struct fc_seq *(*elsct_send)(struct fc_lport *lport,
-				     struct fc_rport *rport,
+				     u32 did,
 				     struct fc_frame *fp,
 				     unsigned int op,
 				     void (*resp)(struct fc_seq *,
@@ -519,25 +527,6 @@
 	void (*exch_done)(struct fc_seq *sp);
 
 	/*
-	 * Assigns a EM and a free XID for an new exchange and then
-	 * allocates a new exchange and sequence pair.
-	 * The fp can be used to determine free XID.
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	struct fc_exch *(*exch_get)(struct fc_lport *lp, struct fc_frame *fp);
-
-	/*
-	 * Release previously assigned XID by exch_get API.
-	 * The LLD may implement this if XID is assigned by LLD
-	 * in exch_get().
-	 *
-	 * STATUS: OPTIONAL
-	 */
-	void (*exch_put)(struct fc_lport *lp, struct fc_exch_mgr *mp,
-			 u16 ex_id);
-
-	/*
 	 * Start a new sequence on the same exchange/sequence tuple.
 	 *
 	 * STATUS: OPTIONAL
@@ -577,9 +566,11 @@
 	int (*lport_reset)(struct fc_lport *);
 
 	/*
-	 * Create a remote port
+	 * Create a remote port with a given port ID
+	 *
+	 * STATUS: OPTIONAL
 	 */
-	struct fc_rport *(*rport_create)(struct fc_disc_port *);
+	struct fc_rport_priv *(*rport_create)(struct fc_lport *, u32);
 
 	/*
 	 * Initiates the RP state machine. It is called from the LP module.
@@ -592,7 +583,7 @@
 	 *
 	 * STATUS: OPTIONAL
 	 */
-	int (*rport_login)(struct fc_rport *rport);
+	int (*rport_login)(struct fc_rport_priv *);
 
 	/*
 	 * Logoff, and remove the rport from the transport if
@@ -600,7 +591,7 @@
 	 *
 	 * STATUS: OPTIONAL
 	 */
-	int (*rport_logoff)(struct fc_rport *rport);
+	int (*rport_logoff)(struct fc_rport_priv *);
 
 	/*
 	 * Recieve a request from a remote port.
@@ -608,14 +599,20 @@
 	 * STATUS: OPTIONAL
 	 */
 	void (*rport_recv_req)(struct fc_seq *, struct fc_frame *,
-			       struct fc_rport *);
+			       struct fc_lport *);
 
 	/*
 	 * lookup an rport by it's port ID.
 	 *
 	 * STATUS: OPTIONAL
 	 */
-	struct fc_rport *(*rport_lookup)(const struct fc_lport *, u32);
+	struct fc_rport_priv *(*rport_lookup)(const struct fc_lport *, u32);
+
+	/*
+	 * Destroy an rport after final kref_put().
+	 * The argument is a pointer to the kref inside the fc_rport_priv.
+	 */
+	void (*rport_destroy)(struct kref *);
 
 	/*
 	 * Send a fcp cmd from fsp pkt.
@@ -681,18 +678,16 @@
 /* information used by the discovery layer */
 struct fc_disc {
 	unsigned char		retry_count;
-	unsigned char		delay;
 	unsigned char		pending;
 	unsigned char		requested;
 	unsigned short		seq_count;
 	unsigned char		buf_len;
-	enum fc_disc_event	event;
+	u16			disc_id;
 
 	void (*disc_callback)(struct fc_lport *,
 			      enum fc_disc_event);
 
 	struct list_head	 rports;
-	struct list_head	 rogue_rports;
 	struct fc_lport		*lport;
 	struct mutex		disc_mutex;
 	struct fc_gpn_ft_resp	partial_buf;	/* partial name buffer */
@@ -704,9 +699,9 @@
 
 	/* Associations */
 	struct Scsi_Host	*host;
-	struct fc_exch_mgr	*emp;
-	struct fc_rport		*dns_rp;
-	struct fc_rport		*ptp_rp;
+	struct list_head	ema_list;
+	struct fc_rport_priv	*dns_rp;
+	struct fc_rport_priv	*ptp_rp;
 	void			*scsi_priv;
 	struct fc_disc          disc;
 
@@ -960,6 +955,28 @@
 int fc_exch_init(struct fc_lport *lp);
 
 /*
+ * Adds Exchange Manager (EM) mp to lport.
+ *
+ * The specified mp is attached to lport through a struct
+ * fc_exch_mgr_anchor, which lets more than one lport share the
+ * same EM, each with its own match function.  The match function
+ * is used when allocating an exchange from the added mp.
+ */
+struct fc_exch_mgr_anchor *fc_exch_mgr_add(struct fc_lport *lport,
+					   struct fc_exch_mgr *mp,
+					   bool (*match)(struct fc_frame *));
+
+/*
+ * Deletes an Exchange Manager (EM) from a lport by removing its
+ * anchor ema.
+ *
+ * If the removed anchor ema was the last user of its associated
+ * EM, the EM is destroyed as well.
+ */
+void fc_exch_mgr_del(struct fc_exch_mgr_anchor *ema);
+
+/*
  * Allocates an Exchange Manager (EM).
  *
  * The EM manages exchanges for their allocation and
@@ -974,27 +991,25 @@
  * a new exchange.
  * The LLD may choose to have multiple EMs,
  * e.g. one EM instance per CPU receive thread in LLD.
- * The LLD can use exch_get() of struct libfc_function_template
- * to specify XID for a new exchange within
- * a specified EM instance.
  *
- * The em_idx to uniquely identify an EM instance.
+ * The specified match function is used when allocating exchanges
+ * from the newly allocated EM.
  */
 struct fc_exch_mgr *fc_exch_mgr_alloc(struct fc_lport *lp,
 				      enum fc_class class,
 				      u16 min_xid,
-				      u16 max_xid);
+				      u16 max_xid,
+				      bool (*match)(struct fc_frame *));
 
 /*
- * Free an exchange manager.
+ * Free all exchange managers of a lport.
  */
-void fc_exch_mgr_free(struct fc_exch_mgr *mp);
+void fc_exch_mgr_free(struct fc_lport *lport);
 
 /*
  * Receive a frame on specified local port and exchange manager.
  */
-void fc_exch_recv(struct fc_lport *lp, struct fc_exch_mgr *mp,
-		  struct fc_frame *fp);
+void fc_exch_recv(struct fc_lport *lp, struct fc_frame *fp);
 
 /*
  * This function is for exch_seq_send function pointer in
@@ -1036,28 +1051,20 @@
 void fc_exch_done(struct fc_seq *sp);
 
 /*
- * Assigns a EM and XID for a frame and then allocates
- * a new exchange and sequence pair.
- * The fp can be used to determine free XID.
- */
-struct fc_exch *fc_exch_get(struct fc_lport *lp, struct fc_frame *fp);
-
-/*
  * Allocate a new exchange and sequence pair.
- * if ex_id is zero then next free exchange id
- * from specified exchange manger mp will be assigned.
  */
-struct fc_exch *fc_exch_alloc(struct fc_exch_mgr *mp,
-			      struct fc_frame *fp, u16 ex_id);
+struct fc_exch *fc_exch_alloc(struct fc_lport *lport, struct fc_frame *fp);
 /*
  * Start a new sequence on the same exchange as the supplied sequence.
  */
 struct fc_seq *fc_seq_start_next(struct fc_seq *sp);
 
+
 /*
- * Reset an exchange manager, completing all sequences and exchanges.
- * If s_id is non-zero, reset only exchanges originating from that FID.
- * If d_id is non-zero, reset only exchanges sending to that FID.
+ * Reset all EMs of a lport, releasing all of its sequences and
+ * exchanges. If sid is non-zero, then reset only exchanges
+ * we sourced from that FID. If did is non-zero, reset only
+ * exchanges destined to that FID.
  */
 void fc_exch_mgr_reset(struct fc_lport *, u32 s_id, u32 d_id);
 
@@ -1078,4 +1085,9 @@
 int fc_setup_rport(void);
 void fc_destroy_rport(void);
 
+/*
+ * Internal libfc functions.
+ */
+const char *fc_els_resp_type(struct fc_frame *);
+
 #endif /* _LIBFC_H_ */
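
The fc_exch_mgr_add()/fc_exch_mgr_del() interface declared above lets more than one lport share a single exchange manager through per-lport anchors, while fc_exch_mgr_alloc()/fc_exch_mgr_free() now create and tear down EMs on a per-lport basis. A hedged sketch of how an LLD might use this (the lport roles, the XID range and the always-true match function are assumptions):

    #include <linux/errno.h>
    #include <scsi/fc/fc_fs.h>
    #include <scsi/libfc.h>

    /* Hypothetical match function: let this EM claim every frame. */
    static bool example_match(struct fc_frame *fp)
    {
            return true;
    }

    static int example_share_em(struct fc_lport *master, struct fc_lport *vport)
    {
            struct fc_exch_mgr *em;
            struct fc_exch_mgr_anchor *ema;

            /* EM allocated for the first lport... */
            em = fc_exch_mgr_alloc(master, FC_CLASS_3, 0x0001, 0x0fff,
                                   example_match);
            if (!em)
                    return -ENOMEM;

            /* ...and shared with a second lport through another anchor. */
            ema = fc_exch_mgr_add(vport, em, example_match);
            if (!ema) {
                    fc_exch_mgr_free(master);   /* drops every EM of master */
                    return -ENOMEM;
            }
            return 0;
    }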
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 61afeb5..887e57e 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -390,6 +390,7 @@
 extern int iscsi_conn_get_param(struct iscsi_cls_conn *cls_conn,
 				enum iscsi_param param, char *buf);
 extern void iscsi_suspend_tx(struct iscsi_conn *conn);
+extern void iscsi_suspend_queue(struct iscsi_conn *conn);
 extern void iscsi_conn_queue_work(struct iscsi_conn *conn);
 
 #define iscsi_conn_printk(prefix, _c, fmt, a...) \
@@ -415,6 +416,8 @@
 extern void iscsi_requeue_task(struct iscsi_task *task);
 extern void iscsi_put_task(struct iscsi_task *task);
 extern void __iscsi_get_task(struct iscsi_task *task);
+extern void iscsi_complete_scsi_task(struct iscsi_task *task,
+				     uint32_t exp_cmdsn, uint32_t max_cmdsn);
 
 /*
  * generic helpers
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 3f566af..9af48cb 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -187,10 +187,13 @@
 	void (*detach)(struct scsi_device *);
 	int (*activate)(struct scsi_device *);
 	int (*prep_fn)(struct scsi_device *, struct request *);
+	int (*set_params)(struct scsi_device *, const char *);
 };
 
 struct scsi_dh_data {
 	struct scsi_device_handler *scsi_dh;
+	struct scsi_device *sdev;
+	struct kref kref;
 	char buf[0];
 };
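
The new set_params() op above gives a device handler a hook for handler-specific parameter strings handed down through scsi_dh_set_params() (declared in scsi_dh.h below). A minimal hypothetical handler-side sketch; the handler name and the meaning of the string are invented, only the op signature comes from this header:

    #include <scsi/scsi_device.h>

    static int example_dh_set_params(struct scsi_device *sdev,
                                     const char *params)
    {
            /* Parse the handler-specific string and adjust per-device
             * state; returning 0 reports success (SCSI_DH_OK). */
            return 0;
    }

    static struct scsi_device_handler example_dh = {
            .name           = "example",
            .set_params     = example_dh_set_params,
    };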
 
diff --git a/include/scsi/scsi_dh.h b/include/scsi/scsi_dh.h
index 33efce2..ff24074 100644
--- a/include/scsi/scsi_dh.h
+++ b/include/scsi/scsi_dh.h
@@ -60,6 +60,7 @@
 extern int scsi_dh_handler_exist(const char *);
 extern int scsi_dh_attach(struct request_queue *, const char *);
 extern void scsi_dh_detach(struct request_queue *);
+extern int scsi_dh_set_params(struct request_queue *, const char *);
 #else
 static inline int scsi_dh_activate(struct request_queue *req)
 {
@@ -77,4 +78,8 @@
 {
 	return;
 }
+static inline int scsi_dh_set_params(struct request_queue *req, const char *params)
+{
+	return -SCSI_DH_NOSYS;
+}
 #endif
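
On the caller side, scsi_dh_set_params() passes a parameter string to the handler attached to a request queue; when the device-handler infrastructure is not built in, the inline stub above returns -SCSI_DH_NOSYS. A small hypothetical usage sketch (the queue and the string are placeholders; the string format depends entirely on the attached handler):

    #include <scsi/scsi_dh.h>

    static int example_tune_handler(struct request_queue *q)
    {
            /* The format of the string is defined by the attached
             * handler's set_params() implementation; this one is invented. */
            return scsi_dh_set_params(q, "example-mode 1");
    }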