Merge git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6

* git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi-misc-2.6:
  [SCSI] fix race in scsi_target_reap
  [SCSI] aacraid: Eliminate use after free
  [SCSI] arcmsr: Support HW reset for EH and polling scheme for scsi device
  [SCSI] bfa: fix system crash when reading sysfs fc_host statistics
  [SCSI] iscsi_tcp: remove sk_sleep check
  [SCSI] ipr: improve interrupt service routine performance
  [SCSI] ipr: set the data list length in the request control block
  [SCSI] ipr: fix a register read to use the correct address for 64 bit adapters
  [SCSI] ipr: include the resource path in the IOA status area structure
  [SCSI] ipr: implement fixes for 64 bit adapter support
  [SCSI] be2iscsi: correct return value in mgmt_invalidate_icds()
diff --git a/drivers/scsi/aacraid/commctrl.c b/drivers/scsi/aacraid/commctrl.c
index 9c0c911..1a5bf57 100644
--- a/drivers/scsi/aacraid/commctrl.c
+++ b/drivers/scsi/aacraid/commctrl.c
@@ -655,9 +655,9 @@
 				/* Does this really need to be GFP_DMA? */
 				p = kmalloc(usg->sg[i].count,GFP_KERNEL|__GFP_DMA);
 				if(!p) {
-					kfree (usg);
-					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
+					dprintk((KERN_DEBUG "aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
 					  usg->sg[i].count,i,usg->count));
+					kfree(usg);
 					rcode = -ENOMEM;
 					goto cleanup;
 				}
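
The aacraid hunk above is the "eliminate use after free" fix: the original error path freed usg and then passed usg->sg[i].count and usg->count to the debug print, dereferencing freed memory. The fix simply emits the diagnostic first and frees afterwards. A minimal userspace sketch of the corrected ordering (sg_map, alloc_or_fail and friends are illustrative names, not driver code):

#include <stdio.h>
#include <stdlib.h>

struct sg_map {			/* stand-in for the driver's user SG list */
	int count;
	int entry_len[4];
};

/* Error path: report using the still-valid object, then release it. */
static int alloc_or_fail(struct sg_map *map, int i)
{
	void *buf = malloc(map->entry_len[i]);
	if (!buf) {
		/* log BEFORE freeing -- the message dereferences 'map' */
		fprintf(stderr, "could not allocate SG buffer %d of %d (size %d)\n",
			i, map->count, map->entry_len[i]);
		free(map);		/* only now is it safe to drop it */
		return -1;
	}
	free(buf);
	return 0;
}

int main(void)
{
	struct sg_map *map = malloc(sizeof(*map));
	if (!map)
		return 1;
	map->count = 1;
	map->entry_len[0] = 64;
	return alloc_or_fail(map, 0) ? 1 : 0;
}
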
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index ab646e5..ce5371b 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -48,7 +48,7 @@
 /*The limit of outstanding scsi command that firmware can handle*/
 #define ARCMSR_MAX_OUTSTANDING_CMD						256
 #define ARCMSR_MAX_FREECCB_NUM							320
-#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2008/02/27"
+#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2008/11/03"
 #define ARCMSR_SCSI_INITIATOR_ID						255
 #define ARCMSR_MAX_XFER_SECTORS							512
 #define ARCMSR_MAX_XFER_SECTORS_B						4096
@@ -110,6 +110,8 @@
 #define FUNCTION_SAY_HELLO			0x0807
 #define FUNCTION_SAY_GOODBYE			0x0808
 #define FUNCTION_FLUSH_ADAPTER_CACHE		0x0809
+#define FUNCTION_GET_FIRMWARE_STATUS			0x080A
+#define FUNCTION_HARDWARE_RESET			0x080B
 /* ARECA IO CONTROL CODE*/
 #define ARCMSR_MESSAGE_READ_RQBUFFER       \
 	ARECA_SATA_RAID | FUNCTION_READ_RQBUFFER
@@ -133,6 +135,7 @@
 #define ARCMSR_MESSAGE_RETURNCODE_OK              0x00000001
 #define ARCMSR_MESSAGE_RETURNCODE_ERROR           0x00000006
 #define ARCMSR_MESSAGE_RETURNCODE_3F              0x0000003F
+#define ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON	0x00000088
 /*
 *************************************************************
 **   structure for holding DMA address data
@@ -341,13 +344,13 @@
 	uint32_t	done_qbuffer[ARCMSR_MAX_HBB_POSTQUEUE];
 	uint32_t	postq_index;
 	uint32_t	doneq_index;
-	void		__iomem *drv2iop_doorbell_reg;
-	void		__iomem *drv2iop_doorbell_mask_reg;
-	void		__iomem *iop2drv_doorbell_reg;
-	void		__iomem *iop2drv_doorbell_mask_reg;
-	void		__iomem *msgcode_rwbuffer_reg;
-	void		__iomem *ioctl_wbuffer_reg;
-	void		__iomem *ioctl_rbuffer_reg;
+	uint32_t		__iomem *drv2iop_doorbell_reg;
+	uint32_t		__iomem *drv2iop_doorbell_mask_reg;
+	uint32_t		__iomem *iop2drv_doorbell_reg;
+	uint32_t		__iomem *iop2drv_doorbell_mask_reg;
+	uint32_t		__iomem *msgcode_rwbuffer_reg;
+	uint32_t		__iomem *ioctl_wbuffer_reg;
+	uint32_t		__iomem *ioctl_rbuffer_reg;
 };
 
 /*
@@ -375,6 +378,7 @@
 	/* message unit ATU inbound base address0 */
 
 	uint32_t			acb_flags;
+	uint8_t                   		adapter_index;
 	#define ACB_F_SCSISTOPADAPTER         	0x0001
 	#define ACB_F_MSG_STOP_BGRB     	0x0002
 	/* stop RAID background rebuild */
@@ -390,7 +394,7 @@
 	#define ACB_F_BUS_RESET               	0x0080
 	#define ACB_F_IOP_INITED              	0x0100
 	/* iop init */
-
+	#define ACB_F_FIRMWARE_TRAP           		0x0400
 	struct CommandControlBlock *			pccb_pool[ARCMSR_MAX_FREECCB_NUM];
 	/* used for memory free */
 	struct list_head		ccb_free_list;
@@ -423,12 +427,19 @@
 #define ARECA_RAID_GOOD               0xaa
 	uint32_t			num_resets;
 	uint32_t			num_aborts;
+	uint32_t			signature;
 	uint32_t			firm_request_len;
 	uint32_t			firm_numbers_queue;
 	uint32_t			firm_sdram_size;
 	uint32_t			firm_hd_channels;
 	char				firm_model[12];
 	char				firm_version[20];
+	char			device_map[20];			/*21,84-99*/
+	struct work_struct 		arcmsr_do_message_isr_bh;
+	struct timer_list		eternal_timer;
+	unsigned short		fw_state;
+	atomic_t 			rq_map_token;
+	int			ante_token_value;
 };/* HW_DEVICE_EXTENSION */
 /*
 *******************************************************************************
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index a4e04c5..07fdfe5 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -192,6 +192,7 @@
 	.attr = {
 		.name = "mu_read",
 		.mode = S_IRUSR ,
+		.owner = THIS_MODULE,
 	},
 	.size = 1032,
 	.read = arcmsr_sysfs_iop_message_read,
@@ -201,6 +202,7 @@
 	.attr = {
 		.name = "mu_write",
 		.mode = S_IWUSR,
+		.owner = THIS_MODULE,
 	},
 	.size = 1032,
 	.write = arcmsr_sysfs_iop_message_write,
@@ -210,6 +212,7 @@
 	.attr = {
 		.name = "mu_clear",
 		.mode = S_IWUSR,
+		.owner = THIS_MODULE,
 	},
 	.size = 1,
 	.write = arcmsr_sysfs_iop_message_clear,
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index ffbe219..ffa5479 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -72,8 +72,16 @@
 #include <scsi/scsicam.h>
 #include "arcmsr.h"
 
+#ifdef CONFIG_SCSI_ARCMSR_RESET
+	static int sleeptime = 20;
+	static int retrycount = 12;
+	module_param(sleeptime, int, S_IRUGO|S_IWUSR);
+	MODULE_PARM_DESC(sleeptime, "The waiting period for FW ready while bus reset");
+	module_param(retrycount, int, S_IRUGO|S_IWUSR);
+	MODULE_PARM_DESC(retrycount, "The retry count for FW ready while bus reset");
+#endif
 MODULE_AUTHOR("Erich Chen <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID HOST Adapter");
+MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/13xx/16xx) SATA/SAS RAID Host Bus Adapter");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 
@@ -96,6 +104,13 @@
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
 static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
 static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
+static void arcmsr_request_device_map(unsigned long pacb);
+static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_message_isr_bh_fn(struct work_struct *work);
+static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode);
+static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
+
 static const char *arcmsr_info(struct Scsi_Host *);
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
@@ -112,7 +127,7 @@
 
 static struct scsi_host_template arcmsr_scsi_host_template = {
 	.module			= THIS_MODULE,
-	.name			= "ARCMSR ARECA SATA/SAS RAID HOST Adapter"
+	.name			= "ARCMSR ARECA SATA/SAS RAID Host Bus Adapter"
 							ARCMSR_DRIVER_VERSION,
 	.info			= arcmsr_info,
 	.queuecommand		= arcmsr_queue_command,
@@ -128,16 +143,6 @@
 	.use_clustering		= ENABLE_CLUSTERING,
 	.shost_attrs		= arcmsr_host_attrs,
 };
-#ifdef CONFIG_SCSI_ARCMSR_AER
-static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev);
-static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
-						pci_channel_state_t state);
-
-static struct pci_error_handlers arcmsr_pci_error_handlers = {
-	.error_detected		= arcmsr_pci_error_detected,
-	.slot_reset		= arcmsr_pci_slot_reset,
-};
-#endif
 static struct pci_device_id arcmsr_device_id_table[] = {
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
 	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
@@ -166,9 +171,6 @@
 	.probe			= arcmsr_probe,
 	.remove			= arcmsr_remove,
 	.shutdown		= arcmsr_shutdown,
-	#ifdef CONFIG_SCSI_ARCMSR_AER
-	.err_handler		= &arcmsr_pci_error_handlers,
-	#endif
 };
 
 static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
@@ -236,10 +238,9 @@
 		void *dma_coherent;
 		dma_addr_t dma_coherent_handle, dma_addr;
 		struct CommandControlBlock *ccb_tmp;
-		uint32_t intmask_org;
 		int i, j;
 
-		acb->pmuA = pci_ioremap_bar(pdev, 0);
+		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
 		if (!acb->pmuA) {
 			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
 							acb->host->host_no);
@@ -281,12 +282,6 @@
 		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 				acb->devstate[i][j] = ARECA_RAID_GONE;
-
-		/*
-		** here we need to tell iop 331 our ccb_tmp.HighPart
-		** if ccb_tmp.HighPart is not zero
-		*/
-		intmask_org = arcmsr_disable_outbound_ints(acb);
 		}
 		break;
 
@@ -297,7 +292,6 @@
 		void __iomem *mem_base0, *mem_base1;
 		void *dma_coherent;
 		dma_addr_t dma_coherent_handle, dma_addr;
-		uint32_t intmask_org;
 		struct CommandControlBlock *ccb_tmp;
 		int i, j;
 
@@ -333,11 +327,13 @@
 		reg = (struct MessageUnit_B *)(dma_coherent +
 		ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock));
 		acb->pmuB = reg;
-		mem_base0 = pci_ioremap_bar(pdev, 0);
+		mem_base0 = ioremap(pci_resource_start(pdev, 0),
+					pci_resource_len(pdev, 0));
 		if (!mem_base0)
 			goto out;
 
-		mem_base1 = pci_ioremap_bar(pdev, 2);
+		mem_base1 = ioremap(pci_resource_start(pdev, 2),
+					pci_resource_len(pdev, 2));
 		if (!mem_base1) {
 			iounmap(mem_base0);
 			goto out;
@@ -357,12 +353,6 @@
 		for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
 			for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
 				acb->devstate[i][j] = ARECA_RAID_GOOD;
-
-		/*
-		** here we need to tell iop 331 our ccb_tmp.HighPart
-		** if ccb_tmp.HighPart is not zero
-		*/
-		intmask_org = arcmsr_disable_outbound_ints(acb);
 		}
 		break;
 	}
@@ -374,6 +364,88 @@
 		sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
 	return -ENOMEM;
 }
+static void arcmsr_message_isr_bh_fn(struct work_struct *work)
+{
+	struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+
+	switch (acb->adapter_type) {
+		case ACB_ADAPTER_TYPE_A: {
+
+			struct MessageUnit_A __iomem *reg  = acb->pmuA;
+			char *acb_dev_map = (char *)acb->device_map;
+			uint32_t __iomem *signature = (uint32_t __iomem *) (&reg->message_rwbuffer[0]);
+			char __iomem *devicemap = (char __iomem *) (&reg->message_rwbuffer[21]);
+			int target, lun;
+			struct scsi_device *psdev;
+			char diff;
+
+			atomic_inc(&acb->rq_map_token);
+			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
+				for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
+					diff = (*acb_dev_map)^readb(devicemap);
+					if (diff != 0) {
+						char temp;
+						*acb_dev_map = readb(devicemap);
+						temp = *acb_dev_map;
+						for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
+							if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
+								scsi_add_device(acb->host, 0, target, lun);
+							} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
+								psdev = scsi_device_lookup(acb->host, 0, target, lun);
+								if (psdev != NULL) {
+									scsi_remove_device(psdev);
+									scsi_device_put(psdev);
+								}
+							}
+							temp >>= 1;
+							diff >>= 1;
+						}
+					}
+					devicemap++;
+					acb_dev_map++;
+				}
+			}
+			break;
+		}
+
+		case ACB_ADAPTER_TYPE_B: {
+			struct MessageUnit_B *reg  = acb->pmuB;
+			char *acb_dev_map = (char *)acb->device_map;
+			uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer_reg[0]);
+			char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer_reg[21]);
+			int target, lun;
+			struct scsi_device *psdev;
+			char diff;
+
+			atomic_inc(&acb->rq_map_token);
+			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
+				for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
+					diff = (*acb_dev_map)^readb(devicemap);
+					if (diff != 0) {
+						char temp;
+						*acb_dev_map = readb(devicemap);
+						temp = *acb_dev_map;
+						for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
+							if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
+								scsi_add_device(acb->host, 0, target, lun);
+							} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
+								psdev = scsi_device_lookup(acb->host, 0, target, lun);
+								if (psdev != NULL) {
+									scsi_remove_device(psdev);
+									scsi_device_put(psdev);
+								}
+							}
+							temp >>= 1;
+							diff >>= 1;
+						}
+					}
+					devicemap++;
+					acb_dev_map++;
+				}
+			}
+		}
+	}
+}
 
 static int arcmsr_probe(struct pci_dev *pdev,
 	const struct pci_device_id *id)
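
arcmsr_message_isr_bh_fn() added above compares the driver's cached per-target device map against the copy the firmware writes into the message buffer (one byte per target, one bit per LUN, starting at rwbuffer word 21). XORing the two bytes yields exactly the LUNs that changed state, and walking those bits distinguishes newly present LUNs (scsi_add_device) from vanished ones (scsi_remove_device). A compilable userspace simulation of that bit-walk; the report_* helpers are placeholders for the SCSI midlayer calls:

#include <stdio.h>
#include <stdint.h>

#define MAX_TARGETS 16
#define MAX_LUNS    8

static void report_add(int target, int lun)    { printf("add    %d:%d\n", target, lun); }
static void report_remove(int target, int lun) { printf("remove %d:%d\n", target, lun); }

/* Compare cached map against the firmware's copy and emit hotplug events. */
static void scan_device_map(uint8_t cached[MAX_TARGETS],
			    const uint8_t fresh[MAX_TARGETS])
{
	for (int target = 0; target < MAX_TARGETS; target++) {
		uint8_t diff = cached[target] ^ fresh[target];
		if (!diff)
			continue;		/* nothing changed on this target */
		uint8_t now = fresh[target];
		cached[target] = now;		/* remember the new state */
		for (int lun = 0; lun < MAX_LUNS; lun++) {
			if (diff & 1) {		/* this LUN flipped state */
				if (now & 1)
					report_add(target, lun);
				else
					report_remove(target, lun);
			}
			now  >>= 1;
			diff >>= 1;
		}
	}
}

int main(void)
{
	uint8_t cached[MAX_TARGETS] = { 0x03 };	/* target 0: LUN 0 and 1 present */
	uint8_t fresh[MAX_TARGETS]  = { 0x05 };	/* LUN 1 gone, LUN 2 appeared    */
	scan_device_map(cached, fresh);
	return 0;
}
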
@@ -432,17 +504,17 @@
 			   ACB_F_MESSAGE_WQBUFFER_READED);
 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 	INIT_LIST_HEAD(&acb->ccb_free_list);
-
+	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
 	error = arcmsr_alloc_ccb_pool(acb);
 	if (error)
 		goto out_release_regions;
 
+	arcmsr_iop_init(acb);
 	error = request_irq(pdev->irq, arcmsr_do_interrupt,
 			    IRQF_SHARED, "arcmsr", acb);
 	if (error)
 		goto out_free_ccb_pool;
 
-	arcmsr_iop_init(acb);
 	pci_set_drvdata(pdev, host);
 	if (strncmp(acb->firm_version, "V1.42", 5) >= 0)
 		host->max_sectors= ARCMSR_MAX_XFER_SECTORS_B;
@@ -459,6 +531,14 @@
 	#ifdef CONFIG_SCSI_ARCMSR_AER
 	pci_enable_pcie_error_reporting(pdev);
 	#endif
+	atomic_set(&acb->rq_map_token, 16);
+	acb->fw_state = true;
+	init_timer(&acb->eternal_timer);
+	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(10*HZ);
+	acb->eternal_timer.data = (unsigned long) acb;
+	acb->eternal_timer.function = &arcmsr_request_device_map;
+	add_timer(&acb->eternal_timer);
+
 	return 0;
  out_free_sysfs:
  out_free_irq:
@@ -518,40 +598,48 @@
 	return 0xff;
 }
 
-static void arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 
 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
-	if (arcmsr_hba_wait_msgint_ready(acb))
+	if (arcmsr_hba_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
 			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
 			, acb->host->host_no);
+		return 0xff;
+	}
+	return 0x00;
 }
 
-static void arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 
 	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell_reg);
-	if (arcmsr_hbb_wait_msgint_ready(acb))
+	if (arcmsr_hbb_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
 			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
 			, acb->host->host_no);
+		return 0xff;
+	}
+	return 0x00;
 }
 
-static void arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
 {
+	uint8_t rtnval = 0;
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
-		arcmsr_abort_hba_allcmd(acb);
+		rtnval = arcmsr_abort_hba_allcmd(acb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		arcmsr_abort_hbb_allcmd(acb);
+		rtnval = arcmsr_abort_hbb_allcmd(acb);
 		}
 	}
+	return rtnval;
 }
 
 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
@@ -649,8 +737,7 @@
 
 	case ACB_ADAPTER_TYPE_A : {
 		struct MessageUnit_A __iomem *reg = acb->pmuA;
-		orig_mask = readl(&reg->outbound_intmask)|\
-				ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE;
+		orig_mask = readl(&reg->outbound_intmask);
 		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \
 						&reg->outbound_intmask);
 		}
@@ -658,8 +745,7 @@
 
 	case ACB_ADAPTER_TYPE_B : {
 		struct MessageUnit_B *reg = acb->pmuB;
-		orig_mask = readl(reg->iop2drv_doorbell_mask_reg) & \
-					(~ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
+		orig_mask = readl(reg->iop2drv_doorbell_mask_reg);
 		writel(0, reg->iop2drv_doorbell_mask_reg);
 		}
 		break;
@@ -795,12 +881,13 @@
 	struct AdapterControlBlock *acb =
 		(struct AdapterControlBlock *) host->hostdata;
 	int poll_count = 0;
-
 	arcmsr_free_sysfs_attr(acb);
 	scsi_remove_host(host);
+	flush_scheduled_work();
+	del_timer_sync(&acb->eternal_timer);
+	arcmsr_disable_outbound_ints(acb);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
-	arcmsr_disable_outbound_ints(acb);
 	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
 	acb->acb_flags &= ~ACB_F_IOP_INITED;
 
@@ -841,7 +928,9 @@
 	struct Scsi_Host *host = pci_get_drvdata(pdev);
 	struct AdapterControlBlock *acb =
 		(struct AdapterControlBlock *)host->hostdata;
-
+	del_timer_sync(&acb->eternal_timer);
+	arcmsr_disable_outbound_ints(acb);
+	flush_scheduled_work();
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
 }
@@ -861,7 +950,7 @@
 module_init(arcmsr_module_init);
 module_exit(arcmsr_module_exit);
 
-static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, \
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
 						u32 intmask_org)
 {
 	u32 mask;
@@ -871,7 +960,8 @@
 	case ACB_ADAPTER_TYPE_A : {
 		struct MessageUnit_A __iomem *reg = acb->pmuA;
 		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
-			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE);
+			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
+			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
 		writel(mask, &reg->outbound_intmask);
 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
 		}
@@ -879,8 +969,10 @@
 
 	case ACB_ADAPTER_TYPE_B : {
 		struct MessageUnit_B *reg = acb->pmuB;
-		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK | \
-			ARCMSR_IOP2DRV_DATA_READ_OK | ARCMSR_IOP2DRV_CDB_DONE);
+		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
+			ARCMSR_IOP2DRV_DATA_READ_OK |
+			ARCMSR_IOP2DRV_CDB_DONE |
+			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
 		writel(mask, reg->iop2drv_doorbell_mask_reg);
 		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
 		}
@@ -1048,8 +1140,8 @@
 	}
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
-		iounmap(reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
-		iounmap(reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
+		iounmap((u8 *)reg->drv2iop_doorbell_reg - ARCMSR_DRV2IOP_DOORBELL);
+		iounmap((u8 *)reg->ioctl_wbuffer_reg - ARCMSR_IOCTL_WBUFFER);
 		dma_free_coherent(&acb->pdev->dev,
 		(ARCMSR_MAX_FREECCB_NUM * sizeof(struct CommandControlBlock) + 0x20 +
 		sizeof(struct MessageUnit_B)), acb->dma_coherent, acb->dma_coherent_handle);
@@ -1249,13 +1341,36 @@
 		reg->doneq_index = index;
 	}
 }
+/*
+**********************************************************************************
+** Handle a message interrupt
+**
+** The only message interrupt we expect is in response to a query for the current adapter config.
+** We want this in order to compare the drivemap so that we can detect newly-attached drives.
+**********************************************************************************
+*/
+static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_A *reg  = acb->pmuA;
 
+	/*clear interrupt and message state*/
+	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
+	schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_B *reg  = acb->pmuB;
+
+	/*clear interrupt and message state*/
+	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell_reg);
+	schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
 static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_intstatus;
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 
-	outbound_intstatus = readl(&reg->outbound_intstatus) & \
+	outbound_intstatus = readl(&reg->outbound_intstatus) &
 							acb->outbound_int_enable;
 	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))	{
 		return 1;
@@ -1267,6 +1382,10 @@
 	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
 		arcmsr_hba_postqueue_isr(acb);
 	}
+	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 	{
+		/* messenger of "driver to iop commands" */
+		arcmsr_hba_message_isr(acb);
+	}
 	return 0;
 }
 
@@ -1275,13 +1394,14 @@
 	uint32_t outbound_doorbell;
 	struct MessageUnit_B *reg = acb->pmuB;
 
-	outbound_doorbell = readl(reg->iop2drv_doorbell_reg) & \
+	outbound_doorbell = readl(reg->iop2drv_doorbell_reg) &
 							acb->outbound_int_enable;
 	if (!outbound_doorbell)
 		return 1;
 
 	writel(~outbound_doorbell, reg->iop2drv_doorbell_reg);
-	/*in case the last action of doorbell interrupt clearance is cached, this action can push HW to write down the clear bit*/
+	/*in case the last action of doorbell interrupt clearance is cached,
+	this action can push HW to write down the clear bit*/
 	readl(reg->iop2drv_doorbell_reg);
 	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell_reg);
 	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) 	{
@@ -1293,6 +1413,10 @@
 	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
 		arcmsr_hbb_postqueue_isr(acb);
 	}
+	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
+		/* messenger of "driver to iop commands" */
+		arcmsr_hbb_message_isr(acb);
+	}
 
 	return 0;
 }
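
Both new message ISRs above do the minimum in interrupt context: acknowledge the message interrupt with a single writel() and hand the slow device-map rescan to a workqueue via schedule_work(). A userspace analogue of that top-half/bottom-half split, using a flag plus condition variable in place of schedule_work() (purely illustrative; the driver uses the kernel's shared workqueue, not threads of its own). Build with cc -pthread:

#include <pthread.h>
#include <stdio.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static bool pending, stop;

/* "Top half": called from the (simulated) interrupt path; must be quick. */
static void fake_message_isr(void)
{
	pthread_mutex_lock(&lock);
	pending = true;			/* mirrors schedule_work() */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* "Bottom half": the slow rescan runs here, outside interrupt context. */
static void *worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!pending && !stop)
			pthread_cond_wait(&kick, &lock);
		if (pending) {
			pending = false;
			pthread_mutex_unlock(&lock);
			printf("rescanning device map...\n");	/* the heavy work */
			pthread_mutex_lock(&lock);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	pthread_create(&tid, NULL, worker, NULL);
	fake_message_isr();		/* pretend the adapter raised MESSAGE_CMD_DONE */
	pthread_mutex_lock(&lock);
	stop = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(tid, NULL);
	return 0;
}
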
@@ -1360,7 +1484,7 @@
 	}
 }
 
-static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, \
+static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 					struct scsi_cmnd *cmd)
 {
 	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
@@ -1398,6 +1522,13 @@
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
+
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
+
 		ptmpQbuffer = ver_addr;
 		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
 			&& (allxfer_len < 1031)) {
@@ -1444,6 +1575,12 @@
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
+
 		ptmpuserbuffer = ver_addr;
 		user_len = pcmdmessagefld->cmdmessage.Length;
 		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
@@ -1496,6 +1633,11 @@
 
 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
 		uint8_t *pQbuffer = acb->rqbuffer;
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 
 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1511,6 +1653,11 @@
 
 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
 		uint8_t *pQbuffer = acb->wqbuffer;
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 
 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1529,6 +1676,11 @@
 
 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 		uint8_t *pQbuffer;
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 
 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
@@ -1551,13 +1703,22 @@
 		break;
 
 	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_3F;
 		}
 		break;
 
 	case ARCMSR_MESSAGE_SAY_HELLO: {
 		int8_t *hello_string = "Hello! I am ARCMSR";
-
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
 			, (int16_t)strlen(hello_string));
 		pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
@@ -1565,10 +1726,20 @@
 		break;
 
 	case ARCMSR_MESSAGE_SAY_GOODBYE:
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 		arcmsr_iop_parking(acb);
 		break;
 
 	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
+		if (!acb->fw_state) {
+			pcmdmessagefld->cmdmessage.ReturnCode =
+			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+			goto message_out;
+		}
 		arcmsr_flush_adapter_cache(acb);
 		break;
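
Each user-visible message path above now starts with the same guard: if the periodic poll has declared the firmware dead (fw_state cleared because GET_CONFIG stopped being answered), the ioctl is failed immediately with ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON instead of touching a hung controller. A compact sketch of that gating pattern; the op names and the adapter struct here are illustrative, only the return code mirrors the patch:

#include <stdio.h>
#include <stdbool.h>

#define RC_OK       0x00000001u
#define RC_HANG_ON  0x00000088u	/* mirrors ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON */

enum msg_op { OP_READ_BUF, OP_WRITE_BUF, OP_FLUSH_CACHE };

struct adapter { bool fw_alive; };

/* Refuse to drive a hung controller; otherwise dispatch the request. */
static unsigned int handle_message(struct adapter *acb, enum msg_op op)
{
	if (!acb->fw_alive)
		return RC_HANG_ON;	/* common guard at the top of every op */

	switch (op) {
	case OP_READ_BUF:	/* ... touch the mailbox registers ... */ break;
	case OP_WRITE_BUF:	/* ... */ break;
	case OP_FLUSH_CACHE:	/* ... */ break;
	}
	return RC_OK;
}

int main(void)
{
	struct adapter acb = { .fw_alive = false };
	printf("rc = 0x%08x\n", handle_message(&acb, OP_FLUSH_CACHE));
	return 0;
}
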
 
@@ -1651,16 +1822,57 @@
 	struct CommandControlBlock *ccb;
 	int target = cmd->device->id;
 	int lun = cmd->device->lun;
-
+	uint8_t scsicmd = cmd->cmnd[0];
 	cmd->scsi_done = done;
 	cmd->host_scribble = NULL;
 	cmd->result = 0;
+
+	if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
+		if (acb->devstate[target][lun] == ARECA_RAID_GONE) {
+			cmd->result = (DID_NO_CONNECT << 16);
+		}
+		cmd->scsi_done(cmd);
+		return 0;
+	}
+
 	if (acb->acb_flags & ACB_F_BUS_RESET) {
-		printk(KERN_NOTICE "arcmsr%d: bus reset"
-			" and return busy \n"
-			, acb->host->host_no);
+		switch (acb->adapter_type) {
+			case ACB_ADAPTER_TYPE_A: {
+				struct MessageUnit_A __iomem *reg = acb->pmuA;
+				uint32_t intmask_org, outbound_doorbell;
+
+				if ((readl(&reg->outbound_msgaddr1) &
+					ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
+					printk(KERN_NOTICE "arcmsr%d: bus reset and return busy\n",
+						acb->host->host_no);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
+
+				acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
+				printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok\n",
+					acb->host->host_no);
+				/* disable all outbound interrupt */
+				intmask_org = arcmsr_disable_outbound_ints(acb);
+				arcmsr_get_firmware_spec(acb, 1);
+				/*start background rebuild*/
+				arcmsr_start_adapter_bgrb(acb);
+				/* clear Qbuffer if door bell ringed */
+				outbound_doorbell = readl(&reg->outbound_doorbell);
+				/*clear interrupt */
+				writel(outbound_doorbell, &reg->outbound_doorbell);
+				writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK,
+					&reg->inbound_doorbell);
+				/* enable outbound Post Queue,outbound doorbell Interrupt */
+				arcmsr_enable_outbound_ints(acb, intmask_org);
+				acb->acb_flags |= ACB_F_IOP_INITED;
+				acb->acb_flags &= ~ACB_F_BUS_RESET;
+			}
+			break;
+			case ACB_ADAPTER_TYPE_B: {
+			}
+		}
+	}
+
 	if (target == 16) {
 		/* virtual device for iop message transfer */
 		arcmsr_handle_virtual_command(acb, cmd);
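
The queuecommand change above short-circuits SYNCHRONIZE_CACHE and SEND_DIAGNOSTIC: if the target is already marked ARECA_RAID_GONE the command is completed on the spot with DID_NO_CONNECT rather than being queued to firmware that will never answer. A userspace sketch of that triage; the command struct and complete_cmd() are placeholders for the SCSI midlayer types:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define SYNCHRONIZE_CACHE 0x35
#define SEND_DIAGNOSTIC   0x1d
#define DID_NO_CONNECT    0x01

struct fake_cmd {
	uint8_t  opcode;
	int      target;
	uint32_t result;
};

static bool raid_gone[16];	/* stand-in for acb->devstate[][] */

static void complete_cmd(struct fake_cmd *cmd)	/* stand-in for scsi_done() */
{
	printf("opcode 0x%02x completed, result 0x%08x\n",
	       (unsigned)cmd->opcode, (unsigned)cmd->result);
}

static int queue_cmd(struct fake_cmd *cmd)
{
	if (cmd->opcode == SYNCHRONIZE_CACHE || cmd->opcode == SEND_DIAGNOSTIC) {
		if (raid_gone[cmd->target])
			cmd->result = DID_NO_CONNECT << 16;
		complete_cmd(cmd);	/* done right here, never hits the adapter */
		return 0;
	}
	/* ... normal path: build a CCB and post it to the controller ... */
	return 0;
}

int main(void)
{
	raid_gone[3] = true;
	struct fake_cmd c = { .opcode = SYNCHRONIZE_CACHE, .target = 3 };
	return queue_cmd(&c);
}
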
@@ -1699,21 +1911,25 @@
 	return 0;
 }
 
-static void arcmsr_get_hba_config(struct AdapterControlBlock *acb)
+static void *arcmsr_get_hba_config(struct AdapterControlBlock *acb, int mode)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
+	char *acb_device_map = acb->device_map;
 	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
 	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
+	char __iomem *iop_device_map = (char __iomem *) (&reg->message_rwbuffer[21]);
 	int count;
 
 	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
 	if (arcmsr_hba_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
+		return NULL;
 	}
 
+	if (mode == 1) {
 	count = 8;
 	while (count) {
 		*acb_firm_model = readb(iop_firm_model);
@@ -1730,34 +1946,48 @@
 		count--;
 	}
 
+		count = 16;
+		while (count) {
+			*acb_device_map = readb(iop_device_map);
+			acb_device_map++;
+			iop_device_map++;
+			count--;
+		}
+
 	printk(KERN_INFO 	"ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n"
 		, acb->host->host_no
 		, acb->firm_version);
-
+		acb->signature = readl(&reg->message_rwbuffer[0]);
 	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
 	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
 	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
 	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
 }
-
-static void arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
+	return reg->message_rwbuffer;
+}
+static void __iomem *arcmsr_get_hbb_config(struct AdapterControlBlock *acb, int mode)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	uint32_t __iomem *lrwbuffer = reg->msgcode_rwbuffer_reg;
 	char *acb_firm_model = acb->firm_model;
 	char *acb_firm_version = acb->firm_version;
+	char *acb_device_map = acb->device_map;
 	char __iomem *iop_firm_model = (char __iomem *)(&lrwbuffer[15]);
 	/*firm_model,15,60-67*/
 	char __iomem *iop_firm_version = (char __iomem *)(&lrwbuffer[17]);
 	/*firm_version,17,68-83*/
+	char __iomem *iop_device_map = (char __iomem *) (&lrwbuffer[21]);
+	/*firm_version,21,84-99*/
 	int count;
 
 	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
 	if (arcmsr_hbb_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
+		return NULL;
 	}
 
+	if (mode == 1) {
 	count = 8;
 	while (count)
 	{
@@ -1776,11 +2006,20 @@
 		count--;
 	}
 
+		count = 16;
+		while (count) {
+			*acb_device_map = readb(iop_device_map);
+			acb_device_map++;
+			iop_device_map++;
+			count--;
+		}
+
 	printk(KERN_INFO "ARECA RAID ADAPTER%d: FIRMWARE VERSION %s \n",
 			acb->host->host_no,
 			acb->firm_version);
 
-	lrwbuffer++;
+		acb->signature = readl(lrwbuffer++);
+		/*firm_signature,1,00-03*/
 	acb->firm_request_len = readl(lrwbuffer++);
 	/*firm_request_len,1,04-07*/
 	acb->firm_numbers_queue = readl(lrwbuffer++);
@@ -1790,20 +2029,23 @@
 	acb->firm_hd_channels = readl(lrwbuffer);
 	/*firm_ide_channels,4,16-19*/
 }
-
-static void arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
+	return reg->msgcode_rwbuffer_reg;
+}
+static void *arcmsr_get_firmware_spec(struct AdapterControlBlock *acb, int mode)
 {
+	void *rtnval = 0;
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
-		arcmsr_get_hba_config(acb);
+		rtnval = arcmsr_get_hba_config(acb, mode);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		arcmsr_get_hbb_config(acb);
+		rtnval = arcmsr_get_hbb_config(acb, mode);
 		}
 		break;
 	}
+	return rtnval;
 }
 
 static void arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
@@ -2043,6 +2285,66 @@
 	}
 }
 
+static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_A __iomem *reg = acb->pmuA;
+
+	if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
+		acb->fw_state = false;
+	} else {
+	/*to prevent rq_map_token from changing by other interrupt, then
+	avoid the dead-lock*/
+		acb->fw_state = true;
+		atomic_dec(&acb->rq_map_token);
+		if (!(acb->fw_state) ||
+			(acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
+			atomic_set(&acb->rq_map_token, 16);
+		}
+		acb->ante_token_value = atomic_read(&acb->rq_map_token);
+		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
+	}
+	mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
+	return;
+}
+
+static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_B __iomem *reg = acb->pmuB;
+
+	if (unlikely(atomic_read(&acb->rq_map_token) == 0)) {
+		acb->fw_state = false;
+	} else {
+	/*to prevent rq_map_token from changing by other interrupt, then
+	avoid the dead-lock*/
+		acb->fw_state = true;
+		atomic_dec(&acb->rq_map_token);
+		if (!(acb->fw_state) ||
+			(acb->ante_token_value == atomic_read(&acb->rq_map_token))) {
+			atomic_set(&acb->rq_map_token, 16);
+		}
+		acb->ante_token_value = atomic_read(&acb->rq_map_token);
+		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell_reg);
+	}
+	mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6000));
+	return;
+}
+
+static void arcmsr_request_device_map(unsigned long pacb)
+{
+	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
+
+	switch (acb->adapter_type) {
+		case ACB_ADAPTER_TYPE_A: {
+			arcmsr_request_hba_device_map(acb);
+		}
+		break;
+		case ACB_ADAPTER_TYPE_B: {
+			arcmsr_request_hbb_device_map(acb);
+		}
+		break;
+	}
+}
+
 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
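
arcmsr_request_hba_device_map()/..._hbb_... above implement a simple heartbeat: every 6 seconds the eternal_timer fires, the rq_map_token counter is decremented and a GET_CONFIG message is posted; the message bottom half increments the token when firmware answers, so a healthy adapter never lets it drain. If the token hits zero the firmware is marked dead (fw_state = false), and when the count stops moving it is topped back up to 16. A userspace simulation of that bookkeeping, with a plain int standing in for the atomic_t and the timer replaced by explicit calls:

#include <stdio.h>
#include <stdbool.h>

struct hb {
	int  token;		/* rq_map_token: replenished by firmware replies */
	int  last_seen;		/* ante_token_value: value at the previous poll  */
	bool fw_alive;		/* fw_state                                      */
};

/* Called from the (simulated) message-done bottom half: firmware answered. */
static void firmware_replied(struct hb *hb)
{
	hb->token++;
}

/* Called from the (simulated) 6-second timer. */
static void poll_tick(struct hb *hb)
{
	if (hb->token == 0) {		/* no reply for many polls: give up */
		hb->fw_alive = false;
		return;
	}
	hb->fw_alive = true;
	hb->token--;
	if (hb->last_seen == hb->token)	/* count stalled: top it back up */
		hb->token = 16;
	hb->last_seen = hb->token;
	/* ... post GET_CONFIG to the adapter here ... */
}

int main(void)
{
	struct hb hb = { .token = 16, .fw_alive = true };
	for (int i = 0; i < 5; i++) {
		poll_tick(&hb);
		firmware_replied(&hb);	/* healthy firmware keeps answering */
	}
	printf("token=%d alive=%d\n", hb.token, hb.fw_alive);
	return 0;
}
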
@@ -2121,6 +2423,60 @@
 	return;
 }
 
+static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
+{
+	uint8_t value[64];
+	int i;
+
+	/* backup pci config data */
+	for (i = 0; i < 64; i++) {
+		pci_read_config_byte(acb->pdev, i, &value[i]);
+	}
+	/* hardware reset signal */
+	pci_write_config_byte(acb->pdev, 0x84, 0x20);
+	msleep(1000);
+	/* write back pci config data */
+	for (i = 0; i < 64; i++) {
+		pci_write_config_byte(acb->pdev, i, value[i]);
+	}
+	msleep(1000);
+	return;
+}
+/*
+****************************************************************************
+****************************************************************************
+*/
+#ifdef CONFIG_SCSI_ARCMSR_RESET
+	int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
+	{
+			struct Scsi_Host *shost = NULL;
+			spinlock_t *host_lock = NULL;
+			int i, isleep;
+
+			shost = cmd->device->host;
+			host_lock = shost->host_lock;
+
+			printk(KERN_NOTICE "Host %d bus reset over, sleep %d seconds (busy %d, can queue %d) ...........\n",
+					shost->host_no, sleeptime, shost->host_busy, shost->can_queue);
+			isleep = sleeptime / 10;
+			spin_unlock_irq(host_lock);
+			if (isleep > 0) {
+				for (i = 0; i < isleep; i++) {
+					msleep(10000);
+					printk(KERN_NOTICE "^%d^\n", i);
+				}
+			}
+
+			isleep = sleeptime % 10;
+			if (isleep > 0) {
+				msleep(isleep * 1000);
+				printk(KERN_NOTICE "^v^\n");
+			}
+			spin_lock_irq(host_lock);
+			printk(KERN_NOTICE "***** wake up *****\n");
+			return 0;
+	}
+#endif
 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
 {
 	uint32_t intmask_org;
@@ -2129,7 +2485,7 @@
        intmask_org = arcmsr_disable_outbound_ints(acb);
 	arcmsr_wait_firmware_ready(acb);
 	arcmsr_iop_confirm(acb);
-	arcmsr_get_firmware_spec(acb);
+	arcmsr_get_firmware_spec(acb, 1);
 	/*start background rebuild*/
 	arcmsr_start_adapter_bgrb(acb);
 	/* empty doorbell Qbuffer if door bell ringed */
@@ -2140,51 +2496,110 @@
 	acb->acb_flags |= ACB_F_IOP_INITED;
 }
 
-static void arcmsr_iop_reset(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
 {
 	struct CommandControlBlock *ccb;
 	uint32_t intmask_org;
+	uint8_t rtnval = 0x00;
 	int i = 0;
 
 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
-		/* talk to iop 331 outstanding command aborted */
-		arcmsr_abort_allcmd(acb);
-
-		/* wait for 3 sec for all command aborted*/
-		ssleep(3);
-
 		/* disable all outbound interrupt */
 		intmask_org = arcmsr_disable_outbound_ints(acb);
+		/* talk to iop 331 outstanding command aborted */
+		rtnval = arcmsr_abort_allcmd(acb);
+		/* wait for 3 sec for all command aborted*/
+		ssleep(3);
 		/* clear all outbound posted Q */
 		arcmsr_done4abort_postqueue(acb);
 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 			ccb = acb->pccb_pool[i];
 			if (ccb->startdone == ARCMSR_CCB_START) {
-				ccb->startdone = ARCMSR_CCB_ABORTED;
 				arcmsr_ccb_complete(ccb, 1);
 			}
 		}
+		atomic_set(&acb->ccboutstandingcount, 0);
 		/* enable all outbound interrupt */
 		arcmsr_enable_outbound_ints(acb, intmask_org);
+		return rtnval;
 	}
+	return rtnval;
 }
 
 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
 {
 	struct AdapterControlBlock *acb =
 		(struct AdapterControlBlock *)cmd->device->host->hostdata;
-	int i;
+	int retry = 0;
 
-	acb->num_resets++;
+	if (acb->acb_flags & ACB_F_BUS_RESET)
+		return SUCCESS;
+
+	printk(KERN_NOTICE "arcmsr%d: bus reset ..... \n", acb->adapter_index);
 	acb->acb_flags |= ACB_F_BUS_RESET;
-	for (i = 0; i < 400; i++) {
-		if (!atomic_read(&acb->ccboutstandingcount))
-			break;
-		arcmsr_interrupt(acb);/* FIXME: need spinlock */
-		msleep(25);
+	acb->num_resets++;
+	while (atomic_read(&acb->ccboutstandingcount) != 0 && retry < 4) {
+		arcmsr_interrupt(acb);
+		retry++;
 	}
-	arcmsr_iop_reset(acb);
+
+	if (arcmsr_iop_reset(acb)) {
+		switch (acb->adapter_type) {
+		case ACB_ADAPTER_TYPE_A: {
+			printk(KERN_NOTICE "arcmsr%d: do hardware bus reset, num_resets = %d num_aborts = %d \n",
+				acb->adapter_index, acb->num_resets, acb->num_aborts);
+			arcmsr_hardware_reset(acb);
+			acb->acb_flags |= ACB_F_FIRMWARE_TRAP;
+			acb->acb_flags &= ~ACB_F_IOP_INITED;
+			#ifdef CONFIG_SCSI_ARCMSR_RESET
+			struct MessageUnit_A __iomem *reg = acb->pmuA;
+			uint32_t intmask_org, outbound_doorbell;
+			int retry_count = 0;
+sleep_again:
+			arcmsr_sleep_for_bus_reset(cmd);
+			if ((readl(&reg->outbound_msgaddr1) &
+			ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
+			printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry=%d \n",
+			acb->host->host_no, retry_count);
+			if (retry_count > retrycount) {
+				printk(KERN_NOTICE "arcmsr%d: hardware bus reset and return busy, retry aborted \n",
+				acb->host->host_no);
+			return SUCCESS;
+			}
+			retry_count++;
+			goto sleep_again;
+			}
+			acb->acb_flags &= ~ACB_F_FIRMWARE_TRAP;
+			acb->acb_flags |= ACB_F_IOP_INITED;
+			acb->acb_flags &= ~ACB_F_BUS_RESET;
+			printk(KERN_NOTICE "arcmsr%d: hardware bus reset and reset ok \n",
+				acb->host->host_no);
+			/* disable all outbound interrupt */
+			intmask_org = arcmsr_disable_outbound_ints(acb);
+			arcmsr_get_firmware_spec(acb, 1);
+			/*start	background rebuild*/
+			arcmsr_start_adapter_bgrb(acb);
+			/* clear Qbuffer if door bell ringed */
+			outbound_doorbell = readl(&reg->outbound_doorbell);
+			writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
+			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
+			/* enable outbound Post Queue,outbound doorbell Interrupt */
+			arcmsr_enable_outbound_ints(acb, intmask_org);
+			atomic_set(&acb->rq_map_token, 16);
+			init_timer(&acb->eternal_timer);
+			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(20*HZ);
+			acb->eternal_timer.data = (unsigned long) acb;
+			acb->eternal_timer.function = &arcmsr_request_device_map;
+			add_timer(&acb->eternal_timer);
+			#endif
+		}
+			break;
+		case ACB_ADAPTER_TYPE_B: {
+	}
+		}
+	} else {
 	acb->acb_flags &= ~ACB_F_BUS_RESET;
+	}
 	return SUCCESS;
 }
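
The reworked arcmsr_bus_reset() above escalates: it first tries the soft IOP reset (abort all outstanding commands), and only when that reports failure does it assert the hardware reset and then wait for the firmware-ok bit, sleeping in chunks (arcmsr_sleep_for_bus_reset() splits sleeptime into 10-second slices) and giving up after retrycount attempts. A userspace sketch of that bounded wait loop; firmware_ready() is a placeholder for the MESG1_FIRMWARE_OK register test, and the timings are scaled down so the demo finishes quickly (the driver defaults are 20 and 12):

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

static int sleeptime = 2;	/* seconds per attempt (driver default: 20) */
static int retrycount = 3;	/* attempts before giving up (driver default: 12) */

static bool firmware_ready(void)
{
	/* placeholder: the driver reads outbound_msgaddr1 and tests FIRMWARE_OK */
	static int calls;
	return ++calls >= 3;
}

/* Sleep 'sleeptime' seconds in 10-second slices, like arcmsr_sleep_for_bus_reset(). */
static void sleep_for_reset(void)
{
	for (int i = 0; i < sleeptime / 10; i++)
		sleep(10);
	if (sleeptime % 10)
		sleep(sleeptime % 10);
}

static int wait_for_firmware(void)
{
	for (int attempt = 0; attempt <= retrycount; attempt++) {
		sleep_for_reset();
		if (firmware_ready()) {
			printf("firmware back after %d attempt(s)\n", attempt + 1);
			return 0;	/* re-init: get spec, restart rebuild, re-enable irqs */
		}
		printf("still not ready, retry %d\n", attempt + 1);
	}
	return -1;			/* give up; error handling escalates further */
}

int main(void)
{
	return wait_for_firmware() ? 1 : 0;
}
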
 
@@ -2277,98 +2692,3 @@
 			ARCMSR_DRIVER_VERSION);
 	return buf;
 }
-#ifdef CONFIG_SCSI_ARCMSR_AER
-static pci_ers_result_t arcmsr_pci_slot_reset(struct pci_dev *pdev)
-{
-	struct Scsi_Host *host = pci_get_drvdata(pdev);
-	struct AdapterControlBlock *acb =
-		(struct AdapterControlBlock *) host->hostdata;
-	uint32_t intmask_org;
-	int i, j;
-
-	if (pci_enable_device(pdev)) {
-		return PCI_ERS_RESULT_DISCONNECT;
-	}
-	pci_set_master(pdev);
-	intmask_org = arcmsr_disable_outbound_ints(acb);
-	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
-			   ACB_F_MESSAGE_RQBUFFER_CLEARED |
-			   ACB_F_MESSAGE_WQBUFFER_READED);
-	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
-	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
-		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
-			acb->devstate[i][j] = ARECA_RAID_GONE;
-
-	arcmsr_wait_firmware_ready(acb);
-	arcmsr_iop_confirm(acb);
-       /* disable all outbound interrupt */
-	arcmsr_get_firmware_spec(acb);
-	/*start background rebuild*/
-	arcmsr_start_adapter_bgrb(acb);
-	/* empty doorbell Qbuffer if door bell ringed */
-	arcmsr_clear_doorbell_queue_buffer(acb);
-	arcmsr_enable_eoi_mode(acb);
-	/* enable outbound Post Queue,outbound doorbell Interrupt */
-	arcmsr_enable_outbound_ints(acb, intmask_org);
-	acb->acb_flags |= ACB_F_IOP_INITED;
-
-	pci_enable_pcie_error_reporting(pdev);
-	return PCI_ERS_RESULT_RECOVERED;
-}
-
-static void arcmsr_pci_ers_need_reset_forepart(struct pci_dev *pdev)
-{
-	struct Scsi_Host *host = pci_get_drvdata(pdev);
-	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata;
-	struct CommandControlBlock *ccb;
-	uint32_t intmask_org;
-	int i = 0;
-
-	if (atomic_read(&acb->ccboutstandingcount) != 0) {
-		/* talk to iop 331 outstanding command aborted */
-		arcmsr_abort_allcmd(acb);
-		/* wait for 3 sec for all command aborted*/
-		ssleep(3);
-		/* disable all outbound interrupt */
-		intmask_org = arcmsr_disable_outbound_ints(acb);
-		/* clear all outbound posted Q */
-		arcmsr_done4abort_postqueue(acb);
-		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
-			ccb = acb->pccb_pool[i];
-			if (ccb->startdone == ARCMSR_CCB_START) {
-				ccb->startdone = ARCMSR_CCB_ABORTED;
-				arcmsr_ccb_complete(ccb, 1);
-			}
-		}
-		/* enable all outbound interrupt */
-		arcmsr_enable_outbound_ints(acb, intmask_org);
-	}
-	pci_disable_device(pdev);
-}
-
-static void arcmsr_pci_ers_disconnect_forepart(struct pci_dev *pdev)
-{
-			struct Scsi_Host *host = pci_get_drvdata(pdev);
-			struct AdapterControlBlock *acb	= \
-				(struct AdapterControlBlock *)host->hostdata;
-
-			arcmsr_stop_adapter_bgrb(acb);
-			arcmsr_flush_adapter_cache(acb);
-}
-
-static pci_ers_result_t arcmsr_pci_error_detected(struct pci_dev *pdev,
-						pci_channel_state_t state)
-{
-	switch (state) {
-	case pci_channel_io_frozen:
-			arcmsr_pci_ers_need_reset_forepart(pdev);
-			return PCI_ERS_RESULT_NEED_RESET;
-	case pci_channel_io_perm_failure:
-			arcmsr_pci_ers_disconnect_forepart(pdev);
-			return PCI_ERS_RESULT_DISCONNECT;
-			break;
-	default:
-			return PCI_ERS_RESULT_NEED_RESET;
-	  }
-}
-#endif
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index e641922..350cbea 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -167,10 +167,9 @@
 				&nonemb_cmd.dma);
 	if (nonemb_cmd.va == NULL) {
 		SE_DEBUG(DBG_LVL_1,
-			 "Failed to allocate memory for"
-			 "mgmt_invalidate_icds \n");
+			 "Failed to allocate memory for mgmt_invalidate_icds\n");
 		spin_unlock(&ctrl->mbox_lock);
-		return -1;
+		return 0;
 	}
 	nonemb_cmd.size = sizeof(struct invalidate_commands_params_in);
 	req = nonemb_cmd.va;
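
The be2iscsi hunk above changes the allocation-failure return from -1 to 0. The likely rationale, consistent with the commit title, is that this mgmt helper hands back a command tag, where zero already means "nothing was issued"; a negative errno returned through that convention would look to the caller like a huge, valid tag. A generic sketch of that convention (names and the tag value are illustrative, not the be2iscsi API):

#include <stdio.h>

/* Convention sketch: the helper returns a command tag, and 0 means
 * "nothing was issued".  Returning -1 from such a function would be
 * interpreted as a large, valid-looking tag by the caller. */
static unsigned int issue_invalidate(int simulate_alloc_failure)
{
	if (simulate_alloc_failure)
		return 0;		/* failure: no tag handed out */
	return 42;			/* hypothetical tag of the posted command */
}

int main(void)
{
	unsigned int tag = issue_invalidate(1);
	if (!tag) {
		fprintf(stderr, "invalidate not issued\n");
		return 1;
	}
	printf("waiting for completion of tag %u\n", tag);
	return 0;
}
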
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 0c08e18..3a7b3f8 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -84,11 +84,32 @@
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
 
+	dm_len += bfa_port_meminfo();
 
 	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
 	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
 }
 
+static void
+bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
+{
+	struct bfa_port_s       *port = &bfa->modules.port;
+	uint32_t                dm_len;
+	uint8_t                 *dm_kva;
+	uint64_t                dm_pa;
+
+	dm_len = bfa_port_meminfo();
+	dm_kva = bfa_meminfo_dma_virt(mi);
+	dm_pa  = bfa_meminfo_dma_phys(mi);
+
+	memset(port, 0, sizeof(struct bfa_port_s));
+	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod, bfa->logm);
+	bfa_port_mem_claim(port, dm_kva, dm_pa);
+
+	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
+	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
+}
+
 /**
  * Use this function to do attach the driver instance with the BFA
  * library. This function will not trigger any HW initialization
@@ -140,6 +161,7 @@
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);
 
+	bfa_com_port_attach(bfa, meminfo);
 }
 
 /**
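
The bfa fix above brings the port module into the driver's two-pass memory scheme: bfa_port_meminfo() is added to the DMA length requested up front, and the new bfa_com_port_attach() claims that slice by taking the current KVA/PA cursors from the meminfo structure and advancing them past the port's block. The diff suggests the fc_host statistics crash came from the port module never being attached, so its DMA area was never carved out. A userspace sketch of that carve-and-advance (bump-allocator) pattern, with stand-in types for the bfa structures:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

struct meminfo {		/* stand-in for the bfa_meminfo_s DMA cursors */
	uint8_t *kva;		/* next free kernel-virtual address */
	uint64_t pa;		/* matching physical address */
};

struct port_mod {		/* stand-in for bfa_port_s */
	uint8_t *dma_kva;
	uint64_t dma_pa;
};

static size_t port_meminfo(void) { return 256; }	/* bytes the module needs */

/* Pass 2: claim the slice and advance the cursors past it. */
static void port_attach(struct port_mod *port, struct meminfo *mi)
{
	size_t len = port_meminfo();

	port->dma_kva = mi->kva;
	port->dma_pa  = mi->pa;
	mi->kva += len;		/* anyone attached later gets the next slice */
	mi->pa  += len;
}

int main(void)
{
	/* Pass 1: total up every module's requirement before allocating. */
	size_t total = port_meminfo() /* + other modules' meminfo() ... */;
	uint8_t *block = calloc(1, total);
	if (!block)
		return 1;

	struct meminfo mi = { .kva = block, .pa = 0x1000 };
	struct port_mod port;
	port_attach(&port, &mi);
	printf("port DMA at kva=%p pa=0x%llx, cursor now %p\n",
	       (void *)port.dma_kva, (unsigned long long)port.dma_pa, (void *)mi.kva);
	free(block);
	return 0;
}
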
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 6a6661c..82ea4a8 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -567,7 +567,8 @@
 static void ipr_reinit_ipr_cmnd(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -576,19 +577,19 @@
 	ioarcb->ioadl_len = 0;
 	ioarcb->read_ioadl_len = 0;
 
-	if (ipr_cmd->ioa_cfg->sis64)
+	if (ipr_cmd->ioa_cfg->sis64) {
 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
 			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
-	else {
+		ioasa64->u.gata.status = 0;
+	} else {
 		ioarcb->write_ioadl_addr =
 			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+		ioasa->u.gata.status = 0;
 	}
 
-	ioasa->ioasc = 0;
-	ioasa->residual_data_len = 0;
-	ioasa->u.gata.status = 0;
-
+	ioasa->hdr.ioasc = 0;
+	ioasa->hdr.residual_data_len = 0;
 	ipr_cmd->scsi_cmd = NULL;
 	ipr_cmd->qc = NULL;
 	ipr_cmd->sense_buffer[0] = 0;
@@ -768,8 +769,8 @@
 	list_for_each_entry_safe(ipr_cmd, temp, &ioa_cfg->pending_q, queue) {
 		list_del(&ipr_cmd->queue);
 
-		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
-		ipr_cmd->ioasa.ilid = cpu_to_be32(IPR_DRIVER_ILID);
+		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_IOA_WAS_RESET);
+		ipr_cmd->s.ioasa.hdr.ilid = cpu_to_be32(IPR_DRIVER_ILID);
 
 		if (ipr_cmd->scsi_cmd)
 			ipr_cmd->done = ipr_scsi_eh_done;
@@ -1040,7 +1041,7 @@
 		proto = cfgtew->u.cfgte64->proto;
 		res->res_flags = cfgtew->u.cfgte64->res_flags;
 		res->qmodel = IPR_QUEUEING_MODEL64(res);
-		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+		res->type = cfgtew->u.cfgte64->res_type;
 
 		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
 			sizeof(res->res_path));
@@ -1319,7 +1320,7 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	list_del(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -2354,7 +2355,7 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 	u32 fd_ioasc;
 
 	if (ioa_cfg->sis64)
@@ -4509,11 +4510,16 @@
 	}
 
 	ipr_send_blocking_cmd(ipr_cmd, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
-	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
-	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET)
-		memcpy(&res->sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
-		       sizeof(struct ipr_ioasa_gata));
+	if (ipr_is_gata(res) && res->sata_port && ioasc != IPR_IOASC_IOA_WAS_RESET) {
+		if (ipr_cmd->ioa_cfg->sis64)
+			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+			       sizeof(struct ipr_ioasa_gata));
+		else
+			memcpy(&res->sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+			       sizeof(struct ipr_ioasa_gata));
+	}
 
 	LEAVE;
 	return (IPR_IOASC_SENSE_KEY(ioasc) ? -EIO : 0);
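
Throughout the ipr changes, ipr_cmd->ioasa becomes ipr_cmd->s.ioasa / ipr_cmd->s.ioasa64: the 32-bit and 64-bit status areas share a union, the fields common to both live in a hdr sub-struct, and every consumer picks the right member based on ioa_cfg->sis64, as in the GATA copy just above. A reduced sketch of that layout choice; the field names are abbreviated and are not the real ipr definitions:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct ioasa_hdr {		/* fields common to both formats */
	uint32_t ioasc;
	uint16_t ret_stat_len;
	uint32_t residual_data_len;
};

struct gata_status { uint8_t status; uint8_t error; };

struct ioasa32 {		/* legacy layout */
	struct ioasa_hdr   hdr;
	struct gata_status gata;
};

struct ioasa64 {		/* SIS-64 layout: same header, different tail */
	struct ioasa_hdr   hdr;
	uint8_t            res_path[8];
	struct gata_status gata;
};

struct cmd {
	bool sis64;
	union {			/* one buffer, two interpretations */
		struct ioasa32 ioasa;
		struct ioasa64 ioasa64;
	} s;
};

/* Copy the ATA status out of whichever layout this adapter uses. */
static struct gata_status get_gata(const struct cmd *c)
{
	return c->sis64 ? c->s.ioasa64.gata : c->s.ioasa.gata;
}

int main(void)
{
	struct cmd c = { .sis64 = true };
	c.s.ioasa64.hdr.ioasc = 0;		/* header access is format-agnostic */
	c.s.ioasa64.gata.status = 0x50;
	printf("ioasc=%u gata=0x%02x\n",
	       (unsigned)c.s.ioasa64.hdr.ioasc, (unsigned)get_gata(&c).status);
	return 0;
}
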
@@ -4768,7 +4774,7 @@
 	scmd_printk(KERN_ERR, scsi_cmd, "Aborting command: %02X\n",
 		    scsi_cmd->cmnd[0]);
 	ipr_send_blocking_cmd(ipr_cmd, ipr_abort_timeout, IPR_CANCEL_ALL_TIMEOUT);
-	ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	/*
 	 * If the abort task timed out and we sent a bus reset, we will get
@@ -4812,15 +4818,39 @@
 /**
  * ipr_handle_other_interrupt - Handle "other" interrupts
  * @ioa_cfg:	ioa config struct
- * @int_reg:	interrupt register
  *
  * Return value:
  * 	IRQ_NONE / IRQ_HANDLED
  **/
-static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
-					      volatile u32 int_reg)
+static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg)
 {
 	irqreturn_t rc = IRQ_HANDLED;
+	volatile u32 int_reg, int_mask_reg;
+
+	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
+
+	/* If an interrupt on the adapter did not occur, ignore it.
+	 * Or in the case of SIS 64, check for a stage change interrupt.
+	 */
+	if ((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0) {
+		if (ioa_cfg->sis64) {
+			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+				/* clear stage change */
+				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+				list_del(&ioa_cfg->reset_cmd->queue);
+				del_timer(&ioa_cfg->reset_cmd->timer);
+				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+				return IRQ_HANDLED;
+			}
+		}
+
+		return IRQ_NONE;
+	}
 
 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
 		/* Mask the interrupt */
@@ -4881,7 +4911,7 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
 	unsigned long lock_flags = 0;
-	volatile u32 int_reg, int_mask_reg;
+	volatile u32 int_reg;
 	u32 ioasc;
 	u16 cmd_index;
 	int num_hrrq = 0;
@@ -4896,33 +4926,6 @@
 		return IRQ_NONE;
 	}
 
-	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
-	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
-
-	/* If an interrupt on the adapter did not occur, ignore it.
-	 * Or in the case of SIS 64, check for a stage change interrupt.
-	 */
-	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
-		if (ioa_cfg->sis64) {
-			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
-			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
-			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
-
-				/* clear stage change */
-				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
-				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
-				list_del(&ioa_cfg->reset_cmd->queue);
-				del_timer(&ioa_cfg->reset_cmd->timer);
-				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
-				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-				return IRQ_HANDLED;
-			}
-		}
-
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return IRQ_NONE;
-	}
-
 	while (1) {
 		ipr_cmd = NULL;
 
@@ -4940,7 +4943,7 @@
 
 			ipr_cmd = ioa_cfg->ipr_cmnd_list[cmd_index];
 
-			ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+			ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 			ipr_trc_hook(ipr_cmd, IPR_TRACE_FINISH, ioasc);
 
@@ -4962,7 +4965,7 @@
 			/* Clear the PCI interrupt */
 			do {
 				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
-				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
@@ -4977,7 +4980,7 @@
 	}
 
 	if (unlikely(rc == IRQ_NONE))
-		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
+		rc = ipr_handle_other_interrupt(ioa_cfg);
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	return rc;
@@ -5014,6 +5017,10 @@
 
 	ipr_cmd->dma_use_sg = nseg;
 
+	ioarcb->data_transfer_length = cpu_to_be32(length);
+	ioarcb->ioadl_len =
+		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+
 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
@@ -5135,7 +5142,7 @@
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
 		scsi_cmd->result |= (DID_ERROR << 16);
@@ -5166,7 +5173,7 @@
 static void ipr_reinit_ipr_cmnd_for_erp(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 	dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
@@ -5174,8 +5181,8 @@
 	ioarcb->read_data_transfer_length = 0;
 	ioarcb->ioadl_len = 0;
 	ioarcb->read_ioadl_len = 0;
-	ioasa->ioasc = 0;
-	ioasa->residual_data_len = 0;
+	ioasa->hdr.ioasc = 0;
+	ioasa->hdr.residual_data_len = 0;
 
 	if (ipr_cmd->ioa_cfg->sis64)
 		ioarcb->u.sis64_addr_data.data_ioadl_addr =
@@ -5200,7 +5207,7 @@
 static void ipr_erp_request_sense(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_cmd_pkt *cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	if (IPR_IOASC_SENSE_KEY(ioasc) > 0) {
 		ipr_erp_done(ipr_cmd);
@@ -5277,12 +5284,12 @@
 	int i;
 	u16 data_len;
 	u32 ioasc, fd_ioasc;
-	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
 	__be32 *ioasa_data = (__be32 *)ioasa;
 	int error_index;
 
-	ioasc = be32_to_cpu(ioasa->ioasc) & IPR_IOASC_IOASC_MASK;
-	fd_ioasc = be32_to_cpu(ioasa->fd_ioasc) & IPR_IOASC_IOASC_MASK;
+	ioasc = be32_to_cpu(ioasa->hdr.ioasc) & IPR_IOASC_IOASC_MASK;
+	fd_ioasc = be32_to_cpu(ioasa->hdr.fd_ioasc) & IPR_IOASC_IOASC_MASK;
 
 	if (0 == ioasc)
 		return;
@@ -5297,7 +5304,7 @@
 
 	if (ioa_cfg->log_level < IPR_MAX_LOG_LEVEL) {
 		/* Don't log an error if the IOA already logged one */
-		if (ioasa->ilid != 0)
+		if (ioasa->hdr.ilid != 0)
 			return;
 
 		if (!ipr_is_gscsi(res))
@@ -5309,10 +5316,11 @@
 
 	ipr_res_err(ioa_cfg, res, "%s\n", ipr_error_table[error_index].error);
 
-	if (sizeof(struct ipr_ioasa) < be16_to_cpu(ioasa->ret_stat_len))
+	data_len = be16_to_cpu(ioasa->hdr.ret_stat_len);
+	if (ioa_cfg->sis64 && sizeof(struct ipr_ioasa64) < data_len)
+		data_len = sizeof(struct ipr_ioasa64);
+	else if (!ioa_cfg->sis64 && sizeof(struct ipr_ioasa) < data_len)
 		data_len = sizeof(struct ipr_ioasa);
-	else
-		data_len = be16_to_cpu(ioasa->ret_stat_len);
 
 	ipr_err("IOASA Dump:\n");
 
@@ -5338,8 +5346,8 @@
 	u32 failing_lba;
 	u8 *sense_buf = ipr_cmd->scsi_cmd->sense_buffer;
 	struct ipr_resource_entry *res = ipr_cmd->scsi_cmd->device->hostdata;
-	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
-	u32 ioasc = be32_to_cpu(ioasa->ioasc);
+	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+	u32 ioasc = be32_to_cpu(ioasa->hdr.ioasc);
 
 	memset(sense_buf, 0, SCSI_SENSE_BUFFERSIZE);
 
@@ -5382,7 +5390,7 @@
 
 		/* Illegal request */
 		if ((IPR_IOASC_SENSE_KEY(ioasc) == 0x05) &&
-		    (be32_to_cpu(ioasa->ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
+		    (be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_FIELD_POINTER_VALID)) {
 			sense_buf[7] = 10;	/* additional length */
 
 			/* IOARCB was in error */
@@ -5393,10 +5401,10 @@
 
 			sense_buf[16] =
 			    ((IPR_FIELD_POINTER_MASK &
-			      be32_to_cpu(ioasa->ioasc_specific)) >> 8) & 0xff;
+			      be32_to_cpu(ioasa->hdr.ioasc_specific)) >> 8) & 0xff;
 			sense_buf[17] =
 			    (IPR_FIELD_POINTER_MASK &
-			     be32_to_cpu(ioasa->ioasc_specific)) & 0xff;
+			     be32_to_cpu(ioasa->hdr.ioasc_specific)) & 0xff;
 		} else {
 			if (ioasc == IPR_IOASC_MED_DO_NOT_REALLOC) {
 				if (ipr_is_vset_device(res))
@@ -5428,14 +5436,20 @@
  **/
 static int ipr_get_autosense(struct ipr_cmnd *ipr_cmd)
 {
-	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
+	struct ipr_ioasa *ioasa = &ipr_cmd->s.ioasa;
+	struct ipr_ioasa64 *ioasa64 = &ipr_cmd->s.ioasa64;
 
-	if ((be32_to_cpu(ioasa->ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
+	if ((be32_to_cpu(ioasa->hdr.ioasc_specific) & IPR_AUTOSENSE_VALID) == 0)
 		return 0;
 
-	memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
-	       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
-		   SCSI_SENSE_BUFFERSIZE));
+	if (ipr_cmd->ioa_cfg->sis64)
+		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa64->auto_sense.data,
+		       min_t(u16, be16_to_cpu(ioasa64->auto_sense.auto_sense_len),
+			   SCSI_SENSE_BUFFERSIZE));
+	else
+		memcpy(ipr_cmd->scsi_cmd->sense_buffer, ioasa->auto_sense.data,
+		       min_t(u16, be16_to_cpu(ioasa->auto_sense.auto_sense_len),
+			   SCSI_SENSE_BUFFERSIZE));
 	return 1;
 }
 
@@ -5455,7 +5469,7 @@
 {
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_resource_entry *res = scsi_cmd->device->hostdata;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 	u32 masked_ioasc = ioasc & IPR_IOASC_IOASC_MASK;
 
 	if (!res) {
@@ -5547,9 +5561,9 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
-	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->ioasa.residual_data_len));
+	scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
 
 	if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
 		scsi_dma_unmap(ipr_cmd->scsi_cmd);
@@ -5839,19 +5853,23 @@
 	struct ata_queued_cmd *qc = ipr_cmd->qc;
 	struct ipr_sata_port *sata_port = qc->ap->private_data;
 	struct ipr_resource_entry *res = sata_port->res;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
-	memcpy(&sata_port->ioasa, &ipr_cmd->ioasa.u.gata,
-	       sizeof(struct ipr_ioasa_gata));
+	if (ipr_cmd->ioa_cfg->sis64)
+		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa64.u.gata,
+		       sizeof(struct ipr_ioasa_gata));
+	else
+		memcpy(&sata_port->ioasa, &ipr_cmd->s.ioasa.u.gata,
+		       sizeof(struct ipr_ioasa_gata));
 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
 
-	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
+	if (be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
 		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
 
 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
-		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+		qc->err_mask |= __ac_err_mask(sata_port->ioasa.status);
 	else
-		qc->err_mask |= ac_err_mask(ipr_cmd->ioasa.u.gata.status);
+		qc->err_mask |= ac_err_mask(sata_port->ioasa.status);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 	ata_qc_complete(qc);
 }
@@ -6520,7 +6538,7 @@
 static int ipr_reset_cmd_failed(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	dev_err(&ioa_cfg->pdev->dev,
 		"0x%02X failed with IOASC: 0x%08X\n",
@@ -6544,7 +6562,7 @@
 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
 		ipr_cmd->job_step = ipr_set_supported_devs;
@@ -6634,7 +6652,7 @@
  **/
 static int ipr_reset_mode_sense_page24_failed(struct ipr_cmnd *ipr_cmd)
 {
-	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+	u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page28;
@@ -6706,7 +6724,7 @@
 		list_move_tail(&res->queue, &old_res);
 
 	if (ioa_cfg->sis64)
-		entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
+		entries = be16_to_cpu(ioa_cfg->u.cfg_table64->hdr64.num_entries);
 	else
 		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
 
@@ -6792,6 +6810,7 @@
 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 
 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
+	ioarcb->cmd_pkt.cdb[6] = (ioa_cfg->cfg_table_size >> 16) & 0xff;
 	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
 	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
 
@@ -7122,7 +7141,9 @@
 	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
 
 	/* sanity check the stage_time value */
-	if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
+	if (stage_time == 0)
+		stage_time = IPR_IPL_INIT_DEFAULT_STAGE_TIME;
+	else if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
 		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
 	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
 		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
@@ -7165,13 +7186,14 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	volatile u32 int_reg;
+	volatile u64 maskval;
 
 	ENTER;
 	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
 	ipr_init_ioa_mem(ioa_cfg);
 
 	ioa_cfg->allow_interrupts = 1;
-	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
+	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32);
 
 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
@@ -7183,9 +7205,12 @@
 	/* Enable destructive diagnostics on IOA */
 	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
 
-	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
-	if (ioa_cfg->sis64)
-		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+	if (ioa_cfg->sis64) {
+		maskval = IPR_PCII_IPL_STAGE_CHANGE;
+		maskval = (maskval << 32) | IPR_PCII_OPER_INTERRUPTS;
+		writeq(maskval, ioa_cfg->regs.clr_interrupt_mask_reg);
+	} else
+		writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
 
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 
@@ -7332,12 +7357,12 @@
 	rc = pci_restore_state(ioa_cfg->pdev);
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
-		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
 		return IPR_RC_JOB_CONTINUE;
 	}
 
 	if (ipr_set_pcix_cmd_reg(ioa_cfg)) {
-		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
 		return IPR_RC_JOB_CONTINUE;
 	}
 
@@ -7364,7 +7389,7 @@
 		}
 	}
 
-	ENTER;
+	LEAVE;
 	return IPR_RC_JOB_CONTINUE;
 }
 
@@ -7406,7 +7431,7 @@
 
 	if (rc != PCIBIOS_SUCCESSFUL) {
 		pci_unblock_user_cfg_access(ipr_cmd->ioa_cfg->pdev);
-		ipr_cmd->ioasa.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
+		ipr_cmd->s.ioasa.hdr.ioasc = cpu_to_be32(IPR_IOASC_PCI_ACCESS_ERROR);
 		rc = IPR_RC_JOB_CONTINUE;
 	} else {
 		ipr_cmd->job_step = ipr_reset_bist_done;
@@ -7665,7 +7690,7 @@
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 
 	do {
-		ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
+		ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
 
 		if (ioa_cfg->reset_cmd != ipr_cmd) {
 			/*
@@ -8048,13 +8073,13 @@
 			ioarcb->u.sis64_addr_data.data_ioadl_addr =
 				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
 			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
-				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, s.ioasa64));
 		} else {
 			ioarcb->write_ioadl_addr =
 				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
 			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
 			ioarcb->ioasa_host_pci_addr =
-				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, s.ioasa));
 		}
 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
 		ipr_cmd->cmd_index = i;
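
The sis64 branch in the interrupt-unmask hunk above folds the stage-change bit and the operational-interrupt bits into one 64 bit value so a single writeq() replaces two 32 bit writes. A minimal userspace sketch of that composition; the constants are placeholders, not the real IPR_PCII_* values:

#include <stdint.h>
#include <stdio.h>

#define STAGE_CHANGE_BIT	0x20000000u	/* placeholder, stands in for IPR_PCII_IPL_STAGE_CHANGE */
#define OPER_INTERRUPTS		0x80000000u	/* placeholder, stands in for IPR_PCII_OPER_INTERRUPTS */

int main(void)
{
	uint64_t maskval = STAGE_CHANGE_BIT;

	/* upper 32 bits clear the stage-change mask, lower 32 bits the operational interrupts */
	maskval = (maskval << 32) | OPER_INTERRUPTS;

	printf("64 bit clear mask: 0x%016llx\n", (unsigned long long)maskval);
	return 0;
}
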
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 4c267b5..9ecd225 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -244,6 +244,7 @@
 #define IPR_RUNTIME_RESET				0x40000000
 
 #define IPR_IPL_INIT_MIN_STAGE_TIME			5
+#define IPR_IPL_INIT_DEFAULT_STAGE_TIME                 15
 #define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
 #define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
 #define IPR_IPL_INIT_STAGE_MASK				0xff000000
@@ -613,7 +614,7 @@
 	__be32 data[SCSI_SENSE_BUFFERSIZE/sizeof(__be32)];
 };
 
-struct ipr_ioasa {
+struct ipr_ioasa_hdr {
 	__be32 ioasc;
 #define IPR_IOASC_SENSE_KEY(ioasc) ((ioasc) >> 24)
 #define IPR_IOASC_SENSE_CODE(ioasc) (((ioasc) & 0x00ff0000) >> 16)
@@ -645,6 +646,25 @@
 #define IPR_FIELD_POINTER_VALID		(0x80000000 >> 8)
 #define IPR_FIELD_POINTER_MASK		0x0000ffff
 
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa {
+	struct ipr_ioasa_hdr hdr;
+
+	union {
+		struct ipr_ioasa_vset vset;
+		struct ipr_ioasa_af_dasd dasd;
+		struct ipr_ioasa_gpdd gpdd;
+		struct ipr_ioasa_gata gata;
+	} u;
+
+	struct ipr_auto_sense auto_sense;
+}__attribute__((packed, aligned (4)));
+
+struct ipr_ioasa64 {
+	struct ipr_ioasa_hdr hdr;
+	u8 fd_res_path[8];
+
 	union {
 		struct ipr_ioasa_vset vset;
 		struct ipr_ioasa_af_dasd dasd;
@@ -804,7 +824,7 @@
 }__attribute__((packed, aligned (4)));
 
 struct ipr_hostrcb_type_ff_error {
-	__be32 ioa_data[502];
+	__be32 ioa_data[758];
 }__attribute__((packed, aligned (4)));
 
 struct ipr_hostrcb_type_01_error {
@@ -1181,7 +1201,7 @@
 	u8 flags;
 	__be16 res_flags;
 
-	__be32 type;
+	u8 type;
 
 	u8 qmodel;
 	struct ipr_std_inq_data std_inq_data;
@@ -1464,7 +1484,10 @@
 		struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
 		struct ipr_ata64_ioadl ata_ioadl;
 	} i;
-	struct ipr_ioasa ioasa;
+	union {
+		struct ipr_ioasa ioasa;
+		struct ipr_ioasa64 ioasa64;
+	} s;
 	struct list_head queue;
 	struct scsi_cmnd *scsi_cmd;
 	struct ata_queued_cmd *qc;
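
The ipr.h hunks above split the common completion fields into struct ipr_ioasa_hdr and let struct ipr_cmnd keep either status-area layout in the new s union. A simplified standalone sketch of that arrangement; the field names, sizes, and the example IOASC value are placeholders, not the driver's real definitions:

#include <stdint.h>
#include <stdio.h>

struct ioasa_hdr {
	uint32_t ioasc;
	uint32_t ioasc_specific;
};

struct ioasa32 {
	struct ioasa_hdr hdr;
	uint32_t body[16];
};

struct ioasa64 {
	struct ioasa_hdr hdr;
	uint8_t fd_res_path[8];		/* extra field only the 64 bit layout carries */
	uint32_t body[16];
};

union ioasa_slot {			/* one slot big enough for either layout */
	struct ioasa32 ioasa;
	struct ioasa64 ioasa64;
};

int main(void)
{
	union ioasa_slot s = { .ioasa64 = { .hdr = { .ioasc = 0x11223344 } } };

	/* Both layouts start with the same header, so common fields read the same way. */
	printf("ioasc = 0x%08x, slot size = %zu bytes\n",
	       s.ioasa.hdr.ioasc, sizeof(s));
	return 0;
}

Since both union members begin at the same offset, the offsetof() switch in the ipr.c hunk changes which layout the code documents rather than where the status area sits inside the command block.
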
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index bf55d30..fec47de 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -601,10 +601,8 @@
 	set_bit(ISCSI_SUSPEND_BIT, &conn->suspend_rx);
 	write_unlock_bh(&tcp_sw_conn->sock->sk->sk_callback_lock);
 
-	if (sk_sleep(sock->sk)) {
-		sock->sk->sk_err = EIO;
-		wake_up_interruptible(sk_sleep(sock->sk));
-	}
+	sock->sk->sk_err = EIO;
+	wake_up_interruptible(sk_sleep(sock->sk));
 
 	iscsi_conn_stop(cls_conn, flag);
 	iscsi_sw_tcp_release_conn(conn);
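
The scsi_scan.c change that follows closes the reap race by moving the STARGET_DEL transition inside the same host_lock hold that decides whether this was the last reference. A minimal pthreads sketch of that pattern, with made-up type and field names rather than the SCSI midlayer's:

#include <pthread.h>
#include <stdbool.h>

struct target {
	pthread_mutex_t lock;
	int refcount;
	int ndevices;
	enum { CREATED, RUNNING, DEL } state;
};

/* Returns true when the caller should go on to tear the target down. */
static bool target_reap(struct target *t)
{
	bool last = false;

	pthread_mutex_lock(&t->lock);
	if (--t->refcount == 0 && t->ndevices == 0) {
		last = true;
		t->state = DEL;		/* mark it gone before the lock drops */
	}
	pthread_mutex_unlock(&t->lock);

	return last;			/* the teardown itself runs unlocked */
}

int main(void)
{
	struct target t = { PTHREAD_MUTEX_INITIALIZER, 1, 0, RUNNING };

	return target_reap(&t) ? 0 : 1;
}

Deciding under the lock but marking the state only afterwards, as the old code did, leaves a window where another CPU can still observe the target in its pre-DEL state.
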
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 9798c2c..1c027a9 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -492,19 +492,20 @@
 	struct Scsi_Host *shost = dev_to_shost(starget->dev.parent);
 	unsigned long flags;
 	enum scsi_target_state state;
-	int empty;
+	int empty = 0;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	state = starget->state;
-	empty = --starget->reap_ref == 0 &&
-		list_empty(&starget->devices) ? 1 : 0;
+	if (--starget->reap_ref == 0 && list_empty(&starget->devices)) {
+		empty = 1;
+		starget->state = STARGET_DEL;
+	}
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	if (!empty)
 		return;
 
 	BUG_ON(state == STARGET_DEL);
-	starget->state = STARGET_DEL;
 	if (state == STARGET_CREATED)
 		scsi_target_destroy(starget);
 	else