Merge tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi

Pull SCSI updates from James Bottomley:
 "This patch set consists of the usual driver updates (megaraid_sas,
  arcmsr, be2iscsi, lpfc, mpt2sas, mpt3sas, qla2xxx, ufs) plus several
  assorted fixes and miscellaneous updates (including the
  pci_enable_msix_range() changes that have been pending for a while)"

* tag 'scsi-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi: (202 commits)
  scsi: add a CONFIG_SCSI_MQ_DEFAULT option
  ufs: definitions for phy interface
  ufs: tune bkops while power management events
  ufs: Add support for clock scaling using devfreq framework
  ufs: Add freq-table-hz property for UFS device
  ufs: Add support for clock gating
  ufs: refactor configuring power mode
  ufs: add UFS power management support
  ufs: introduce well known logical unit in ufs
  ufs: manually add well known logical units
  ufs: Active Power Mode - configuring bActiveICCLevel
  ufs: improve init sequence
  ufs: refactor query descriptor API support
  ufs: add voting support for host controller power
  ufs: Add clock initialization support
  ufs: Add regulator enable support
  ufs: Allow vendor specific initialization
  scsi: don't add scsi_device if it's already visible
  scsi: fix the type for well known LUs
  scsi: fix comment in struct Scsi_Host definition
  ...
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index 20468b2..5357919 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -8,9 +8,50 @@
 - interrupts        : <interrupt mapping for UFS host controller IRQ>
 - reg               : <registers mapping>
 
+Optional properties:
+- vdd-hba-supply        : phandle to UFS host controller supply regulator node
+- vcc-supply            : phandle to VCC supply regulator node
+- vccq-supply           : phandle to VCCQ supply regulator node
+- vccq2-supply          : phandle to VCCQ2 supply regulator node
+- vcc-supply-1p8        : For embedded UFS devices, the valid VCC range is
+                          1.7-1.95V or 2.7-3.6V. When this boolean property is
+                          set, the low voltage range of 1.7-1.95V is used. Note
+                          that for external UFS cards this property is invalid
+                          and the valid VCC range is always 2.7-3.6V.
+- vcc-max-microamp      : specifies max. load that can be drawn from vcc supply
+- vccq-max-microamp     : specifies max. load that can be drawn from vccq supply
+- vccq2-max-microamp    : specifies max. load that can be drawn from vccq2 supply
+- <name>-fixed-regulator : boolean property specifying that <name>-supply is a fixed regulator
+
+- clocks                : List of phandle and clock specifier pairs
+- clock-names           : List of clock input name strings sorted in the same
+                          order as the clocks property.
+- freq-table-hz         : Array of <min max> operating frequencies stored in the
+                          same order as the clocks property. If this property is
+                          not defined, or a value in the array is "0", then it is
+                          assumed that the frequency is set by the parent clock
+                          or a fixed rate clock source.
+
+Note: If the above properties are not defined, it can be assumed that the
+supply regulators or clocks are always on.
+
 Example:
 	ufshc@0xfc598000 {
 		compatible = "jedec,ufs-1.1";
 		reg = <0xfc598000 0x800>;
 		interrupts = <0 28 0>;
+
+		vdd-hba-supply = <&xxx_reg0>;
+		vdd-hba-fixed-regulator;
+		vcc-supply = <&xxx_reg1>;
+		vcc-supply-1p8;
+		vccq-supply = <&xxx_reg2>;
+		vccq2-supply = <&xxx_reg3>;
+		vcc-max-microamp = <500000>;
+		vccq-max-microamp = <200000>;
+		vccq2-max-microamp = <200000>;
+
+		clocks = <&core 0>, <&ref 0>, <&iface 0>;
+		clock-names = "core_clk", "ref_clk", "iface_clk";
+		freq-table-hz = <100000000 200000000>, <0 0>, <0 0>;
 	};
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 91ba58e..18b5709 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,17 @@
+Release Date    : Thu. Jun 19, 2014 17:00:00 PST 2014 -
+			(email-id:megaraidlinux@lsi.com)
+			Adam Radford
+			Kashyap Desai
+			Sumit Saxena
+			Uday Lingala
+Current Version : 06.803.02.00-rc1
+Old Version     : 06.803.01.00-rc1
+    1. Fix reset_mutex leak in megasas_reset_fusion().
+    2. Remove unused variables in megasas_instance.
+    3. Fix LD/VF affiliation parsing.
+    4. Add missing initial call to megasas_get_ld_vf_affiliation().
+    5. Version and Changelog update.
+-------------------------------------------------------------------------------
 Release Date    : Mon. Mar 10, 2014 17:00:00 PST 2014 -
 			(email-id:megaraidlinux@lsi.com)
 			Adam Radford
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a896d94..187f836 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1400,7 +1400,6 @@
  *	@vendor: pci vendor id
  *	@device: pci device id
  *	@revision: pci revision id
- *	@prod_name: string returned
  *
  *	Returns product string displayed when driver loads,
  *	in /proc/mpt/summary and /sysfs/class/scsi_host/host<X>/version_product
@@ -3172,12 +3171,7 @@
 			facts->FWImageSize = le32_to_cpu(facts->FWImageSize);
 		}
 
-		sz = facts->FWImageSize;
-		if ( sz & 0x01 )
-			sz += 1;
-		if ( sz & 0x02 )
-			sz += 2;
-		facts->FWImageSize = sz;
+		facts->FWImageSize = ALIGN(facts->FWImageSize, 4);
 
 		if (!facts->RequestFrameSize) {
 			/*  Something is wrong!  */
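
Note: this hunk, and the identical one in mptctl.c below, replace the same
open-coded two-step rounding with the kernel's ALIGN() macro. A minimal sketch
of the equivalence (illustrative only, not driver code):

	#include <linux/kernel.h>	/* ALIGN() */

	/* the removed two-step rounding, equal to ALIGN(sz, 4) */
	static u32 old_round4(u32 sz)
	{
		if (sz & 0x01)		/* round up to even */
			sz += 1;
		if (sz & 0x02)		/* then up to a multiple of 4 */
			sz += 2;
		return sz;		/* e.g. 5 -> 8, 8 -> 8 */
	}

ALIGN(sz, 4) computes (sz + 3) & ~3, so both forms round sz up to the next
multiple of 4.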
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index b0a892a..70bb753 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1741,12 +1741,7 @@
 
 	/* Allocate memory for the new FW image
 	 */
-	newFwSize = karg.newImageSize;
-
-	if (newFwSize & 0x01)
-		newFwSize += 1;
-	if (newFwSize & 0x02)
-		newFwSize += 2;
+	newFwSize = ALIGN(karg.newImageSize, 4);
 
 	mpt_alloc_fw_memory(ioc, newFwSize);
 	if (ioc->cached_fw == NULL)
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index 787933d..613231c 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -1419,6 +1419,11 @@
 		goto out_mptspi_probe;
         }
 
+	/* VMware emulation doesn't properly implement WRITE_SAME
+	 */
+	if (pdev->subsystem_vendor == 0x15AD)
+		sh->no_write_same = 1;
+
 	spin_lock_irqsave(&ioc->FreeQlock, flags);
 
 	/* Attach the SCSI Host to the IOC structure
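
Note: 0x15AD is VMware's PCI vendor ID. A sketch of the same check written
with the symbolic constant (assuming PCI_VENDOR_ID_VMWARE from
<linux/pci_ids.h>, which is defined as 0x15ad):

	#include <linux/pci_ids.h>	/* PCI_VENDOR_ID_VMWARE */

	if (pdev->subsystem_vendor == PCI_VENDOR_ID_VMWARE)
		sh->no_write_same = 1;	/* emulated HBA: avoid WRITE SAME */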
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index bd85fb4..e85e64a 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -45,6 +45,17 @@
 	default	n
 	depends on NET
 
+config SCSI_MQ_DEFAULT
+	bool "SCSI: use blk-mq I/O path by default"
+	depends on SCSI
+	---help---
+	  This option enables the new blk-mq based I/O path for SCSI
+	  devices by default.  With the option, the scsi_mod.use_blk_mq
+	  module/boot option defaults to Y; without it, to N.  It can
+	  still be overridden either way.
+
+	  If unsure, say N.
+
 config SCSI_PROC_FS
 	bool "legacy /proc/scsi/ support"
 	depends on SCSI && PROC_FS
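
Note: the new SCSI_MQ_DEFAULT entry above only changes the default; either I/O
path can still be selected at runtime. A usage sketch, assuming the option
spelling from the help text:

	scsi_mod.use_blk_mq=N	(kernel command line: force the legacy path)
	scsi_mod.use_blk_mq=Y	(kernel command line: force blk-mq)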
diff --git a/drivers/scsi/aacraid/linit.c b/drivers/scsi/aacraid/linit.c
index 63f576c..a759cb2 100644
--- a/drivers/scsi/aacraid/linit.c
+++ b/drivers/scsi/aacraid/linit.c
@@ -1152,6 +1152,7 @@
 	shost->irq = pdev->irq;
 	shost->unique_id = unique_id;
 	shost->max_cmd_len = 16;
+	shost->use_cmd_list = 1;
 
 	aac = (struct aac_dev *)shost->hostdata;
 	aac->base_start = pci_resource_start(pdev, 0);
diff --git a/drivers/scsi/arcmsr/arcmsr.h b/drivers/scsi/arcmsr/arcmsr.h
index 77b26f5..3bcaaac 100644
--- a/drivers/scsi/arcmsr/arcmsr.h
+++ b/drivers/scsi/arcmsr/arcmsr.h
@@ -45,13 +45,14 @@
 #include <linux/interrupt.h>
 struct device_attribute;
 /*The limit of outstanding scsi command that firmware can handle*/
-#define ARCMSR_MAX_OUTSTANDING_CMD						256
 #ifdef CONFIG_XEN
 	#define ARCMSR_MAX_FREECCB_NUM	160
+#define ARCMSR_MAX_OUTSTANDING_CMD	155
 #else
 	#define ARCMSR_MAX_FREECCB_NUM	320
+#define ARCMSR_MAX_OUTSTANDING_CMD	255
 #endif
-#define ARCMSR_DRIVER_VERSION		     "Driver Version 1.20.00.15 2010/08/05"
+#define ARCMSR_DRIVER_VERSION		"v1.30.00.04-20140919"
 #define ARCMSR_SCSI_INITIATOR_ID						255
 #define ARCMSR_MAX_XFER_SECTORS							512
 #define ARCMSR_MAX_XFER_SECTORS_B						4096
@@ -62,11 +63,17 @@
 #define ARCMSR_MAX_QBUFFER							4096
 #define ARCMSR_DEFAULT_SG_ENTRIES						38
 #define ARCMSR_MAX_HBB_POSTQUEUE						264
+#define ARCMSR_MAX_ARC1214_POSTQUEUE	256
+#define ARCMSR_MAX_ARC1214_DONEQUEUE	257
 #define ARCMSR_MAX_XFER_LEN							0x26000 /* 152K */
 #define ARCMSR_CDB_SG_PAGE_LENGTH						256 
+#define ARCMST_NUM_MSIX_VECTORS		4
 #ifndef PCI_DEVICE_ID_ARECA_1880
 #define PCI_DEVICE_ID_ARECA_1880 0x1880
  #endif
+#ifndef PCI_DEVICE_ID_ARECA_1214
+	#define PCI_DEVICE_ID_ARECA_1214	0x1214
+#endif
 /*
 **********************************************************************************
 **
@@ -100,10 +107,11 @@
 **        IOP Message Transfer Data for user space
 *******************************************************************************
 */
+#define	ARCMSR_API_DATA_BUFLEN	1032
 struct CMD_MESSAGE_FIELD
 {
     struct CMD_MESSAGE			cmdmessage;
-    uint8_t				messagedatabuffer[1032];
+    uint8_t				messagedatabuffer[ARCMSR_API_DATA_BUFLEN];
 };
 /* IOP message transfer */
 #define ARCMSR_MESSAGE_FAIL			0x0001
@@ -337,6 +345,56 @@
 #define ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK			0x80000000
 /*
 *******************************************************************************
+**                SPEC. for Areca Type D adapter
+*******************************************************************************
+*/
+#define ARCMSR_ARC1214_CHIP_ID				0x00004
+#define ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION		0x00008
+#define ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK		0x00034
+#define ARCMSR_ARC1214_SAMPLE_RESET			0x00100
+#define ARCMSR_ARC1214_RESET_REQUEST			0x00108
+#define ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS		0x00200
+#define ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE		0x0020C
+#define ARCMSR_ARC1214_INBOUND_MESSAGE0			0x00400
+#define ARCMSR_ARC1214_INBOUND_MESSAGE1			0x00404
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE0		0x00420
+#define ARCMSR_ARC1214_OUTBOUND_MESSAGE1		0x00424
+#define ARCMSR_ARC1214_INBOUND_DOORBELL			0x00460
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL		0x00480
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE		0x00484
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW		0x01000
+#define ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH		0x01004
+#define ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER	0x01018
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW		0x01060
+#define ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH		0x01064
+#define ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER	0x0106C
+#define ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER	0x01070
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE		0x01088
+#define ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE	0x0108C
+#define ARCMSR_ARC1214_MESSAGE_WBUFFER			0x02000
+#define ARCMSR_ARC1214_MESSAGE_RBUFFER			0x02100
+#define ARCMSR_ARC1214_MESSAGE_RWBUFFER			0x02200
+/* Host Interrupt Mask */
+#define ARCMSR_ARC1214_ALL_INT_ENABLE			0x00001010
+#define ARCMSR_ARC1214_ALL_INT_DISABLE			0x00000000
+/* Host Interrupt Status */
+#define ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR		0x00001000
+#define ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR		0x00000010
+/* DoorBell*/
+#define ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY		0x00000001
+#define ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ		0x00000002
+/*inbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK		0x00000001
+/*outbound DATA WRITE isr door bell clear*/
+#define ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK		0x00000002
+/*outbound message 0 ready*/
+#define ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE		0x02000000
+/*outbound message cmd isr door bell clear*/
+/*ARCMSR_HBAMU_MESSAGE_FIRMWARE_OK*/
+#define ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK		0x80000000
+#define ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR	0x00000001
+/*
+*******************************************************************************
 **    ARECA SCSI COMMAND DESCRIPTOR BLOCK size 0x1F8 (504)
 *******************************************************************************
 */
@@ -357,7 +415,7 @@
 #define ARCMSR_CDB_FLAG_ORDEREDQ           0x10
 
 	uint8_t							msgPages;
-	uint32_t						Context;
+	uint32_t						msgContext;
 	uint32_t						DataLength;
 	uint8_t							Cdb[16];
 	uint8_t							DeviceStatus;
@@ -494,6 +552,56 @@
 	uint32_t	msgcode_rwbuffer[256];			/*2200 23FF*/
 };
 /*
+*********************************************************************
+**     Messaging Unit (MU) of Type D processor
+*********************************************************************
+*/
+struct InBound_SRB {
+	uint32_t addressLow; /* pointer to SRB block */
+	uint32_t addressHigh;
+	uint32_t length; /* in DWORDs */
+	uint32_t reserved0;
+};
+
+struct OutBound_SRB {
+	uint32_t addressLow; /* pointer to SRB block */
+	uint32_t addressHigh;
+};
+
+struct MessageUnit_D {
+	struct InBound_SRB	post_qbuffer[ARCMSR_MAX_ARC1214_POSTQUEUE];
+	volatile struct OutBound_SRB
+				done_qbuffer[ARCMSR_MAX_ARC1214_DONEQUEUE];
+	u16 postq_index;
+	volatile u16 doneq_index;
+	u32 __iomem *chip_id;			/* 0x00004 */
+	u32 __iomem *cpu_mem_config;		/* 0x00008 */
+	u32 __iomem *i2o_host_interrupt_mask;	/* 0x00034 */
+	u32 __iomem *sample_at_reset;		/* 0x00100 */
+	u32 __iomem *reset_request;		/* 0x00108 */
+	u32 __iomem *host_int_status;		/* 0x00200 */
+	u32 __iomem *pcief0_int_enable;		/* 0x0020C */
+	u32 __iomem *inbound_msgaddr0;		/* 0x00400 */
+	u32 __iomem *inbound_msgaddr1;		/* 0x00404 */
+	u32 __iomem *outbound_msgaddr0;		/* 0x00420 */
+	u32 __iomem *outbound_msgaddr1;		/* 0x00424 */
+	u32 __iomem *inbound_doorbell;		/* 0x00460 */
+	u32 __iomem *outbound_doorbell;		/* 0x00480 */
+	u32 __iomem *outbound_doorbell_enable;	/* 0x00484 */
+	u32 __iomem *inboundlist_base_low;	/* 0x01000 */
+	u32 __iomem *inboundlist_base_high;	/* 0x01004 */
+	u32 __iomem *inboundlist_write_pointer;	/* 0x01018 */
+	u32 __iomem *outboundlist_base_low;	/* 0x01060 */
+	u32 __iomem *outboundlist_base_high;	/* 0x01064 */
+	u32 __iomem *outboundlist_copy_pointer;	/* 0x0106C */
+	u32 __iomem *outboundlist_read_pointer;	/* 0x01070 0x01072 */
+	u32 __iomem *outboundlist_interrupt_cause;	/* 0x1088 */
+	u32 __iomem *outboundlist_interrupt_enable;	/* 0x108C */
+	u32 __iomem *message_wbuffer;		/* 0x2000 */
+	u32 __iomem *message_rbuffer;		/* 0x2100 */
+	u32 __iomem *msgcode_rwbuffer;		/* 0x2200 */
+};
+/*
 *******************************************************************************
 **                 Adapter Control Block
 *******************************************************************************
@@ -505,19 +613,26 @@
 	#define ACB_ADAPTER_TYPE_B            0x00000002	/* hbb M IOP */
 	#define ACB_ADAPTER_TYPE_C            0x00000004	/* hbc P IOP */
 	#define ACB_ADAPTER_TYPE_D            0x00000008	/* hbd A IOP */
+	u32				roundup_ccbsize;
 	struct pci_dev *		pdev;
 	struct Scsi_Host *		host;
 	unsigned long			vir2phy_offset;
+	struct msix_entry	entries[ARCMST_NUM_MSIX_VECTORS];
 	/* Offset is used in making arc cdb physical to virtual calculations */
 	uint32_t			outbound_int_enable;
 	uint32_t			cdb_phyaddr_hi32;
 	uint32_t			reg_mu_acc_handle0;
 	spinlock_t                      			eh_lock;
 	spinlock_t                      			ccblist_lock;
+	spinlock_t			postq_lock;
+	spinlock_t			doneq_lock;
+	spinlock_t			rqbuffer_lock;
+	spinlock_t			wqbuffer_lock;
 	union {
 		struct MessageUnit_A __iomem *pmuA;
 		struct MessageUnit_B 	*pmuB;
 		struct MessageUnit_C __iomem *pmuC;
+		struct MessageUnit_D 	*pmuD;
 	};
 	/* message unit ATU inbound base address0 */
 	void __iomem *mem_base0;
@@ -544,6 +659,8 @@
 	/* iop init */
 	#define ACB_F_ABORT				0x0200
 	#define ACB_F_FIRMWARE_TRAP           		0x0400
+	#define ACB_F_MSI_ENABLED		0x1000
+	#define ACB_F_MSIX_ENABLED		0x2000
 	struct CommandControlBlock *			pccb_pool[ARCMSR_MAX_FREECCB_NUM];
 	/* used for memory free */
 	struct list_head		ccb_free_list;
@@ -557,19 +674,20 @@
 	/* dma_coherent used for memory free */
 	dma_addr_t			dma_coherent_handle;
 	/* dma_coherent_handle used for memory free */
-	dma_addr_t				dma_coherent_handle_hbb_mu;
+	dma_addr_t				dma_coherent_handle2;
+	void				*dma_coherent2;
 	unsigned int				uncache_size;
 	uint8_t				rqbuffer[ARCMSR_MAX_QBUFFER];
 	/* data collection buffer for read from 80331 */
-	int32_t				rqbuf_firstindex;
+	int32_t				rqbuf_getIndex;
 	/* first of read buffer  */
-	int32_t				rqbuf_lastindex;
+	int32_t				rqbuf_putIndex;
 	/* last of read buffer   */
 	uint8_t				wqbuffer[ARCMSR_MAX_QBUFFER];
 	/* data collection buffer for write to 80331  */
-	int32_t				wqbuf_firstindex;
+	int32_t				wqbuf_getIndex;
 	/* first of write buffer */
-	int32_t				wqbuf_lastindex;
+	int32_t				wqbuf_putIndex;
 	/* last of write buffer  */
 	uint8_t				devstate[ARCMSR_MAX_TARGETID][ARCMSR_MAX_TARGETLUN];
 	/* id0 ..... id15, lun0...lun7 */
@@ -594,6 +712,8 @@
 				#define	FW_DEADLOCK	0x0010
 	atomic_t 			rq_map_token;
 	atomic_t			ante_token_value;
+	uint32_t	maxOutstanding;
+	int		msix_vector_count;
 };/* HW_DEVICE_EXTENSION */
 /*
 *******************************************************************************
@@ -606,7 +726,7 @@
 	struct list_head		list;				/*x32: 8byte, x64: 16byte*/
 	struct scsi_cmnd		*pcmd;				/*8 bytes pointer of linux scsi command */
 	struct AdapterControlBlock	*acb;				/*x32: 4byte, x64: 8byte*/
-	uint32_t			cdb_phyaddr_pattern;		/*x32: 4byte, x64: 4byte*/
+	uint32_t			cdb_phyaddr;			/*x32: 4byte, x64: 4byte*/
 	uint32_t			arc_cdb_size;			/*x32:4byte,x64:4byte*/
 	uint16_t			ccb_flags;			/*x32: 2byte, x64: 2byte*/
 	#define			CCB_FLAG_READ			0x0000
@@ -684,8 +804,10 @@
 #define     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE               0x01
 #define     ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE                    0x1F
 
-extern void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *);
-extern void arcmsr_iop_message_read(struct AdapterControlBlock *);
+extern void arcmsr_write_ioctldata2iop(struct AdapterControlBlock *);
+extern uint32_t arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *,
+	struct QBUFFER __iomem *);
+extern void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *);
 extern struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *);
 extern struct device_attribute *arcmsr_host_attrs[];
 extern int arcmsr_alloc_sysfs_attr(struct AdapterControlBlock *);
diff --git a/drivers/scsi/arcmsr/arcmsr_attr.c b/drivers/scsi/arcmsr/arcmsr_attr.c
index acdae33..9c86481 100644
--- a/drivers/scsi/arcmsr/arcmsr_attr.c
+++ b/drivers/scsi/arcmsr/arcmsr_attr.c
@@ -50,6 +50,7 @@
 #include <linux/errno.h>
 #include <linux/delay.h>
 #include <linux/pci.h>
+#include <linux/circ_buf.h>
 
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
@@ -68,42 +69,42 @@
 	struct device *dev = container_of(kobj,struct device,kobj);
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
-	uint8_t *pQbuffer,*ptmpQbuffer;
+	uint8_t *ptmpQbuffer;
 	int32_t allxfer_len = 0;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
 	/* do message unit read. */
 	ptmpQbuffer = (uint8_t *)buf;
-	while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
-		&& (allxfer_len < 1031)) {
-		pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
-		memcpy(ptmpQbuffer, pQbuffer, 1);
-		acb->rqbuf_firstindex++;
-		acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-		ptmpQbuffer++;
-		allxfer_len++;
+	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+	if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+		unsigned int tail = acb->rqbuf_getIndex;
+		unsigned int head = acb->rqbuf_putIndex;
+		unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
+		allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+		if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+			allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+		if (allxfer_len <= cnt_to_end)
+			memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+		else {
+			memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+			memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+		}
+		acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
 	}
 	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
 		struct QBUFFER __iomem *prbuffer;
-		uint8_t __iomem *iop_data;
-		int32_t iop_len;
-
 		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 		prbuffer = arcmsr_get_iop_rqbuffer(acb);
-		iop_data = prbuffer->data;
-		iop_len = readl(&prbuffer->data_len);
-		while (iop_len > 0) {
-			acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
-			acb->rqbuf_lastindex++;
-			acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			iop_len--;
-		}
-		arcmsr_iop_message_read(acb);
+		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 	}
-	return (allxfer_len);
+	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+	return allxfer_len;
 }
 
 static ssize_t arcmsr_sysfs_iop_message_write(struct file *filp,
@@ -115,43 +116,42 @@
 	struct device *dev = container_of(kobj,struct device,kobj);
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
-	int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+	int32_t user_len, cnt2end;
 	uint8_t *pQbuffer, *ptmpuserbuffer;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
-	if (count > 1032)
+	if (count > ARCMSR_API_DATA_BUFLEN)
 		return -EINVAL;
 	/* do message unit write. */
 	ptmpuserbuffer = (uint8_t *)buf;
 	user_len = (int32_t)count;
-	wqbuf_lastindex = acb->wqbuf_lastindex;
-	wqbuf_firstindex = acb->wqbuf_firstindex;
-	if (wqbuf_lastindex != wqbuf_firstindex) {
-		arcmsr_post_ioctldata2iop(acb);
+	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+	if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
+		arcmsr_write_ioctldata2iop(acb);
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
 		return 0;	/*need retry*/
 	} else {
-		my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
-				&(ARCMSR_MAX_QBUFFER - 1);
-		if (my_empty_len >= user_len) {
-			while (user_len > 0) {
-				pQbuffer =
-				&acb->wqbuffer[acb->wqbuf_lastindex];
-				memcpy(pQbuffer, ptmpuserbuffer, 1);
-				acb->wqbuf_lastindex++;
-				acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-				ptmpuserbuffer++;
-				user_len--;
-			}
-			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
-				acb->acb_flags &=
-					~ACB_F_MESSAGE_WQBUFFER_CLEARED;
-				arcmsr_post_ioctldata2iop(acb);
-			}
-			return count;
-		} else {
-			return 0;	/*need retry*/
+		pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+		cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+		if (user_len > cnt2end) {
+			memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+			ptmpuserbuffer += cnt2end;
+			user_len -= cnt2end;
+			acb->wqbuf_putIndex = 0;
+			pQbuffer = acb->wqbuffer;
 		}
+		memcpy(pQbuffer, ptmpuserbuffer, user_len);
+		acb->wqbuf_putIndex += user_len;
+		acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+		if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+			acb->acb_flags &=
+				~ACB_F_MESSAGE_WQBUFFER_CLEARED;
+			arcmsr_write_ioctldata2iop(acb);
+		}
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		return count;
 	}
 }
 
@@ -165,22 +165,24 @@
 	struct Scsi_Host *host = class_to_shost(dev);
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
 	uint8_t *pQbuffer;
+	unsigned long flags;
 
 	if (!capable(CAP_SYS_ADMIN))
 		return -EACCES;
 
-	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-		acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-		arcmsr_iop_message_read(acb);
-	}
+	arcmsr_clear_iop2drv_rqueue_buffer(acb);
 	acb->acb_flags |=
 		(ACB_F_MESSAGE_WQBUFFER_CLEARED
 		| ACB_F_MESSAGE_RQBUFFER_CLEARED
 		| ACB_F_MESSAGE_WQBUFFER_READED);
-	acb->rqbuf_firstindex = 0;
-	acb->rqbuf_lastindex = 0;
-	acb->wqbuf_firstindex = 0;
-	acb->wqbuf_lastindex = 0;
+	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+	acb->rqbuf_getIndex = 0;
+	acb->rqbuf_putIndex = 0;
+	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+	acb->wqbuf_getIndex = 0;
+	acb->wqbuf_putIndex = 0;
+	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
 	pQbuffer = acb->rqbuffer;
 	memset(pQbuffer, 0, sizeof (struct QBUFFER));
 	pQbuffer = acb->wqbuffer;
@@ -193,7 +195,7 @@
 		.name = "mu_read",
 		.mode = S_IRUSR ,
 	},
-	.size = 1032,
+	.size = ARCMSR_API_DATA_BUFLEN,
 	.read = arcmsr_sysfs_iop_message_read,
 };
 
@@ -202,7 +204,7 @@
 		.name = "mu_write",
 		.mode = S_IWUSR,
 	},
-	.size = 1032,
+	.size = ARCMSR_API_DATA_BUFLEN,
 	.write = arcmsr_sysfs_iop_message_write,
 };
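
Note: the rewritten read and write paths above follow the standard
<linux/circ_buf.h> pattern: compute the available count, clamp it, and copy in
at most two memcpy() calls around the wrap point, all under the new per-queue
spinlock. A stand-alone sketch of the read side (the CIRC_* macros require a
power-of-two size, which ARCMSR_MAX_QBUFFER, 4096, satisfies):

	#include <linux/circ_buf.h>
	#include <linux/string.h>

	/* drain up to maxlen bytes; returns the number copied */
	static int circ_drain(char *dst, const char *buf, unsigned int *tail,
			      unsigned int head, unsigned int size, int maxlen)
	{
		int len = CIRC_CNT(head, *tail, size);		/* readable */
		int run = CIRC_CNT_TO_END(head, *tail, size);	/* pre-wrap */

		if (len > maxlen)
			len = maxlen;
		if (len <= run) {
			memcpy(dst, buf + *tail, len);
		} else {
			memcpy(dst, buf + *tail, run);
			memcpy(dst + run, buf, len - run);
		}
		*tail = (*tail + len) & (size - 1);	/* advance consumer */
		return len;
	}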
 
diff --git a/drivers/scsi/arcmsr/arcmsr_hba.c b/drivers/scsi/arcmsr/arcmsr_hba.c
index b13764c..0b44fb5 100644
--- a/drivers/scsi/arcmsr/arcmsr_hba.c
+++ b/drivers/scsi/arcmsr/arcmsr_hba.c
@@ -2,11 +2,10 @@
 *******************************************************************************
 **        O.S   : Linux
 **   FILE NAME  : arcmsr_hba.c
-**        BY    : Nick Cheng
-**   Description: SCSI RAID Device Driver for
-**                ARECA RAID Host adapter
+**        BY    : Nick Cheng, C.L. Huang
+**   Description: SCSI RAID Device Driver for Areca RAID Controller
 *******************************************************************************
-** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
+** Copyright (C) 2002 - 2014, Areca Technology Corporation All rights reserved
 **
 **     Web site: www.areca.com.tw
 **       E-mail: support@areca.com.tw
@@ -59,6 +58,7 @@
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/aer.h>
+#include <linux/circ_buf.h>
 #include <asm/dma.h>
 #include <asm/io.h>
 #include <asm/uaccess.h>
@@ -70,15 +70,15 @@
 #include <scsi/scsi_transport.h>
 #include <scsi/scsicam.h>
 #include "arcmsr.h"
-MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
-MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
+MODULE_AUTHOR("Nick Cheng, C.L. Huang <support@areca.com.tw>");
+MODULE_DESCRIPTION("Areca ARC11xx/12xx/16xx/188x SAS/SATA RAID Controller Driver");
 MODULE_LICENSE("Dual BSD/GPL");
 MODULE_VERSION(ARCMSR_DRIVER_VERSION);
 
 #define	ARCMSR_SLEEPTIME	10
 #define	ARCMSR_RETRYCOUNT	12
 
-wait_queue_head_t wait_q;
+static wait_queue_head_t wait_q;
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
 					struct scsi_cmnd *cmd);
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
@@ -89,25 +89,31 @@
 static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd);
 static int arcmsr_probe(struct pci_dev *pdev,
 				const struct pci_device_id *id);
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state);
+static int arcmsr_resume(struct pci_dev *pdev);
 static void arcmsr_remove(struct pci_dev *pdev);
 static void arcmsr_shutdown(struct pci_dev *pdev);
 static void arcmsr_iop_init(struct AdapterControlBlock *acb);
 static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
 static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
+static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
+	u32 intmask_org);
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb);
 static void arcmsr_request_device_map(unsigned long pacb);
-static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
-static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb);
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb);
 static void arcmsr_message_isr_bh_fn(struct work_struct *work);
 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *pACB);
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb);
 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
 static const char *arcmsr_info(struct Scsi_Host *);
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
+static void arcmsr_free_irq(struct pci_dev *, struct AdapterControlBlock *);
 static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
 					  int queue_depth, int reason)
 {
@@ -122,15 +128,14 @@
 
 static struct scsi_host_template arcmsr_scsi_host_template = {
 	.module			= THIS_MODULE,
-	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
-				ARCMSR_DRIVER_VERSION,
+	.name			= "Areca SAS/SATA RAID driver",
 	.info			= arcmsr_info,
 	.queuecommand		= arcmsr_queue_command,
 	.eh_abort_handler		= arcmsr_abort,
 	.eh_bus_reset_handler	= arcmsr_bus_reset,
 	.bios_param		= arcmsr_bios_param,
 	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
-	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
+	.can_queue		= ARCMSR_MAX_OUTSTANDING_CMD,
 	.this_id			= ARCMSR_SCSI_INITIATOR_ID,
 	.sg_tablesize	        	= ARCMSR_DEFAULT_SG_ENTRIES, 
 	.max_sectors    	    	= ARCMSR_MAX_XFER_SECTORS_C, 
@@ -139,34 +144,59 @@
 	.shost_attrs		= arcmsr_host_attrs,
 	.no_write_same		= 1,
 };
+
 static struct pci_device_id arcmsr_device_id_table[] = {
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
-	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202),
+		.driver_data = ACB_ADAPTER_TYPE_B},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1214),
+		.driver_data = ACB_ADAPTER_TYPE_D},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681),
+		.driver_data = ACB_ADAPTER_TYPE_A},
+	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880),
+		.driver_data = ACB_ADAPTER_TYPE_C},
 	{0, 0}, /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);
+
 static struct pci_driver arcmsr_pci_driver = {
 	.name			= "arcmsr",
 	.id_table			= arcmsr_device_id_table,
 	.probe			= arcmsr_probe,
 	.remove			= arcmsr_remove,
+	.suspend		= arcmsr_suspend,
+	.resume			= arcmsr_resume,
 	.shutdown		= arcmsr_shutdown,
 };
 /*
@@ -174,16 +204,14 @@
 ****************************************************************************
 */
 
-static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
+static void arcmsr_free_mu(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
-	case ACB_ADAPTER_TYPE_A:
-	case ACB_ADAPTER_TYPE_C:
+	case ACB_ADAPTER_TYPE_B:
+	case ACB_ADAPTER_TYPE_D: {
+		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+			acb->dma_coherent2, acb->dma_coherent_handle2);
 		break;
-	case ACB_ADAPTER_TYPE_B:{
-		dma_free_coherent(&acb->pdev->dev,
-			sizeof(struct MessageUnit_B),
-			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
 	}
 	}
 }
@@ -229,6 +257,25 @@
 		}
 		break;
 	}
+	case ACB_ADAPTER_TYPE_D: {
+		void __iomem *mem_base0;
+		unsigned long addr, range, flags;
+
+		addr = (unsigned long)pci_resource_start(pdev, 0);
+		range = pci_resource_len(pdev, 0);
+		flags = pci_resource_flags(pdev, 0);
+		if (flags & IORESOURCE_CACHEABLE)
+			mem_base0 = ioremap(addr, range);
+		else
+			mem_base0 = ioremap_nocache(addr, range);
+		if (!mem_base0) {
+			pr_notice("arcmsr%d: memory mapping region fail\n",
+				acb->host->host_no);
+			return false;
+		}
+		acb->mem_base0 = mem_base0;
+		break;
+		}
 	}
 	return true;
 }
@@ -249,6 +296,10 @@
 	case ACB_ADAPTER_TYPE_C:{
 		iounmap(acb->pmuC);
 	}
+	break;
+	case ACB_ADAPTER_TYPE_D:
+		iounmap(acb->mem_base0);
+		break;
 	}
 }
 
@@ -289,27 +340,7 @@
 	return 0;
 }
 
-static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
-{
-	struct pci_dev *pdev = acb->pdev;
-	u16 dev_id;
-	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
-	acb->dev_id = dev_id;
-	switch (dev_id) {
-	case 0x1880: {
-		acb->adapter_type = ACB_ADAPTER_TYPE_C;
-		}
-		break;
-	case 0x1201: {
-		acb->adapter_type = ACB_ADAPTER_TYPE_B;
-		}
-		break;
-
-	default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
-	}
-}
-
-static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaA_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	int i;
@@ -327,7 +358,7 @@
 	return false;
 }
 
-static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaB_wait_msgint_ready(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	int i;
@@ -347,9 +378,9 @@
 	return false;
 }
 
-static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
+static uint8_t arcmsr_hbaC_wait_msgint_ready(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 	int i;
 
 	for (i = 0; i < 2000; i++) {
@@ -365,13 +396,30 @@
 	return false;
 }
 
-static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaD_wait_msgint_ready(struct AdapterControlBlock *pACB)
+{
+	struct MessageUnit_D *reg = pACB->pmuD;
+	int i;
+
+	for (i = 0; i < 2000; i++) {
+		if (readl(reg->outbound_doorbell)
+			& ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+			writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+				reg->outbound_doorbell);
+			return true;
+		}
+		msleep(10);
+	} /* max 20 seconds */
+	return false;
+}
+
+static void arcmsr_hbaA_flush_cache(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	int retry_count = 30;
 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 	do {
-		if (arcmsr_hba_wait_msgint_ready(acb))
+		if (arcmsr_hbaA_wait_msgint_ready(acb))
 			break;
 		else {
 			retry_count--;
@@ -381,13 +429,13 @@
 	} while (retry_count != 0);
 }
 
-static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_flush_cache(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	int retry_count = 30;
 	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
 	do {
-		if (arcmsr_hbb_wait_msgint_ready(acb))
+		if (arcmsr_hbaB_wait_msgint_ready(acb))
 			break;
 		else {
 			retry_count--;
@@ -397,14 +445,14 @@
 	} while (retry_count != 0);
 }
 
-static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_flush_cache(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */
 	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
 	do {
-		if (arcmsr_hbc_wait_msgint_ready(pACB)) {
+		if (arcmsr_hbaC_wait_msgint_ready(pACB)) {
 			break;
 		} else {
 			retry_count--;
@@ -414,22 +462,44 @@
 	} while (retry_count != 0);
 	return;
 }
+
+static void arcmsr_hbaD_flush_cache(struct AdapterControlBlock *pACB)
+{
+	int retry_count = 15;
+	struct MessageUnit_D *reg = pACB->pmuD;
+
+	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, reg->inbound_msgaddr0);
+	do {
+		if (arcmsr_hbaD_wait_msgint_ready(pACB))
+			break;
+
+		retry_count--;
+		pr_notice("arcmsr%d: wait 'flush adapter "
+			"cache' timeout, retry count down = %d\n",
+			pACB->host->host_no, retry_count);
+	} while (retry_count != 0);
+}
+
 static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 
 	case ACB_ADAPTER_TYPE_A: {
-		arcmsr_flush_hba_cache(acb);
+		arcmsr_hbaA_flush_cache(acb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		arcmsr_flush_hbb_cache(acb);
+		arcmsr_hbaB_flush_cache(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		arcmsr_flush_hbc_cache(acb);
+		arcmsr_hbaC_flush_cache(acb);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		arcmsr_hbaD_flush_cache(acb);
+		break;
 	}
 }
 
@@ -473,7 +543,16 @@
 	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
 	for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){
 		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
-		ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
+		switch (acb->adapter_type) {
+		case ACB_ADAPTER_TYPE_A:
+		case ACB_ADAPTER_TYPE_B:
+			ccb_tmp->cdb_phyaddr = cdb_phyaddr >> 5;
+			break;
+		case ACB_ADAPTER_TYPE_C:
+		case ACB_ADAPTER_TYPE_D:
+			ccb_tmp->cdb_phyaddr = cdb_phyaddr;
+			break;
+		}
 		acb->pccb_pool[i] = ccb_tmp;
 		ccb_tmp->acb = acb;
 		INIT_LIST_HEAD(&ccb_tmp->list);
@@ -486,123 +565,128 @@
 
 static void arcmsr_message_isr_bh_fn(struct work_struct *work) 
 {
-	struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+	struct AdapterControlBlock *acb = container_of(work,
+		struct AdapterControlBlock, arcmsr_do_message_isr_bh);
+	char *acb_dev_map = (char *)acb->device_map;
+	uint32_t __iomem *signature = NULL;
+	char __iomem *devicemap = NULL;
+	int target, lun;
+	struct scsi_device *psdev;
+	char diff, temp;
+
 	switch (acb->adapter_type) {
-		case ACB_ADAPTER_TYPE_A: {
+	case ACB_ADAPTER_TYPE_A: {
+		struct MessageUnit_A __iomem *reg  = acb->pmuA;
 
-			struct MessageUnit_A __iomem *reg  = acb->pmuA;
-			char *acb_dev_map = (char *)acb->device_map;
-			uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]);
-			char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]);
-			int target, lun;
-			struct scsi_device *psdev;
-			char diff;
-
-			atomic_inc(&acb->rq_map_token);
-			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
-				for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
-					diff = (*acb_dev_map)^readb(devicemap);
-					if (diff != 0) {
-						char temp;
-						*acb_dev_map = readb(devicemap);
-						temp =*acb_dev_map;
-						for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
-							if((temp & 0x01)==1 && (diff & 0x01) == 1) {	
-								scsi_add_device(acb->host, 0, target, lun);
-							}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
-								psdev = scsi_device_lookup(acb->host, 0, target, lun);
-								if (psdev != NULL ) {
-									scsi_remove_device(psdev);
-									scsi_device_put(psdev);
-								}
-							}
-							temp >>= 1;
-							diff >>= 1;
-						}
-					}
-					devicemap++;
-					acb_dev_map++;
-				}
-			}
-			break;
-		}
-
-		case ACB_ADAPTER_TYPE_B: {
-			struct MessageUnit_B *reg  = acb->pmuB;
-			char *acb_dev_map = (char *)acb->device_map;
-			uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]);
-			char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]);
-			int target, lun;
-			struct scsi_device *psdev;
-			char diff;
-
-			atomic_inc(&acb->rq_map_token);
-			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
-				for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) {
-					diff = (*acb_dev_map)^readb(devicemap);
-					if (diff != 0) {
-						char temp;
-						*acb_dev_map = readb(devicemap);
-						temp =*acb_dev_map;
-						for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
-							if((temp & 0x01)==1 && (diff & 0x01) == 1) {	
-								scsi_add_device(acb->host, 0, target, lun);
-							}else if((temp & 0x01) == 0 && (diff & 0x01) == 1) {
-								psdev = scsi_device_lookup(acb->host, 0, target, lun);
-								if (psdev != NULL ) {
-									scsi_remove_device(psdev);
-									scsi_device_put(psdev);
-								}
-							}
-							temp >>= 1;
-							diff >>= 1;
-						}
-					}
-					devicemap++;
-					acb_dev_map++;
-				}
-			}
-		}
+		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
 		break;
-		case ACB_ADAPTER_TYPE_C: {
-			struct MessageUnit_C *reg  = acb->pmuC;
-			char *acb_dev_map = (char *)acb->device_map;
-			uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
-			char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
-			int target, lun;
-			struct scsi_device *psdev;
-			char diff;
+	}
+	case ACB_ADAPTER_TYPE_B: {
+		struct MessageUnit_B *reg  = acb->pmuB;
 
-			atomic_inc(&acb->rq_map_token);
-			if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
-				for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
-					diff = (*acb_dev_map)^readb(devicemap);
-					if (diff != 0) {
-						char temp;
-						*acb_dev_map = readb(devicemap);
-						temp = *acb_dev_map;
-						for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
-							if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
-								scsi_add_device(acb->host, 0, target, lun);
-							} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
-								psdev = scsi_device_lookup(acb->host, 0, target, lun);
-								if (psdev != NULL) {
-									scsi_remove_device(psdev);
-									scsi_device_put(psdev);
-								}
-							}
-							temp >>= 1;
-							diff >>= 1;
-						}
+		signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
+		devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
+		break;
+	}
+	case ACB_ADAPTER_TYPE_C: {
+		struct MessageUnit_C __iomem *reg  = acb->pmuC;
+
+		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+		break;
+	}
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg  = acb->pmuD;
+
+		signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
+		devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+		break;
+	}
+	}
+	atomic_inc(&acb->rq_map_token);
+	if (readl(signature) != ARCMSR_SIGNATURE_GET_CONFIG)
+		return;
+	for (target = 0; target < ARCMSR_MAX_TARGETID - 1;
+		target++) {
+		temp = readb(devicemap);
+		diff = (*acb_dev_map) ^ temp;
+		if (diff != 0) {
+			*acb_dev_map = temp;
+			for (lun = 0; lun < ARCMSR_MAX_TARGETLUN;
+				lun++) {
+				if ((diff & 0x01) == 1 &&
+					(temp & 0x01) == 1) {
+					scsi_add_device(acb->host,
+						0, target, lun);
+				} else if ((diff & 0x01) == 1
+					&& (temp & 0x01) == 0) {
+					psdev = scsi_device_lookup(acb->host,
+						0, target, lun);
+					if (psdev != NULL) {
+						scsi_remove_device(psdev);
+						scsi_device_put(psdev);
 					}
-					devicemap++;
-					acb_dev_map++;
 				}
+				temp >>= 1;
+				diff >>= 1;
 			}
 		}
+		devicemap++;
+		acb_dev_map++;
 	}
 }
 
+static int
+arcmsr_request_irq(struct pci_dev *pdev, struct AdapterControlBlock *acb)
+{
+	int	i, j, r;
+	struct msix_entry entries[ARCMST_NUM_MSIX_VECTORS];
+
+	for (i = 0; i < ARCMST_NUM_MSIX_VECTORS; i++)
+		entries[i].entry = i;
+	r = pci_enable_msix_range(pdev, entries, 1, ARCMST_NUM_MSIX_VECTORS);
+	if (r < 0)
+		goto msi_int;
+	acb->msix_vector_count = r;
+	for (i = 0; i < r; i++) {
+		if (request_irq(entries[i].vector,
+			arcmsr_do_interrupt, 0, "arcmsr", acb)) {
+			pr_warn("arcmsr%d: request_irq =%d failed!\n",
+				acb->host->host_no, entries[i].vector);
+			for (j = 0 ; j < i ; j++)
+				free_irq(entries[j].vector, acb);
+			pci_disable_msix(pdev);
+			goto msi_int;
+		}
+		acb->entries[i] = entries[i];
+	}
+	acb->acb_flags |= ACB_F_MSIX_ENABLED;
+	pr_info("arcmsr%d: msi-x enabled\n", acb->host->host_no);
+	return SUCCESS;
+msi_int:
+	if (pci_enable_msi_exact(pdev, 1) < 0)
+		goto legacy_int;
+	if (request_irq(pdev->irq, arcmsr_do_interrupt,
+		IRQF_SHARED, "arcmsr", acb)) {
+		pr_warn("arcmsr%d: request_irq =%d failed!\n",
+			acb->host->host_no, pdev->irq);
+		pci_disable_msi(pdev);
+		goto legacy_int;
+	}
+	acb->acb_flags |= ACB_F_MSI_ENABLED;
+	pr_info("arcmsr%d: msi enabled\n", acb->host->host_no);
+	return SUCCESS;
+legacy_int:
+	if (request_irq(pdev->irq, arcmsr_do_interrupt,
+		IRQF_SHARED, "arcmsr", acb)) {
+		pr_warn("arcmsr%d: request_irq = %d failed!\n",
+			acb->host->host_no, pdev->irq);
+		return FAILED;
+	}
+	return SUCCESS;
+}
+
 static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
 {
 	struct Scsi_Host *host;
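
Note: arcmsr_request_irq() added above uses the pci_enable_msix_range() idiom
that the pull message mentions: the call returns the number of MSI-X vectors
actually allocated, anywhere in [minvec, maxvec], or a negative errno if even
minvec cannot be satisfied. A sketch of the contract as used here:

	int nvec;

	/* minvec = 1: accept even a single MSI-X vector */
	nvec = pci_enable_msix_range(pdev, entries, 1,
				     ARCMST_NUM_MSIX_VECTORS);
	if (nvec < 0) {
		/* not even one MSI-X vector: fall back to MSI, then INTx */
	} else {
		/* nvec vectors granted: request_irq() each entries[i].vector */
	}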
@@ -637,7 +721,7 @@
 	host->max_lun = ARCMSR_MAX_TARGETLUN;
 	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
 	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
-	host->can_queue = ARCMSR_MAX_FREECCB_NUM;	/* max simultaneous cmds */		
+	host->can_queue = ARCMSR_MAX_OUTSTANDING_CMD;
 	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;	    
 	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
 	host->unique_id = (bus << 8) | dev_fun;
@@ -649,12 +733,16 @@
 	}
 	spin_lock_init(&acb->eh_lock);
 	spin_lock_init(&acb->ccblist_lock);
+	spin_lock_init(&acb->postq_lock);
+	spin_lock_init(&acb->doneq_lock);
+	spin_lock_init(&acb->rqbuffer_lock);
+	spin_lock_init(&acb->wqbuffer_lock);
 	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
 			ACB_F_MESSAGE_RQBUFFER_CLEARED |
 			ACB_F_MESSAGE_WQBUFFER_READED);
 	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
 	INIT_LIST_HEAD(&acb->ccb_free_list);
-	arcmsr_define_adapter_type(acb);
+	acb->adapter_type = id->driver_data;
 	error = arcmsr_remap_pciregion(acb);
 	if(!error){
 		goto pci_release_regs;
@@ -667,17 +755,13 @@
 	if(error){
 		goto free_hbb_mu;
 	}
-	arcmsr_iop_init(acb);
 	error = scsi_add_host(host, &pdev->dev);
 	if(error){
-		goto RAID_controller_stop;
+		goto free_ccb_pool;
 	}
-	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
-	if(error){
+	if (arcmsr_request_irq(pdev, acb) == FAILED)
 		goto scsi_host_remove;
-	}
-	host->irq = pdev->irq;
-    	scsi_scan_host(host);
+	arcmsr_iop_init(acb);
 	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
 	atomic_set(&acb->rq_map_token, 16);
 	atomic_set(&acb->ante_token_value, 16);
@@ -689,16 +773,20 @@
 	add_timer(&acb->eternal_timer);
 	if(arcmsr_alloc_sysfs_attr(acb))
 		goto out_free_sysfs;
+	scsi_scan_host(host);
 	return 0;
 out_free_sysfs:
-scsi_host_remove:
-	scsi_remove_host(host);
-RAID_controller_stop:
+	del_timer_sync(&acb->eternal_timer);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
+	arcmsr_free_irq(pdev, acb);
+scsi_host_remove:
+	scsi_remove_host(host);
+free_ccb_pool:
 	arcmsr_free_ccb_pool(acb);
 free_hbb_mu:
-	arcmsr_free_hbb_mu(acb);
+	arcmsr_free_mu(acb);
 unmap_pci_region:
 	arcmsr_unmap_pciregion(acb);
 pci_release_regs:
@@ -710,77 +798,171 @@
 	return -ENODEV;
 }
 
-static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
+static void arcmsr_free_irq(struct pci_dev *pdev,
+		struct AdapterControlBlock *acb)
+{
+	int i;
+
+	if (acb->acb_flags & ACB_F_MSI_ENABLED) {
+		free_irq(pdev->irq, acb);
+		pci_disable_msi(pdev);
+	} else if (acb->acb_flags & ACB_F_MSIX_ENABLED) {
+		for (i = 0; i < acb->msix_vector_count; i++)
+			free_irq(acb->entries[i].vector, acb);
+		pci_disable_msix(pdev);
+	} else
+		free_irq(pdev->irq, acb);
+}
+
+static int arcmsr_suspend(struct pci_dev *pdev, pm_message_t state)
+{
+	uint32_t intmask_org;
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct AdapterControlBlock *acb =
+		(struct AdapterControlBlock *)host->hostdata;
+
+	intmask_org = arcmsr_disable_outbound_ints(acb);
+	arcmsr_free_irq(pdev, acb);
+	del_timer_sync(&acb->eternal_timer);
+	flush_work(&acb->arcmsr_do_message_isr_bh);
+	arcmsr_stop_adapter_bgrb(acb);
+	arcmsr_flush_adapter_cache(acb);
+	pci_set_drvdata(pdev, host);
+	pci_save_state(pdev);
+	pci_disable_device(pdev);
+	pci_set_power_state(pdev, pci_choose_state(pdev, state));
+	return 0;
+}
+
+static int arcmsr_resume(struct pci_dev *pdev)
+{
+	int error;
+	struct Scsi_Host *host = pci_get_drvdata(pdev);
+	struct AdapterControlBlock *acb =
+		(struct AdapterControlBlock *)host->hostdata;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_enable_wake(pdev, PCI_D0, 0);
+	pci_restore_state(pdev);
+	if (pci_enable_device(pdev)) {
+		pr_warn("%s: pci_enable_device error\n", __func__);
+		return -ENODEV;
+	}
+	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+	if (error) {
+		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (error) {
+			pr_warn("scsi%d: No suitable DMA mask available\n",
+			       host->host_no);
+			goto controller_unregister;
+		}
+	}
+	pci_set_master(pdev);
+	if (arcmsr_request_irq(pdev, acb) == FAILED)
+		goto controller_stop;
+	arcmsr_iop_init(acb);
+	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
+	atomic_set(&acb->rq_map_token, 16);
+	atomic_set(&acb->ante_token_value, 16);
+	acb->fw_flag = FW_NORMAL;
+	init_timer(&acb->eternal_timer);
+	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
+	acb->eternal_timer.data = (unsigned long) acb;
+	acb->eternal_timer.function = &arcmsr_request_device_map;
+	add_timer(&acb->eternal_timer);
+	return 0;
+controller_stop:
+	arcmsr_stop_adapter_bgrb(acb);
+	arcmsr_flush_adapter_cache(acb);
+controller_unregister:
+	scsi_remove_host(host);
+	arcmsr_free_ccb_pool(acb);
+	arcmsr_unmap_pciregion(acb);
+	pci_release_regions(pdev);
+	scsi_host_put(host);
+	pci_disable_device(pdev);
+	return -ENODEV;
+}
+
+static uint8_t arcmsr_hbaA_abort_allcmd(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
-	if (!arcmsr_hba_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
+			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
 			, acb->host->host_no);
 		return false;
 	}
 	return true;
 }
 
-static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
+static uint8_t arcmsr_hbaB_abort_allcmd(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 
 	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
-	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
+			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
 			, acb->host->host_no);
 		return false;
 	}
 	return true;
 }
-static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
+static uint8_t arcmsr_hbaC_abort_allcmd(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
-	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'abort all outstanding command' timeout \n"
+			"arcmsr%d: wait 'abort all outstanding command' timeout\n"
 			, pACB->host->host_no);
 		return false;
 	}
 	return true;
 }
+
+static uint8_t arcmsr_hbaD_abort_allcmd(struct AdapterControlBlock *pACB)
+{
+	struct MessageUnit_D *reg = pACB->pmuD;
+
+	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, reg->inbound_msgaddr0);
+	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+		pr_notice("arcmsr%d: wait 'abort all outstanding "
+			"command' timeout\n", pACB->host->host_no);
+		return false;
+	}
+	return true;
+}
+
 static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
 {
 	uint8_t rtnval = 0;
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
-		rtnval = arcmsr_abort_hba_allcmd(acb);
+		rtnval = arcmsr_hbaA_abort_allcmd(acb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		rtnval = arcmsr_abort_hbb_allcmd(acb);
+		rtnval = arcmsr_hbaB_abort_allcmd(acb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_C: {
-		rtnval = arcmsr_abort_hbc_allcmd(acb);
+		rtnval = arcmsr_hbaC_abort_allcmd(acb);
 		}
+		break;
+
+	case ACB_ADAPTER_TYPE_D:
+		rtnval = arcmsr_hbaD_abort_allcmd(acb);
+		break;
 	}
 	return rtnval;
 }
 
-static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
-{
-	struct MessageUnit_B *reg = pacb->pmuB;
-	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
-	if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
-		printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no);
-		return false;
-	}
-    	return true;
-}
-
 static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
 {
 	struct scsi_cmnd *pcmd = ccb->pcmd;
@@ -837,12 +1019,18 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C:{
-		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
 		/* disable all outbound interrupt */
 		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
 		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
 		}
 		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		/* disable all outbound interrupt */
+		writel(ARCMSR_ARC1214_ALL_INT_DISABLE, reg->pcief0_int_enable);
+		}
+		break;
 	}
 	return orig_mask;
 }
@@ -933,7 +1121,7 @@
 static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
 {
 	int i = 0;
-	uint32_t flag_ccb;
+	uint32_t flag_ccb, ccb_cdb_phy;
 	struct ARCMSR_CDB *pARCMSR_CDB;
 	bool error;
 	struct CommandControlBlock *pCCB;
@@ -961,8 +1149,9 @@
 		/*clear all outbound posted Q*/
 		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */
 		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
-			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
-				writel(0, &reg->done_qbuffer[i]);
+			flag_ccb = reg->done_qbuffer[i];
+			if (flag_ccb != 0) {
+				reg->done_qbuffer[i] = 0;
 				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
 				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -975,11 +1164,7 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *reg = acb->pmuC;
-		struct  ARCMSR_CDB *pARCMSR_CDB;
-		uint32_t flag_ccb, ccb_cdb_phy;
-		bool error;
-		struct CommandControlBlock *pCCB;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
 		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
 			/*need to do*/
 			flag_ccb = readl(&reg->outbound_queueport_low);
@@ -989,9 +1174,54 @@
 			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
 			arcmsr_drain_donequeue(acb, pCCB, error);
 		}
-	}
+		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D  *pmu = acb->pmuD;
+		uint32_t outbound_write_pointer;
+		uint32_t doneq_index, index_stripped, addressLow, residual, toggle;
+		unsigned long flags;
+
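+		/*
+		 * Drain whatever the IOP has already completed.  The done-queue
+		 * index appears to pack a 12-bit ring position with a 0x4000
+		 * toggle that flips on every wrap, so producer and consumer can
+		 * tell a full queue from an empty one.  When the queue is
+		 * momentarily empty, back off for 10 ms and retry.
+		 */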
+		residual = atomic_read(&acb->ccboutstandingcount);
+		for (i = 0; i < residual; i++) {
+			spin_lock_irqsave(&acb->doneq_lock, flags);
+			outbound_write_pointer =
+				pmu->done_qbuffer[0].addressLow + 1;
+			doneq_index = pmu->doneq_index;
+			if ((doneq_index & 0xFFF) !=
+				(outbound_write_pointer & 0xFFF)) {
+				toggle = doneq_index & 0x4000;
+				index_stripped = (doneq_index & 0xFFF) + 1;
+				index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+				pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+					((toggle ^ 0x4000) + 1);
+				doneq_index = pmu->doneq_index;
+				spin_unlock_irqrestore(&acb->doneq_lock, flags);
+				addressLow = pmu->done_qbuffer[doneq_index &
+					0xFFF].addressLow;
+				ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+				pARCMSR_CDB = (struct  ARCMSR_CDB *)
+					(acb->vir2phy_offset + ccb_cdb_phy);
+				pCCB = container_of(pARCMSR_CDB,
+					struct CommandControlBlock, arcmsr_cdb);
+				error = (addressLow &
+					ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ?
+					true : false;
+				arcmsr_drain_donequeue(acb, pCCB, error);
+				writel(doneq_index,
+					pmu->outboundlist_read_pointer);
+			} else {
+				spin_unlock_irqrestore(&acb->doneq_lock, flags);
+				mdelay(10);
+			}
+		}
+		pmu->postq_index = 0;
+		pmu->doneq_index = 0x40FF;
+		}
+		break;
 	}
 }
+
 static void arcmsr_remove(struct pci_dev *pdev)
 {
 	struct Scsi_Host *host = pci_get_drvdata(pdev);
@@ -1029,9 +1259,9 @@
 			}
 		}
 	}
-	free_irq(pdev->irq, acb);
+	arcmsr_free_irq(pdev, acb);
 	arcmsr_free_ccb_pool(acb);
-	arcmsr_free_hbb_mu(acb);
+	arcmsr_free_mu(acb);
 	arcmsr_unmap_pciregion(acb);
 	pci_release_regions(pdev);
 	scsi_host_put(host);
@@ -1045,6 +1275,7 @@
 		(struct AdapterControlBlock *)host->hostdata;
 	del_timer_sync(&acb->eternal_timer);
 	arcmsr_disable_outbound_ints(acb);
+	arcmsr_free_irq(pdev, acb);
 	flush_work(&acb->arcmsr_do_message_isr_bh);
 	arcmsr_stop_adapter_bgrb(acb);
 	arcmsr_flush_adapter_cache(acb);
@@ -1091,11 +1322,19 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *reg = acb->pmuC;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
 		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
 		writel(intmask_org & mask, &reg->host_int_mask);
 		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+
+		mask = ARCMSR_ARC1214_ALL_INT_ENABLE;
+		writel(intmask_org | mask, reg->pcief0_int_enable);
+		break;
+		}
 	}
 }
 
@@ -1115,7 +1354,7 @@
 	arcmsr_cdb->TargetID = pcmd->device->id;
 	arcmsr_cdb->LUN = pcmd->device->lun;
 	arcmsr_cdb->Function = 1;
-	arcmsr_cdb->Context = 0;
+	arcmsr_cdb->msgContext = 0;
 	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);
 
 	nseg = scsi_dma_map(pcmd);
@@ -1156,7 +1395,7 @@
 
 static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
 {
-	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
+	uint32_t cdb_phyaddr = ccb->cdb_phyaddr;
 	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
 	atomic_inc(&acb->ccboutstandingcount);
 	ccb->startdone = ARCMSR_CCB_START;
@@ -1165,25 +1404,24 @@
 		struct MessageUnit_A __iomem *reg = acb->pmuA;
 
 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
-			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
+			writel(cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
 			&reg->inbound_queueport);
-		else {
-				writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
-		}
-		}
+		else
+			writel(cdb_phyaddr, &reg->inbound_queueport);
 		break;
+	}
 
 	case ACB_ADAPTER_TYPE_B: {
 		struct MessageUnit_B *reg = acb->pmuB;
 		uint32_t ending_index, index = reg->postq_index;
 
 		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
-		writel(0, &reg->post_qbuffer[ending_index]);
+		reg->post_qbuffer[ending_index] = 0;
 		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
-			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,\
-						 &reg->post_qbuffer[index]);
+			reg->post_qbuffer[index] =
+				cdb_phyaddr | ARCMSR_CCBPOST_FLAG_SGL_BSIZE;
 		} else {
-			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
+			reg->post_qbuffer[index] = cdb_phyaddr;
 		}
 		index++;
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
@@ -1192,11 +1430,11 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
+		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
 		uint32_t ccb_post_stamp, arc_cdb_size;
 
 		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
-		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
+		ccb_post_stamp = (cdb_phyaddr | ((arc_cdb_size - 1) >> 6) | 1);
 		if (acb->cdb_phyaddr_hi32) {
 			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
 			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
@@ -1204,62 +1442,102 @@
 			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
 		}
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D  *pmu = acb->pmuD;
+		u16 index_stripped;
+		u16 postq_index, toggle;
+		unsigned long flags;
+		struct InBound_SRB *pinbound_srb;
+
+		spin_lock_irqsave(&acb->postq_lock, flags);
+		postq_index = pmu->postq_index;
+		pinbound_srb = (struct InBound_SRB *)&(pmu->post_qbuffer[postq_index & 0xFF]);
+		pinbound_srb->addressHigh = dma_addr_hi32(cdb_phyaddr);
+		pinbound_srb->addressLow = dma_addr_lo32(cdb_phyaddr);
+		pinbound_srb->length = ccb->arc_cdb_size >> 2;
+		arcmsr_cdb->msgContext = dma_addr_lo32(cdb_phyaddr);
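+		/*
+		 * Advance the inbound-list index: the low bits select the next
+		 * SRB slot (ARCMSR_MAX_ARC1214_POSTQUEUE is presumably a power
+		 * of two, hence the mask), while the 0x4000 bit is toggled on
+		 * wrap.  Note it is the pre-increment index that is handed to
+		 * the hardware write pointer below.
+		 */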
+		toggle = postq_index & 0x4000;
+		index_stripped = postq_index + 1;
+		index_stripped &= (ARCMSR_MAX_ARC1214_POSTQUEUE - 1);
+		pmu->postq_index = index_stripped ? (index_stripped | toggle) :
+			(toggle ^ 0x4000);
+		writel(postq_index, pmu->inboundlist_write_pointer);
+		spin_unlock_irqrestore(&acb->postq_lock, flags);
+		break;
+		}
 	}
 }
 
-static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_stop_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
-	if (!arcmsr_hba_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
 			, acb->host->host_no);
 	}
 }
 
-static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_stop_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
 
-	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
 			, acb->host->host_no);
 	}
 }
 
-static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_stop_bgrb(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
-	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE
-			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
+			"arcmsr%d: wait 'stop adapter background rebulid' timeout\n"
 			, pACB->host->host_no);
 	}
 	return;
 }
+
+static void arcmsr_hbaD_stop_bgrb(struct AdapterControlBlock *pACB)
+{
+	struct MessageUnit_D *reg = pACB->pmuD;
+
+	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
+	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, reg->inbound_msgaddr0);
+	if (!arcmsr_hbaD_wait_msgint_ready(pACB))
+		pr_notice("arcmsr%d: wait 'stop adapter background rebulid' "
+			"timeout\n", pACB->host->host_no);
+}
+
 static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
-		arcmsr_stop_hba_bgrb(acb);
+		arcmsr_hbaA_stop_bgrb(acb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		arcmsr_stop_hbb_bgrb(acb);
+		arcmsr_hbaB_stop_bgrb(acb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		arcmsr_stop_hbc_bgrb(acb);
+		arcmsr_hbaC_stop_bgrb(acb);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		arcmsr_hbaD_stop_bgrb(acb);
+		break;
 	}
 }
 
@@ -1268,7 +1546,7 @@
 	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
 }
 
-void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
+static void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A: {
@@ -1284,8 +1562,16 @@
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 		struct MessageUnit_C __iomem *reg = acb->pmuC;
+
 		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+			reg->inbound_doorbell);
+		}
+		break;
 	}
 }
 
@@ -1320,6 +1606,12 @@
 		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
 		}
 		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		writel(ARCMSR_ARC1214_DRV2IOP_DATA_IN_READY,
+			reg->inbound_doorbell);
+		}
+		break;
 	}
 }
 
@@ -1340,9 +1632,15 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
+		struct MessageUnit_C __iomem *phbcmu = acb->pmuC;
 		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
+		}
+		break;
 	}
 	return qbuffer;
 }
@@ -1364,96 +1662,208 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
 		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
-	}
-
+		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
+		}
+		break;
 	}
 	return pqbuffer;
 }
 
+static uint32_t
+arcmsr_Read_iop_rqbuffer_in_DWORD(struct AdapterControlBlock *acb,
+		struct QBUFFER __iomem *prbuffer)
+{
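+	/*
+	 * Read the IOP's request buffer through 32-bit accesses: type C/D
+	 * message buffers are fetched as DWORDs into a small bounce buffer
+	 * (128 bytes, presumably sized to the QBUFFER data area) and then
+	 * copied byte-wise into the circular rqbuffer, instead of issuing
+	 * one readb() per byte over the bus.
+	 */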
+	uint8_t *pQbuffer;
+	uint8_t *buf1 = NULL;
+	uint32_t __iomem *iop_data;
+	uint32_t iop_len, data_len, *buf2 = NULL;
+
+	iop_data = (uint32_t __iomem *)prbuffer->data;
+	iop_len = readl(&prbuffer->data_len);
+	if (iop_len > 0) {
+		buf1 = kmalloc(128, GFP_ATOMIC);
+		buf2 = (uint32_t *)buf1;
+		if (buf1 == NULL)
+			return 0;
+		data_len = iop_len;
+		while (data_len >= 4) {
+			*buf2++ = readl(iop_data);
+			iop_data++;
+			data_len -= 4;
+		}
+		if (data_len)
+			*buf2 = readl(iop_data);
+		buf2 = (uint32_t *)buf1;
+	}
+	while (iop_len > 0) {
+		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+		*pQbuffer = *buf1;
+		acb->rqbuf_putIndex++;
+		/* wrap the put index to 0 at the end of the ring */
+		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+		buf1++;
+		iop_len--;
+	}
+	kfree(buf2);
+	/* let IOP know data has been read */
+	arcmsr_iop_message_read(acb);
+	return 1;
+}
+
+uint32_t
+arcmsr_Read_iop_rqbuffer_data(struct AdapterControlBlock *acb,
+	struct QBUFFER __iomem *prbuffer)
+{
+	uint8_t *pQbuffer;
+	uint8_t __iomem *iop_data;
+	uint32_t iop_len;
+
+	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D))
+		return arcmsr_Read_iop_rqbuffer_in_DWORD(acb, prbuffer);
+	iop_data = (uint8_t __iomem *)prbuffer->data;
+	iop_len = readl(&prbuffer->data_len);
+	while (iop_len > 0) {
+		pQbuffer = &acb->rqbuffer[acb->rqbuf_putIndex];
+		*pQbuffer = readb(iop_data);
+		acb->rqbuf_putIndex++;
+		acb->rqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+		iop_data++;
+		iop_len--;
+	}
+	arcmsr_iop_message_read(acb);
+	return 1;
+}
+
 static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
 {
-	struct QBUFFER __iomem *prbuffer;
-	struct QBUFFER *pQbuffer;
-	uint8_t __iomem *iop_data;
-	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;
-	rqbuf_lastindex = acb->rqbuf_lastindex;
-	rqbuf_firstindex = acb->rqbuf_firstindex;
+	unsigned long flags;
+	struct QBUFFER __iomem  *prbuffer;
+	int32_t buf_empty_len;
+
+	spin_lock_irqsave(&acb->rqbuffer_lock, flags);
 	prbuffer = arcmsr_get_iop_rqbuffer(acb);
-	iop_data = (uint8_t __iomem *)prbuffer->data;
-	iop_len = prbuffer->data_len;
-	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
-
-	if (my_empty_len >= iop_len)
-	{
-		while (iop_len > 0) {
-			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
-			memcpy(pQbuffer, iop_data, 1);
-			rqbuf_lastindex++;
-			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			iop_len--;
-		}
-		acb->rqbuf_lastindex = rqbuf_lastindex;
-		arcmsr_iop_message_read(acb);
-	}
-
-	else {
+	buf_empty_len = (acb->rqbuf_putIndex - acb->rqbuf_getIndex - 1) &
+		(ARCMSR_MAX_QBUFFER - 1);
+	if (buf_empty_len >= readl(&prbuffer->data_len)) {
+		if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+			acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+	} else
 		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
+	spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+}
+
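+/*
+ * Write-side counterpart of the DWORD read path above: gather up to 124
+ * bytes from the circular wqbuffer into a bounce buffer, push them to the
+ * IOP with 32-bit writel()s, and ring the doorbell.  The WQBUFFER_READED
+ * flag keeps only one message in flight until the IOP acknowledges it.
+ */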
+static void arcmsr_write_ioctldata2iop_in_DWORD(struct AdapterControlBlock *acb)
+{
+	uint8_t *pQbuffer;
+	struct QBUFFER __iomem *pwbuffer;
+	uint8_t *buf1 = NULL;
+	uint32_t __iomem *iop_data;
+	uint32_t allxfer_len = 0, data_len, *buf2 = NULL, data;
+
+	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
+		buf1 = kmalloc(128, GFP_ATOMIC);
+		buf2 = (uint32_t *)buf1;
+		if (buf1 == NULL)
+			return;
+
+		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
+		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
+		iop_data = (uint32_t __iomem *)pwbuffer->data;
+		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+			&& (allxfer_len < 124)) {
+			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+			*buf1 = *pQbuffer;
+			acb->wqbuf_getIndex++;
+			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
+			buf1++;
+			allxfer_len++;
+		}
+		data_len = allxfer_len;
+		buf1 = (uint8_t *)buf2;
+		while (data_len >= 4) {
+			data = *buf2++;
+			writel(data, iop_data);
+			iop_data++;
+			data_len -= 4;
+		}
+		if (data_len) {
+			data = *buf2;
+			writel(data, iop_data);
+		}
+		writel(allxfer_len, &pwbuffer->data_len);
+		kfree(buf1);
+		arcmsr_iop_message_wrote(acb);
+	}
+}
+
+void
+arcmsr_write_ioctldata2iop(struct AdapterControlBlock *acb)
+{
+	uint8_t *pQbuffer;
+	struct QBUFFER __iomem *pwbuffer;
+	uint8_t __iomem *iop_data;
+	int32_t allxfer_len = 0;
+
+	if (acb->adapter_type & (ACB_ADAPTER_TYPE_C | ACB_ADAPTER_TYPE_D)) {
+		arcmsr_write_ioctldata2iop_in_DWORD(acb);
+		return;
+	}
+	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
+		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
+		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
+		iop_data = (uint8_t __iomem *)pwbuffer->data;
+		while ((acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+			&& (allxfer_len < 124)) {
+			pQbuffer = &acb->wqbuffer[acb->wqbuf_getIndex];
+			writeb(*pQbuffer, iop_data);
+			acb->wqbuf_getIndex++;
+			acb->wqbuf_getIndex %= ARCMSR_MAX_QBUFFER;
+			iop_data++;
+			allxfer_len++;
+		}
+		writel(allxfer_len, &pwbuffer->data_len);
+		arcmsr_iop_message_wrote(acb);
 	}
 }
 
 static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
 {
+	unsigned long flags;
+
+	spin_lock_irqsave(&acb->wqbuffer_lock, flags);
 	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
-	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
-		uint8_t *pQbuffer;
-		struct QBUFFER __iomem *pwbuffer;
-		uint8_t __iomem *iop_data;
-		int32_t allxfer_len = 0;
-
-		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
-		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
-		iop_data = (uint8_t __iomem *)pwbuffer->data;
-
-		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) && \
-							(allxfer_len < 124)) {
-			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
-			memcpy(iop_data, pQbuffer, 1);
-			acb->wqbuf_firstindex++;
-			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			allxfer_len++;
-		}
-		pwbuffer->data_len = allxfer_len;
-
-		arcmsr_iop_message_wrote(acb);
-	}
-
-	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
+	if (acb->wqbuf_getIndex != acb->wqbuf_putIndex)
+		arcmsr_write_ioctldata2iop(acb);
+	if (acb->wqbuf_getIndex == acb->wqbuf_putIndex)
 		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
-	}
+	spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
 }
 
-static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_doorbell_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_doorbell;
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	outbound_doorbell = readl(&reg->outbound_doorbell);
-	writel(outbound_doorbell, &reg->outbound_doorbell);
-	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
-		arcmsr_iop2drv_data_wrote_handle(acb);
-	}
-
-	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
-		arcmsr_iop2drv_data_read_handle(acb);
-	}
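+	/*
+	 * Ack-then-recheck: acknowledge the doorbell causes, service them,
+	 * and re-read the register, looping until no cause bits remain so a
+	 * doorbell raised while the last one was being handled is not lost.
+	 */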
+	do {
+		writel(outbound_doorbell, &reg->outbound_doorbell);
+		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK)
+			arcmsr_iop2drv_data_wrote_handle(acb);
+		if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK)
+			arcmsr_iop2drv_data_read_handle(acb);
+		outbound_doorbell = readl(&reg->outbound_doorbell);
+	} while (outbound_doorbell & (ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK
+		| ARCMSR_OUTBOUND_IOP331_DATA_READ_OK));
 }
-static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_doorbell_isr(struct AdapterControlBlock *pACB)
 {
 	uint32_t outbound_doorbell;
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	/*
 	*******************************************************************
 	**  Maybe here we need to check wrqbuffer_lock is lock or not
@@ -1462,19 +1872,42 @@
 	*******************************************************************
 	*/
 	outbound_doorbell = readl(&reg->outbound_doorbell);
-	writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
-	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
-		arcmsr_iop2drv_data_wrote_handle(pACB);
-	}
-	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
-		arcmsr_iop2drv_data_read_handle(pACB);
-	}
-	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
-		arcmsr_hbc_message_isr(pACB);    /* messenger of "driver to iop commands" */
-	}
-	return;
+	do {
+		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
+		readl(&reg->outbound_doorbell_clear);
+		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK)
+			arcmsr_iop2drv_data_wrote_handle(pACB);
+		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK)
+			arcmsr_iop2drv_data_read_handle(pACB);
+		if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE)
+			arcmsr_hbaC_message_isr(pACB);
+		outbound_doorbell = readl(&reg->outbound_doorbell);
+	} while (outbound_doorbell & (ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK
+		| ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK
+		| ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE));
 }
-static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
+
+static void arcmsr_hbaD_doorbell_isr(struct AdapterControlBlock *pACB)
+{
+	uint32_t outbound_doorbell;
+	struct MessageUnit_D  *pmu = pACB->pmuD;
+
+	outbound_doorbell = readl(pmu->outbound_doorbell);
+	do {
+		writel(outbound_doorbell, pmu->outbound_doorbell);
+		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE)
+			arcmsr_hbaD_message_isr(pACB);
+		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK)
+			arcmsr_iop2drv_data_wrote_handle(pACB);
+		if (outbound_doorbell & ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK)
+			arcmsr_iop2drv_data_read_handle(pACB);
+		outbound_doorbell = readl(pmu->outbound_doorbell);
+	} while (outbound_doorbell & (ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK
+		| ARCMSR_ARC1214_IOP2DRV_DATA_READ_OK
+		| ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE));
+}
+
+static void arcmsr_hbaA_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t flag_ccb;
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -1488,7 +1921,7 @@
 		arcmsr_drain_donequeue(acb, pCCB, error);
 	}
 }
-static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_postqueue_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t index;
 	uint32_t flag_ccb;
@@ -1497,8 +1930,8 @@
 	struct CommandControlBlock *pCCB;
 	bool error;
 	index = reg->doneq_index;
-	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
-		writel(0, &reg->done_qbuffer[index]);
+	while ((flag_ccb = reg->done_qbuffer[index]) != 0) {
+		reg->done_qbuffer[index] = 0;
 		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
 		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
@@ -1509,35 +1942,80 @@
 	}
 }
 
-static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_postqueue_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_C *phbcmu;
+	struct MessageUnit_C __iomem *phbcmu;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	struct CommandControlBlock *ccb;
 	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
 	int error;
 
-	phbcmu = (struct MessageUnit_C *)acb->pmuC;
+	phbcmu = acb->pmuC;
 	/* areca cdb command done */
 	/* Use correct offset and size for syncing */
 
-	while (readl(&phbcmu->host_int_status) &
-	ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
-	/* check if command done with no error*/
-	flag_ccb = readl(&phbcmu->outbound_queueport_low);
-	ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
-	arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
-	ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-	error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
-	/* check if command done with no error */
-	arcmsr_drain_donequeue(acb, ccb, error);
-	if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
-		writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
-		break;
-	}
-	throttling++;
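+	/*
+	 * The reworked loop polls the outbound queue port directly; reading
+	 * it evidently returns 0xFFFFFFFF once the queue is empty.  The
+	 * throttling doorbell is now sent every ARCMSR_HBC_ISR_THROTTLING_LEVEL
+	 * completions instead of aborting the drain as before.
+	 */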
+	while ((flag_ccb = readl(&phbcmu->outbound_queueport_low)) !=
+			0xFFFFFFFF) {
+		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+			+ ccb_cdb_phy);
+		ccb = container_of(arcmsr_cdb, struct CommandControlBlock,
+			arcmsr_cdb);
+		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+			? true : false;
+		/* check if command done with no error */
+		arcmsr_drain_donequeue(acb, ccb, error);
+		throttling++;
+		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
+			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
+				&phbcmu->inbound_doorbell);
+			throttling = 0;
+		}
 	}
 }
+
+static void arcmsr_hbaD_postqueue_isr(struct AdapterControlBlock *acb)
+{
+	u32 outbound_write_pointer, doneq_index, index_stripped, toggle;
+	uint32_t addressLow, ccb_cdb_phy;
+	int error;
+	struct MessageUnit_D  *pmu;
+	struct ARCMSR_CDB *arcmsr_cdb;
+	struct CommandControlBlock *ccb;
+	unsigned long flags;
+
+	spin_lock_irqsave(&acb->doneq_lock, flags);
+	pmu = acb->pmuD;
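+	/*
+	 * Element 0 of the done queue appears to be reserved for the IOP's
+	 * copy of its write pointer (hence the +1), which is compared against
+	 * our doneq_index using the same 12-bit-index-plus-0x4000-toggle
+	 * encoding as the post queue.
+	 */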
+	outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+	doneq_index = pmu->doneq_index;
+	if ((doneq_index & 0xFFF) != (outbound_write_pointer & 0xFFF)) {
+		do {
+			toggle = doneq_index & 0x4000;
+			index_stripped = (doneq_index & 0xFFF) + 1;
+			index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+			pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+				((toggle ^ 0x4000) + 1);
+			doneq_index = pmu->doneq_index;
+			addressLow = pmu->done_qbuffer[doneq_index &
+				0xFFF].addressLow;
+			ccb_cdb_phy = (addressLow & 0xFFFFFFF0);
+			arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset
+				+ ccb_cdb_phy);
+			ccb = container_of(arcmsr_cdb,
+				struct CommandControlBlock, arcmsr_cdb);
+			error = (addressLow & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+				? true : false;
+			arcmsr_drain_donequeue(acb, ccb, error);
+			writel(doneq_index, pmu->outboundlist_read_pointer);
+		} while ((doneq_index & 0xFFF) !=
+			(outbound_write_pointer & 0xFFF));
+	}
+	writel(ARCMSR_ARC1214_OUTBOUND_LIST_INTERRUPT_CLEAR,
+		pmu->outboundlist_interrupt_cause);
+	readl(pmu->outboundlist_interrupt_cause);
+	spin_unlock_irqrestore(&acb->doneq_lock, flags);
+}
+
 /*
 **********************************************************************************
 ** Handle a message interrupt
@@ -1546,14 +2024,14 @@
 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
 **********************************************************************************
 */
-static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_message_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_A *reg  = acb->pmuA;
+	struct MessageUnit_A __iomem *reg  = acb->pmuA;
 	/*clear interrupt and message state*/
 	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
 	schedule_work(&acb->arcmsr_do_message_isr_bh);
 }
-static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_message_isr(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg  = acb->pmuB;
 
@@ -1570,114 +2048,142 @@
 ** We want this in order to compare the drivemap so that we can detect newly-attached drives.
 **********************************************************************************
 */
-static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_message_isr(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_C *reg  = acb->pmuC;
+	struct MessageUnit_C __iomem *reg  = acb->pmuC;
 	/*clear interrupt and message state*/
 	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
 	schedule_work(&acb->arcmsr_do_message_isr_bh);
 }
 
-static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
+static void arcmsr_hbaD_message_isr(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_D *reg  = acb->pmuD;
+
+	writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE, reg->outbound_doorbell);
+	readl(reg->outbound_doorbell);
+	schedule_work(&acb->arcmsr_do_message_isr_bh);
+}
+
+static int arcmsr_hbaA_handle_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_intstatus;
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	outbound_intstatus = readl(&reg->outbound_intstatus) &
 		acb->outbound_int_enable;
-	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))	{
-		return 1;
-	}
-	writel(outbound_intstatus, &reg->outbound_intstatus);
-	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)	{
-		arcmsr_hba_doorbell_isr(acb);
-	}
-	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
-		arcmsr_hba_postqueue_isr(acb);
-	}
-	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) 	{
-		/* messenger of "driver to iop commands" */
-		arcmsr_hba_message_isr(acb);
-	}
-	return 0;
+	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT))
+		return IRQ_NONE;
+	do {
+		writel(outbound_intstatus, &reg->outbound_intstatus);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT)
+			arcmsr_hbaA_doorbell_isr(acb);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT)
+			arcmsr_hbaA_postqueue_isr(acb);
+		if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT)
+			arcmsr_hbaA_message_isr(acb);
+		outbound_intstatus = readl(&reg->outbound_intstatus) &
+			acb->outbound_int_enable;
+	} while (outbound_intstatus & (ARCMSR_MU_OUTBOUND_DOORBELL_INT
+		| ARCMSR_MU_OUTBOUND_POSTQUEUE_INT
+		| ARCMSR_MU_OUTBOUND_MESSAGE0_INT));
+	return IRQ_HANDLED;
 }
 
-static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
+static int arcmsr_hbaB_handle_isr(struct AdapterControlBlock *acb)
 {
 	uint32_t outbound_doorbell;
 	struct MessageUnit_B *reg = acb->pmuB;
 	outbound_doorbell = readl(reg->iop2drv_doorbell) &
 				acb->outbound_int_enable;
 	if (!outbound_doorbell)
-		return 1;
-
-	writel(~outbound_doorbell, reg->iop2drv_doorbell);
-	/*in case the last action of doorbell interrupt clearance is cached,
-	this action can push HW to write down the clear bit*/
-	readl(reg->iop2drv_doorbell);
-	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
-	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
-		arcmsr_iop2drv_data_wrote_handle(acb);
-	}
-	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
-		arcmsr_iop2drv_data_read_handle(acb);
-	}
-	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
-		arcmsr_hbb_postqueue_isr(acb);
-	}
-	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
-		/* messenger of "driver to iop commands" */
-		arcmsr_hbb_message_isr(acb);
-	}
-	return 0;
+		return IRQ_NONE;
+	do {
+		writel(~outbound_doorbell, reg->iop2drv_doorbell);
+		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK)
+			arcmsr_iop2drv_data_wrote_handle(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK)
+			arcmsr_iop2drv_data_read_handle(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE)
+			arcmsr_hbaB_postqueue_isr(acb);
+		if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE)
+			arcmsr_hbaB_message_isr(acb);
+		outbound_doorbell = readl(reg->iop2drv_doorbell) &
+			acb->outbound_int_enable;
+	} while (outbound_doorbell & (ARCMSR_IOP2DRV_DATA_WRITE_OK
+		| ARCMSR_IOP2DRV_DATA_READ_OK
+		| ARCMSR_IOP2DRV_CDB_DONE
+		| ARCMSR_IOP2DRV_MESSAGE_CMD_DONE));
+	return IRQ_HANDLED;
 }
 
-static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
+static int arcmsr_hbaC_handle_isr(struct AdapterControlBlock *pACB)
 {
 	uint32_t host_interrupt_status;
-	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 	/*
 	*********************************************
 	**   check outbound intstatus
 	*********************************************
 	*/
-	host_interrupt_status = readl(&phbcmu->host_int_status);
-	if (!host_interrupt_status) {
-		/*it must be share irq*/
-		return 1;
-	}
-	/* MU ioctl transfer doorbell interrupts*/
-	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
-		arcmsr_hbc_doorbell_isr(pACB);   /* messenger of "ioctl message read write" */
-	}
-	/* MU post queue interrupts*/
-	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
-		arcmsr_hbc_postqueue_isr(pACB);  /* messenger of "scsi commands" */
-	}
-	return 0;
+	host_interrupt_status = readl(&phbcmu->host_int_status) &
+		(ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR);
+	if (!host_interrupt_status)
+		return IRQ_NONE;
+	do {
+		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR)
+			arcmsr_hbaC_doorbell_isr(pACB);
+		/* MU post queue interrupts*/
+		if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR)
+			arcmsr_hbaC_postqueue_isr(pACB);
+		host_interrupt_status = readl(&phbcmu->host_int_status);
+	} while (host_interrupt_status & (ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR));
+	return IRQ_HANDLED;
 }
+
+static irqreturn_t arcmsr_hbaD_handle_isr(struct AdapterControlBlock *pACB)
+{
+	u32 host_interrupt_status;
+	struct MessageUnit_D  *pmu = pACB->pmuD;
+
+	host_interrupt_status = readl(pmu->host_int_status) &
+		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR);
+	if (!host_interrupt_status)
+		return IRQ_NONE;
+	do {
+		/* MU post queue interrupts*/
+		if (host_interrupt_status &
+			ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR)
+			arcmsr_hbaD_postqueue_isr(pACB);
+		if (host_interrupt_status &
+			ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR)
+			arcmsr_hbaD_doorbell_isr(pACB);
+		host_interrupt_status = readl(pmu->host_int_status);
+	} while (host_interrupt_status &
+		(ARCMSR_ARC1214_OUTBOUND_POSTQUEUE_ISR |
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ISR));
+	return IRQ_HANDLED;
+}
+
 static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
-	case ACB_ADAPTER_TYPE_A: {
-		if (arcmsr_handle_hba_isr(acb)) {
-			return IRQ_NONE;
-		}
-		}
+	case ACB_ADAPTER_TYPE_A:
+		return arcmsr_hbaA_handle_isr(acb);
 		break;
-
-	case ACB_ADAPTER_TYPE_B: {
-		if (arcmsr_handle_hbb_isr(acb)) {
-			return IRQ_NONE;
-		}
-		}
+	case ACB_ADAPTER_TYPE_B:
+		return arcmsr_hbaB_handle_isr(acb);
 		break;
-	 case ACB_ADAPTER_TYPE_C: {
-		if (arcmsr_handle_hbc_isr(acb)) {
-			return IRQ_NONE;
-		}
-		}
+	case ACB_ADAPTER_TYPE_C:
+		return arcmsr_hbaC_handle_isr(acb);
+	case ACB_ADAPTER_TYPE_D:
+		return arcmsr_hbaD_handle_isr(acb);
+	default:
+		return IRQ_NONE;
 	}
-	return IRQ_HANDLED;
 }
 
 static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
@@ -1695,296 +2201,273 @@
 	}
 }
 
-void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
+
+void arcmsr_clear_iop2drv_rqueue_buffer(struct AdapterControlBlock *acb)
 {
-	int32_t wqbuf_firstindex, wqbuf_lastindex;
-	uint8_t *pQbuffer;
-	struct QBUFFER __iomem *pwbuffer;
-	uint8_t __iomem *iop_data;
-	int32_t allxfer_len = 0;
-	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
-	iop_data = (uint8_t __iomem *)pwbuffer->data;
-	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
-		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
-		wqbuf_firstindex = acb->wqbuf_firstindex;
-		wqbuf_lastindex = acb->wqbuf_lastindex;
-		while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
-			pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
-			memcpy(iop_data, pQbuffer, 1);
-			wqbuf_firstindex++;
-			wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-			iop_data++;
-			allxfer_len++;
+	uint32_t	i;
+
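+	/*
+	 * Bounded drain: up to 15 rounds, each either re-arming the IOP read
+	 * after an overflow or discarding stale ring contents, with a 30 ms
+	 * pause in between, until the firmware stops producing data.
+	 */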
+	if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+		for (i = 0; i < 15; i++) {
+			if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
+				acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
+				acb->rqbuf_getIndex = 0;
+				acb->rqbuf_putIndex = 0;
+				arcmsr_iop_message_read(acb);
+				mdelay(30);
+			} else if (acb->rqbuf_getIndex !=
+				   acb->rqbuf_putIndex) {
+				acb->rqbuf_getIndex = 0;
+				acb->rqbuf_putIndex = 0;
+				mdelay(30);
+			} else
+				break;
 		}
-		acb->wqbuf_firstindex = wqbuf_firstindex;
-		pwbuffer->data_len = allxfer_len;
-		arcmsr_iop_message_wrote(acb);
 	}
 }
 
 static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
-					struct scsi_cmnd *cmd)
+		struct scsi_cmnd *cmd)
 {
-	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
-	int retvalue = 0, transfer_len = 0;
 	char *buffer;
+	unsigned short use_sg;
+	int retvalue = 0, transfer_len = 0;
+	unsigned long flags;
+	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
+	uint32_t controlcode = (uint32_t)cmd->cmnd[5] << 24 |
+		(uint32_t)cmd->cmnd[6] << 16 |
+		(uint32_t)cmd->cmnd[7] << 8 |
+		(uint32_t)cmd->cmnd[8];
 	struct scatterlist *sg;
-	uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
-						(uint32_t ) cmd->cmnd[6] << 16 |
-						(uint32_t ) cmd->cmnd[7] << 8  |
-						(uint32_t ) cmd->cmnd[8];
-						/* 4 bytes: Areca io control code */
+
+	use_sg = scsi_sg_count(cmd);
 	sg = scsi_sglist(cmd);
 	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
-	if (scsi_sg_count(cmd) > 1) {
+	if (use_sg > 1) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
 		goto message_out;
 	}
 	transfer_len += sg->length;
-
 	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
 		retvalue = ARCMSR_MESSAGE_FAIL;
+		pr_info("%s: ARCMSR_MESSAGE_FAIL!\n", __func__);
 		goto message_out;
 	}
-	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
-	switch(controlcode) {
-
+	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *)buffer;
+	switch (controlcode) {
 	case ARCMSR_MESSAGE_READ_RQBUFFER: {
 		unsigned char *ver_addr;
-		uint8_t *pQbuffer, *ptmpQbuffer;
-		int32_t allxfer_len = 0;
-
-		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		uint8_t *ptmpQbuffer;
+		uint32_t allxfer_len = 0;
+		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
 		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
+			pr_info("%s: memory not enough!\n", __func__);
 			goto message_out;
 		}
-				
 		ptmpQbuffer = ver_addr;
-		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
-			&& (allxfer_len < 1031)) {
-			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
-			memcpy(ptmpQbuffer, pQbuffer, 1);
-			acb->rqbuf_firstindex++;
-			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
-			ptmpQbuffer++;
-			allxfer_len++;
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+		if (acb->rqbuf_getIndex != acb->rqbuf_putIndex) {
+			unsigned int tail = acb->rqbuf_getIndex;
+			unsigned int head = acb->rqbuf_putIndex;
+			unsigned int cnt_to_end = CIRC_CNT_TO_END(head, tail, ARCMSR_MAX_QBUFFER);
+
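+			/*
+			 * CIRC_CNT()/CIRC_CNT_TO_END() from <linux/circ_buf.h>
+			 * give the bytes available and the contiguous run up
+			 * to the end of the ring, so the copy-out needs at
+			 * most two memcpy() calls instead of a byte loop.
+			 */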
+			allxfer_len = CIRC_CNT(head, tail, ARCMSR_MAX_QBUFFER);
+			if (allxfer_len > ARCMSR_API_DATA_BUFLEN)
+				allxfer_len = ARCMSR_API_DATA_BUFLEN;
+
+			if (allxfer_len <= cnt_to_end)
+				memcpy(ptmpQbuffer, acb->rqbuffer + tail, allxfer_len);
+			else {
+				memcpy(ptmpQbuffer, acb->rqbuffer + tail, cnt_to_end);
+				memcpy(ptmpQbuffer + cnt_to_end, acb->rqbuffer, allxfer_len - cnt_to_end);
+			}
+			acb->rqbuf_getIndex = (acb->rqbuf_getIndex + allxfer_len) % ARCMSR_MAX_QBUFFER;
 		}
+		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr,
+			allxfer_len);
 		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-
 			struct QBUFFER __iomem *prbuffer;
-			uint8_t __iomem *iop_data;
-			int32_t iop_len;
-
 			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
 			prbuffer = arcmsr_get_iop_rqbuffer(acb);
-			iop_data = prbuffer->data;
-			iop_len = readl(&prbuffer->data_len);
-			while (iop_len > 0) {
-				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
-				acb->rqbuf_lastindex++;
-				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-				iop_data++;
-				iop_len--;
-			}
-			arcmsr_iop_message_read(acb);
+			if (arcmsr_Read_iop_rqbuffer_data(acb, prbuffer) == 0)
+				acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
 		}
-		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
-		pcmdmessagefld->cmdmessage.Length = allxfer_len;
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
 		kfree(ver_addr);
-		}
+		pcmdmessagefld->cmdmessage.Length = allxfer_len;
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
 		unsigned char *ver_addr;
-		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
+		int32_t user_len, cnt2end;
 		uint8_t *pQbuffer, *ptmpuserbuffer;
-
-		ver_addr = kmalloc(1032, GFP_ATOMIC);
+		ver_addr = kmalloc(ARCMSR_API_DATA_BUFLEN, GFP_ATOMIC);
 		if (!ver_addr) {
 			retvalue = ARCMSR_MESSAGE_FAIL;
 			goto message_out;
 		}
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode = 
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode = 
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
 		ptmpuserbuffer = ver_addr;
 		user_len = pcmdmessagefld->cmdmessage.Length;
-		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
-		wqbuf_lastindex = acb->wqbuf_lastindex;
-		wqbuf_firstindex = acb->wqbuf_firstindex;
-		if (wqbuf_lastindex != wqbuf_firstindex) {
+		memcpy(ptmpuserbuffer,
+			pcmdmessagefld->messagedatabuffer, user_len);
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		if (acb->wqbuf_putIndex != acb->wqbuf_getIndex) {
 			struct SENSE_DATA *sensebuffer =
 				(struct SENSE_DATA *)cmd->sense_buffer;
-			arcmsr_post_ioctldata2iop(acb);
+			arcmsr_write_ioctldata2iop(acb);
 			/* has error report sensedata */
-			sensebuffer->ErrorCode = 0x70;
+			sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
 			sensebuffer->SenseKey = ILLEGAL_REQUEST;
 			sensebuffer->AdditionalSenseLength = 0x0A;
 			sensebuffer->AdditionalSenseCode = 0x20;
 			sensebuffer->Valid = 1;
 			retvalue = ARCMSR_MESSAGE_FAIL;
 		} else {
-			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
-				&(ARCMSR_MAX_QBUFFER - 1);
-			if (my_empty_len >= user_len) {
-				while (user_len > 0) {
-					pQbuffer =
-					&acb->wqbuffer[acb->wqbuf_lastindex];
-					memcpy(pQbuffer, ptmpuserbuffer, 1);
-					acb->wqbuf_lastindex++;
-					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
-					ptmpuserbuffer++;
-					user_len--;
-				}
-				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
-					acb->acb_flags &=
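+			/*
+			 * Wrap-aware copy into the circular wqbuffer: fill up
+			 * to the end of the ring first, then continue from the
+			 * start with whatever remains.
+			 */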
+			pQbuffer = &acb->wqbuffer[acb->wqbuf_putIndex];
+			cnt2end = ARCMSR_MAX_QBUFFER - acb->wqbuf_putIndex;
+			if (user_len > cnt2end) {
+				memcpy(pQbuffer, ptmpuserbuffer, cnt2end);
+				ptmpuserbuffer += cnt2end;
+				user_len -= cnt2end;
+				acb->wqbuf_putIndex = 0;
+				pQbuffer = acb->wqbuffer;
+			}
+			memcpy(pQbuffer, ptmpuserbuffer, user_len);
+			acb->wqbuf_putIndex += user_len;
+			acb->wqbuf_putIndex %= ARCMSR_MAX_QBUFFER;
+			if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
+				acb->acb_flags &=
 						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
-					arcmsr_post_ioctldata2iop(acb);
-				}
-			} else {
-				/* has error report sensedata */
-				struct SENSE_DATA *sensebuffer =
-					(struct SENSE_DATA *)cmd->sense_buffer;
-				sensebuffer->ErrorCode = 0x70;
-				sensebuffer->SenseKey = ILLEGAL_REQUEST;
-				sensebuffer->AdditionalSenseLength = 0x0A;
-				sensebuffer->AdditionalSenseCode = 0x20;
-				sensebuffer->Valid = 1;
-				retvalue = ARCMSR_MESSAGE_FAIL;
+				arcmsr_write_ioctldata2iop(acb);
 			}
-			}
-			kfree(ver_addr);
 		}
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		kfree(ver_addr);
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
 		uint8_t *pQbuffer = acb->rqbuffer;
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
-		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
-		acb->rqbuf_firstindex = 0;
-		acb->rqbuf_lastindex = 0;
-		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-		}
-		break;
 
+		arcmsr_clear_iop2drv_rqueue_buffer(acb);
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+		acb->rqbuf_getIndex = 0;
+		acb->rqbuf_putIndex = 0;
+		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
+		break;
+	}
 	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
 		uint8_t *pQbuffer = acb->wqbuffer;
-		if(acb->fw_flag == FW_DEADLOCK) {
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
-			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
-		acb->acb_flags |=
-			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
-				ACB_F_MESSAGE_WQBUFFER_READED);
-		acb->wqbuf_firstindex = 0;
-		acb->wqbuf_lastindex = 0;
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+			ACB_F_MESSAGE_WQBUFFER_READED);
+		acb->wqbuf_getIndex = 0;
+		acb->wqbuf_putIndex = 0;
 		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
-		}
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
 		uint8_t *pQbuffer;
-
-		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
-			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
-			arcmsr_iop_message_read(acb);
-		}
-		acb->acb_flags |=
-			(ACB_F_MESSAGE_WQBUFFER_CLEARED
-			| ACB_F_MESSAGE_RQBUFFER_CLEARED
-			| ACB_F_MESSAGE_WQBUFFER_READED);
-		acb->rqbuf_firstindex = 0;
-		acb->rqbuf_lastindex = 0;
-		acb->wqbuf_firstindex = 0;
-		acb->wqbuf_lastindex = 0;
+		arcmsr_clear_iop2drv_rqueue_buffer(acb);
+		spin_lock_irqsave(&acb->rqbuffer_lock, flags);
+		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
+		acb->rqbuf_getIndex = 0;
+		acb->rqbuf_putIndex = 0;
 		pQbuffer = acb->rqbuffer;
 		memset(pQbuffer, 0, sizeof(struct QBUFFER));
+		spin_unlock_irqrestore(&acb->rqbuffer_lock, flags);
+		spin_lock_irqsave(&acb->wqbuffer_lock, flags);
+		acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
+			ACB_F_MESSAGE_WQBUFFER_READED);
+		acb->wqbuf_getIndex = 0;
+		acb->wqbuf_putIndex = 0;
 		pQbuffer = acb->wqbuffer;
 		memset(pQbuffer, 0, sizeof(struct QBUFFER));
-		if(acb->fw_flag == FW_DEADLOCK) {
+		spin_unlock_irqrestore(&acb->wqbuffer_lock, flags);
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-		}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		break;
-
+	}
 	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
-		if(acb->fw_flag == FW_DEADLOCK) {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_3F;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_3F;
 		break;
-		}
+	}
 	case ARCMSR_MESSAGE_SAY_HELLO: {
 		int8_t *hello_string = "Hello! I am ARCMSR";
-		if(acb->fw_flag == FW_DEADLOCK) {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}else{
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_OK;
-		}
-		memcpy(pcmdmessagefld->messagedatabuffer, hello_string
-			, (int16_t)strlen(hello_string));
-		}
+				ARCMSR_MESSAGE_RETURNCODE_OK;
+		memcpy(pcmdmessagefld->messagedatabuffer,
+			hello_string, (int16_t)strlen(hello_string));
 		break;
-
-	case ARCMSR_MESSAGE_SAY_GOODBYE:
-		if(acb->fw_flag == FW_DEADLOCK) {
+	}
+	case ARCMSR_MESSAGE_SAY_GOODBYE: {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		arcmsr_iop_parking(acb);
 		break;
-
-	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE:
-		if(acb->fw_flag == FW_DEADLOCK) {
+	}
+	case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE: {
+		if (acb->fw_flag == FW_DEADLOCK)
 			pcmdmessagefld->cmdmessage.ReturnCode =
-			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
-		}
+				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
+		else
+			pcmdmessagefld->cmdmessage.ReturnCode =
+				ARCMSR_MESSAGE_RETURNCODE_OK;
 		arcmsr_flush_adapter_cache(acb);
 		break;
-
+	}
 	default:
 		retvalue = ARCMSR_MESSAGE_FAIL;
+		pr_info("%s: unknown controlcode!\n", __func__);
 	}
-	message_out:
-	sg = scsi_sglist(cmd);
-	kunmap_atomic(buffer - sg->offset);
+message_out:
+	if (use_sg) {
+		struct scatterlist *sg = scsi_sglist(cmd);
+		kunmap_atomic(buffer - sg->offset);
+	}
 	return retvalue;
 }
 
@@ -1999,7 +2482,7 @@
 		list_del_init(&ccb->list);
 	}else{
 		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
-		return 0;
+		return NULL;
 	}
 	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
 	return ccb;
@@ -2079,9 +2562,6 @@
 		arcmsr_handle_virtual_command(acb, cmd);
 		return 0;
 	}
-	if (atomic_read(&acb->ccboutstandingcount) >=
-			ARCMSR_MAX_OUTSTANDING_CMD)
-		return SCSI_MLQUEUE_HOST_BUSY;
 	ccb = arcmsr_get_freeccb(acb);
 	if (!ccb)
 		return SCSI_MLQUEUE_HOST_BUSY;
@@ -2096,7 +2576,7 @@
 
 static DEF_SCSI_QCMD(arcmsr_queue_command)
 
-static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaA_get_config(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	char *acb_firm_model = acb->firm_model;
@@ -2107,7 +2587,7 @@
 	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
 	int count;
 	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
-	if (!arcmsr_hba_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
 		return false;
@@ -2135,10 +2615,10 @@
 		iop_device_map++;
 		count--;
 	}
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n", 
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		acb->host->host_no,
-		acb->firm_version,
-		acb->firm_model);
+		acb->firm_model,
+		acb->firm_version);
 	acb->signature = readl(&reg->message_rwbuffer[0]);
 	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
 	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
@@ -2147,7 +2627,7 @@
 	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
 	return true;
 }
-static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
+static bool arcmsr_hbaB_get_config(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	struct pci_dev *pdev = acb->pdev;
@@ -2163,12 +2643,18 @@
 	char __iomem *iop_device_map;
 	/*firm_version,21,84-99*/
 	int count;
-	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
+
+	acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_B), 32);
+	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+			&dma_coherent_handle, GFP_KERNEL);
 	if (!dma_coherent){
-		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
+		printk(KERN_NOTICE
+			"arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
+			acb->host->host_no);
 		return false;
 	}
-	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
+	acb->dma_coherent_handle2 = dma_coherent_handle;
+	acb->dma_coherent2 = dma_coherent;
 	reg = (struct MessageUnit_B *)dma_coherent;
 	acb->pmuB = reg;
 	reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
@@ -2183,7 +2669,7 @@
 	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);	/*firm_version,21,84-99*/
 
 	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
-	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
 			miscellaneous data' timeout \n", acb->host->host_no);
 		return false;
@@ -2211,10 +2697,10 @@
 		count--;
 	}
 	
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		acb->host->host_no,
-		acb->firm_version,
-		acb->firm_model);
+		acb->firm_model,
+		acb->firm_version);
 
 	acb->signature = readl(&reg->message_rwbuffer[1]);
 	/*firm_signature,1,00-03*/
@@ -2231,14 +2717,14 @@
 	return true;
 }
 
-static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
+static bool arcmsr_hbaC_get_config(struct AdapterControlBlock *pACB)
 {
 	uint32_t intmask_org, Index, firmware_state = 0;
-	struct MessageUnit_C *reg = pACB->pmuC;
+	struct MessageUnit_C __iomem *reg = pACB->pmuC;
 	char *acb_firm_model = pACB->firm_model;
 	char *acb_firm_version = pACB->firm_version;
-	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]);    /*firm_model,15,60-67*/
-	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]);  /*firm_version,17,68-83*/
+	char __iomem *iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);    /*firm_model,15,60-67*/
+	char __iomem *iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);  /*firm_version,17,68-83*/
 	int count;
 	/* disable all outbound interrupt */
 	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
@@ -2277,10 +2763,10 @@
 		iop_firm_version++;
 		count--;
 	}
-	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
 		pACB->host->host_no,
-		pACB->firm_version,
-		pACB->firm_model);
+		pACB->firm_model,
+		pACB->firm_version);
 	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);   /*firm_request_len,1,04-07*/
 	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
 	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);    /*firm_sdram_size,3,12-15*/
@@ -2289,17 +2775,166 @@
 	/*all interrupt service will be enable at arcmsr_iop_init*/
 	return true;
 }
-static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
+
+static bool arcmsr_hbaD_get_config(struct AdapterControlBlock *acb)
 {
-	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
-		return arcmsr_get_hba_config(acb);
-	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
-		return arcmsr_get_hbb_config(acb);
-	else
-		return arcmsr_get_hbc_config(acb);
+	char *acb_firm_model = acb->firm_model;
+	char *acb_firm_version = acb->firm_version;
+	char *acb_device_map = acb->device_map;
+	char __iomem *iop_firm_model;
+	char __iomem *iop_firm_version;
+	char __iomem *iop_device_map;
+	u32 count;
+	struct MessageUnit_D *reg;
+	void *dma_coherent2;
+	dma_addr_t dma_coherent_handle2;
+	struct pci_dev *pdev = acb->pdev;
+
+	acb->roundup_ccbsize = roundup(sizeof(struct MessageUnit_D), 32);
+	dma_coherent2 = dma_alloc_coherent(&pdev->dev, acb->roundup_ccbsize,
+		&dma_coherent_handle2, GFP_KERNEL);
+	if (!dma_coherent2) {
+		pr_notice("DMA allocation failed...\n");
+		return false;
+	}
+	memset(dma_coherent2, 0, acb->roundup_ccbsize);
+	acb->dma_coherent_handle2 = dma_coherent_handle2;
+	acb->dma_coherent2 = dma_coherent2;
+	reg = (struct MessageUnit_D *)dma_coherent2;
+	acb->pmuD = reg;
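+	/*
+	 * As with type B, MessageUnit_D is allocated from coherent DMA
+	 * memory: it holds the driver-side post/done queues plus, as set up
+	 * below, a table of cached __iomem pointers into BAR0, rather than
+	 * overlaying the adapter's register file the way MessageUnit_A/C do.
+	 */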
+	reg->chip_id = acb->mem_base0 + ARCMSR_ARC1214_CHIP_ID;
+	reg->cpu_mem_config = acb->mem_base0 +
+		ARCMSR_ARC1214_CPU_MEMORY_CONFIGURATION;
+	reg->i2o_host_interrupt_mask = acb->mem_base0 +
+		ARCMSR_ARC1214_I2_HOST_INTERRUPT_MASK;
+	reg->sample_at_reset = acb->mem_base0 + ARCMSR_ARC1214_SAMPLE_RESET;
+	reg->reset_request = acb->mem_base0 + ARCMSR_ARC1214_RESET_REQUEST;
+	reg->host_int_status = acb->mem_base0 +
+		ARCMSR_ARC1214_MAIN_INTERRUPT_STATUS;
+	reg->pcief0_int_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_PCIE_F0_INTERRUPT_ENABLE;
+	reg->inbound_msgaddr0 = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_MESSAGE0;
+	reg->inbound_msgaddr1 = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_MESSAGE1;
+	reg->outbound_msgaddr0 = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_MESSAGE0;
+	reg->outbound_msgaddr1 = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_MESSAGE1;
+	reg->inbound_doorbell = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_DOORBELL;
+	reg->outbound_doorbell = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL;
+	reg->outbound_doorbell_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_DOORBELL_ENABLE;
+	reg->inboundlist_base_low = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_BASE_LOW;
+	reg->inboundlist_base_high = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_BASE_HIGH;
+	reg->inboundlist_write_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_INBOUND_LIST_WRITE_POINTER;
+	reg->outboundlist_base_low = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_BASE_LOW;
+	reg->outboundlist_base_high = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_BASE_HIGH;
+	reg->outboundlist_copy_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_COPY_POINTER;
+	reg->outboundlist_read_pointer = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_LIST_READ_POINTER;
+	reg->outboundlist_interrupt_cause = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_INTERRUPT_CAUSE;
+	reg->outboundlist_interrupt_enable = acb->mem_base0 +
+		ARCMSR_ARC1214_OUTBOUND_INTERRUPT_ENABLE;
+	reg->message_wbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_WBUFFER;
+	reg->message_rbuffer = acb->mem_base0 + ARCMSR_ARC1214_MESSAGE_RBUFFER;
+	reg->msgcode_rwbuffer = acb->mem_base0 +
+		ARCMSR_ARC1214_MESSAGE_RWBUFFER;
+	iop_firm_model = (char __iomem *)(&reg->msgcode_rwbuffer[15]);
+	iop_firm_version = (char __iomem *)(&reg->msgcode_rwbuffer[17]);
+	iop_device_map = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
+	if (readl(acb->pmuD->outbound_doorbell) &
+		ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE) {
+		writel(ARCMSR_ARC1214_IOP2DRV_MESSAGE_CMD_DONE,
+			acb->pmuD->outbound_doorbell);/*clear interrupt*/
+	}
+	/* post "get config" instruction */
+	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, reg->inbound_msgaddr0);
+	/* wait message ready */
+	if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+		pr_notice("arcmsr%d: wait 'get adapter firmware "
+			"miscellaneous data' timeout\n", acb->host->host_no);
+		dma_free_coherent(&acb->pdev->dev, acb->roundup_ccbsize,
+			acb->dma_coherent2, acb->dma_coherent_handle2);
+		return false;
+	}
+	count = 8;
+	while (count) {
+		*acb_firm_model = readb(iop_firm_model);
+		acb_firm_model++;
+		iop_firm_model++;
+		count--;
+	}
+	count = 16;
+	while (count) {
+		*acb_firm_version = readb(iop_firm_version);
+		acb_firm_version++;
+		iop_firm_version++;
+		count--;
+	}
+	count = 16;
+	while (count) {
+		*acb_device_map = readb(iop_device_map);
+		acb_device_map++;
+		iop_device_map++;
+		count--;
+	}
+	acb->signature = readl(&reg->msgcode_rwbuffer[1]);
+	/*firm_signature,1,00-03*/
+	acb->firm_request_len = readl(&reg->msgcode_rwbuffer[2]);
+	/*firm_request_len,1,04-07*/
+	acb->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[3]);
+	/*firm_numbers_queue,2,08-11*/
+	acb->firm_sdram_size = readl(&reg->msgcode_rwbuffer[4]);
+	/*firm_sdram_size,3,12-15*/
+	acb->firm_hd_channels = readl(&reg->msgcode_rwbuffer[5]);
+	/*firm_hd_channels,4,16-19*/
+	acb->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);
+	pr_notice("Areca RAID Controller%d: Model %s, F/W %s\n",
+		acb->host->host_no,
+		acb->firm_model,
+		acb->firm_version);
+	return true;
 }
 
-static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
+static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
+{
+	bool rtn = false;
+
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_A:
+		rtn = arcmsr_hbaA_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_B:
+		rtn = arcmsr_hbaB_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_C:
+		rtn = arcmsr_hbaC_get_config(acb);
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		rtn = arcmsr_hbaD_get_config(acb);
+		break;
+	default:
+		break;
+	}
+	if (acb->firm_numbers_queue > ARCMSR_MAX_OUTSTANDING_CMD)
+		acb->maxOutstanding = ARCMSR_MAX_OUTSTANDING_CMD;
+	else
+		acb->maxOutstanding = acb->firm_numbers_queue - 1;
+	acb->host->can_queue = acb->maxOutstanding;
+	return rtn;
+}
+
+static int arcmsr_hbaA_polling_ccbdone(struct AdapterControlBlock *acb,
 	struct CommandControlBlock *poll_ccb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
@@ -2328,7 +2963,7 @@
 		}
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2355,7 +2990,7 @@
 	return rtn;
 }
 
-static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
+static int arcmsr_hbaB_polling_ccbdone(struct AdapterControlBlock *acb,
 					struct CommandControlBlock *poll_ccb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
@@ -2371,7 +3006,8 @@
 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
 	while(1){
 		index = reg->doneq_index;
-		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
+		flag_ccb = reg->done_qbuffer[index];
+		if (flag_ccb == 0) {
 			if (poll_ccb_done){
 				rtn = SUCCESS;
 				break;
@@ -2384,7 +3020,7 @@
 				goto polling_hbb_ccb_retry;
 			}
 		}
-		writel(0, &reg->done_qbuffer[index]);
+		reg->done_qbuffer[index] = 0;
 		index++;
 		/*if last index number set it to 0 */
 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
@@ -2392,7 +3028,7 @@
 		/* check if command done with no error*/
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
+		poll_ccb_done |= (ccb == poll_ccb) ? 1 : 0;
 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
@@ -2419,9 +3055,10 @@
 	return rtn;
 }
 
-static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
+static int arcmsr_hbaC_polling_ccbdone(struct AdapterControlBlock *acb,
+		struct CommandControlBlock *poll_ccb)
 {
-	struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	uint32_t flag_ccb, ccb_cdb_phy;
 	struct ARCMSR_CDB *arcmsr_cdb;
 	bool error;
@@ -2448,7 +3085,7 @@
 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
 		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
-		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
+		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
 		/* check if command done with no error */
 		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
 			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
@@ -2475,6 +3112,81 @@
 	}
 	return rtn;
 }
+
+static int arcmsr_hbaD_polling_ccbdone(struct AdapterControlBlock *acb,
+				struct CommandControlBlock *poll_ccb)
+{
+	bool error;
+	uint32_t poll_ccb_done = 0, poll_count = 0, flag_ccb, ccb_cdb_phy;
+	int rtn, doneq_index, index_stripped, outbound_write_pointer, toggle;
+	unsigned long flags;
+	struct ARCMSR_CDB *arcmsr_cdb;
+	struct CommandControlBlock *pCCB;
+	struct MessageUnit_D *pmu = acb->pmuD;
+
+polling_hbaD_ccb_retry:
+	poll_count++;
+	while (1) {
+		spin_lock_irqsave(&acb->doneq_lock, flags);
+		outbound_write_pointer = pmu->done_qbuffer[0].addressLow + 1;
+		doneq_index = pmu->doneq_index;
+		if ((outbound_write_pointer & 0xFFF) == (doneq_index & 0xFFF)) {
+			spin_unlock_irqrestore(&acb->doneq_lock, flags);
+			if (poll_ccb_done) {
+				rtn = SUCCESS;
+				break;
+			} else {
+				msleep(25);
+				if (poll_count > 40) {
+					rtn = FAILED;
+					break;
+				}
+				goto polling_hbaD_ccb_retry;
+			}
+		}
+		toggle = doneq_index & 0x4000;
+		index_stripped = (doneq_index & 0xFFF) + 1;
+		index_stripped %= ARCMSR_MAX_ARC1214_DONEQUEUE;
+		pmu->doneq_index = index_stripped ? (index_stripped | toggle) :
+				((toggle ^ 0x4000) + 1);
+		doneq_index = pmu->doneq_index;
+		spin_unlock_irqrestore(&acb->doneq_lock, flags);
+		flag_ccb = pmu->done_qbuffer[doneq_index & 0xFFF].addressLow;
+		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
+		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
+			ccb_cdb_phy);
+		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock,
+			arcmsr_cdb);
+		poll_ccb_done |= (pCCB == poll_ccb) ? 1 : 0;
+		if ((pCCB->acb != acb) ||
+			(pCCB->startdone != ARCMSR_CCB_START)) {
+			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
+				pr_notice("arcmsr%d: scsi id = %d "
+					"lun = %d ccb = '0x%p' poll command "
+					"abort successfully\n"
+					, acb->host->host_no
+					, pCCB->pcmd->device->id
+					, (u32)pCCB->pcmd->device->lun
+					, pCCB);
+				pCCB->pcmd->result = DID_ABORT << 16;
+				arcmsr_ccb_complete(pCCB);
+				continue;
+			}
+			pr_notice("arcmsr%d: polling an illegal "
+				"ccb command done ccb = '0x%p' "
+				"ccboutstandingcount = %d\n"
+				, acb->host->host_no
+				, pCCB
+				, atomic_read(&acb->ccboutstandingcount));
+			continue;
+		}
+		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1)
+			? true : false;
+		arcmsr_report_ccb_state(acb, pCCB, error);
+	}
+	return rtn;
+}
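
The done-queue index above keeps the ring position in the low 12 bits and
a wrap toggle in bit 14. A minimal standalone sketch of that arithmetic
(the queue-depth constant comes from the driver header; the helper name is
illustrative only):

	/* Advance a type-D done-queue index: the low 12 bits hold the
	 * ring position, bit 14 (0x4000) flips on every wrap so stale
	 * entries from the previous lap can be told apart. */
	static u32 arc1214_next_doneq_index(u32 idx)
	{
		u32 toggle = idx & 0x4000;
		u32 pos = (idx & 0xFFF) + 1;

		pos %= ARCMSR_MAX_ARC1214_DONEQUEUE;
		/* on wrap: flip the toggle and restart at slot 1 */
		return pos ? (pos | toggle) : ((toggle ^ 0x4000) + 1);
	}
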
+
 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
 					struct CommandControlBlock *poll_ccb)
 {
@@ -2482,17 +3194,21 @@
 	switch (acb->adapter_type) {
 
 	case ACB_ADAPTER_TYPE_A: {
-		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaA_polling_ccbdone(acb, poll_ccb);
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaB_polling_ccbdone(acb, poll_ccb);
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
+		rtn = arcmsr_hbaC_polling_ccbdone(acb, poll_ccb);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		rtn = arcmsr_hbaD_polling_ccbdone(acb, poll_ccb);
+		break;
 	}
 	return rtn;
 }
@@ -2500,6 +3216,7 @@
 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
 {
 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
+	dma_addr_t dma_coherent_handle;
 
 	/*
 	********************************************************************
@@ -2507,8 +3224,17 @@
 	** if freeccb.HighPart is not zero
 	********************************************************************
 	*/
-	cdb_phyaddr = lower_32_bits(acb->dma_coherent_handle);
-	cdb_phyaddr_hi32 = upper_32_bits(acb->dma_coherent_handle);
+	switch (acb->adapter_type) {
+	case ACB_ADAPTER_TYPE_B:
+	case ACB_ADAPTER_TYPE_D:
+		dma_coherent_handle = acb->dma_coherent_handle2;
+		break;
+	default:
+		dma_coherent_handle = acb->dma_coherent_handle;
+		break;
+	}
+	cdb_phyaddr = lower_32_bits(dma_coherent_handle);
+	cdb_phyaddr_hi32 = upper_32_bits(dma_coherent_handle);
 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
 	/*
 	***********************************************************************
@@ -2520,65 +3246,62 @@
 	case ACB_ADAPTER_TYPE_A: {
 		if (cdb_phyaddr_hi32 != 0) {
 			struct MessageUnit_A __iomem *reg = acb->pmuA;
-			uint32_t intmask_org;
-			intmask_org = arcmsr_disable_outbound_ints(acb);
 			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
 						&reg->message_rwbuffer[0]);
 			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
 			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
 							&reg->inbound_msgaddr0);
-			if (!arcmsr_hba_wait_msgint_ready(acb)) {
+			if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 			printk(KERN_NOTICE "arcmsr%d: set ccb high "
 				"part physical address timeout\n",
 				acb->host->host_no);
 				return 1;
 			}
-			arcmsr_enable_outbound_ints(acb, intmask_org);
 		}
 		}
 		break;
 
 	case ACB_ADAPTER_TYPE_B: {
-		unsigned long post_queue_phyaddr;
 		uint32_t __iomem *rwbuffer;
 
 		struct MessageUnit_B *reg = acb->pmuB;
-		uint32_t intmask_org;
-		intmask_org = arcmsr_disable_outbound_ints(acb);
 		reg->postq_index = 0;
 		reg->doneq_index = 0;
 		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
-		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 			printk(KERN_NOTICE "arcmsr%d: cannot set driver mode\n", \
 				acb->host->host_no);
 			return 1;
 		}
-		post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
 		rwbuffer = reg->message_rwbuffer;
 		/* driver "set config" signature */
 		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
 		/* normal should be zero */
 		writel(cdb_phyaddr_hi32, rwbuffer++);
 		/* postQ size (256 + 8)*4	 */
-		writel(post_queue_phyaddr, rwbuffer++);
+		writel(cdb_phyaddr, rwbuffer++);
 		/* doneQ size (256 + 8)*4	 */
-		writel(post_queue_phyaddr + 1056, rwbuffer++);
+		writel(cdb_phyaddr + 1056, rwbuffer++);
 		/* ccb maxQ size must be --> [(256 + 8)*4]*/
 		writel(1056, rwbuffer);
 
 		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
-		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
 				"timeout\n", acb->host->host_no);
 			return 1;
 		}
-		arcmsr_hbb_enable_driver_mode(acb);
-		arcmsr_enable_outbound_ints(acb, intmask_org);
+		writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
+		if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
+			pr_err("arcmsr%d: can't set driver mode.\n",
+				acb->host->host_no);
+			return 1;
+		}
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
 		if (cdb_phyaddr_hi32 != 0) {
-			struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+			struct MessageUnit_C __iomem *reg = acb->pmuC;
 
 			printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n",
 					acb->adapter_index, cdb_phyaddr_hi32);
@@ -2586,13 +3309,34 @@
 			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
 			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
 			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
-			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
+			if (!arcmsr_hbaC_wait_msgint_ready(acb)) {
 				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' "
 					"timeout\n", acb->host->host_no);
 				return 1;
 			}
 		}
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		uint32_t __iomem *rwbuffer;
+		struct MessageUnit_D *reg = acb->pmuD;
+		reg->postq_index = 0;
+		reg->doneq_index = 0;
+		rwbuffer = reg->msgcode_rwbuffer;
+		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
+		writel(cdb_phyaddr_hi32, rwbuffer++);
+		writel(cdb_phyaddr, rwbuffer++);
+		writel(cdb_phyaddr + (ARCMSR_MAX_ARC1214_POSTQUEUE *
+			sizeof(struct InBound_SRB)), rwbuffer++);
+		writel(0x100, rwbuffer);
+		writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, reg->inbound_msgaddr0);
+		if (!arcmsr_hbaD_wait_msgint_ready(acb)) {
+			pr_notice("arcmsr%d: 'set command Q window' timeout\n",
+				acb->host->host_no);
+			return 1;
+		}
+		}
+		break;
 	}
 	return 0;
 }
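
The switch above selects which DMA area the controller must be told about,
then splits the 64-bit handle for the two 32-bit message-unit writes. A
hedged sketch of the split (the wrapper name is illustrative;
lower_32_bits()/upper_32_bits() are the stock helpers from <linux/kernel.h>):

	/* Split a 64-bit DMA handle into the two 32-bit halves the
	 * message unit expects; the high half is 0 for 32-bit DMA. */
	static void arcmsr_split_dma_handle(dma_addr_t handle,
		uint32_t *lo, uint32_t *hi)
	{
		*lo = lower_32_bits(handle);	/* bits 31:0 */
		*hi = upper_32_bits(handle);	/* bits 63:32 */
	}
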
@@ -2619,15 +3363,24 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
 		do {
 			firmware_state = readl(&reg->outbound_msgaddr1);
 		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
 		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		do {
+			firmware_state = readl(reg->outbound_msgaddr1);
+		} while ((firmware_state &
+			ARCMSR_ARC1214_MESSAGE_FIRMWARE_OK) == 0);
+		}
+		break;
 	}
 }
 
-static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_request_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
@@ -2649,9 +3402,9 @@
 	return;
 }
 
-static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_request_device_map(struct AdapterControlBlock *acb)
 {
-	struct MessageUnit_B __iomem *reg = acb->pmuB;
+	struct MessageUnit_B *reg = acb->pmuB;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
 		return;
@@ -2671,7 +3424,7 @@
 	return;
 }
 
-static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
+static void arcmsr_hbaC_request_device_map(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_C __iomem *reg = acb->pmuC;
 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
@@ -2694,69 +3447,119 @@
 	return;
 }
 
+static void arcmsr_hbaD_request_device_map(struct AdapterControlBlock *acb)
+{
+	struct MessageUnit_D *reg = acb->pmuD;
+
+	if (unlikely(atomic_read(&acb->rq_map_token) == 0) ||
+		((acb->acb_flags & ACB_F_BUS_RESET) != 0) ||
+		((acb->acb_flags & ACB_F_ABORT) != 0)) {
+		mod_timer(&acb->eternal_timer,
+			jiffies + msecs_to_jiffies(6 * HZ));
+	} else {
+		acb->fw_flag = FW_NORMAL;
+		if (atomic_read(&acb->ante_token_value) ==
+			atomic_read(&acb->rq_map_token)) {
+			atomic_set(&acb->rq_map_token, 16);
+		}
+		atomic_set(&acb->ante_token_value,
+			atomic_read(&acb->rq_map_token));
+		if (atomic_dec_and_test(&acb->rq_map_token)) {
+			mod_timer(&acb->eternal_timer, jiffies +
+				msecs_to_jiffies(6 * HZ));
+			return;
+		}
+		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG,
+			reg->inbound_msgaddr0);
+		mod_timer(&acb->eternal_timer, jiffies +
+			msecs_to_jiffies(6 * HZ));
+	}
+}
+
 static void arcmsr_request_device_map(unsigned long pacb)
 {
 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
 	switch (acb->adapter_type) {
 		case ACB_ADAPTER_TYPE_A: {
-			arcmsr_request_hba_device_map(acb);
+			arcmsr_hbaA_request_device_map(acb);
 		}
 		break;
 		case ACB_ADAPTER_TYPE_B: {
-			arcmsr_request_hbb_device_map(acb);
+			arcmsr_hbaB_request_device_map(acb);
 		}
 		break;
 		case ACB_ADAPTER_TYPE_C: {
-			arcmsr_request_hbc_device_map(acb);
+			arcmsr_hbaC_request_device_map(acb);
 		}
+		break;
+		case ACB_ADAPTER_TYPE_D:
+			arcmsr_hbaD_request_device_map(acb);
+		break;
 	}
 }
 
-static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaA_start_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_A __iomem *reg = acb->pmuA;
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
-	if (!arcmsr_hba_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaA_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
 				"rebuild' timeout\n", acb->host->host_no);
 	}
 }
 
-static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
+static void arcmsr_hbaB_start_bgrb(struct AdapterControlBlock *acb)
 {
 	struct MessageUnit_B *reg = acb->pmuB;
 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
-	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+	if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
 				"rebuild' timeout\n", acb->host->host_no);
 	}
 }
 
-static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
+static void arcmsr_hbaC_start_bgrb(struct AdapterControlBlock *pACB)
 {
-	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
+	struct MessageUnit_C __iomem *phbcmu = pACB->pmuC;
 	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
-	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
+	if (!arcmsr_hbaC_wait_msgint_ready(pACB)) {
 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background "
 				"rebuild' timeout\n", pACB->host->host_no);
 	}
 	return;
 }
+
+static void arcmsr_hbaD_start_bgrb(struct AdapterControlBlock *pACB)
+{
+	struct MessageUnit_D *pmu = pACB->pmuD;
+
+	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
+	writel(ARCMSR_INBOUND_MESG0_START_BGRB, pmu->inbound_msgaddr0);
+	if (!arcmsr_hbaD_wait_msgint_ready(pACB)) {
+		pr_notice("arcmsr%d: wait 'start adapter "
+			"background rebuild' timeout\n", pACB->host->host_no);
+	}
+}
+
 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
 {
 	switch (acb->adapter_type) {
 	case ACB_ADAPTER_TYPE_A:
-		arcmsr_start_hba_bgrb(acb);
+		arcmsr_hbaA_start_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_B:
-		arcmsr_start_hbb_bgrb(acb);
+		arcmsr_hbaB_start_bgrb(acb);
 		break;
 	case ACB_ADAPTER_TYPE_C:
-		arcmsr_start_hbc_bgrb(acb);
+		arcmsr_hbaC_start_bgrb(acb);
+		break;
+	case ACB_ADAPTER_TYPE_D:
+		arcmsr_hbaD_start_bgrb(acb);
+		break;
 	}
 }
 
@@ -2783,13 +3586,48 @@
 		}
 		break;
 	case ACB_ADAPTER_TYPE_C: {
-		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
-		uint32_t outbound_doorbell;
+		struct MessageUnit_C __iomem *reg = acb->pmuC;
+		uint32_t outbound_doorbell, i;
 		/* empty doorbell Qbuffer if door bell ringed */
 		outbound_doorbell = readl(&reg->outbound_doorbell);
 		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
 		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+		for (i = 0; i < 200; i++) {
+			msleep(20);
+			outbound_doorbell = readl(&reg->outbound_doorbell);
+			if (outbound_doorbell &
+				ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
+				writel(outbound_doorbell,
+					&reg->outbound_doorbell_clear);
+				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK,
+					&reg->inbound_doorbell);
+			} else
+				break;
 		}
+		}
+		break;
+	case ACB_ADAPTER_TYPE_D: {
+		struct MessageUnit_D *reg = acb->pmuD;
+		uint32_t outbound_doorbell, i;
+		/* empty doorbell Qbuffer if door bell ringed */
+		outbound_doorbell = readl(reg->outbound_doorbell);
+		writel(outbound_doorbell, reg->outbound_doorbell);
+		writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+			reg->inbound_doorbell);
+		for (i = 0; i < 200; i++) {
+			msleep(20);
+			outbound_doorbell = readl(reg->outbound_doorbell);
+			if (outbound_doorbell &
+				ARCMSR_ARC1214_IOP2DRV_DATA_WRITE_OK) {
+				writel(outbound_doorbell,
+					reg->outbound_doorbell);
+				writel(ARCMSR_ARC1214_DRV2IOP_DATA_OUT_READ,
+					reg->inbound_doorbell);
+			} else
+				break;
+		}
+		}
+		break;
 	}
 }
 
@@ -2802,7 +3640,7 @@
 		{
 			struct MessageUnit_B *reg = acb->pmuB;
 			writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
-			if (!arcmsr_hbb_wait_msgint_ready(acb)) {
+			if (!arcmsr_hbaB_wait_msgint_ready(acb)) {
 				printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
 				return;
 			}
@@ -2820,6 +3658,7 @@
 	int i, count = 0;
 	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
 	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
+	struct MessageUnit_D *pmuD = acb->pmuD;
 
 	/* backup pci config data */
 	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
@@ -2840,6 +3679,8 @@
 			writel(0xD, &pmuC->write_sequence);
 		} while (((readl(&pmuC->host_diagnostic) & ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
 		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
+	} else if (acb->dev_id == 0x1214) {
+		writel(0x20, pmuD->reset_request);
 	} else {
 		pci_write_config_byte(acb->pdev, 0x84, 0x20);
 	}
@@ -3016,9 +3857,7 @@
 				arcmsr_get_firmware_spec(acb);
 				arcmsr_start_adapter_bgrb(acb);
 				/* clear Qbuffer if door bell ringed */
-				outbound_doorbell = readl(&reg->outbound_doorbell);
-				writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
-				writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
+				arcmsr_clear_doorbell_queue_buffer(acb);
 				/* enable outbound Post Queue,outbound doorbell Interrupt */
 				arcmsr_enable_outbound_ints(acb, intmask_org);
 				atomic_set(&acb->rq_map_token, 16);
@@ -3038,6 +3877,66 @@
 			}
 			break;
 		}
+		case ACB_ADAPTER_TYPE_D: {
+			if (acb->acb_flags & ACB_F_BUS_RESET) {
+				long timeout;
+				pr_notice("arcmsr: there is a bus reset"
+					" eh proceeding.......\n");
+				timeout = wait_event_timeout(wait_q, (acb->acb_flags
+					& ACB_F_BUS_RESET) == 0, 220 * HZ);
+				if (timeout)
+					return SUCCESS;
+			}
+			acb->acb_flags |= ACB_F_BUS_RESET;
+			if (!arcmsr_iop_reset(acb)) {
+				struct MessageUnit_D *reg;
+				reg = acb->pmuD;
+				arcmsr_hardware_reset(acb);
+				acb->acb_flags &= ~ACB_F_IOP_INITED;
+			nap:
+				ssleep(ARCMSR_SLEEPTIME);
+				if ((readl(reg->sample_at_reset) & 0x80) != 0) {
+					pr_err("arcmsr%d: waiting for "
+						"hw bus reset return, retry=%d\n",
+						acb->host->host_no, retry_count);
+					if (retry_count > ARCMSR_RETRYCOUNT) {
+						acb->fw_flag = FW_DEADLOCK;
+						pr_err("arcmsr%d: waiting for hw bus"
+							" reset return, "
+							"RETRY TERMINATED!!\n",
+							acb->host->host_no);
+						return FAILED;
+					}
+					retry_count++;
+					goto nap;
+				}
+				acb->acb_flags |= ACB_F_IOP_INITED;
+				/* disable all outbound interrupt */
+				intmask_org = arcmsr_disable_outbound_ints(acb);
+				arcmsr_get_firmware_spec(acb);
+				arcmsr_start_adapter_bgrb(acb);
+				arcmsr_clear_doorbell_queue_buffer(acb);
+				arcmsr_enable_outbound_ints(acb, intmask_org);
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer,
+					jiffies + msecs_to_jiffies(6 * HZ));
+				acb->acb_flags &= ~ACB_F_BUS_RESET;
+				rtn = SUCCESS;
+				pr_err("arcmsr: scsi bus reset "
+					"eh returns with success\n");
+			} else {
+				acb->acb_flags &= ~ACB_F_BUS_RESET;
+				atomic_set(&acb->rq_map_token, 16);
+				atomic_set(&acb->ante_token_value, 16);
+				acb->fw_flag = FW_NORMAL;
+				mod_timer(&acb->eternal_timer,
+					jiffies + msecs_to_jiffies(6 * HZ));
+				rtn = SUCCESS;
+			}
+			break;
+		}
 	}
 	return rtn;
 }
@@ -3056,8 +3955,10 @@
 		(struct AdapterControlBlock *)cmd->device->host->hostdata;
 	int i = 0;
 	int rtn = FAILED;
+	uint32_t intmask_org;
+
 	printk(KERN_NOTICE
-		"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
+		"arcmsr%d: abort device command of scsi id = %d lun = %d\n",
 		acb->host->host_no, cmd->device->id, (u32)cmd->device->lun);
 	acb->acb_flags |= ACB_F_ABORT;
 	acb->num_aborts++;
@@ -3067,9 +3968,12 @@
 	** we need to handle it as soon as possible and exit
 	************************************************
 	*/
-	if (!atomic_read(&acb->ccboutstandingcount))
+	if (!atomic_read(&acb->ccboutstandingcount)) {
+		acb->acb_flags &= ~ACB_F_ABORT;
 		return rtn;
+	}
 
+	intmask_org = arcmsr_disable_outbound_ints(acb);
 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
 		struct CommandControlBlock *ccb = acb->pccb_pool[i];
 		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
@@ -3079,6 +3983,7 @@
 		}
 	}
 	acb->acb_flags &= ~ACB_F_ABORT;
+	arcmsr_enable_outbound_ints(acb, intmask_org);
 	return rtn;
 }
 
@@ -3108,19 +4013,20 @@
 	case PCI_DEVICE_ID_ARECA_1280:
 		type = "SATA";
 		break;
+	case PCI_DEVICE_ID_ARECA_1214:
 	case PCI_DEVICE_ID_ARECA_1380:
 	case PCI_DEVICE_ID_ARECA_1381:
 	case PCI_DEVICE_ID_ARECA_1680:
 	case PCI_DEVICE_ID_ARECA_1681:
 	case PCI_DEVICE_ID_ARECA_1880:
-		type = "SAS";
+		type = "SAS/SATA";
 		break;
 	default:
-		type = "X-TYPE";
+		type = "unknown";
+		raid6 = 0;
 		break;
 	}
-	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
-			type, raid6 ? "( RAID6 capable)" : "",
-			ARCMSR_DRIVER_VERSION);
+	sprintf(buf, "Areca %s RAID Controller %s\narcmsr version %s\n",
+		type, raid6 ? "(RAID6 capable)" : "", ARCMSR_DRIVER_VERSION);
 	return buf;
 }
diff --git a/drivers/scsi/be2iscsi/be.h b/drivers/scsi/be2iscsi/be.h
index 860f527..81e83a6 100644
--- a/drivers/scsi/be2iscsi/be.h
+++ b/drivers/scsi/be2iscsi/be.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 1432ed5..80d97f3 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -275,6 +275,19 @@
 		  ASYNC_EVENT_CODE_LINK_STATE);
 }
 
+static bool is_iscsi_evt(u32 trailer)
+{
+	return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
+		  ASYNC_TRAILER_EVENT_CODE_MASK) ==
+		  ASYNC_EVENT_CODE_ISCSI;
+}
+
+static int iscsi_evt_type(u32 trailer)
+{
+	return (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
+		 ASYNC_TRAILER_EVENT_TYPE_MASK;
+}
+
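
These two helpers decode the async-completion trailer word; per the shifts
and masks added to be_cmds.h later in this same patch, the event code sits
in bits 8-15 and the event type in bits 16-23 (only the low nibble is
meaningful). The same decode in isolation, as a sketch (the wrapper name is
illustrative):

	/* Pull the event code and type out of a completion trailer. */
	static void decode_async_trailer(u32 trailer, u8 *code, u8 *type)
	{
		*code = (trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
				ASYNC_TRAILER_EVENT_CODE_MASK; /* 0x4 == iSCSI */
		*type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
				ASYNC_TRAILER_EVENT_TYPE_MASK;
	}
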
 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
 {
 	if (compl->flags != 0) {
@@ -438,7 +451,7 @@
 	} else if ((evt->port_link_status & ASYNC_EVENT_LINK_UP) ||
 		    ((evt->port_link_status & ASYNC_EVENT_LOGICAL) &&
 		     (evt->port_fault == BEISCSI_PHY_LINK_FAULT_NONE))) {
-		phba->state = BE_ADAPTER_LINK_UP;
+		phba->state = BE_ADAPTER_LINK_UP | BE_ADAPTER_CHECK_BOOT;
 
 		beiscsi_log(phba, KERN_ERR,
 			    BEISCSI_LOG_CONFIG | BEISCSI_LOG_INIT,
@@ -461,7 +474,28 @@
 				/* Interpret compl as a async link evt */
 				beiscsi_async_link_state_process(phba,
 				   (struct be_async_event_link_state *) compl);
-			else
+			else if (is_iscsi_evt(compl->flags)) {
+				switch (iscsi_evt_type(compl->flags)) {
+				case ASYNC_EVENT_NEW_ISCSI_TGT_DISC:
+				case ASYNC_EVENT_NEW_ISCSI_CONN:
+				case ASYNC_EVENT_NEW_TCP_CONN:
+					phba->state |= BE_ADAPTER_CHECK_BOOT;
+					beiscsi_log(phba, KERN_ERR,
+						    BEISCSI_LOG_CONFIG |
+						    BEISCSI_LOG_MBOX,
+						    "BC_%d : Async iscsi Event,"
+						    " flags handled = 0x%08x\n",
+						    compl->flags);
+					break;
+				default:
+					beiscsi_log(phba, KERN_ERR,
+						    BEISCSI_LOG_CONFIG |
+						    BEISCSI_LOG_MBOX,
+						    "BC_%d : Unsupported Async"
+						    " Event, flags = 0x%08x\n",
+						    compl->flags);
+				}
+			} else
 				beiscsi_log(phba, KERN_ERR,
 					    BEISCSI_LOG_CONFIG |
 					    BEISCSI_LOG_MBOX,
diff --git a/drivers/scsi/be2iscsi/be_cmds.h b/drivers/scsi/be2iscsi/be_cmds.h
index cc7405c..98897434 100644
--- a/drivers/scsi/be2iscsi/be_cmds.h
+++ b/drivers/scsi/be2iscsi/be_cmds.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -26,9 +26,9 @@
  * The commands are serviced by the ARM processor in the OneConnect's MPU.
  */
 struct be_sge {
-	u32 pa_lo;
-	u32 pa_hi;
-	u32 len;
+	__le32 pa_lo;
+	__le32 pa_hi;
+	__le32 len;
 };
 
 #define MCC_WRB_SGE_CNT_SHIFT 3	/* bits 3 - 7 of dword 0 */
@@ -118,6 +118,14 @@
 #define ASYNC_TRAILER_EVENT_CODE_SHIFT	8	/* bits 8 - 15 */
 #define ASYNC_TRAILER_EVENT_CODE_MASK	0xFF
 #define ASYNC_EVENT_CODE_LINK_STATE	0x1
+#define ASYNC_EVENT_CODE_ISCSI		0x4
+
+#define ASYNC_TRAILER_EVENT_TYPE_SHIFT	16	/* bits 16 - 23 */
+#define ASYNC_TRAILER_EVENT_TYPE_MASK	0xF
+#define ASYNC_EVENT_NEW_ISCSI_TGT_DISC	0x4
+#define ASYNC_EVENT_NEW_ISCSI_CONN	0x5
+#define ASYNC_EVENT_NEW_TCP_CONN	0x7
+
 struct be_async_event_trailer {
 	u32 code;
 };
@@ -624,11 +632,11 @@
 /******************** Modify EQ Delay *******************/
 struct be_cmd_req_modify_eq_delay {
 	struct be_cmd_req_hdr hdr;
-	u32 num_eq;
+	__le32 num_eq;
 	struct {
-		u32 eq_id;
-		u32 phase;
-		u32 delay_multiplier;
+		__le32 eq_id;
+		__le32 phase;
+		__le32 delay_multiplier;
 	} delay[MAX_CPUS];
 } __packed;
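
Retyping these wire-format fields as __le32 lets sparse flag any store that
skips the endianness conversion. A hedged sketch of a conforming fill (the
helper name is illustrative; cpu_to_le32() and lower/upper_32_bits() are
stock kernel helpers):

	/* Populate a wire-format SGE; every store goes through
	 * cpu_to_le32() so big-endian hosts emit the same bytes. */
	static void be_fill_sge(struct be_sge *sge, dma_addr_t pa, u32 len)
	{
		sge->pa_lo = cpu_to_le32(lower_32_bits(pa));
		sge->pa_hi = cpu_to_le32(upper_32_bits(pa));
		sge->len   = cpu_to_le32(len);
	}
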
 
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 8616281..b7391a3 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -1274,6 +1274,31 @@
 }
 
 /**
+ * beiscsi_flush_cq()- Flush the CQs created.
+ * @phba: ptr to device private structure.
+ *
+ * Before the connection resources are freed, flush
+ * all the CQ entries.
+ **/
+static void beiscsi_flush_cq(struct beiscsi_hba *phba)
+{
+	uint16_t i;
+	struct be_eq_obj *pbe_eq;
+	struct hwi_controller *phwi_ctrlr;
+	struct hwi_context_memory *phwi_context;
+
+	phwi_ctrlr = phba->phwi_ctrlr;
+	phwi_context = phwi_ctrlr->phwi_ctxt;
+
+	for (i = 0; i < phba->num_cpus; i++) {
+		pbe_eq = &phwi_context->be_eq[i];
+		blk_iopoll_disable(&pbe_eq->iopoll);
+		beiscsi_process_cq(pbe_eq);
+		blk_iopoll_enable(&pbe_eq->iopoll);
+	}
+}
+
+/**
  * beiscsi_close_conn - Upload the  connection
  * @ep: The iscsi endpoint
  * @flag: The type of connection closure
@@ -1294,6 +1319,10 @@
 	}
 
 	ret = beiscsi_mccq_compl(phba, tag, NULL, NULL);
+
+	/* Flush the CQ entries */
+	beiscsi_flush_cq(phba);
+
 	return ret;
 }
 
diff --git a/drivers/scsi/be2iscsi/be_iscsi.h b/drivers/scsi/be2iscsi/be_iscsi.h
index 31ddc84..e0b3b2d 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.h
+++ b/drivers/scsi/be2iscsi/be_iscsi.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 915c26b..30d74a0 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -2068,7 +2068,7 @@
  * return
  *     Number of Completion Entries processed.
  **/
-static unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq)
 {
 	struct be_queue_info *cq;
 	struct sol_cqe *sol;
@@ -2110,6 +2110,18 @@
 
 		cri_index = BE_GET_CRI_FROM_CID(cid);
 		ep = phba->ep_array[cri_index];
+
+		if (ep == NULL) {
+			/* connection has already been freed
+			 * just move on to next one
+			 */
+			beiscsi_log(phba, KERN_WARNING,
+				    BEISCSI_LOG_INIT,
+				    "BM_%d : proc cqe of disconn ep: cid %d\n",
+				    cid);
+			goto proc_next_cqe;
+		}
+
 		beiscsi_ep = ep->dd_data;
 		beiscsi_conn = beiscsi_ep->conn;
 
@@ -2219,6 +2231,7 @@
 			break;
 		}
 
+proc_next_cqe:
 		AMAP_SET_BITS(struct amap_sol_cqe, valid, sol, 0);
 		queue_tail_inc(cq);
 		sol = queue_tail_node(cq);
@@ -4377,6 +4390,10 @@
 {
 	struct iscsi_boot_kobj *boot_kobj;
 
+	/* it has been created previously */
+	if (phba->boot_kset)
+		return 0;
+
 	/* get boot info using mgmt cmd */
 	if (beiscsi_get_boot_info(phba))
 		/* Try to see if we can carry on without this */
@@ -5206,6 +5223,7 @@
 			free_irq(phba->pcidev->irq, phba);
 		}
 	pci_disable_msix(phba->pcidev);
+	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
 
 	for (i = 0; i < phba->num_cpus; i++) {
 		pbe_eq = &phwi_context->be_eq[i];
@@ -5227,7 +5245,6 @@
 		hwi_cleanup(phba);
 	}
 
-	cancel_delayed_work_sync(&phba->beiscsi_hw_check_task);
 }
 
 static void beiscsi_remove(struct pci_dev *pcidev)
@@ -5276,9 +5293,9 @@
 	for (i = 0; i <= phba->num_cpus; i++)
 		phba->msix_entries[i].entry = i;
 
-	status = pci_enable_msix(phba->pcidev, phba->msix_entries,
-				 (phba->num_cpus + 1));
-	if (!status)
+	status = pci_enable_msix_range(phba->pcidev, phba->msix_entries,
+				       phba->num_cpus + 1, phba->num_cpus + 1);
+	if (status > 0)
 		phba->msix_enabled = true;
 
 	return;
@@ -5335,6 +5352,14 @@
 	}
 }
 
+static void be_check_boot_session(struct beiscsi_hba *phba)
+{
+	if (beiscsi_setup_boot_info(phba))
+		beiscsi_log(phba, KERN_ERR, BEISCSI_LOG_INIT,
+			    "BM_%d : Could not set up "
+			    "iSCSI boot info on async event.\n");
+}
+
 /*
  * beiscsi_hw_health_check()- Check adapter health
  * @work: work item to check HW health
@@ -5350,6 +5375,11 @@
 
 	be_eqd_update(phba);
 
+	if (phba->state & BE_ADAPTER_CHECK_BOOT) {
+		phba->state &= ~BE_ADAPTER_CHECK_BOOT;
+		be_check_boot_session(phba);
+	}
+
 	beiscsi_ue_detect(phba);
 
 	schedule_delayed_work(&phba->beiscsi_hw_check_task,
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index 9ceab42..7ee0ffc 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -36,7 +36,7 @@
 #include <scsi/scsi_transport_iscsi.h>
 
 #define DRV_NAME		"be2iscsi"
-#define BUILD_STR		"10.2.273.0"
+#define BUILD_STR		"10.4.114.0"
 #define BE_NAME			"Emulex OneConnect " \
 				"Open-iSCSI Driver version " BUILD_STR
 #define DRV_DESC		BE_NAME " " "Driver"
@@ -104,6 +104,7 @@
 #define BE_ADAPTER_LINK_DOWN	0x002
 #define BE_ADAPTER_PCI_ERR	0x004
 #define BE_ADAPTER_STATE_SHUTDOWN	0x008
+#define BE_ADAPTER_CHECK_BOOT	0x010
 
 
 #define BEISCSI_CLEAN_UNLOAD	0x01
@@ -839,6 +840,9 @@
 void hwi_ring_cq_db(struct beiscsi_hba *phba,
 		     unsigned int id, unsigned int num_processed,
 		     unsigned char rearm, unsigned char event);
+
+unsigned int beiscsi_process_cq(struct be_eq_obj *pbe_eq);
+
 static inline bool beiscsi_error(struct beiscsi_hba *phba)
 {
 	return phba->ue_detected || phba->fw_timeout;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 665afcb..681d4e8 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -943,17 +943,20 @@
 
 	if (ip_action == IP_ACTION_ADD) {
 		memcpy(req->ip_params.ip_record.ip_addr.addr, ip_param->value,
-		       ip_param->len);
+		       sizeof(req->ip_params.ip_record.ip_addr.addr));
 
 		if (subnet_param)
 			memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-			       subnet_param->value, subnet_param->len);
+			       subnet_param->value,
+			       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
 	} else {
 		memcpy(req->ip_params.ip_record.ip_addr.addr,
-		       if_info->ip_addr.addr, ip_param->len);
+		       if_info->ip_addr.addr,
+		       sizeof(req->ip_params.ip_record.ip_addr.addr));
 
 		memcpy(req->ip_params.ip_record.ip_addr.subnet_mask,
-		       if_info->ip_addr.subnet_mask, ip_param->len);
+		       if_info->ip_addr.subnet_mask,
+		       sizeof(req->ip_params.ip_record.ip_addr.subnet_mask));
 	}
 
 	rc = mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
@@ -981,7 +984,7 @@
 	req->action = gtway_action;
 	req->ip_addr.ip_type = BE2_IPV4;
 
-	memcpy(req->ip_addr.addr, gt_addr, param_len);
+	memcpy(req->ip_addr.addr, gt_addr, sizeof(req->ip_addr.addr));
 
 	return mgmt_exec_nonemb_cmd(phba, &nonemb_cmd, NULL, 0);
 }
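
The three hunks above bound each copy by the size of the destination field
rather than by the caller-supplied parameter length. The general shape of
the fix, as a hedged sketch (the helper name is illustrative):

	/* Never let a caller-controlled length overrun the destination;
	 * zero-fill first so short sources leave deterministic padding. */
	static void copy_addr_bounded(u8 *dst, size_t dst_len,
		const u8 *src, size_t src_len)
	{
		memset(dst, 0, dst_len);
		memcpy(dst, src, min(dst_len, src_len));
	}
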
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index 24a8fc5..bd81446 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2013 Emulex
+ * Copyright (C) 2005 - 2014 Emulex
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 4c5891e..0679782 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1654,6 +1654,10 @@
 	u64 addr;
 	int i;
 
+	/*
+	 * Use dma_map_sg directly to ensure we're using the correct
+	 * dev struct off of pcidev.
+	 */
 	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
 			      scsi_sg_count(sc), sc->sc_data_direction);
 	scsi_for_each_sg(sc, sg, sg_count, i) {
@@ -1703,9 +1707,16 @@
 static void bnx2fc_unmap_sg_list(struct bnx2fc_cmd *io_req)
 {
 	struct scsi_cmnd *sc = io_req->sc_cmd;
+	struct bnx2fc_interface *interface = io_req->port->priv;
+	struct bnx2fc_hba *hba = interface->hba;
 
-	if (io_req->bd_tbl->bd_valid && sc) {
-		scsi_dma_unmap(sc);
+	/*
+	 * Use dma_unmap_sg directly to ensure we're using the correct
+	 * dev struct off of pcidev.
+	 */
+	if (io_req->bd_tbl->bd_valid && sc && scsi_sg_count(sc)) {
+		dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
+		    scsi_sg_count(sc), sc->sc_data_direction);
 		io_req->bd_tbl->bd_valid = 0;
 	}
 }
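
Mapping and unmapping must name the same struct device, scatterlist, entry
count and direction, otherwise the DMA-API/IOMMU bookkeeping goes wrong;
the unmap side can no longer rely on scsi_dma_unmap() because the map side
used the PCI function's device directly. Schematically, mirroring the two
hunks above:

	/* map side */
	sg_count = dma_map_sg(&hba->pcidev->dev, scsi_sglist(sc),
			      scsi_sg_count(sc), sc->sc_data_direction);

	/* unmap side: identical device, list, count and direction */
	dma_unmap_sg(&hba->pcidev->dev, scsi_sglist(sc),
		     scsi_sg_count(sc), sc->sc_data_direction);
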
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 40e2249..7a36388 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -2235,6 +2235,9 @@
 		case ISCSI_PARAM_TGT_RESET_TMO:
 		case ISCSI_PARAM_IFACE_NAME:
 		case ISCSI_PARAM_INITIATOR_NAME:
+		case ISCSI_PARAM_BOOT_ROOT:
+		case ISCSI_PARAM_BOOT_NIC:
+		case ISCSI_PARAM_BOOT_TARGET:
 			return S_IRUGO;
 		default:
 			return 0;
diff --git a/drivers/scsi/csiostor/csio_hw.h b/drivers/scsi/csiostor/csio_hw.h
index 49b1daa..5db2d85 100644
--- a/drivers/scsi/csiostor/csio_hw.h
+++ b/drivers/scsi/csiostor/csio_hw.h
@@ -94,7 +94,7 @@
 };
 
 struct csio_msix_entries {
-	unsigned short	vector;		/* Vector assigned by pci_enable_msix */
+	unsigned short	vector;		/* Assigned MSI-X vector */
 	void		*dev_id;	/* Priv object associated w/ this msix*/
 	char		desc[24];	/* Description of this vector */
 };
diff --git a/drivers/scsi/csiostor/csio_isr.c b/drivers/scsi/csiostor/csio_isr.c
index 7ee9777..a8c748a 100644
--- a/drivers/scsi/csiostor/csio_isr.c
+++ b/drivers/scsi/csiostor/csio_isr.c
@@ -499,7 +499,7 @@
 static int
 csio_enable_msix(struct csio_hw *hw)
 {
-	int rv, i, j, k, n, min, cnt;
+	int i, j, k, n, min, cnt;
 	struct csio_msix_entries *entryp;
 	struct msix_entry *entries;
 	int extra = CSIO_EXTRA_VECS;
@@ -521,21 +521,15 @@
 
 	csio_dbg(hw, "FW supp #niq:%d, trying %d msix's\n", hw->cfg_niq, cnt);
 
-	while ((rv = pci_enable_msix(hw->pdev, entries, cnt)) >= min)
-		cnt = rv;
-	if (!rv) {
-		if (cnt < (hw->num_sqsets + extra)) {
-			csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
-			csio_reduce_sqsets(hw, cnt - extra);
-		}
-	} else {
-		if (rv > 0) {
-			pci_disable_msix(hw->pdev);
-			csio_info(hw, "Not using MSI-X, remainder:%d\n", rv);
-		}
-
+	cnt = pci_enable_msix_range(hw->pdev, entries, min, cnt);
+	if (cnt < 0) {
 		kfree(entries);
-		return -ENOMEM;
+		return cnt;
+	}
+
+	if (cnt < (hw->num_sqsets + extra)) {
+		csio_dbg(hw, "Reducing sqsets to %d\n", cnt - extra);
+		csio_reduce_sqsets(hw, cnt - extra);
 	}
 
 	/* Save off vectors */
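
This is the standard pci_enable_msix_range() conversion: the old
try-and-shrink loop around pci_enable_msix() collapses into a single call
that either allocates between the minimum and requested vector counts or
fails outright. The pattern in isolation (variable names illustrative):

	/* Returns the number of vectors allocated (min <= n <= max),
	 * or a negative errno if even 'min' cannot be satisfied. */
	nvec = pci_enable_msix_range(pdev, entries, min, max);
	if (nvec < 0)
		return nvec;
	/* size the queue sets to the nvec actually granted */
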
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index addd1dd..6a2001d 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -1852,7 +1852,7 @@
 	u32 credits;
 
 	log_debug(1 << CXGBI_DBG_PDU_RX,
-		"csk 0x%p,%u,0x%lu,%u, seq %u, wup %u, thre %u, %u.\n",
+		"csk 0x%p,%u,0x%lx,%u, seq %u, wup %u, thre %u, %u.\n",
 		csk, csk->state, csk->flags, csk->tid, csk->copied_seq,
 		csk->rcv_wup, cdev->rx_credit_thres,
 		cdev->rcv_win);
diff --git a/drivers/scsi/dpt_i2o.c b/drivers/scsi/dpt_i2o.c
index 67283ef..072f0ec 100644
--- a/drivers/scsi/dpt_i2o.c
+++ b/drivers/scsi/dpt_i2o.c
@@ -2363,6 +2363,7 @@
 	host->unique_id = (u32)sys_tbl_pa + pHba->unit;
 	host->sg_tablesize = pHba->sg_tablesize;
 	host->can_queue = pHba->post_fifo_size;
+	host->use_cmd_list = 1;
 
 	return 0;
 }
diff --git a/drivers/scsi/eata.c b/drivers/scsi/eata.c
index 813dd5c..943ad3a 100644
--- a/drivers/scsi/eata.c
+++ b/drivers/scsi/eata.c
@@ -837,7 +837,6 @@
 static struct Scsi_Host *sh[MAX_BOARDS];
 static const char *driver_name = "EATA";
 static char sha[MAX_BOARDS];
-static DEFINE_SPINLOCK(driver_lock);
 
 /* Initialize num_boards so that ihdlr can work while detect is in progress */
 static unsigned int num_boards = MAX_BOARDS;
@@ -1097,8 +1096,6 @@
 		goto fail;
 	}
 
-	spin_lock_irq(&driver_lock);
-
 	if (do_dma(port_base, 0, READ_CONFIG_PIO)) {
 #if defined(DEBUG_DETECT)
 		printk("%s: detect, do_dma failed at 0x%03lx.\n", name,
@@ -1264,10 +1261,7 @@
 	}
 #endif
 
-	spin_unlock_irq(&driver_lock);
 	sh[j] = shost = scsi_register(tpnt, sizeof(struct hostdata));
-	spin_lock_irq(&driver_lock);
-
 	if (shost == NULL) {
 		printk("%s: unable to register host, detaching.\n", name);
 		goto freedma;
@@ -1344,8 +1338,6 @@
 	else
 		sprintf(dma_name, "DMA %u", dma_channel);
 
-	spin_unlock_irq(&driver_lock);
-
 	for (i = 0; i < shost->can_queue; i++)
 		ha->cp[i].cp_dma_addr = pci_map_single(ha->pdev,
 							  &ha->cp[i],
@@ -1438,7 +1430,6 @@
       freeirq:
 	free_irq(irq, &sha[j]);
       freelock:
-	spin_unlock_irq(&driver_lock);
 	release_region(port_base, REGION_SIZE);
       fail:
 	return 0;
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 74277c2..bdc8989 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -96,14 +96,32 @@
 	struct ethtool_cmd ecmd;
 
 	if (!__ethtool_get_settings(netdev, &ecmd)) {
-		lport->link_supported_speeds &=
-			~(FC_PORTSPEED_1GBIT | FC_PORTSPEED_10GBIT);
+		lport->link_supported_speeds &= ~(FC_PORTSPEED_1GBIT  |
+		                                  FC_PORTSPEED_10GBIT |
+		                                  FC_PORTSPEED_20GBIT |
+		                                  FC_PORTSPEED_40GBIT);
+
 		if (ecmd.supported & (SUPPORTED_1000baseT_Half |
-				      SUPPORTED_1000baseT_Full))
+		                      SUPPORTED_1000baseT_Full |
+		                      SUPPORTED_1000baseKX_Full))
 			lport->link_supported_speeds |= FC_PORTSPEED_1GBIT;
-		if (ecmd.supported & SUPPORTED_10000baseT_Full)
-			lport->link_supported_speeds |=
-				FC_PORTSPEED_10GBIT;
+
+		if (ecmd.supported & (SUPPORTED_10000baseT_Full   |
+		                      SUPPORTED_10000baseKX4_Full |
+		                      SUPPORTED_10000baseKR_Full  |
+		                      SUPPORTED_10000baseR_FEC))
+			lport->link_supported_speeds |= FC_PORTSPEED_10GBIT;
+
+		if (ecmd.supported & (SUPPORTED_20000baseMLD2_Full |
+		                      SUPPORTED_20000baseKR2_Full))
+			lport->link_supported_speeds |= FC_PORTSPEED_20GBIT;
+
+		if (ecmd.supported & (SUPPORTED_40000baseKR4_Full |
+		                      SUPPORTED_40000baseCR4_Full |
+		                      SUPPORTED_40000baseSR4_Full |
+		                      SUPPORTED_40000baseLR4_Full))
+			lport->link_supported_speeds |= FC_PORTSPEED_40GBIT;
+
 		switch (ethtool_cmd_speed(&ecmd)) {
 		case SPEED_1000:
 			lport->link_speed = FC_PORTSPEED_1GBIT;
@@ -111,6 +129,15 @@
 		case SPEED_10000:
 			lport->link_speed = FC_PORTSPEED_10GBIT;
 			break;
+		case 20000:
+			lport->link_speed = FC_PORTSPEED_20GBIT;
+			break;
+		case 40000:
+			lport->link_speed = FC_PORTSPEED_40GBIT;
+			break;
+		default:
+			lport->link_speed = FC_PORTSPEED_UNKNOWN;
+			break;
 		}
 		return 0;
 	}
diff --git a/drivers/scsi/fnic/fnic.h b/drivers/scsi/fnic/fnic.h
index 1d3521e..bf8d34c 100644
--- a/drivers/scsi/fnic/fnic.h
+++ b/drivers/scsi/fnic/fnic.h
@@ -39,7 +39,7 @@
 
 #define DRV_NAME		"fnic"
 #define DRV_DESCRIPTION		"Cisco FCoE HBA Driver"
-#define DRV_VERSION		"1.6.0.10"
+#define DRV_VERSION		"1.6.0.11"
 #define PFX			DRV_NAME ": "
 #define DFX                     DRV_NAME "%d: "
 
diff --git a/drivers/scsi/fnic/fnic_fcs.c b/drivers/scsi/fnic/fnic_fcs.c
index 1b948f6..f3984b4 100644
--- a/drivers/scsi/fnic/fnic_fcs.c
+++ b/drivers/scsi/fnic/fnic_fcs.c
@@ -35,7 +35,7 @@
 #include "cq_enet_desc.h"
 #include "cq_exch_desc.h"
 
-static u8 fcoe_all_fcfs[ETH_ALEN];
+static u8 fcoe_all_fcfs[ETH_ALEN] = FIP_ALL_FCF_MACS;
 struct workqueue_struct *fnic_fip_queue;
 struct workqueue_struct *fnic_event_queue;
 
@@ -101,13 +101,14 @@
 				FNIC_FCS_DBG(KERN_DEBUG, fnic->lport->host,
 					     "link up\n");
 				fcoe_ctlr_link_up(&fnic->ctlr);
-			} else
+			} else {
 				/* UP -> UP */
 				spin_unlock_irqrestore(&fnic->fnic_lock, flags);
 				fnic_fc_trace_set_data(
 					fnic->lport->host->host_no, FNIC_FC_LE,
 					"Link Status: UP_UP",
 					strlen("Link Status: UP_UP"));
+			}
 		}
 	} else if (fnic->link_status) {
 		/* DOWN -> UP */
diff --git a/drivers/scsi/fnic/fnic_trace.c b/drivers/scsi/fnic/fnic_trace.c
index 8b1b387..acf1f95 100644
--- a/drivers/scsi/fnic/fnic_trace.c
+++ b/drivers/scsi/fnic/fnic_trace.c
@@ -743,7 +743,7 @@
 
 	fmt = "%02d:%02d:%04ld %02d:%02d:%02d.%09lu ns%8x       %c%8x\t";
 	len += snprintf(fnic_dbgfs_prt->buffer + len,
-		(fnic_fc_trace_max_pages * PAGE_SIZE * 3) - len,
+		max_size - len,
 		fmt,
 		tm.tm_mon + 1, tm.tm_mday, tm.tm_year + 1900,
 		tm.tm_hour, tm.tm_min, tm.tm_sec,
@@ -767,8 +767,7 @@
 				j == ethhdr_len + fcoehdr_len + fchdr_len ||
 				(i > 3 && j%fchdr_len == 0)) {
 				len += snprintf(fnic_dbgfs_prt->buffer
-					+ len, (fnic_fc_trace_max_pages
-					* PAGE_SIZE * 3) - len,
+					+ len, max_size - len,
 					"\n\t\t\t\t\t\t\t\t");
 				i++;
 			}
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 6b35d0d..cef5d49 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -5971,10 +5971,6 @@
 
 	/* Save the PCI command register */
 	pci_read_config_word(pdev, 4, &command_register);
-	/* Turn the board off.  This is so that later pci_restore_state()
-	 * won't turn the board on before the rest of config space is ready.
-	 */
-	pci_disable_device(pdev);
 	pci_save_state(pdev);
 
 	/* find the first memory BAR, so we can find the cfg table */
@@ -6022,11 +6018,6 @@
 		goto unmap_cfgtable;
 
 	pci_restore_state(pdev);
-	rc = pci_enable_device(pdev);
-	if (rc) {
-		dev_warn(&pdev->dev, "failed to enable device.\n");
-		goto unmap_cfgtable;
-	}
 	pci_write_config_word(pdev, 4, command_register);
 
 	/* Some devices (notably the HP Smart Array 5i Controller)
@@ -6159,26 +6150,22 @@
 		h->msix_vector = MAX_REPLY_QUEUES;
 		if (h->msix_vector > num_online_cpus())
 			h->msix_vector = num_online_cpus();
-		err = pci_enable_msix(h->pdev, hpsa_msix_entries,
-				      h->msix_vector);
-		if (err > 0) {
+		err = pci_enable_msix_range(h->pdev, hpsa_msix_entries,
+					    1, h->msix_vector);
+		if (err < 0) {
+			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n", err);
+			h->msix_vector = 0;
+			goto single_msi_mode;
+		} else if (err < h->msix_vector) {
 			dev_warn(&h->pdev->dev, "only %d MSI-X vectors "
 			       "available\n", err);
-			h->msix_vector = err;
-			err = pci_enable_msix(h->pdev, hpsa_msix_entries,
-					      h->msix_vector);
 		}
-		if (!err) {
-			for (i = 0; i < h->msix_vector; i++)
-				h->intr[i] = hpsa_msix_entries[i].vector;
-			return;
-		} else {
-			dev_warn(&h->pdev->dev, "MSI-X init failed %d\n",
-			       err);
-			h->msix_vector = 0;
-			goto default_int_mode;
-		}
+		h->msix_vector = err;
+		for (i = 0; i < h->msix_vector; i++)
+			h->intr[i] = hpsa_msix_entries[i].vector;
+		return;
 	}
+single_msi_mode:
 	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
 		dev_info(&h->pdev->dev, "MSI\n");
 		if (!pci_enable_msi(h->pdev))
@@ -6541,6 +6528,23 @@
 	if (!reset_devices)
 		return 0;
 
+	/* The kdump kernel is loading and we don't know what state the
+	 * PCI interface is in; dev->enable_cnt is zero. Enable and then
+	 * disable the device, wait a while, and switch it back on.
+	 */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "Failed to enable PCI device\n");
+		return -ENODEV;
+	}
+	pci_disable_device(pdev);
+	msleep(260);			/* a randomly chosen number */
+	rc = pci_enable_device(pdev);
+	if (rc) {
+		dev_warn(&pdev->dev, "failed to enable device.\n");
+		return -ENODEV;
+	}
+	pci_set_master(pdev);
 	/* Reset the controller with a PCI power-cycle or via doorbell */
 	rc = hpsa_kdump_hard_reset_controller(pdev);
 
@@ -6549,10 +6553,11 @@
 	 * "performant mode".  Or, it might be 640x, which can't reset
 	 * due to concerns about shared bbwc between 6402/6404 pair.
 	 */
-	if (rc == -ENOTSUPP)
-		return rc; /* just try to do the kdump anyhow. */
-	if (rc)
-		return -ENODEV;
+	if (rc) {
+		if (rc != -ENOTSUPP) /* just try to do the kdump anyhow. */
+			rc = -ENODEV;
+		goto out_disable;
+	}
 
 	/* Now try to get the controller to respond to a no-op */
 	dev_warn(&pdev->dev, "Waiting for controller to respond to no-op\n");
@@ -6563,7 +6568,11 @@
 			dev_warn(&pdev->dev, "no-op failed%s\n",
 					(i < 11 ? "; re-trying" : ""));
 	}
-	return 0;
+
+out_disable:
+
+	pci_disable_device(pdev);
+	return rc;
 }
 
 static int hpsa_allocate_cmd_pool(struct ctlr_info *h)
@@ -6743,6 +6752,7 @@
 		iounmap(h->transtable);
 	if (h->cfgtable)
 		iounmap(h->cfgtable);
+	pci_disable_device(h->pdev);
 	pci_release_regions(h->pdev);
 	kfree(h);
 }
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 924b0ba..2a9578c 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -2440,6 +2440,7 @@
 {
 	u32 ioasc;
 	int error_index;
+	struct ipr_hostrcb_type_21_error *error;
 
 	if (hostrcb->hcam.notify_type != IPR_HOST_RCB_NOTIF_TYPE_ERROR_LOG_ENTRY)
 		return;
@@ -2464,6 +2465,15 @@
 	if (!ipr_error_table[error_index].log_hcam)
 		return;
 
+	if (ioasc == IPR_IOASC_HW_CMD_FAILED &&
+	    hostrcb->hcam.overlay_id == IPR_HOST_RCB_OVERLAY_ID_21) {
+		error = &hostrcb->hcam.u.error64.u.type_21_error;
+
+		if (((be32_to_cpu(error->sense_data[0]) & 0x0000ff00) >> 8) == ILLEGAL_REQUEST &&
+			ioa_cfg->log_level <= IPR_DEFAULT_LOG_LEVEL)
+				return;
+	}
+
 	ipr_hcam_err(hostrcb, "%s\n", ipr_error_table[error_index].error);
 
 	/* Set indication we have logged an error */
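
The sense_data words arrive big-endian; byte 1 of the first word carries
the SCSI sense key, which is what the mask-and-shift above extracts. The
same decode in isolation, as a sketch:

	/* sense key lives in bits 15:8 of the first big-endian word;
	 * ILLEGAL_REQUEST is sense key 0x5 */
	u32 w0 = be32_to_cpu(error->sense_data[0]);
	u8 sense_key = (w0 & 0x0000ff00) >> 8;
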
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 31ed126..d0201ce 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -130,6 +130,7 @@
 #define IPR_IOASC_HW_DEV_BUS_STATUS			0x04448500
 #define	IPR_IOASC_IOASC_MASK			0xFFFFFF00
 #define	IPR_IOASC_SCSI_STATUS_MASK		0x000000FF
+#define IPR_IOASC_HW_CMD_FAILED			0x046E0000
 #define IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT	0x05240000
 #define IPR_IOASC_IR_RESOURCE_HANDLE		0x05250000
 #define IPR_IOASC_IR_NO_CMDS_TO_2ND_IOA		0x05258100
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index a669f2d..427af0f 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -726,13 +726,18 @@
 	switch(param) {
 	case ISCSI_PARAM_CONN_PORT:
 	case ISCSI_PARAM_CONN_ADDRESS:
+	case ISCSI_PARAM_LOCAL_PORT:
 		spin_lock_bh(&conn->session->frwd_lock);
 		if (!tcp_sw_conn || !tcp_sw_conn->sock) {
 			spin_unlock_bh(&conn->session->frwd_lock);
 			return -ENOTCONN;
 		}
-		rc = kernel_getpeername(tcp_sw_conn->sock,
-					(struct sockaddr *)&addr, &len);
+		if (param == ISCSI_PARAM_LOCAL_PORT)
+			rc = kernel_getsockname(tcp_sw_conn->sock,
+						(struct sockaddr *)&addr, &len);
+		else
+			rc = kernel_getpeername(tcp_sw_conn->sock,
+						(struct sockaddr *)&addr, &len);
 		spin_unlock_bh(&conn->session->frwd_lock);
 		if (rc)
 			return rc;
@@ -895,6 +900,7 @@
 		case ISCSI_PARAM_DATADGST_EN:
 		case ISCSI_PARAM_CONN_ADDRESS:
 		case ISCSI_PARAM_CONN_PORT:
+		case ISCSI_PARAM_LOCAL_PORT:
 		case ISCSI_PARAM_EXP_STATSN:
 		case ISCSI_PARAM_PERSISTENT_ADDRESS:
 		case ISCSI_PARAM_PERSISTENT_PORT:
diff --git a/drivers/scsi/libfc/fc_libfc.c b/drivers/scsi/libfc/fc_libfc.c
index 8d65a51a..c11a638 100644
--- a/drivers/scsi/libfc/fc_libfc.c
+++ b/drivers/scsi/libfc/fc_libfc.c
@@ -296,9 +296,9 @@
 	BUG_ON(type >= FC_FC4_PROV_SIZE);
 	mutex_lock(&fc_prov_mutex);
 	if (prov->recv)
-		rcu_assign_pointer(fc_passive_prov[type], NULL);
+		RCU_INIT_POINTER(fc_passive_prov[type], NULL);
 	else
-		rcu_assign_pointer(fc_active_prov[type], NULL);
+		RCU_INIT_POINTER(fc_active_prov[type], NULL);
 	mutex_unlock(&fc_prov_mutex);
 	synchronize_rcu();
 }
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 191b597..0d8bc6c 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -3505,6 +3505,7 @@
 			len = sprintf(buf, "%pI6\n", &sin6->sin6_addr);
 		break;
 	case ISCSI_PARAM_CONN_PORT:
+	case ISCSI_PARAM_LOCAL_PORT:
 		if (sin)
 			len = sprintf(buf, "%hu\n", be16_to_cpu(sin->sin_port));
 		else
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index 6eed9e7..2f9b968 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3385,7 +3385,7 @@
 		if (strlen(buf) > (LPFC_MAX_DATA_CTRL_LEN - 1))
 			return -EINVAL;
 
-		strcpy(bucket_data, buf);
+		strncpy(bucket_data, buf, LPFC_MAX_DATA_CTRL_LEN);
 		str_ptr = &bucket_data[0];
 		/* Ignore this token - this is command token */
 		token = strsep(&str_ptr, "\t ");
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index 5b5c825..a7bf359 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -656,7 +656,6 @@
 	struct lpfc_nodelist *ndlp = rdata->pnode;
 	uint32_t elscmd;
 	uint32_t cmdsize;
-	uint32_t rspsize;
 	struct lpfc_iocbq *cmdiocbq;
 	uint16_t rpi = 0;
 	struct bsg_job_data *dd_data;
@@ -687,7 +686,6 @@
 
 	elscmd = job->request->rqst_data.r_els.els_code;
 	cmdsize = job->request_payload.payload_len;
-	rspsize = job->reply_payload.payload_len;
 
 	if (!lpfc_nlp_get(ndlp)) {
 		rc = -ENODEV;
@@ -2251,7 +2249,6 @@
 	i = 0;
 	while (phba->link_state != LPFC_LINK_DOWN) {
 		if (i++ > timeout) {
-			rc = -ETIMEDOUT;
 			lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
 					"3140 Timeout waiting for link to "
 					"diagnostic mode_end, timeout:%d ms\n",
@@ -2291,7 +2288,6 @@
 	LPFC_MBOXQ_t *pmboxq;
 	struct sli4_link_diag *link_diag_test_cmd;
 	uint32_t req_len, alloc_len;
-	uint32_t timeout;
 	struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
 	union lpfc_sli4_cfg_shdr *shdr;
 	uint32_t shdr_status, shdr_add_status;
@@ -2342,7 +2338,6 @@
 
 	link_diag_test_cmd = (struct sli4_link_diag *)
 			 job->request->rqst_data.h_vendor.vendor_cmd;
-	timeout = link_diag_test_cmd->timeout * 100;
 
 	rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
 
@@ -2693,14 +2688,13 @@
 	INIT_LIST_HEAD(&dmabuf->list);
 
 	/* now, allocate dma buffer */
-	dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
-					  &(dmabuf->phys), GFP_KERNEL);
+	dmabuf->virt = dma_zalloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
+					   &(dmabuf->phys), GFP_KERNEL);
 
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return NULL;
 	}
-	memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
 
 	return dmabuf;
 }
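
Side note (illustration only, not part of the patch): dma_zalloc_coherent()
folds the zeroing into the allocation, which is why the explicit memset()
disappears here and in the later hunks. The two equivalent forms, with
dev/size/phys as placeholders:

	/* before */
	virt = dma_alloc_coherent(dev, size, &phys, GFP_KERNEL);
	if (virt)
		memset(virt, 0, size);

	/* after */
	virt = dma_zalloc_coherent(dev, size, &phys, GFP_KERNEL);
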
@@ -2828,8 +2822,10 @@
 		size -= cnt;
 	}
 
-	mlist->flag = i;
-	return mlist;
+	if (mlist) {
+		mlist->flag = i;
+		return mlist;
+	}
 out:
 	diag_cmd_data_free(phba, mlist);
 	return NULL;
@@ -3344,7 +3340,7 @@
  * will wake up thread waiting on the wait queue pointed by context1
  * of the mailbox.
  **/
-void
+static void
 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
 {
 	struct bsg_job_data *dd_data;
@@ -4593,7 +4589,7 @@
 * being reset) and complete the job, otherwise issue the mailbox command and
  * let our completion handler finish the command.
  **/
-static uint32_t
+static int
 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
 	struct lpfc_vport *vport)
 {
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index db5604f..00665a5 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -451,7 +451,6 @@
 int lpfc_set_rrq_active(struct lpfc_hba *, struct lpfc_nodelist *,
 	uint16_t, uint16_t, uint16_t);
 uint16_t lpfc_sli4_xri_inrange(struct lpfc_hba *, uint16_t);
-void lpfc_cleanup_wt_rrqs(struct lpfc_hba *);
 void lpfc_cleanup_vports_rrqs(struct lpfc_vport *, struct lpfc_nodelist *);
 struct lpfc_node_rrq *lpfc_get_active_rrq(struct lpfc_vport *, uint16_t,
 	uint32_t);
diff --git a/drivers/scsi/lpfc/lpfc_ct.c b/drivers/scsi/lpfc/lpfc_ct.c
index da61d8d..61a32cd 100644
--- a/drivers/scsi/lpfc/lpfc_ct.c
+++ b/drivers/scsi/lpfc/lpfc_ct.c
@@ -1439,7 +1439,7 @@
 			/* #2 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(MANUFACTURER);
-			strcpy(ae->un.Manufacturer, "Emulex Corporation");
+			strncpy(ae->un.Manufacturer, "Emulex Corporation", 64);
 			len = strlen(ae->un.Manufacturer);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1449,7 +1449,7 @@
 			/* #3 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(SERIAL_NUMBER);
-			strcpy(ae->un.SerialNumber, phba->SerialNumber);
+			strncpy(ae->un.SerialNumber, phba->SerialNumber, 64);
 			len = strlen(ae->un.SerialNumber);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1459,7 +1459,7 @@
 			/* #4 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(MODEL);
-			strcpy(ae->un.Model, phba->ModelName);
+			strncpy(ae->un.Model, phba->ModelName, 256);
 			len = strlen(ae->un.Model);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1469,7 +1469,7 @@
 			/* #5 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(MODEL_DESCRIPTION);
-			strcpy(ae->un.ModelDescription, phba->ModelDesc);
+			strncpy(ae->un.ModelDescription, phba->ModelDesc, 256);
 			len = strlen(ae->un.ModelDescription);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1500,7 +1500,8 @@
 			/* #7 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(DRIVER_VERSION);
-			strcpy(ae->un.DriverVersion, lpfc_release_version);
+			strncpy(ae->un.DriverVersion,
+				lpfc_release_version, 256);
 			len = strlen(ae->un.DriverVersion);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
@@ -1510,7 +1511,8 @@
 			/* #8 HBA attribute entry */
 			ae = (ATTRIBUTE_ENTRY *) ((uint8_t *) rh + size);
 			ae->ad.bits.AttrType = be16_to_cpu(OPTION_ROM_VERSION);
-			strcpy(ae->un.OptionROMVersion, phba->OptionROMVersion);
+			strncpy(ae->un.OptionROMVersion,
+				phba->OptionROMVersion, 256);
 			len = strlen(ae->un.OptionROMVersion);
 			len += (len & 3) ? (4 - (len & 3)) : 4;
 			ae->ad.bits.AttrLen = be16_to_cpu(FOURBYTES + len);
diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
index b0aedce..786a2af 100644
--- a/drivers/scsi/lpfc/lpfc_debugfs.c
+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
@@ -269,7 +269,7 @@
 lpfc_debugfs_hbqinfo_data(struct lpfc_hba *phba, char *buf, int size)
 {
 	int len = 0;
-	int cnt, i, j, found, posted, low;
+	int i, j, found, posted, low;
 	uint32_t phys, raw_index, getidx;
 	struct lpfc_hbq_init *hip;
 	struct hbq_s *hbqs;
@@ -279,7 +279,7 @@
 
 	if (phba->sli_rev != 3)
 		return 0;
-	cnt = LPFC_HBQINFO_SIZE;
+
 	spin_lock_irq(&phba->hbalock);
 
 	/* toggle between multiple hbqs, if any */
diff --git a/drivers/scsi/lpfc/lpfc_disc.h b/drivers/scsi/lpfc/lpfc_disc.h
index 1a6fe52..6977027 100644
--- a/drivers/scsi/lpfc/lpfc_disc.h
+++ b/drivers/scsi/lpfc/lpfc_disc.h
@@ -78,7 +78,8 @@
 	struct list_head nlp_listp;
 	struct lpfc_name nlp_portname;
 	struct lpfc_name nlp_nodename;
-	uint32_t         nlp_flag;		/* entry  flags */
+	uint32_t         nlp_flag;		/* entry flags */
+	uint32_t         nlp_add_flag;		/* additional flags */
 	uint32_t         nlp_DID;		/* FC D_ID of entry */
 	uint32_t         nlp_last_elscmd;	/* Last ELS cmd sent */
 	uint16_t         nlp_type;
@@ -157,6 +158,9 @@
 #define NLP_FIRSTBURST     0x40000000	/* Target supports FirstBurst */
 #define NLP_RPI_REGISTERED 0x80000000	/* nlp_rpi is valid */
 
+/* Defines for nlp_add_flag (uint32) */
+#define NLP_IN_DEV_LOSS  0x00000001	/* Dev Loss processing in progress */
+
 /* ndlp usage management macros */
 #define NLP_CHK_NODE_ACT(ndlp)		(((ndlp)->nlp_usg_map \
 						& NLP_USG_NODE_ACT_BIT) \
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 7a5d81a..4c25485 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -1084,7 +1084,8 @@
 	 * accessing it.
 	 */
 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
-
+	if (!prsp)
+		goto out;
 	sp = prsp->virt + sizeof(uint32_t);
 
 	/* FLOGI completes successfully */
@@ -1828,7 +1829,7 @@
 	IOCB_t *irsp;
 	struct lpfc_nodelist *ndlp;
 	struct lpfc_dmabuf *prsp;
-	int disc, rc, did, type;
+	int disc, rc;
 
 	/* we pass cmdiocb to state machine which needs rspiocb as well */
 	cmdiocb->context_un.rsp_iocb = rspiocb;
@@ -1873,10 +1874,6 @@
 		goto out;
 	}
 
-	/* ndlp could be freed in DSM, save these values now */
-	type = ndlp->nlp_type;
-	did = ndlp->nlp_DID;
-
 	if (irsp->ulpStatus) {
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb)) {
@@ -2269,8 +2266,6 @@
 void
 lpfc_more_adisc(struct lpfc_vport *vport)
 {
-	int sentadisc;
-
 	if (vport->num_disc_nodes)
 		vport->num_disc_nodes--;
 	/* Continue discovery with <num_disc_nodes> ADISCs to go */
@@ -2283,7 +2278,7 @@
 	if (vport->fc_flag & FC_NLP_MORE) {
 		lpfc_set_disctmo(vport);
 		/* go thru NPR nodes and issue any remaining ELS ADISCs */
-		sentadisc = lpfc_els_disc_adisc(vport);
+		lpfc_els_disc_adisc(vport);
 	}
 	if (!vport->num_disc_nodes)
 		lpfc_adisc_done(vport);
@@ -3027,10 +3022,9 @@
 {
 	struct lpfc_vport *vport = ndlp->vport;
 	struct Scsi_Host  *shost = lpfc_shost_from_vport(vport);
-	uint32_t cmd, did, retry;
+	uint32_t cmd, retry;
 
 	spin_lock_irq(shost->host_lock);
-	did = ndlp->nlp_DID;
 	cmd = ndlp->nlp_last_elscmd;
 	ndlp->nlp_last_elscmd = 0;
 
@@ -5288,10 +5282,9 @@
 	IOCB_t *icmd;
 	RNID *rn;
 	struct ls_rjt stat;
-	uint32_t cmd, did;
+	uint32_t cmd;
 
 	icmd = &cmdiocb->iocb;
-	did = icmd->un.elsreq64.remoteID;
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 
@@ -6693,6 +6686,13 @@
 
 	phba->fc_stat.elsRcvFrame++;
 
+	/*
+	 * Do not process any unsolicited ELS commands
+	 * if the ndlp is in DEV_LOSS
+	 */
+	if (ndlp->nlp_add_flag & NLP_IN_DEV_LOSS)
+		goto dropit;
+
 	elsiocb->context1 = lpfc_nlp_get(ndlp);
 	elsiocb->vport = vport;
 
@@ -7514,6 +7514,8 @@
 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
 	lpfc_vport_set_state(vport, FC_VPORT_ACTIVE);
 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
 	sp = prsp->virt + sizeof(uint32_t);
 	fabric_param_changed = lpfc_check_clean_addr_bit(vport, sp);
 	memcpy(&vport->fabric_portname, &sp->portName,
@@ -8187,9 +8189,11 @@
 			list_del(&sglq_entry->list);
 			ndlp = sglq_entry->ndlp;
 			sglq_entry->ndlp = NULL;
+			spin_lock(&pring->ring_lock);
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
 			sglq_entry->state = SGL_FREED;
+			spin_unlock(&pring->ring_lock);
 			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			lpfc_set_rrq_active(phba, ndlp,
@@ -8208,12 +8212,15 @@
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return;
 	}
+	spin_lock(&pring->ring_lock);
 	sglq_entry = __lpfc_get_active_sglq(phba, lxri);
 	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock(&pring->ring_lock);
 		spin_unlock_irqrestore(&phba->hbalock, iflag);
 		return;
 	}
 	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock(&pring->ring_lock);
 	spin_unlock_irqrestore(&phba->hbalock, iflag);
 	return;
 }
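
Side note (illustration only, not part of the patch): the hunks above
establish a fixed nesting for the ELS sgl list, hbalock (irq-disabling,
outer) before ring_lock (inner), released in reverse order. The pattern,
sketched:

	spin_lock_irqsave(&phba->hbalock, iflag);
	spin_lock(&pring->ring_lock);
	/* ... manipulate phba->sli4_hba.lpfc_sgl_list ... */
	spin_unlock(&pring->ring_lock);
	spin_unlock_irqrestore(&phba->hbalock, iflag);
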
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2a17e31..5452f1f 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -150,9 +150,30 @@
 
 		/* If the WWPN of the rport and ndlp don't match, ignore it */
 		if (rport->port_name != wwn_to_u64(ndlp->nlp_portname.u.wwn)) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_NODE,
+				"6789 rport name %lx != node port name %lx",
+				(unsigned long)rport->port_name,
+				(unsigned long)wwn_to_u64(
+						ndlp->nlp_portname.u.wwn));
+			put_node = rdata->pnode != NULL;
+			put_rport = ndlp->rport != NULL;
+			rdata->pnode = NULL;
+			ndlp->rport = NULL;
+			if (put_node)
+				lpfc_nlp_put(ndlp);
 			put_device(&rport->dev);
 			return;
 		}
+
+		put_node = rdata->pnode != NULL;
+		put_rport = ndlp->rport != NULL;
+		rdata->pnode = NULL;
+		ndlp->rport = NULL;
+		if (put_node)
+			lpfc_nlp_put(ndlp);
+		if (put_rport)
+			put_device(&rport->dev);
+		return;
 	}
 
 	evtp = &ndlp->dev_loss_evt;
@@ -161,6 +182,7 @@
 		return;
 
 	evtp->evt_arg1  = lpfc_nlp_get(ndlp);
+	ndlp->nlp_add_flag |= NLP_IN_DEV_LOSS;
 
 	spin_lock_irq(&phba->hbalock);
 	/* We need to hold the node by incrementing the reference
@@ -201,8 +223,10 @@
 
 	rport = ndlp->rport;
 
-	if (!rport)
+	if (!rport) {
+		ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 		return fcf_inuse;
+	}
 
 	rdata = rport->dd_data;
 	name = (uint8_t *) &ndlp->nlp_portname;
@@ -235,6 +259,7 @@
 		put_rport = ndlp->rport != NULL;
 		rdata->pnode = NULL;
 		ndlp->rport = NULL;
+		ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 		if (put_node)
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
@@ -250,6 +275,7 @@
 				 *name, *(name+1), *(name+2), *(name+3),
 				 *(name+4), *(name+5), *(name+6), *(name+7),
 				 ndlp->nlp_DID);
+		ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 		return fcf_inuse;
 	}
 
@@ -259,6 +285,7 @@
 		put_rport = ndlp->rport != NULL;
 		rdata->pnode = NULL;
 		ndlp->rport = NULL;
+		ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 		if (put_node)
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
@@ -269,6 +296,7 @@
 	if (ndlp->nlp_sid != NLP_NO_SID) {
 		warn_on = 1;
 		/* flush the target */
+		ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 		lpfc_sli_abort_iocb(vport, &phba->sli.ring[phba->sli.fcp_ring],
 				    ndlp->nlp_sid, 0, LPFC_CTX_TGT);
 	}
@@ -297,6 +325,7 @@
 	put_rport = ndlp->rport != NULL;
 	rdata->pnode = NULL;
 	ndlp->rport = NULL;
+	ndlp->nlp_add_flag &= ~NLP_IN_DEV_LOSS;
 	if (put_node)
 		lpfc_nlp_put(ndlp);
 	if (put_rport)
@@ -995,7 +1024,6 @@
 	struct lpfc_vport **vports;
 	int i;
 
-	lpfc_cleanup_wt_rrqs(phba);
 	phba->link_state = LPFC_LINK_UP;
 
 	/* Unblock fabric iocbs if they are blocked */
@@ -2042,7 +2070,8 @@
  * returns:
  * 0=success 1=failure
  **/
-int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba, uint16_t fcf_index,
+static int lpfc_sli4_fcf_pri_list_add(struct lpfc_hba *phba,
+	uint16_t fcf_index,
 	struct fcf_record *new_fcf_record)
 {
 	uint16_t current_fcf_pri;
@@ -2146,7 +2175,6 @@
 	uint16_t fcf_index, next_fcf_index;
 	struct lpfc_fcf_rec *fcf_rec = NULL;
 	uint16_t vlan_id;
-	uint32_t seed;
 	bool select_new_fcf;
 	int rc;
 
@@ -2383,9 +2411,6 @@
 		phba->fcf.fcf_flag |= FCF_AVAILABLE;
 		/* Setup initial running random FCF selection count */
 		phba->fcf.eligible_fcf_cnt = 1;
-		/* Seeding the random number generator for random selection */
-		seed = (uint32_t)(0xFFFFFFFF & jiffies);
-		prandom_seed(seed);
 	}
 	spin_unlock_irq(&phba->hbalock);
 	goto read_next_fcf;
@@ -2678,7 +2703,7 @@
  *
  * This function handles completion of init vfi mailbox command.
  */
-void
+static void
 lpfc_init_vfi_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
 {
 	struct lpfc_vport *vport = mboxq->vport;
@@ -4438,7 +4463,7 @@
  * This function will issue an ELS LOGO command after completing
  * the UNREG_RPI.
  **/
-void
+static void
 lpfc_nlp_logo_unreg(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
 {
 	struct lpfc_vport  *vport = pmb->vport;
@@ -5006,7 +5031,6 @@
 	struct lpfc_hba  *phba = vport->phba;
 	uint32_t num_sent;
 	uint32_t clear_la_pending;
-	int did_changed;
 
 	if (!lpfc_is_link_up(phba)) {
 		lpfc_printf_vlog(vport, KERN_INFO, LOG_SLI,
@@ -5025,11 +5049,6 @@
 
 	lpfc_set_disctmo(vport);
 
-	if (vport->fc_prevDID == vport->fc_myDID)
-		did_changed = 0;
-	else
-		did_changed = 1;
-
 	vport->fc_prevDID = vport->fc_myDID;
 	vport->num_disc_nodes = 0;
 
@@ -6318,7 +6337,7 @@
 		uint8_t *buff,
 		uint32_t size)
 {
-	uint32_t offset = 0, rec_length;
+	uint32_t offset = 0;
 	uint8_t *rec_ptr;
 
 	/*
@@ -6345,8 +6364,6 @@
 	}
 	offset += 4;
 
-	rec_length = buff[offset + 1];
-
 	/* Read FCoE param record */
 	rec_ptr = lpfc_get_rec_conf23(&buff[offset],
 			size - offset, FCOE_PARAM_TYPE);
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index a5769a9..0b2c53a 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -306,10 +306,10 @@
 		dist = dist_char[prg->dist];
 
 	if ((prg->dist == 3) && (prg->num == 0))
-		sprintf(phba->OptionROMVersion, "%d.%d%d",
+		snprintf(phba->OptionROMVersion, 32, "%d.%d%d",
 			prg->ver, prg->rev, prg->lev);
 	else
-		sprintf(phba->OptionROMVersion, "%d.%d%d%c%d",
+		snprintf(phba->OptionROMVersion, 32, "%d.%d%d%c%d",
 			prg->ver, prg->rev, prg->lev,
 			dist, prg->num);
 	mempool_free(pmboxq, phba->mbox_mem_pool);
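
Side note (illustration only, not part of the patch): unlike sprintf(),
snprintf() never writes past the given bound and always terminates, so an
oversized version string can no longer overrun OptionROMVersion. Passing
sizeof() instead of a literal 32 would track the buffer automatically;
sketched with a placeholder:

	char ver[32];

	snprintf(ver, sizeof(ver), "%d.%d%d", prg->ver, prg->rev, prg->lev);
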
@@ -649,7 +649,7 @@
  *		0 - success
  *		Any other value - error
  **/
-int
+static int
 lpfc_hba_init_link(struct lpfc_hba *phba, uint32_t flag)
 {
 	return lpfc_hba_init_link_fc_topology(phba, phba->cfg_topology, flag);
@@ -750,7 +750,7 @@
  *		0 - success
  *		Any other value - error
  **/
-int
+static int
 lpfc_hba_down_link(struct lpfc_hba *phba, uint32_t flag)
 {
 	LPFC_MBOXQ_t *pmb;
@@ -988,9 +988,12 @@
 	LIST_HEAD(aborts);
 	unsigned long iflag = 0;
 	struct lpfc_sglq *sglq_entry = NULL;
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
 
 	lpfc_hba_free_post_buf(phba);
 	lpfc_hba_clean_txcmplq(phba);
+	pring = &psli->ring[LPFC_ELS_RING];
 
 	/* At this point in time the HBA is either reset or DOA. Either
 	 * way, nothing should be on lpfc_abts_els_sgl_list, it needs to be
@@ -1008,8 +1011,10 @@
 		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
 		sglq_entry->state = SGL_FREED;
 
+	spin_lock(&pring->ring_lock);
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			&phba->sli4_hba.lpfc_sgl_list);
+	spin_unlock(&pring->ring_lock);
 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 	/* abts_scsi_buf_list_lock required because worker thread uses this
 	 * list.
@@ -3047,6 +3052,7 @@
 	LIST_HEAD(els_sgl_list);
 	LIST_HEAD(scsi_sgl_list);
 	int rc;
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	/*
 	 * update on pci function's els xri-sgl list
@@ -3087,7 +3093,9 @@
 			list_add_tail(&sglq_entry->list, &els_sgl_list);
 		}
 		spin_lock_irq(&phba->hbalock);
+		spin_lock(&pring->ring_lock);
 		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&pring->ring_lock);
 		spin_unlock_irq(&phba->hbalock);
 	} else if (els_xri_cnt < phba->sli4_hba.els_xri_cnt) {
 		/* els xri-sgl shrunk */
@@ -3097,7 +3105,9 @@
 				"%d to %d\n", phba->sli4_hba.els_xri_cnt,
 				els_xri_cnt);
 		spin_lock_irq(&phba->hbalock);
+		spin_lock(&pring->ring_lock);
 		list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &els_sgl_list);
+		spin_unlock(&pring->ring_lock);
 		spin_unlock_irq(&phba->hbalock);
 		/* release extra els sgls from list */
 		for (i = 0; i < xri_cnt; i++) {
@@ -3110,7 +3120,9 @@
 			}
 		}
 		spin_lock_irq(&phba->hbalock);
+		spin_lock(&pring->ring_lock);
 		list_splice_init(&els_sgl_list, &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&pring->ring_lock);
 		spin_unlock_irq(&phba->hbalock);
 	} else
 		lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
@@ -3165,9 +3177,11 @@
 		for (i = 0; i < scsi_xri_cnt; i++) {
 			list_remove_head(&scsi_sgl_list, psb,
 					 struct lpfc_scsi_buf, list);
-			pci_pool_free(phba->lpfc_scsi_dma_buf_pool, psb->data,
-				      psb->dma_handle);
-			kfree(psb);
+			if (psb) {
+				pci_pool_free(phba->lpfc_scsi_dma_buf_pool,
+					      psb->data, psb->dma_handle);
+				kfree(psb);
+			}
 		}
 		spin_lock_irq(&phba->scsi_buf_list_get_lock);
 		phba->sli4_hba.scsi_xri_cnt -= scsi_xri_cnt;
@@ -3550,7 +3564,7 @@
 * list, and then the worker thread shall be woken up for processing from the
  * worker thread context.
  **/
-void
+static void
 lpfc_sli4_fcf_redisc_wait_tmo(unsigned long ptr)
 {
 	struct lpfc_hba *phba = (struct lpfc_hba *)ptr;
@@ -5680,10 +5694,13 @@
 lpfc_free_els_sgl_list(struct lpfc_hba *phba)
 {
 	LIST_HEAD(sglq_list);
+	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 
 	/* Retrieve all els sgls from driver list */
 	spin_lock_irq(&phba->hbalock);
+	spin_lock(&pring->ring_lock);
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &sglq_list);
+	spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Now free the sgl list */
@@ -5848,16 +5865,14 @@
 	if (!dmabuf)
 		return NULL;
 
-	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-					  LPFC_HDR_TEMPLATE_SIZE,
-					  &dmabuf->phys,
-					  GFP_KERNEL);
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+					   LPFC_HDR_TEMPLATE_SIZE,
+					   &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		rpi_hdr = NULL;
 		goto err_free_dmabuf;
 	}
 
-	memset(dmabuf->virt, 0, LPFC_HDR_TEMPLATE_SIZE);
 	if (!IS_ALIGNED(dmabuf->phys, LPFC_HDR_TEMPLATE_SIZE)) {
 		rpi_hdr = NULL;
 		goto err_free_coherent;
@@ -6246,14 +6261,11 @@
 	}
 
 	/* Allocate memory for SLI-2 structures */
-	phba->slim2p.virt = dma_alloc_coherent(&pdev->dev,
-					       SLI2_SLIM_SIZE,
-					       &phba->slim2p.phys,
-					       GFP_KERNEL);
+	phba->slim2p.virt = dma_zalloc_coherent(&pdev->dev, SLI2_SLIM_SIZE,
+						&phba->slim2p.phys, GFP_KERNEL);
 	if (!phba->slim2p.virt)
 		goto out_iounmap;
 
-	memset(phba->slim2p.virt, 0, SLI2_SLIM_SIZE);
 	phba->mbox = phba->slim2p.virt + offsetof(struct lpfc_sli2_slim, mbx);
 	phba->mbox_ext = (phba->slim2p.virt +
 		offsetof(struct lpfc_sli2_slim, mbx_ext_words));
@@ -6618,15 +6630,12 @@
 	 * plus an alignment restriction of 16 bytes.
 	 */
 	bmbx_size = sizeof(struct lpfc_bmbx_create) + (LPFC_ALIGN_16_BYTE - 1);
-	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-					  bmbx_size,
-					  &dmabuf->phys,
-					  GFP_KERNEL);
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, bmbx_size,
+					   &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
 	}
-	memset(dmabuf->virt, 0, bmbx_size);
 
 	/*
 	 * Initialize the bootstrap mailbox pointers now so that the register
@@ -6710,7 +6719,6 @@
 	struct lpfc_mbx_get_func_cfg *get_func_cfg;
 	struct lpfc_rsrc_desc_fcfcoe *desc;
 	char *pdesc_0;
-	uint32_t desc_count;
 	int length, i, rc = 0, rc2;
 
 	pmb = (LPFC_MBOXQ_t *) mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
@@ -6841,7 +6849,6 @@
 
 	/* search for fc_fcoe resource descriptor */
 	get_func_cfg = &pmb->u.mqe.un.get_func_cfg;
-	desc_count = get_func_cfg->func_cfg.rsrc_desc_count;
 
 	pdesc_0 = (char *)&get_func_cfg->func_cfg.desc[0];
 	desc = (struct lpfc_rsrc_desc_fcfcoe *)pdesc_0;
@@ -7417,7 +7424,8 @@
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0523 Failed setup of fast-path EQ "
-					"(%d), rc = 0x%x\n", fcp_eqidx, rc);
+					"(%d), rc = 0x%x\n", fcp_eqidx,
+					(uint32_t)rc);
 			goto out_destroy_hba_eq;
 		}
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7448,7 +7456,8 @@
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0527 Failed setup of fast-path FCP "
-					"CQ (%d), rc = 0x%x\n", fcp_cqidx, rc);
+					"CQ (%d), rc = 0x%x\n", fcp_cqidx,
+					(uint32_t)rc);
 			goto out_destroy_fcp_cq;
 		}
 
@@ -7488,7 +7497,8 @@
 		if (rc) {
 			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 					"0535 Failed setup of fast-path FCP "
-					"WQ (%d), rc = 0x%x\n", fcp_wqidx, rc);
+					"WQ (%d), rc = 0x%x\n", fcp_wqidx,
+					(uint32_t)rc);
 			goto out_destroy_fcp_wq;
 		}
 
@@ -7521,7 +7531,7 @@
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0529 Failed setup of slow-path mailbox CQ: "
-				"rc = 0x%x\n", rc);
+				"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_fcp_wq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7541,7 +7551,7 @@
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0531 Failed setup of slow-path ELS CQ: "
-				"rc = 0x%x\n", rc);
+				"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_mbx_cq;
 	}
 	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
@@ -7585,7 +7595,7 @@
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0537 Failed setup of slow-path ELS WQ: "
-				"rc = 0x%x\n", rc);
+				"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_mbx_wq;
 	}
 
@@ -7617,7 +7627,7 @@
 	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"0541 Failed setup of Receive Queue: "
-				"rc = 0x%x\n", rc);
+				"rc = 0x%x\n", (uint32_t)rc);
 		goto out_destroy_fcp_wq;
 	}
 
@@ -7896,7 +7906,8 @@
 	LPFC_MBOXQ_t *mboxq;
 	uint32_t rc = 0, if_type;
 	uint32_t shdr_status, shdr_add_status;
-	uint32_t rdy_chk, num_resets = 0, reset_again = 0;
+	uint32_t rdy_chk;
+	uint32_t port_reset = 0;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct lpfc_register reg_data;
 	uint16_t devid;
@@ -7936,9 +7947,42 @@
 		}
 		break;
 	case LPFC_SLI_INTF_IF_TYPE_2:
-		for (num_resets = 0;
-		     num_resets < MAX_IF_TYPE_2_RESETS;
-		     num_resets++) {
+wait:
+		/*
+		 * Poll the Port Status Register and wait for RDY for
+		 * up to 60 seconds. If the port doesn't respond, treat
+		 * it as an error.
+		 */
+		for (rdy_chk = 0; rdy_chk < 3000; rdy_chk++) {
+			if (lpfc_readl(phba->sli4_hba.u.if_type2.
+				STATUSregaddr, &reg_data.word0)) {
+				rc = -ENODEV;
+				goto out;
+			}
+			if (bf_get(lpfc_sliport_status_rdy, &reg_data))
+				break;
+			msleep(20);
+		}
+
+		if (!bf_get(lpfc_sliport_status_rdy, &reg_data)) {
+			phba->work_status[0] = readl(
+				phba->sli4_hba.u.if_type2.ERR1regaddr);
+			phba->work_status[1] = readl(
+				phba->sli4_hba.u.if_type2.ERR2regaddr);
+			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+					"2890 Port not ready, port status reg "
+					"0x%x error 1=0x%x, error 2=0x%x\n",
+					reg_data.word0,
+					phba->work_status[0],
+					phba->work_status[1]);
+			rc = -ENODEV;
+			goto out;
+		}
+
+		if (!port_reset) {
+			/*
+			 * Reset the port now
+			 */
 			reg_data.word0 = 0;
 			bf_set(lpfc_sliport_ctrl_end, &reg_data,
 			       LPFC_SLIPORT_LITTLE_ENDIAN);
@@ -7949,64 +7993,16 @@
 			/* flush */
 			pci_read_config_word(phba->pcidev,
 					     PCI_DEVICE_ID, &devid);
-			/*
-			 * Poll the Port Status Register and wait for RDY for
-			 * up to 10 seconds.  If the port doesn't respond, treat
-			 * it as an error.  If the port responds with RN, start
-			 * the loop again.
-			 */
-			for (rdy_chk = 0; rdy_chk < 1000; rdy_chk++) {
-				msleep(10);
-				if (lpfc_readl(phba->sli4_hba.u.if_type2.
-					      STATUSregaddr, &reg_data.word0)) {
-					rc = -ENODEV;
-					goto out;
-				}
-				if (bf_get(lpfc_sliport_status_rn, &reg_data))
-					reset_again++;
-				if (bf_get(lpfc_sliport_status_rdy, &reg_data))
-					break;
-			}
 
-			/*
-			 * If the port responds to the init request with
-			 * reset needed, delay for a bit and restart the loop.
-			 */
-			if (reset_again && (rdy_chk < 1000)) {
-				msleep(10);
-				reset_again = 0;
-				continue;
-			}
-
-			/* Detect any port errors. */
-			if ((bf_get(lpfc_sliport_status_err, &reg_data)) ||
-			    (rdy_chk >= 1000)) {
-				phba->work_status[0] = readl(
-					phba->sli4_hba.u.if_type2.ERR1regaddr);
-				phba->work_status[1] = readl(
-					phba->sli4_hba.u.if_type2.ERR2regaddr);
-				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
-					"2890 Port error detected during port "
-					"reset(%d): wait_tmo:%d ms, "
-					"port status reg 0x%x, "
-					"error 1=0x%x, error 2=0x%x\n",
-					num_resets, rdy_chk*10,
-					reg_data.word0,
-					phba->work_status[0],
-					phba->work_status[1]);
-				rc = -ENODEV;
-			}
-
-			/*
-			 * Terminate the outer loop provided the Port indicated
-			 * ready within 10 seconds.
-			 */
-			if (rdy_chk < 1000)
-				break;
+			port_reset = 1;
+			msleep(20);
+			goto wait;
+		} else if (bf_get(lpfc_sliport_status_rn, &reg_data)) {
+			rc = -ENODEV;
+			goto out;
 		}
-		/* delay driver action following IF_TYPE_2 function reset */
-		msleep(100);
 		break;
+
 	case LPFC_SLI_INTF_IF_TYPE_1:
 	default:
 		break;
@@ -8014,11 +8010,10 @@
 
 out:
 	/* Catch the not-ready port failure after a port reset. */
-	if (num_resets >= MAX_IF_TYPE_2_RESETS) {
+	if (rc) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"3317 HBA not functional: IP Reset Failed "
-				"after (%d) retries, try: "
-				"echo fw_reset > board_mode\n", num_resets);
+				"try: echo fw_reset > board_mode\n");
 		rc = -ENODEV;
 	}
 
@@ -8211,9 +8206,9 @@
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-3 interface specs. The kernel function pci_enable_msix() is
- * called to enable the MSI-X vectors. Note that pci_enable_msix(), once
- * invoked, enables either all or nothing, depending on the current
+ * with SLI-3 interface specs. The kernel function pci_enable_msix_exact()
+ * is called to enable the MSI-X vectors. Note that pci_enable_msix_exact(),
+ * once invoked, enables either all or nothing, depending on the current
  * availability of PCI vector resources. The device driver is responsible
  * for calling the individual request_irq() to register each MSI-X vector
 * with an interrupt handler, which is done in this function. Note that
@@ -8237,8 +8232,8 @@
 		phba->msix_entries[i].entry = i;
 
 	/* Configure MSI-X capability structure */
-	rc = pci_enable_msix(phba->pcidev, phba->msix_entries,
-				ARRAY_SIZE(phba->msix_entries));
+	rc = pci_enable_msix_exact(phba->pcidev, phba->msix_entries,
+				   LPFC_MSIX_VECTORS);
 	if (rc) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0420 PCI enable MSI-X failed (%d)\n", rc);
@@ -8775,16 +8770,14 @@
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine is invoked to enable the MSI-X interrupt vectors to device
- * with SLI-4 interface spec. The kernel function pci_enable_msix() is called
- * to enable the MSI-X vectors. Note that pci_enable_msix(), once invoked,
- * enables either all or nothing, depending on the current availability of
- * PCI vector resources. The device driver is responsible for calling the
- * individual request_irq() to register each MSI-X vector with a interrupt
- * handler, which is done in this function. Note that later when device is
- * unloading, the driver should always call free_irq() on all MSI-X vectors
- * it has done request_irq() on before calling pci_disable_msix(). Failure
- * to do so results in a BUG_ON() and a device will be left with MSI-X
- * enabled and leaks its vectors.
+ * with SLI-4 interface spec. The kernel function pci_enable_msix_range()
+ * is called to enable the MSI-X vectors. The device driver is responsible
+ * for calling the individual request_irq() to register each MSI-X vector
+ * with an interrupt handler, which is done in this function. Note that
+ * later when device is unloading, the driver should always call free_irq()
+ * on all MSI-X vectors it has done request_irq() on before calling
+ * pci_disable_msix(). Failure to do so results in a BUG_ON() and a device
+ * will be left with MSI-X enabled and leaks its vectors.
  *
  * Return codes
  * 0 - successful
@@ -8805,17 +8798,14 @@
 		phba->sli4_hba.msix_entries[index].entry = index;
 		vectors++;
 	}
-enable_msix_vectors:
-	rc = pci_enable_msix(phba->pcidev, phba->sli4_hba.msix_entries,
-			     vectors);
-	if (rc > 1) {
-		vectors = rc;
-		goto enable_msix_vectors;
-	} else if (rc) {
+	rc = pci_enable_msix_range(phba->pcidev, phba->sli4_hba.msix_entries,
+				   2, vectors);
+	if (rc < 0) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
 				"0484 PCI enable MSI-X failed (%d)\n", rc);
 		goto vec_fail_out;
 	}
+	vectors = rc;
 
 	/* Log MSI-X vector assignment */
 	for (index = 0; index < vectors; index++)
@@ -8828,7 +8818,8 @@
 	/* Assign MSI-X vectors to interrupt handlers */
 	for (index = 0; index < vectors; index++) {
 		memset(&phba->sli4_hba.handler_name[index], 0, 16);
-		sprintf((char *)&phba->sli4_hba.handler_name[index],
+		snprintf((char *)&phba->sli4_hba.handler_name[index],
+			 LPFC_SLI4_HANDLER_NAME_SZ,
 			 LPFC_DRIVER_HANDLER_NAME"%d", index);
 
 		phba->sli4_hba.fcp_eq_hdl[index].idx = index;
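
Side note (illustration only, not part of the patch): pci_enable_msix_exact()
is all-or-nothing and returns 0 on success, while pci_enable_msix_range() may
grant anywhere in [minvec, maxvec] and returns the count, which replaces the
old retry loop around pci_enable_msix(). Sketched with pdev/entries/want as
placeholders:

	int nvec = pci_enable_msix_range(pdev, entries, 2, want);
	if (nvec < 0)
		return nvec;	/* not even the minimum was available */
	/* nvec vectors granted; size the per-vector setup to nvec */
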
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 1f292e2..06241f5 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1811,12 +1811,12 @@
 		 * page, this is used as a priori size of SLI4_PAGE_SIZE for
 		 * the later DMA memory free.
 		 */
-		viraddr = dma_alloc_coherent(&phba->pcidev->dev, SLI4_PAGE_SIZE,
-					     &phyaddr, GFP_KERNEL);
+		viraddr = dma_zalloc_coherent(&phba->pcidev->dev,
+					      SLI4_PAGE_SIZE, &phyaddr,
+					      GFP_KERNEL);
 		/* If the allocation fails, proceed with whatever we have */
 		if (!viraddr)
 			break;
-		memset(viraddr, 0, SLI4_PAGE_SIZE);
 		mbox->sge_array->addr[pagen] = viraddr;
 		/* Keep the first page for later sub-header construction */
 		if (pagen == 0)
diff --git a/drivers/scsi/lpfc/lpfc_nportdisc.c b/drivers/scsi/lpfc/lpfc_nportdisc.c
index c342f6a..5cc1103 100644
--- a/drivers/scsi/lpfc/lpfc_nportdisc.c
+++ b/drivers/scsi/lpfc/lpfc_nportdisc.c
@@ -1031,6 +1031,8 @@
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 
 	prsp = list_get_first(&pcmd->list, struct lpfc_dmabuf, list);
+	if (!prsp)
+		goto out;
 
 	lp = (uint32_t *) prsp->virt;
 	sp = (struct serv_parm *) ((uint8_t *) lp + sizeof (uint32_t));
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 7862c55..b99399f 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -306,7 +306,7 @@
  * depth for a scsi device. This function sets the queue depth to the new
  * value and sends an event out to log the queue depth change.
  **/
-int
+static int
 lpfc_change_queue_depth(struct scsi_device *sdev, int qdepth, int reason)
 {
 	struct lpfc_vport *vport = (struct lpfc_vport *) sdev->host->hostdata;
@@ -380,12 +380,14 @@
 {
 	unsigned long flags;
 	uint32_t evt_posted;
+	unsigned long expires;
 
 	spin_lock_irqsave(&phba->hbalock, flags);
 	atomic_inc(&phba->num_rsrc_err);
 	phba->last_rsrc_error_time = jiffies;
 
-	if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
+	expires = phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL;
+	if (time_after(expires, jiffies)) {
 		spin_unlock_irqrestore(&phba->hbalock, flags);
 		return;
 	}
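
Side note (illustration only, not part of the patch): comparing raw jiffies
values with '>' breaks when the counter wraps; the time_after() family does
the comparison in wrap-safe signed arithmetic, which is what the hunk above
switches to. Sketch:

	unsigned long expires = last_event + msecs_to_jiffies(1000);

	if (time_after(expires, jiffies))
		return;		/* interval not yet elapsed, wrap-safe */
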
@@ -741,7 +743,7 @@
  *
  * Returns: 0 = failure, non-zero number of successfully posted buffers.
  **/
-int
+static int
 lpfc_sli4_post_scsi_sgl_list(struct lpfc_hba *phba,
 			     struct list_head *post_sblist, int sb_count)
 {
@@ -2965,7 +2967,7 @@
 * on the specified data using a CRC algorithm
  * using crc_t10dif.
  */
-uint16_t
+static uint16_t
 lpfc_bg_crc(uint8_t *data, int count)
 {
 	uint16_t crc = 0;
@@ -2981,7 +2983,7 @@
 * on the specified data using a CSUM algorithm
  * using ip_compute_csum.
  */
-uint16_t
+static uint16_t
 lpfc_bg_csum(uint8_t *data, int count)
 {
 	uint16_t ret;
@@ -2994,7 +2996,7 @@
  * This function examines the protection data to try to determine
  * what type of T10-DIF error occurred.
  */
-void
+static void
 lpfc_calc_bg_err(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
 {
 	struct scatterlist *sgpe; /* s/g prot entry */
@@ -3464,7 +3466,7 @@
 	 */
 	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
 		scsi_cmnd->device->hostdata)->oas_enabled)
-		lpfc_cmd->cur_iocbq.iocb_flag |= LPFC_IO_OAS;
+		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
 	return 0;
 }
 
@@ -3604,6 +3606,14 @@
 	 */
 	iocb_cmd->un.fcpi.fcpi_parm = fcpdl;
 
+	/*
+	 * If the OAS driver feature is enabled and the lun is enabled for
+	 * OAS, set the oas iocb related flags.
+	 */
+	if ((phba->cfg_fof) && ((struct lpfc_device_data *)
+		scsi_cmnd->device->hostdata)->oas_enabled)
+		lpfc_cmd->cur_iocbq.iocb_flag |= (LPFC_IO_OAS | LPFC_IO_FOF);
+
 	return 0;
 err:
 	if (lpfc_cmd->seg_cnt)
@@ -4874,6 +4884,8 @@
 	/* ABTS WQE must go to the same WQ as the WQE to be aborted */
 	abtsiocb->fcp_wqidx = iocb->fcp_wqidx;
 	abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+	if (iocb->iocb_flag & LPFC_IO_FOF)
+		abtsiocb->iocb_flag |= LPFC_IO_FOF;
 
 	if (lpfc_is_link_up(phba))
 		icmd->ulpCommand = CMD_ABORT_XRI_CN;
@@ -5327,7 +5339,13 @@
 	if (status == FAILED) {
 		lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 			"0722 Target Reset rport failure: rdata x%p\n", rdata);
-		return FAILED;
+		spin_lock_irq(shost->host_lock);
+		pnode->nlp_flag &= ~NLP_NPR_ADISC;
+		pnode->nlp_fcp_info &= ~NLP_FCP_2_DEVICE;
+		spin_unlock_irq(shost->host_lock);
+		lpfc_reset_flush_io_context(vport, tgt_id, lun_id,
+					  LPFC_CTX_TGT);
+		return FAST_IO_FAIL;
 	}
 
 	scsi_event.event_type = FC_REG_SCSI_EVENT;
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 32ada05..207a43d 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -187,7 +187,6 @@
 {
 	struct lpfc_mqe *temp_mqe;
 	struct lpfc_register doorbell;
-	uint32_t host_index;
 
 	/* sanity check on queue memory */
 	if (unlikely(!q))
@@ -202,7 +201,6 @@
 	q->phba->mbox = (MAILBOX_t *)temp_mqe;
 
 	/* Update the host index before invoking device */
-	host_index = q->host_index;
 	q->host_index = ((q->host_index + 1) % q->entry_count);
 
 	/* Ring Doorbell */
@@ -786,42 +784,6 @@
 }
 
 /**
- * lpfc_cleanup_wt_rrqs - Remove all rrq's from the active list.
- * @phba: Pointer to HBA context object.
- *
- * Remove all rrqs from the phba->active_rrq_list and free them by
- * calling __lpfc_clr_active_rrq
- *
- **/
-void
-lpfc_cleanup_wt_rrqs(struct lpfc_hba *phba)
-{
-	struct lpfc_node_rrq *rrq;
-	struct lpfc_node_rrq *nextrrq;
-	unsigned long next_time;
-	unsigned long iflags;
-	LIST_HEAD(rrq_list);
-
-	if (phba->sli_rev != LPFC_SLI_REV4)
-		return;
-	spin_lock_irqsave(&phba->hbalock, iflags);
-	phba->hba_flag &= ~HBA_RRQ_ACTIVE;
-	next_time = jiffies + msecs_to_jiffies(1000 * (phba->fc_ratov * 2));
-	list_splice_init(&phba->active_rrq_list, &rrq_list);
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
-
-	list_for_each_entry_safe(rrq, nextrrq, &rrq_list, list) {
-		list_del(&rrq->list);
-		lpfc_clr_rrq_active(phba, rrq->xritag, rrq);
-	}
-	if ((!list_empty(&phba->active_rrq_list)) &&
-	    (!(phba->pport->load_flag & FC_UNLOADING)))
-
-		mod_timer(&phba->rrq_tmr, next_time);
-}
-
-
-/**
  * lpfc_test_rrq_active - Test RRQ bit in xri_bitmap.
  * @phba: Pointer to HBA context object.
  * @ndlp: Targets nodelist pointer for this exchange.
@@ -937,7 +899,7 @@
  * @phba: Pointer to HBA context object.
  * @piocb: Pointer to the iocbq.
  *
- * This function is called with hbalock held. This function
+ * This function is called with the ring lock held. This function
  * gets a new driver sglq object from the sglq list. If the
  * list is not empty then it is successful, it returns pointer to the newly
  * allocated sglq object else it returns NULL.
@@ -1053,10 +1015,12 @@
 			spin_unlock_irqrestore(
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
 		} else {
+			spin_lock_irqsave(&pring->ring_lock, iflag);
 			sglq->state = SGL_FREED;
 			sglq->ndlp = NULL;
 			list_add_tail(&sglq->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			spin_unlock_irqrestore(&pring->ring_lock, iflag);
 
 			/* Check if TXQ queue needs to be serviced */
 			if (!list_empty(&pring->txq))
@@ -2469,11 +2433,9 @@
 	IOCB_t           * irsp;
 	WORD5            * w5p;
 	uint32_t           Rctl, Type;
-	uint32_t           match;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_dmabuf *dmzbuf;
 
-	match = 0;
 	irsp = &(saveq->iocb);
 
 	if (irsp->ulpCommand == CMD_ASYNC_STATUS) {
@@ -2899,7 +2861,7 @@
 void lpfc_poll_eratt(unsigned long ptr)
 {
 	struct lpfc_hba *phba;
-	uint32_t eratt = 0, rem;
+	uint32_t eratt = 0;
 	uint64_t sli_intr, cnt;
 
 	phba = (struct lpfc_hba *)ptr;
@@ -2914,7 +2876,7 @@
 		cnt = (sli_intr - phba->sli.slistat.sli_prev_intr);
 
 	/* 64-bit integer division not supported on 32-bit x86 - use do_div */
-	rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
+	do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
 	phba->sli.slistat.sli_ips = cnt;
 
 	phba->sli.slistat.sli_prev_intr = sli_intr;
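
Side note (illustration only, not part of the patch): do_div() divides a u64
in place (the first argument becomes the quotient) and returns the 32-bit
remainder, avoiding the 64-by-32 division helper the kernel does not provide
on 32-bit targets. Sketch with placeholder values:

	u64 cnt = sli_intr - prev_intr;
	u32 rem = do_div(cnt, LPFC_ERATT_POLL_INTERVAL);
	/* cnt now holds the quotient, rem the remainder */
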
@@ -4864,15 +4826,12 @@
 	 * mailbox command.
 	 */
 	dma_size = *vpd_size;
-	dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-					  dma_size,
-					  &dmabuf->phys,
-					  GFP_KERNEL);
+	dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev, dma_size,
+					   &dmabuf->phys, GFP_KERNEL);
 	if (!dmabuf->virt) {
 		kfree(dmabuf);
 		return -ENOMEM;
 	}
-	memset(dmabuf->virt, 0, dma_size);
 
 	/*
 	 * The SLI4 implementation of READ_REV conflicts at word1,
@@ -5990,9 +5949,6 @@
 		curr_blks++;
 	}
 
-	/* Calculate the total requested length of the dma memory. */
-	req_len = curr_blks * sizeof(uint16_t);
-
 	/*
 	 * Calculate the size of an embedded mailbox.  The uint32_t
 	 * accounts for extents-specific word.
@@ -6101,14 +6057,18 @@
 	struct lpfc_sglq *sglq_entry_first = NULL;
 	int status, total_cnt, post_cnt = 0, num_posted = 0, block_cnt = 0;
 	int last_xritag = NO_XRI;
+	struct lpfc_sli_ring *pring;
 	LIST_HEAD(prep_sgl_list);
 	LIST_HEAD(blck_sgl_list);
 	LIST_HEAD(allc_sgl_list);
 	LIST_HEAD(post_sgl_list);
 	LIST_HEAD(free_sgl_list);
 
+	pring = &phba->sli.ring[LPFC_ELS_RING];
 	spin_lock_irq(&phba->hbalock);
+	spin_lock(&pring->ring_lock);
 	list_splice_init(&phba->sli4_hba.lpfc_sgl_list, &allc_sgl_list);
+	spin_unlock(&pring->ring_lock);
 	spin_unlock_irq(&phba->hbalock);
 
 	total_cnt = phba->sli4_hba.els_xri_cnt;
@@ -6210,8 +6170,10 @@
 	/* push els sgls posted to the available list */
 	if (!list_empty(&post_sgl_list)) {
 		spin_lock_irq(&phba->hbalock);
+		spin_lock(&pring->ring_lock);
 		list_splice_init(&post_sgl_list,
 				 &phba->sli4_hba.lpfc_sgl_list);
+		spin_unlock(&pring->ring_lock);
 		spin_unlock_irq(&phba->hbalock);
 	} else {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -6797,13 +6759,16 @@
 lpfc_mbox_timeout_handler(struct lpfc_hba *phba)
 {
 	LPFC_MBOXQ_t *pmbox = phba->sli.mbox_active;
-	MAILBOX_t *mb = &pmbox->u.mb;
+	MAILBOX_t *mb = NULL;
+
 	struct lpfc_sli *psli = &phba->sli;
 
 	/* If the mailbox completed, process the completion and return */
 	if (lpfc_sli4_process_missed_mbox_completions(phba))
 		return;
 
+	if (pmbox != NULL)
+		mb = &pmbox->u.mb;
 	/* Check the pmbox pointer first.  There is a race condition
 	 * between the mbox timeout handler getting executed in the
 	 * worklist and the mailbox actually completing. When this
@@ -8138,7 +8103,7 @@
  *
  * Return: index into SLI4 fast-path FCP queue index.
  **/
-static inline uint32_t
+static inline int
 lpfc_sli4_scmd_to_wqidx_distr(struct lpfc_hba *phba)
 {
 	struct lpfc_vector_map_info *cpup;
@@ -8152,7 +8117,6 @@
 			cpup += cpu;
 			return cpup->channel_id;
 		}
-		chann = cpu;
 	}
 	chann = atomic_add_return(1, &phba->fcp_qidx);
 	chann = (chann % phba->cfg_fcp_io_channel);
@@ -8784,6 +8748,37 @@
 	return 0;
 }
 
+int
+lpfc_sli_calc_ring(struct lpfc_hba *phba, uint32_t ring_number,
+		    struct lpfc_iocbq *piocb)
+{
+	uint32_t idx;
+
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		if (piocb->iocb_flag &  (LPFC_IO_FCP | LPFC_USE_FCPWQIDX)) {
+			/*
+			 * fcp_wqidx should already be setup based on what
+			 * completion queue we want to use.
+			 */
+			if (!(phba->cfg_fof) ||
+			    (!(piocb->iocb_flag & LPFC_IO_FOF))) {
+				if (unlikely(!phba->sli4_hba.fcp_wq))
+					return LPFC_HBA_ERROR;
+				idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
+				piocb->fcp_wqidx = idx;
+				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
+			} else {
+				if (unlikely(!phba->sli4_hba.oas_wq))
+					return LPFC_HBA_ERROR;
+				idx = 0;
+				piocb->fcp_wqidx = idx;
+				ring_number =  LPFC_FCP_OAS_RING;
+			}
+		}
+	}
+	return ring_number;
+}
+
 /**
  * lpfc_sli_issue_iocb - Wrapper function for __lpfc_sli_issue_iocb
  * @phba: Pointer to HBA context object.
@@ -8809,61 +8804,42 @@
 	int rc, idx;
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
-		if (piocb->iocb_flag &  LPFC_IO_FCP) {
-			if (!phba->cfg_fof || (!(piocb->iocb_flag &
-				LPFC_IO_OAS))) {
-				if (unlikely(!phba->sli4_hba.fcp_wq))
-					return IOCB_ERROR;
-				idx = lpfc_sli4_scmd_to_wqidx_distr(phba);
-				piocb->fcp_wqidx = idx;
-				ring_number = MAX_SLI3_CONFIGURED_RINGS + idx;
-			} else {
-				if (unlikely(!phba->sli4_hba.oas_wq))
-					return IOCB_ERROR;
-				idx = 0;
-				piocb->fcp_wqidx = 0;
-				ring_number =  LPFC_FCP_OAS_RING;
-			}
-			pring = &phba->sli.ring[ring_number];
-			spin_lock_irqsave(&pring->ring_lock, iflags);
-			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
-				flag);
-			spin_unlock_irqrestore(&pring->ring_lock, iflags);
+		ring_number = lpfc_sli_calc_ring(phba, ring_number, piocb);
+		if (unlikely(ring_number == LPFC_HBA_ERROR))
+			return IOCB_ERROR;
+		idx = piocb->fcp_wqidx;
 
-			if (lpfc_fcp_look_ahead) {
-				fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
+		pring = &phba->sli.ring[ring_number];
+		spin_lock_irqsave(&pring->ring_lock, iflags);
+		rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb, flag);
+		spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
-				if (atomic_dec_and_test(&fcp_eq_hdl->
-					fcp_eq_in_use)) {
+		if (lpfc_fcp_look_ahead && (piocb->iocb_flag &  LPFC_IO_FCP)) {
+			fcp_eq_hdl = &phba->sli4_hba.fcp_eq_hdl[idx];
 
-					/* Get associated EQ with this index */
-					fpeq = phba->sli4_hba.hba_eq[idx];
+			if (atomic_dec_and_test(&fcp_eq_hdl->
+				fcp_eq_in_use)) {
 
-					/* Turn off interrupts from this EQ */
-					lpfc_sli4_eq_clr_intr(fpeq);
+				/* Get associated EQ with this index */
+				fpeq = phba->sli4_hba.hba_eq[idx];
 
-					/*
-					 * Process all the events on FCP EQ
-					 */
-					while ((eqe = lpfc_sli4_eq_get(fpeq))) {
-						lpfc_sli4_hba_handle_eqe(phba,
-							eqe, idx);
-						fpeq->EQ_processed++;
-					}
+				/* Turn off interrupts from this EQ */
+				lpfc_sli4_eq_clr_intr(fpeq);
 
-					/* Always clear and re-arm the EQ */
-					lpfc_sli4_eq_release(fpeq,
-						LPFC_QUEUE_REARM);
+				/*
+				 * Process all the events on FCP EQ
+				 */
+				while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+					lpfc_sli4_hba_handle_eqe(phba,
+						eqe, idx);
+					fpeq->EQ_processed++;
 				}
-				atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
-			}
-		} else {
-			pring = &phba->sli.ring[ring_number];
-			spin_lock_irqsave(&pring->ring_lock, iflags);
-			rc = __lpfc_sli_issue_iocb(phba, ring_number, piocb,
-				flag);
-			spin_unlock_irqrestore(&pring->ring_lock, iflags);
 
+				/* Always clear and re-arm the EQ */
+				lpfc_sli4_eq_release(fpeq,
+					LPFC_QUEUE_REARM);
+			}
+			atomic_inc(&fcp_eq_hdl->fcp_eq_in_use);
 		}
 	} else {
 		/* For now, SLI2/3 will still use hbalock */
@@ -9746,6 +9722,7 @@
 	struct lpfc_iocbq *abtsiocbp;
 	IOCB_t *icmd = NULL;
 	IOCB_t *iabt = NULL;
+	int ring_number;
 	int retval;
 	unsigned long iflags;
 
@@ -9786,6 +9763,8 @@
 	abtsiocbp->fcp_wqidx = cmdiocb->fcp_wqidx;
 	if (cmdiocb->iocb_flag & LPFC_IO_FCP)
 		abtsiocbp->iocb_flag |= LPFC_USE_FCPWQIDX;
+	if (cmdiocb->iocb_flag & LPFC_IO_FOF)
+		abtsiocbp->iocb_flag |= LPFC_IO_FOF;
 
 	if (phba->link_state >= LPFC_LINK_UP)
 		iabt->ulpCommand = CMD_ABORT_XRI_CN;
@@ -9802,6 +9781,11 @@
 			 abtsiocbp->iotag);
 
 	if (phba->sli_rev == LPFC_SLI_REV4) {
+		ring_number =
+			lpfc_sli_calc_ring(phba, pring->ringno, abtsiocbp);
+		if (unlikely(ring_number == LPFC_HBA_ERROR))
+			return 0;
+		pring = &phba->sli.ring[ring_number];
 		/* Note: both hbalock and ring_lock need to be set here */
 		spin_lock_irqsave(&pring->ring_lock, iflags);
 		retval = __lpfc_sli_issue_iocb(phba, pring->ringno,
@@ -10099,6 +10083,8 @@
 		abtsiocb->fcp_wqidx = iocbq->fcp_wqidx;
 		if (iocbq->iocb_flag & LPFC_IO_FCP)
 			abtsiocb->iocb_flag |= LPFC_USE_FCPWQIDX;
+		if (iocbq->iocb_flag & LPFC_IO_FOF)
+			abtsiocb->iocb_flag |= LPFC_IO_FOF;
 
 		if (lpfc_is_link_up(phba))
 			abtsiocb->iocb.ulpCommand = CMD_ABORT_XRI_CN;
@@ -10146,7 +10132,9 @@
 			uint16_t tgt_id, uint64_t lun_id, lpfc_ctx_cmd cmd)
 {
 	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_scsi_buf *lpfc_cmd;
 	struct lpfc_iocbq *abtsiocbq;
+	struct lpfc_nodelist *ndlp;
 	struct lpfc_iocbq *iocbq;
 	IOCB_t *icmd;
 	int sum, i, ret_val;
@@ -10198,8 +10186,14 @@
 		abtsiocbq->fcp_wqidx = iocbq->fcp_wqidx;
 		if (iocbq->iocb_flag & LPFC_IO_FCP)
 			abtsiocbq->iocb_flag |= LPFC_USE_FCPWQIDX;
+		if (iocbq->iocb_flag & LPFC_IO_FOF)
+			abtsiocbq->iocb_flag |= LPFC_IO_FOF;
 
-		if (lpfc_is_link_up(phba))
+		lpfc_cmd = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		ndlp = lpfc_cmd->rdata->pnode;
+
+		if (lpfc_is_link_up(phba) &&
+		    (ndlp && ndlp->nlp_state == NLP_STE_MAPPED_NODE))
 			abtsiocbq->iocb.ulpCommand = CMD_ABORT_XRI_CN;
 		else
 			abtsiocbq->iocb.ulpCommand = CMD_CLOSE_XRI_CN;
@@ -12611,6 +12605,9 @@
 	 * Process all the event on FCP fast-path EQ
 	 */
 	while ((eqe = lpfc_sli4_eq_get(fpeq))) {
+		if (eqe == NULL)
+			break;
+
 		lpfc_sli4_hba_handle_eqe(phba, eqe, fcp_eqidx);
 		if (!(++ecount % fpeq->entry_repost))
 			lpfc_sli4_eq_release(fpeq, LPFC_QUEUE_NOARM);
@@ -12760,14 +12757,13 @@
 		dmabuf = kzalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
 		if (!dmabuf)
 			goto out_fail;
-		dmabuf->virt = dma_alloc_coherent(&phba->pcidev->dev,
-						  hw_page_size, &dmabuf->phys,
-						  GFP_KERNEL);
+		dmabuf->virt = dma_zalloc_coherent(&phba->pcidev->dev,
+						   hw_page_size, &dmabuf->phys,
+						   GFP_KERNEL);
 		if (!dmabuf->virt) {
 			kfree(dmabuf);
 			goto out_fail;
 		}
-		memset(dmabuf->virt, 0, hw_page_size);
 		dmabuf->buffer_tag = x;
 		list_add_tail(&dmabuf->list, &queue->page_list);
 		/* initialize queue's entry array */
@@ -12845,7 +12841,7 @@
  * memory this function will return -ENOMEM. If the queue create mailbox command
  * fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_modify_fcp_eq_delay(struct lpfc_hba *phba, uint16_t startq)
 {
 	struct lpfc_mbx_modify_eq_delay *eq_delay;
@@ -12931,7 +12927,7 @@
  * memory this function will return -ENOMEM. If the queue create mailbox command
  * fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_eq_create(struct lpfc_hba *phba, struct lpfc_queue *eq, uint32_t imax)
 {
 	struct lpfc_mbx_eq_create *eq_create;
@@ -13053,7 +13049,7 @@
  * memory this function will return -ENOMEM. If the queue create mailbox command
  * fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_cq_create(struct lpfc_hba *phba, struct lpfc_queue *cq,
 	       struct lpfc_queue *eq, uint32_t type, uint32_t subtype)
 {
@@ -13394,7 +13390,7 @@
  * memory this function will return -ENOMEM. If the queue create mailbox command
  * fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_wq_create(struct lpfc_hba *phba, struct lpfc_queue *wq,
 	       struct lpfc_queue *cq, uint32_t subtype)
 {
@@ -13630,7 +13626,7 @@
  * memory this function will return -ENOMEM. If the queue create mailbox command
  * fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_rq_create(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 	       struct lpfc_queue *drq, struct lpfc_queue *cq, uint32_t subtype)
 {
@@ -13895,7 +13891,7 @@
  * On success this function will return a zero. If the queue destroy mailbox
  * command fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_eq_destroy(struct lpfc_hba *phba, struct lpfc_queue *eq)
 {
 	LPFC_MBOXQ_t *mbox;
@@ -13951,7 +13947,7 @@
  * On success this function will return a zero. If the queue destroy mailbox
  * command fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_cq_destroy(struct lpfc_hba *phba, struct lpfc_queue *cq)
 {
 	LPFC_MBOXQ_t *mbox;
@@ -14005,7 +14001,7 @@
  * On success this function will return a zero. If the queue destroy mailbox
  * command fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_mq_destroy(struct lpfc_hba *phba, struct lpfc_queue *mq)
 {
 	LPFC_MBOXQ_t *mbox;
@@ -14059,7 +14055,7 @@
  * On success this function will return a zero. If the queue destroy mailbox
  * command fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_wq_destroy(struct lpfc_hba *phba, struct lpfc_queue *wq)
 {
 	LPFC_MBOXQ_t *mbox;
@@ -14112,7 +14108,7 @@
  * On success this function will return a zero. If the queue destroy mailbox
  * command fails this function will return -ENXIO.
  **/
-uint32_t
+int
 lpfc_rq_destroy(struct lpfc_hba *phba, struct lpfc_queue *hrq,
 		struct lpfc_queue *drq)
 {
@@ -14252,7 +14248,6 @@
 				"2511 POST_SGL mailbox failed with "
 				"status x%x add_status x%x, mbx status x%x\n",
 				shdr_status, shdr_add_status, rc);
-		rc = -ENXIO;
 	}
 	return 0;
 }
@@ -14270,7 +14265,7 @@
  *	A nonzero rpi defined as rpi_base <= rpi < max_rpi if successful
  *	LPFC_RPI_ALLOC_ERROR if no rpis are available.
  **/
-uint16_t
+static uint16_t
 lpfc_sli4_alloc_xri(struct lpfc_hba *phba)
 {
 	unsigned long xri;
@@ -14300,7 +14295,7 @@
  * This routine is invoked to release an xri to the pool of
  * available rpis maintained by the driver.
  **/
-void
+static void
 __lpfc_sli4_free_xri(struct lpfc_hba *phba, int xri)
 {
 	if (test_and_clear_bit(xri, phba->sli4_hba.xri_bmask)) {
@@ -14720,7 +14715,7 @@
  * the driver uses this time stamp to indicate if any received sequences have
  * timed out.
  **/
-void
+static void
 lpfc_update_rcv_time_stamp(struct lpfc_vport *vport)
 {
 	struct lpfc_dmabuf *h_buf;
@@ -15019,7 +15014,7 @@
 lpfc_sli4_xri_inrange(struct lpfc_hba *phba,
 		      uint16_t xri)
 {
-	int i;
+	uint16_t i;
 
 	for (i = 0; i < phba->sli4_hba.max_cfg_param.max_xri; i++) {
 		if (xri == phba->sli4_hba.xri_ids[i])
@@ -15189,7 +15184,7 @@
  * unsolicited sequence has been aborted. After that, it will issue a basic
  * accept to accept the abort.
  **/
-void
+static void
 lpfc_sli4_handle_unsol_abort(struct lpfc_vport *vport,
 			     struct hbq_dmabuf *dmabuf)
 {
@@ -15734,7 +15729,7 @@
  * This routine is invoked to release an rpi to the pool of
  * available rpis maintained by the driver.
  **/
-void
+static void
 __lpfc_sli4_free_rpi(struct lpfc_hba *phba, int rpi)
 {
 	if (test_and_clear_bit(rpi, phba->sli4_hba.rpi_bmask)) {
@@ -16172,7 +16167,7 @@
  * returns:
  * 1=success 0=failure
  **/
-int
+static int
 lpfc_check_next_fcf_pri_level(struct lpfc_hba *phba)
 {
 	uint16_t next_fcf_pri;
@@ -16403,7 +16398,7 @@
  * command. If the mailbox command returned failure, it will try to stop the
  * FCF rediscover wait timer.
  **/
-void
+static void
 lpfc_mbx_cmpl_redisc_fcf_table(struct lpfc_hba *phba, LPFC_MBOXQ_t *mbox)
 {
 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
@@ -16956,7 +16951,7 @@
 	char *fail_msg = NULL;
 	struct lpfc_sglq *sglq;
 	union lpfc_wqe wqe;
-	int txq_cnt = 0;
+	uint32_t txq_cnt = 0;
 
 	spin_lock_irqsave(&pring->ring_lock, iflags);
 	list_for_each_entry(piocbq, &pring->txq, list) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index edb4883..4a01452 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -79,6 +79,7 @@
 #define LPFC_FIP_ELS_ID_SHIFT	14
 
 #define LPFC_IO_OAS		0x10000 /* OAS FCP IO */
+#define LPFC_IO_FOF		0x20000 /* FOF FCP IO */
 
 	uint32_t drvrTimeout;	/* driver timeout in seconds */
 	uint32_t fcp_wqidx;	/* index to FCP work queue */
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 7f50aa0..22ceb2b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -670,22 +670,22 @@
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
 			uint32_t);
 void lpfc_sli4_queue_free(struct lpfc_queue *);
-uint32_t lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
-uint32_t lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
-uint32_t lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_eq_create(struct lpfc_hba *, struct lpfc_queue *, uint32_t);
+int lpfc_modify_fcp_eq_delay(struct lpfc_hba *, uint16_t);
+int lpfc_cq_create(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_queue *, uint32_t, uint32_t);
 int32_t lpfc_mq_create(struct lpfc_hba *, struct lpfc_queue *,
 		       struct lpfc_queue *, uint32_t);
-uint32_t lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_wq_create(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_queue *, uint32_t);
-uint32_t lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_rq_create(struct lpfc_hba *, struct lpfc_queue *,
 			struct lpfc_queue *, struct lpfc_queue *, uint32_t);
 void lpfc_rq_adjust_repost(struct lpfc_hba *, struct lpfc_queue *, int);
-uint32_t lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
-uint32_t lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
+int lpfc_eq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_cq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_mq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_wq_destroy(struct lpfc_hba *, struct lpfc_queue *);
+int lpfc_rq_destroy(struct lpfc_hba *, struct lpfc_queue *,
 			 struct lpfc_queue *);
 int lpfc_sli4_queue_setup(struct lpfc_hba *);
 void lpfc_sli4_queue_unset(struct lpfc_hba *);
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index 41675c1..89413ad 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "10.2.8001.0."
+#define LPFC_DRIVER_VERSION "10.4.8000.0."
 #define LPFC_DRIVER_NAME		"lpfc"
 
 /* Used for SLI 2/3 */
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 32166c2..a49914d 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -33,9 +33,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION				"06.803.01.00-rc1"
-#define MEGASAS_RELDATE				"Mar. 10, 2014"
-#define MEGASAS_EXT_VERSION			"Mon. Mar. 10 17:00:00 PDT 2014"
+#define MEGASAS_VERSION				"06.805.06.00-rc1"
+#define MEGASAS_RELDATE				"Sep. 4, 2014"
+#define MEGASAS_EXT_VERSION			"Thu. Sep. 4 17:00:00 PDT 2014"
 
 /*
  * Device IDs
@@ -105,6 +105,9 @@
 #define MFI_STATE_READY				0xB0000000
 #define MFI_STATE_OPERATIONAL			0xC0000000
 #define MFI_STATE_FAULT				0xF0000000
+#define MFI_STATE_FORCE_OCR			0x00000080
+#define MFI_STATE_DMADONE			0x00000008
+#define MFI_STATE_CRASH_DUMP_DONE		0x00000004
 #define MFI_RESET_REQUIRED			0x00000001
 #define MFI_RESET_ADAPTER			0x00000002
 #define MEGAMFI_FRAME_SIZE			64
@@ -191,6 +194,9 @@
 #define MR_DCMD_CLUSTER_RESET_LD		0x08010200
 #define MR_DCMD_PD_LIST_QUERY                   0x02010100
 
+#define MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS	0x01190100
+#define MR_DRIVER_SET_APP_CRASHDUMP_MODE	(0xF0010000 | 0x0600)
+
 /*
  * Global functions
  */
@@ -264,6 +270,25 @@
 };
 
 /*
+ * Crash dump related defines
+ */
+#define MAX_CRASH_DUMP_SIZE 512
+#define CRASH_DMA_BUF_SIZE  (1024 * 1024)
+
+enum MR_FW_CRASH_DUMP_STATE {
+	UNAVAILABLE = 0,
+	AVAILABLE = 1,
+	COPYING = 2,
+	COPIED = 3,
+	COPY_ERROR = 4,
+};
+
+enum _MR_CRASH_BUF_STATUS {
+	MR_CRASH_BUF_TURN_OFF = 0,
+	MR_CRASH_BUF_TURN_ON = 1,
+};
+
+/*
  * Number of mailbox bytes in DCMD message frame
  */
 #define MFI_MBOX_SIZE				12
@@ -365,7 +390,6 @@
 #define MR_EVT_FOREIGN_CFG_IMPORTED                     0x00db
 #define MR_EVT_LD_OFFLINE                               0x00fc
 #define MR_EVT_CTRL_HOST_BUS_SCAN_REQUESTED             0x0152
-#define MAX_LOGICAL_DRIVES				64
 
 enum MR_PD_STATE {
 	MR_PD_STATE_UNCONFIGURED_GOOD   = 0x00,
@@ -443,14 +467,14 @@
 		u8          state;
 		u8          reserved[3];
 		u64         size;
-	} ldList[MAX_LOGICAL_DRIVES];
+	} ldList[MAX_LOGICAL_DRIVES_EXT];
 } __packed;
 
 struct MR_LD_TARGETID_LIST {
 	u32	size;
 	u32	count;
 	u8	pad[3];
-	u8	targetId[MAX_LOGICAL_DRIVES];
+	u8	targetId[MAX_LOGICAL_DRIVES_EXT];
 };
 
 
@@ -916,6 +940,15 @@
 	* HA cluster information
 	*/
 	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:26;
+		u32     premiumFeatureMismatch:1;
+		u32     ctrlPropIncompatible:1;
+		u32     fwVersionMismatch:1;
+		u32     hwIncompatible:1;
+		u32     peerIsIncompatible:1;
+		u32     peerIsPresent:1;
+#else
 		u32     peerIsPresent:1;
 		u32     peerIsIncompatible:1;
 		u32     hwIncompatible:1;
@@ -923,6 +956,7 @@
 		u32     ctrlPropIncompatible:1;
 		u32     premiumFeatureMismatch:1;
 		u32     reserved:26;
+#endif
 	} cluster;
 
 	char clusterId[16];                     /*7D4h */
@@ -933,7 +967,27 @@
 		u8  reserved;                   /*0x7E7*/
 	} iov;
 
-	u8          pad[0x800-0x7E8];           /*0x7E8 pad to 2k */
+	struct {
+#if defined(__BIG_ENDIAN_BITFIELD)
+		u32     reserved:25;
+		u32     supportCrashDump:1;
+		u32     supportMaxExtLDs:1;
+		u32     supportT10RebuildAssist:1;
+		u32     supportDisableImmediateIO:1;
+		u32     supportThermalPollInterval:1;
+		u32     supportPersonalityChange:2;
+#else
+		u32     supportPersonalityChange:2;
+		u32     supportThermalPollInterval:1;
+		u32     supportDisableImmediateIO:1;
+		u32     supportT10RebuildAssist:1;
+		u32     supportMaxExtLDs:1;
+		u32     supportCrashDump:1;
+		u32     reserved:25;
+#endif
+	} adapterOperations3;
+
+	u8          pad[0x800-0x7EC];           /*0x7EC pad to 2k */
 } __packed;
 
 /*
@@ -942,13 +996,12 @@
  * ===============================
  */
 #define MEGASAS_MAX_PD_CHANNELS			2
-#define MEGASAS_MAX_LD_CHANNELS			1
+#define MEGASAS_MAX_LD_CHANNELS			2
 #define MEGASAS_MAX_CHANNELS			(MEGASAS_MAX_PD_CHANNELS + \
 						MEGASAS_MAX_LD_CHANNELS)
 #define MEGASAS_MAX_DEV_PER_CHANNEL		128
 #define MEGASAS_DEFAULT_INIT_ID			-1
 #define MEGASAS_MAX_LUN				8
-#define MEGASAS_MAX_LD				64
 #define MEGASAS_DEFAULT_CMD_PER_LUN		256
 #define MEGASAS_MAX_PD                          (MEGASAS_MAX_PD_CHANNELS * \
 						MEGASAS_MAX_DEV_PER_CHANNEL)
@@ -961,6 +1014,14 @@
 
 #define MEGASAS_FW_BUSY				1
 
+#define VD_EXT_DEBUG 0
+
+enum MR_MFI_MPT_PTHR_FLAGS {
+	MFI_MPT_DETACHED = 0,
+	MFI_LIST_ADDED = 1,
+	MFI_MPT_ATTACHED = 2,
+};
+
 /* Frame Type */
 #define IO_FRAME				0
 #define PTHRU_FRAME				1
@@ -978,7 +1039,7 @@
 #define MEGASAS_IOCTL_CMD			0
 #define MEGASAS_DEFAULT_CMD_TIMEOUT		90
 #define MEGASAS_THROTTLE_QUEUE_DEPTH		16
-
+#define MEGASAS_BLOCKED_CMD_TIMEOUT		60
 /*
  * FW reports the maximum of number of commands that it can accept (maximum
  * commands that can be outstanding) at any time. The driver must report a
@@ -1133,13 +1194,19 @@
 typedef union _MFI_CAPABILITIES {
 	struct {
 #if   defined(__BIG_ENDIAN_BITFIELD)
-		u32     reserved:30;
+		u32     reserved:27;
+		u32     support_ndrive_r1_lb:1;
+		u32	support_max_255lds:1;
+		u32	reserved1:1;
 		u32     support_additional_msix:1;
 		u32     support_fp_remote_lun:1;
 #else
 		u32     support_fp_remote_lun:1;
 		u32     support_additional_msix:1;
-		u32     reserved:30;
+		u32	reserved1:1;
+		u32	support_max_255lds:1;
+		u32     support_ndrive_r1_lb:1;
+		u32     reserved:27;
 #endif
 	} mfi_capabilities;
 	u32     reg;
@@ -1559,6 +1626,20 @@
 	u32 *reply_queue;
 	dma_addr_t reply_queue_h;
 
+	u32 *crash_dump_buf;
+	dma_addr_t crash_dump_h;
+	void *crash_buf[MAX_CRASH_DUMP_SIZE];
+	u32 crash_buf_pages;
+	unsigned int    fw_crash_buffer_size;
+	unsigned int    fw_crash_state;
+	unsigned int    fw_crash_buffer_offset;
+	u32 drv_buf_index;
+	u32 drv_buf_alloc;
+	u32 crash_dump_fw_support;
+	u32 crash_dump_drv_support;
+	u32 crash_dump_app_support;
+	spinlock_t crashdump_lock;
+
 	struct megasas_register_set __iomem *reg_set;
 	u32 *reply_post_host_index_addr[MR_MAX_MSIX_REG_ARRAY];
 	struct megasas_pd_list          pd_list[MEGASAS_MAX_PD];
@@ -1577,7 +1658,7 @@
 	struct megasas_cmd **cmd_list;
 	struct list_head cmd_pool;
 	/* used to sync fire the cmd to fw */
-	spinlock_t cmd_pool_lock;
+	spinlock_t mfi_pool_lock;
 	/* used to sync fire the cmd to fw */
 	spinlock_t hba_lock;
 	/* used to synch producer, consumer ptrs in dpc */
@@ -1606,6 +1687,7 @@
 	struct megasas_instance_template *instancet;
 	struct tasklet_struct isr_tasklet;
 	struct work_struct work_init;
+	struct work_struct crash_init;
 
 	u8 flag;
 	u8 unload;
@@ -1613,6 +1695,14 @@
 	u8 issuepend_done;
 	u8 disableOnlineCtrlReset;
 	u8 UnevenSpanSupport;
+
+	u8 supportmax256vd;
+	u16 fw_supported_vd_count;
+	u16 fw_supported_pd_count;
+
+	u16 drv_supported_vd_count;
+	u16 drv_supported_pd_count;
+
 	u8 adprecovery;
 	unsigned long last_time;
 	u32 mfiStatus;
@@ -1622,6 +1712,8 @@
 
 	/* Ptr to hba specific information */
 	void *ctrl_context;
+	u32 ctrl_context_pages;
+	struct megasas_ctrl_info *ctrl_info;
 	unsigned int msix_vectors;
 	struct msix_entry msixentry[MEGASAS_MAX_MSIX_QUEUES];
 	struct megasas_irq_context irq_context[MEGASAS_MAX_MSIX_QUEUES];
@@ -1633,8 +1725,6 @@
 	struct timer_list sriov_heartbeat_timer;
 	char skip_heartbeat_timer_del;
 	u8 requestorId;
-	u64 initiator_sas_address;
-	u64 ld_sas_address[64];
 	char PlasmaFW111;
 	char mpio;
 	int throttlequeuedepth;
@@ -1661,6 +1751,7 @@
 /* Plasma 1.11 FW backward compatibility structures */
 #define IOV_111_OFFSET 0x7CE
 #define MAX_VIRTUAL_FUNCTIONS 8
+#define MR_LD_ACCESS_HIDDEN 15
 
 struct IOV_111 {
 	u8 maxVFsSupported;
@@ -1754,6 +1845,11 @@
 
 	struct list_head list;
 	struct scsi_cmnd *scmd;
+
+	void *mpt_pthr_cmd_blocked;
+	atomic_t mfi_mpt_pthr;
+	u8 is_wait_event;
+
 	struct megasas_instance *instance;
 	union {
 		struct {
@@ -1823,12 +1919,33 @@
 MR_BuildRaidContext(struct megasas_instance *instance,
 		    struct IO_REQUEST_INFO *io_info,
 		    struct RAID_CONTEXT *pRAID_Context,
-		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN);
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map);
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map);
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map);
+		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN);
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map);
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map);
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map);
+
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *in_info);
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo);
+int megasas_get_ctrl_info(struct megasas_instance *instance,
+	struct megasas_ctrl_info *ctrl_info);
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+	u8 crash_buf_state);
+void megasas_free_host_crash_buffer(struct megasas_instance *instance);
+void megasas_fusion_crash_dump_wq(struct work_struct *work);
+
+void megasas_return_cmd_fusion(struct megasas_instance *instance,
+	struct megasas_cmd_fusion *cmd);
+int megasas_issue_blocked_cmd(struct megasas_instance *instance,
+	struct megasas_cmd *cmd, int timeout);
+void __megasas_return_cmd(struct megasas_instance *instance,
+	struct megasas_cmd *cmd);
+
+void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+	struct megasas_cmd *cmd_mfi, struct megasas_cmd_fusion *cmd_fusion);
 
 #endif				/*LSI_MEGARAID_SAS_H */
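Several of the new structures above (the cluster and adapterOperations3 fields, MFI_CAPABILITIES) use the same endianness idiom: the bitfield is declared twice, mirrored under __BIG_ENDIAN_BITFIELD, so each flag lands on the same bit of the 32-bit little-endian word the firmware sees regardless of how the compiler allocates bitfields. A stripped-down sketch of the idiom, with hypothetical names:

union hw_flags {
	struct {
#if defined(__BIG_ENDIAN_BITFIELD)
		u32 reserved:30;
		u32 feature_b:1;
		u32 feature_a:1;	/* still bit 0 on the wire */
#else
		u32 feature_a:1;	/* bit 0 on the wire */
		u32 feature_b:1;
		u32 reserved:30;
#endif
	} bits;
	u32 reg;
};
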
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 22a04e3..f6a69a3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -18,7 +18,7 @@
  *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  *
  *  FILE: megaraid_sas_base.c
- *  Version : 06.803.01.00-rc1
+ *  Version : 06.805.06.00-rc1
  *
  *  Authors: LSI Corporation
  *           Sreenivas Bagalkote
@@ -89,6 +89,10 @@
 MODULE_PARM_DESC(resetwaittime, "Wait time in seconds after I/O timeout "
 		 "before resetting adapter. Default: 180");
 
+int smp_affinity_enable = 1;
+module_param(smp_affinity_enable, int, S_IRUGO);
+MODULE_PARM_DESC(smp_affinity_enable, "SMP affinity feature enable/disable. Default: enable(1)");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -206,21 +210,54 @@
 	unsigned long flags;
 	struct megasas_cmd *cmd = NULL;
 
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
 
 	if (!list_empty(&instance->cmd_pool)) {
 		cmd = list_entry((&instance->cmd_pool)->next,
 				 struct megasas_cmd, list);
 		list_del_init(&cmd->list);
+		atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_DETACHED);
 	} else {
 		printk(KERN_ERR "megasas: Command pool empty!\n");
 	}
 
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
 	return cmd;
 }
 
 /**
+ * __megasas_return_cmd -	Return a cmd to free command pool
+ * @instance:		Adapter soft state
+ * @cmd:		Command packet to be returned to free command pool
+ */
+inline void
+__megasas_return_cmd(struct megasas_instance *instance, struct megasas_cmd *cmd)
+{
+	/*
+	 * Don't free the MFI frame if the corresponding MPT frame has
+	 * not been freed yet (valid only for fusion adapters). For MFI
+	 * adapters, any allocated MFI frame will always have
+	 * cmd->mfi_mpt_pthr set to MFI_MPT_DETACHED.
+	 */
+	if (atomic_read(&cmd->mfi_mpt_pthr) != MFI_MPT_DETACHED)
+		return;
+
+	cmd->scmd = NULL;
+	cmd->frame_count = 0;
+	cmd->is_wait_event = 0;
+	cmd->mpt_pthr_cmd_blocked = NULL;
+
+	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
+	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
+	    (reset_devices))
+		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
+
+	atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
+	list_add(&cmd->list, (&instance->cmd_pool)->next);
+}
+
+/**
  * megasas_return_cmd -	Return a cmd to free command pool
  * @instance:		Adapter soft state
  * @cmd:		Command packet to be returned to free command pool
@@ -230,19 +267,9 @@
 {
 	unsigned long flags;
 
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
-
-	cmd->scmd = NULL;
-	cmd->frame_count = 0;
-	if ((instance->pdev->device != PCI_DEVICE_ID_LSI_FUSION) &&
-	    (instance->pdev->device != PCI_DEVICE_ID_LSI_PLASMA) &&
-	    (instance->pdev->device != PCI_DEVICE_ID_LSI_INVADER) &&
-	    (instance->pdev->device != PCI_DEVICE_ID_LSI_FURY) &&
-	    (reset_devices))
-		cmd->frame->hdr.cmd = MFI_CMD_INVALID;
-	list_add_tail(&cmd->list, &instance->cmd_pool);
-
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+	__megasas_return_cmd(instance, cmd);
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
 }
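megasas_return_cmd() is now a thin locked wrapper around __megasas_return_cmd(), the usual kernel lock-split: the double-underscore helper assumes the lock is held, so code already inside an mfi_pool_lock section can reuse the body without self-deadlock. The idiom reduced to essentials, with hypothetical types and names:

struct item { struct list_head list; };
struct pool { spinlock_t lock; struct list_head free_list; };

static void __return_to_pool(struct pool *p, struct item *it)
{
	list_add(&it->list, &p->free_list);	/* caller holds p->lock */
}

static void return_to_pool(struct pool *p, struct item *it)
{
	unsigned long flags;

	spin_lock_irqsave(&p->lock, flags);
	__return_to_pool(p, it);
	spin_unlock_irqrestore(&p->lock, flags);
}
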
 
 
@@ -921,13 +948,14 @@
  * Max wait time is MEGASAS_INTERNAL_CMD_WAIT_TIME secs
  * Used to issue ioctl commands.
  */
-static int
+int
 megasas_issue_blocked_cmd(struct megasas_instance *instance,
 			  struct megasas_cmd *cmd, int timeout)
 {
 	int ret = 0;
 	cmd->cmd_status = ENODATA;
 
+	cmd->is_wait_event = 1;
 	instance->instancet->issue_dcmd(instance, cmd);
 	if (timeout) {
 		ret = wait_event_timeout(instance->int_cmd_wait_q,
@@ -1536,7 +1564,7 @@
  * @done:			Callback entry point
  */
 static int
-megasas_queue_command_lck(struct scsi_cmnd *scmd, void (*done) (struct scsi_cmnd *))
+megasas_queue_command(struct Scsi_Host *shost, struct scsi_cmnd *scmd)
 {
 	struct megasas_instance *instance;
 	unsigned long flags;
@@ -1558,7 +1586,7 @@
 		} else {
 			spin_unlock_irqrestore(&instance->hba_lock, flags);
 			scmd->result = DID_NO_CONNECT << 16;
-			done(scmd);
+			scmd->scsi_done(scmd);
 			return 0;
 		}
 	}
@@ -1566,7 +1594,7 @@
 	if (instance->adprecovery == MEGASAS_HW_CRITICAL_ERROR) {
 		spin_unlock_irqrestore(&instance->hba_lock, flags);
 		scmd->result = DID_NO_CONNECT << 16;
-		done(scmd);
+		scmd->scsi_done(scmd);
 		return 0;
 	}
 
@@ -1577,11 +1605,11 @@
 
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
 
-	scmd->scsi_done = done;
 	scmd->result = 0;
 
 	if (MEGASAS_IS_LOGICAL(scmd) &&
-	    (scmd->device->id >= MEGASAS_MAX_LD || scmd->device->lun)) {
+	    (scmd->device->id >= instance->fw_supported_vd_count ||
+		scmd->device->lun)) {
 		scmd->result = DID_BAD_TARGET << 16;
 		goto out_done;
 	}
@@ -1606,12 +1634,10 @@
 	return 0;
 
  out_done:
-	done(scmd);
+	scmd->scsi_done(scmd);
 	return 0;
 }
 
-static DEF_SCSI_QCMD(megasas_queue_command)
-
 static struct megasas_instance *megasas_lookup_instance(u16 host_no)
 {
 	int i;
@@ -1628,36 +1654,12 @@
 
 static int megasas_slave_configure(struct scsi_device *sdev)
 {
-	u16             pd_index = 0;
-	struct  megasas_instance *instance ;
-
-	instance = megasas_lookup_instance(sdev->host->host_no);
-
-	/*
-	* Don't export physical disk devices to the disk driver.
-	*
-	* FIXME: Currently we don't export them to the midlayer at all.
-	*        That will be fixed once LSI engineers have audited the
-	*        firmware for possible issues.
-	*/
-	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS &&
-				sdev->type == TYPE_DISK) {
-		pd_index = (sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
-								sdev->id;
-		if (instance->pd_list[pd_index].driveState ==
-						MR_PD_STATE_SYSTEM) {
-			blk_queue_rq_timeout(sdev->request_queue,
-				MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
-			return 0;
-		}
-		return -ENXIO;
-	}
-
 	/*
 	* The RAID firmware may require extended timeouts.
 	*/
 	blk_queue_rq_timeout(sdev->request_queue,
 		MEGASAS_DEFAULT_CMD_TIMEOUT * HZ);
+
 	return 0;
 }
 
@@ -1666,18 +1668,15 @@
 	u16             pd_index = 0;
 	struct megasas_instance *instance ;
 	instance = megasas_lookup_instance(sdev->host->host_no);
-	if ((sdev->channel < MEGASAS_MAX_PD_CHANNELS) &&
-				(sdev->type == TYPE_DISK)) {
+	if (sdev->channel < MEGASAS_MAX_PD_CHANNELS) {
 		/*
 		 * Open the OS scan to the SYSTEM PD
 		 */
 		pd_index =
 			(sdev->channel * MEGASAS_MAX_DEV_PER_CHANNEL) +
 			sdev->id;
-		if ((instance->pd_list[pd_index].driveState ==
-					MR_PD_STATE_SYSTEM) &&
-			(instance->pd_list[pd_index].driveType ==
-						TYPE_DISK)) {
+		if (instance->pd_list[pd_index].driveState ==
+					MR_PD_STATE_SYSTEM) {
 			return 0;
 		}
 		return -ENXIO;
@@ -1825,16 +1824,12 @@
 	process_fw_state_change_wq(&instance->work_init);
 }
 
-/* This function will get the current SR-IOV LD/VF affiliation */
-static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
-	int initial)
+static int megasas_get_ld_vf_affiliation_111(struct megasas_instance *instance,
+					    int initial)
 {
 	struct megasas_cmd *cmd;
 	struct megasas_dcmd_frame *dcmd;
-	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
 	struct MR_LD_VF_AFFILIATION_111 *new_affiliation_111 = NULL;
-	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
-	dma_addr_t new_affiliation_h;
 	dma_addr_t new_affiliation_111_h;
 	int ld, retval = 0;
 	u8 thisVf;
@@ -1842,15 +1837,15 @@
 	cmd = megasas_get_cmd(instance);
 
 	if (!cmd) {
-		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_"
-		       "affiliation: Failed to get cmd for scsi%d.\n",
+		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_111: "
+		       "Failed to get cmd for scsi%d.\n",
 			instance->host->host_no);
 		return -ENOMEM;
 	}
 
 	dcmd = &cmd->frame->dcmd;
 
-	if (!instance->vf_affiliation && !instance->vf_affiliation_111) {
+	if (!instance->vf_affiliation_111) {
 		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
 		       "affiliation for scsi%d.\n", instance->host->host_no);
 		megasas_return_cmd(instance, cmd);
@@ -1858,38 +1853,22 @@
 	}
 
 	if (initial)
-		if (instance->PlasmaFW111)
 			memset(instance->vf_affiliation_111, 0,
 			       sizeof(struct MR_LD_VF_AFFILIATION_111));
-		else
-			memset(instance->vf_affiliation, 0,
-			       (MAX_LOGICAL_DRIVES + 1) *
-			       sizeof(struct MR_LD_VF_AFFILIATION));
 	else {
-		if (instance->PlasmaFW111)
-			new_affiliation_111 =
-				pci_alloc_consistent(instance->pdev,
-						     sizeof(struct MR_LD_VF_AFFILIATION_111),
-						     &new_affiliation_111_h);
-		else
-			new_affiliation =
-				pci_alloc_consistent(instance->pdev,
-						     (MAX_LOGICAL_DRIVES + 1) *
-						     sizeof(struct MR_LD_VF_AFFILIATION),
-						     &new_affiliation_h);
-		if (!new_affiliation && !new_affiliation_111) {
+		new_affiliation_111 =
+			pci_alloc_consistent(instance->pdev,
+					     sizeof(struct MR_LD_VF_AFFILIATION_111),
+					     &new_affiliation_111_h);
+		if (!new_affiliation_111) {
 			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
 			       "memory for new affiliation for scsi%d.\n",
-				instance->host->host_no);
+			       instance->host->host_no);
 			megasas_return_cmd(instance, cmd);
 			return -ENOMEM;
 		}
-		if (instance->PlasmaFW111)
-			memset(new_affiliation_111, 0,
-			       sizeof(struct MR_LD_VF_AFFILIATION_111));
-		else
-			memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
-			       sizeof(struct MR_LD_VF_AFFILIATION));
+		memset(new_affiliation_111, 0,
+		       sizeof(struct MR_LD_VF_AFFILIATION_111));
 	}
 
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
@@ -1900,34 +1879,17 @@
 	dcmd->flags = MFI_FRAME_DIR_BOTH;
 	dcmd->timeout = 0;
 	dcmd->pad_0 = 0;
-	if (instance->PlasmaFW111) {
-		dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
-		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
-	} else {
-		dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
-			sizeof(struct MR_LD_VF_AFFILIATION);
-		dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
-	}
+	dcmd->data_xfer_len = sizeof(struct MR_LD_VF_AFFILIATION_111);
+	dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS_111;
 
-	if (initial) {
-		if (instance->PlasmaFW111)
-			dcmd->sgl.sge32[0].phys_addr =
-			  instance->vf_affiliation_111_h;
-		else
-			dcmd->sgl.sge32[0].phys_addr =
-			  instance->vf_affiliation_h;
-	} else {
-		if (instance->PlasmaFW111)
-			dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
-		else
-			dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
-	}
-	if (instance->PlasmaFW111)
-		dcmd->sgl.sge32[0].length =
-		  sizeof(struct MR_LD_VF_AFFILIATION_111);
+	if (initial)
+		dcmd->sgl.sge32[0].phys_addr =
+			instance->vf_affiliation_111_h;
 	else
-		dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
-			sizeof(struct MR_LD_VF_AFFILIATION);
+		dcmd->sgl.sge32[0].phys_addr = new_affiliation_111_h;
+
+	dcmd->sgl.sge32[0].length =
+		sizeof(struct MR_LD_VF_AFFILIATION_111);
 
 	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
 	       "scsi%d\n", instance->host->host_no);
@@ -1943,77 +1905,219 @@
 	}
 
 	if (!initial) {
-		if (instance->PlasmaFW111) {
-			if (!new_affiliation_111->vdCount) {
-				printk(KERN_WARNING "megasas: SR-IOV: Got new "
-				       "LD/VF affiliation for passive path "
+		thisVf = new_affiliation_111->thisVf;
+		for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
+			if (instance->vf_affiliation_111->map[ld].policy[thisVf] !=
+			    new_affiliation_111->map[ld].policy[thisVf]) {
+				printk(KERN_WARNING "megasas: SR-IOV: "
+				       "Got new LD/VF affiliation "
 				       "for scsi%d.\n",
-					instance->host->host_no);
-				retval = 1;
-				goto out;
-			}
-			thisVf = new_affiliation_111->thisVf;
-			for (ld = 0 ; ld < new_affiliation_111->vdCount; ld++)
-				if (instance->vf_affiliation_111->map[ld].policy[thisVf] != new_affiliation_111->map[ld].policy[thisVf]) {
-					printk(KERN_WARNING "megasas: SR-IOV: "
-					       "Got new LD/VF affiliation "
-					       "for scsi%d.\n",
-						instance->host->host_no);
-					memcpy(instance->vf_affiliation_111,
-					       new_affiliation_111,
-					       sizeof(struct MR_LD_VF_AFFILIATION_111));
-					retval = 1;
-					goto out;
-				}
-		} else {
-			if (!new_affiliation->ldCount) {
-				printk(KERN_WARNING "megasas: SR-IOV: Got new "
-				       "LD/VF affiliation for passive "
-				       "path for scsi%d.\n",
 				       instance->host->host_no);
+				memcpy(instance->vf_affiliation_111,
+				       new_affiliation_111,
+				       sizeof(struct MR_LD_VF_AFFILIATION_111));
 				retval = 1;
 				goto out;
 			}
-			newmap = new_affiliation->map;
-			savedmap = instance->vf_affiliation->map;
-			thisVf = new_affiliation->thisVf;
-			for (ld = 0 ; ld < new_affiliation->ldCount; ld++) {
-				if (savedmap->policy[thisVf] !=
-				    newmap->policy[thisVf]) {
-					printk(KERN_WARNING "megasas: SR-IOV: "
-					       "Got new LD/VF affiliation "
-					       "for scsi%d.\n",
-						instance->host->host_no);
-					memcpy(instance->vf_affiliation,
-					       new_affiliation,
-					       new_affiliation->size);
-					retval = 1;
-					goto out;
+	}
+out:
+	if (new_affiliation_111) {
+		pci_free_consistent(instance->pdev,
+				    sizeof(struct MR_LD_VF_AFFILIATION_111),
+				    new_affiliation_111,
+				    new_affiliation_111_h);
+	}
+
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+static int megasas_get_ld_vf_affiliation_12(struct megasas_instance *instance,
+					    int initial)
+{
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+	struct MR_LD_VF_AFFILIATION *new_affiliation = NULL;
+	struct MR_LD_VF_MAP *newmap = NULL, *savedmap = NULL;
+	dma_addr_t new_affiliation_h;
+	int i, j, retval = 0, found = 0, doscan = 0;
+	u8 thisVf;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		printk(KERN_DEBUG "megasas: megasas_get_ld_vf_affiliation_12: "
+		       "Failed to get cmd for scsi%d.\n",
+		       instance->host->host_no);
+		return -ENOMEM;
+	}
+
+	dcmd = &cmd->frame->dcmd;
+
+	if (!instance->vf_affiliation) {
+		printk(KERN_WARNING "megasas: SR-IOV: Couldn't get LD/VF "
+		       "affiliation for scsi%d.\n", instance->host->host_no);
+		megasas_return_cmd(instance, cmd);
+		return -ENOMEM;
+	}
+
+	if (initial)
+		memset(instance->vf_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+		       sizeof(struct MR_LD_VF_AFFILIATION));
+	else {
+		new_affiliation =
+			pci_alloc_consistent(instance->pdev,
+					     (MAX_LOGICAL_DRIVES + 1) *
+					     sizeof(struct MR_LD_VF_AFFILIATION),
+					     &new_affiliation_h);
+		if (!new_affiliation) {
+			printk(KERN_DEBUG "megasas: SR-IOV: Couldn't allocate "
+			       "memory for new affiliation for scsi%d.\n",
+			       instance->host->host_no);
+			megasas_return_cmd(instance, cmd);
+			return -ENOMEM;
+		}
+		memset(new_affiliation, 0, (MAX_LOGICAL_DRIVES + 1) *
+		       sizeof(struct MR_LD_VF_AFFILIATION));
+	}
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = MFI_FRAME_DIR_BOTH;
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = (MAX_LOGICAL_DRIVES + 1) *
+		sizeof(struct MR_LD_VF_AFFILIATION);
+	dcmd->opcode = MR_DCMD_LD_VF_MAP_GET_ALL_LDS;
+
+	if (initial)
+		dcmd->sgl.sge32[0].phys_addr = instance->vf_affiliation_h;
+	else
+		dcmd->sgl.sge32[0].phys_addr = new_affiliation_h;
+
+	dcmd->sgl.sge32[0].length = (MAX_LOGICAL_DRIVES + 1) *
+		sizeof(struct MR_LD_VF_AFFILIATION);
+
+	printk(KERN_WARNING "megasas: SR-IOV: Getting LD/VF affiliation for "
+	       "scsi%d\n", instance->host->host_no);
+
+	megasas_issue_blocked_cmd(instance, cmd, 0);
+
+	if (dcmd->cmd_status) {
+		printk(KERN_WARNING "megasas: SR-IOV: LD/VF affiliation DCMD"
+		       " failed with status 0x%x for scsi%d.\n",
+		       dcmd->cmd_status, instance->host->host_no);
+		retval = 1; /* Do a scan if we couldn't get affiliation */
+		goto out;
+	}
+
+	if (!initial) {
+		if (!new_affiliation->ldCount) {
+			printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+			       "affiliation for passive path for scsi%d.\n",
+			       instance->host->host_no);
+			retval = 1;
+			goto out;
+		}
+		newmap = new_affiliation->map;
+		savedmap = instance->vf_affiliation->map;
+		thisVf = new_affiliation->thisVf;
+		for (i = 0 ; i < new_affiliation->ldCount; i++) {
+			found = 0;
+			for (j = 0; j < instance->vf_affiliation->ldCount;
+			     j++) {
+				if (newmap->ref.targetId ==
+				    savedmap->ref.targetId) {
+					found = 1;
+					if (newmap->policy[thisVf] !=
+					    savedmap->policy[thisVf]) {
+						doscan = 1;
+						goto out;
+					}
 				}
 				savedmap = (struct MR_LD_VF_MAP *)
 					((unsigned char *)savedmap +
 					 savedmap->size);
+			}
+			if (!found && newmap->policy[thisVf] !=
+			    MR_LD_ACCESS_HIDDEN) {
+				doscan = 1;
+				goto out;
+			}
+			newmap = (struct MR_LD_VF_MAP *)
+				((unsigned char *)newmap + newmap->size);
+		}
+
+		newmap = new_affiliation->map;
+		savedmap = instance->vf_affiliation->map;
+
+		for (i = 0 ; i < instance->vf_affiliation->ldCount; i++) {
+			found = 0;
+			for (j = 0 ; j < new_affiliation->ldCount; j++) {
+				if (savedmap->ref.targetId ==
+				    newmap->ref.targetId) {
+					found = 1;
+					if (savedmap->policy[thisVf] !=
+					    newmap->policy[thisVf]) {
+						doscan = 1;
+						goto out;
+					}
+				}
 				newmap = (struct MR_LD_VF_MAP *)
 					((unsigned char *)newmap +
 					 newmap->size);
 			}
+			if (!found && savedmap->policy[thisVf] !=
+			    MR_LD_ACCESS_HIDDEN) {
+				doscan = 1;
+				goto out;
+			}
+			savedmap = (struct MR_LD_VF_MAP *)
+				((unsigned char *)savedmap +
+				 savedmap->size);
 		}
 	}
 out:
-	if (new_affiliation) {
-		if (instance->PlasmaFW111)
-			pci_free_consistent(instance->pdev,
-					    sizeof(struct MR_LD_VF_AFFILIATION_111),
-					    new_affiliation_111,
-					    new_affiliation_111_h);
-		else
-			pci_free_consistent(instance->pdev,
-					    (MAX_LOGICAL_DRIVES + 1) *
-					    sizeof(struct MR_LD_VF_AFFILIATION),
-					    new_affiliation, new_affiliation_h);
+	if (doscan) {
+		printk(KERN_WARNING "megasas: SR-IOV: Got new LD/VF "
+		       "affiliation for scsi%d.\n", instance->host->host_no);
+		memcpy(instance->vf_affiliation, new_affiliation,
+		       new_affiliation->size);
+		retval = 1;
 	}
-	megasas_return_cmd(instance, cmd);
 
+	if (new_affiliation)
+		pci_free_consistent(instance->pdev,
+				    (MAX_LOGICAL_DRIVES + 1) *
+				    sizeof(struct MR_LD_VF_AFFILIATION),
+				    new_affiliation, new_affiliation_h);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
+
+	return retval;
+}
+
+/* This function will get the current SR-IOV LD/VF affiliation */
+static int megasas_get_ld_vf_affiliation(struct megasas_instance *instance,
+	int initial)
+{
+	int retval;
+
+	if (instance->PlasmaFW111)
+		retval = megasas_get_ld_vf_affiliation_111(instance, initial);
+	else
+		retval = megasas_get_ld_vf_affiliation_12(instance, initial);
 	return retval;
 }
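Both affiliation paths walk the firmware's LD/VF map as a packed sequence of variable-length records, advancing by each record's own size field rather than by sizeof(), which is why the casts through unsigned char * appear above. The traversal, sketched with a generic record type rather than the driver's:

struct vrec {
	u32 size;	/* total record length in bytes, header included */
	/* payload follows */
};

static struct vrec *vrec_next(struct vrec *r)
{
	return (struct vrec *)((unsigned char *)r + r->size);
}

/* usage: for (i = 0, r = first; i < count; i++, r = vrec_next(r)) ... */
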
 
@@ -2459,7 +2563,12 @@
 		cmd->abort_aen = 0;
 
 	instance->aen_cmd = NULL;
-	megasas_return_cmd(instance, cmd);
+
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	if ((instance->unload == 0) &&
 		((instance->issuepend_done == 1))) {
@@ -2491,6 +2600,152 @@
 	return queue_depth;
 }
 
+static ssize_t
+megasas_fw_crash_buffer_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	int val = 0;
+	unsigned long flags;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	instance->fw_crash_buffer_offset = val;
+	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+	return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_buffer_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	u32 size;
+	unsigned long buff_addr;
+	unsigned long dmachunk = CRASH_DMA_BUF_SIZE;
+	unsigned long src_addr;
+	unsigned long flags;
+	u32 buff_offset;
+
+	spin_lock_irqsave(&instance->crashdump_lock, flags);
+	buff_offset = instance->fw_crash_buffer_offset;
+	if (!instance->crash_dump_buf &&
+		!((instance->fw_crash_state == AVAILABLE) ||
+		(instance->fw_crash_state == COPYING))) {
+		dev_err(&instance->pdev->dev,
+			"Firmware crash dump is not available\n");
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		return -EINVAL;
+	}
+
+	buff_addr = (unsigned long) buf;
+
+	if (buff_offset >
+		(instance->fw_crash_buffer_size * dmachunk)) {
+		dev_err(&instance->pdev->dev,
+			"Firmware crash dump offset is out of range\n");
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		return 0;
+	}
+
+	size = (instance->fw_crash_buffer_size * dmachunk) - buff_offset;
+	size = (size >= PAGE_SIZE) ? (PAGE_SIZE - 1) : size;
+
+	src_addr = (unsigned long)instance->crash_buf[buff_offset / dmachunk] +
+		(buff_offset % dmachunk);
+	memcpy(buf, (void *)src_addr,  size);
+	spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+
+	return size;
+}
+
+static ssize_t
+megasas_fw_crash_buffer_size_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+
+	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)
+		((instance->fw_crash_buffer_size) * 1024 * 1024)/PAGE_SIZE);
+}
+
+static ssize_t
+megasas_fw_crash_state_store(struct device *cdev,
+	struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	int val = 0;
+	unsigned long flags;
+
+	if (kstrtoint(buf, 0, &val) != 0)
+		return -EINVAL;
+
+	if (val <= AVAILABLE || val > COPY_ERROR) {
+		dev_err(&instance->pdev->dev, "application set an invalid "
+			"firmware crash state\n");
+		return -EINVAL;
+	}
+
+	instance->fw_crash_state = val;
+
+	if ((val == COPIED) || (val == COPY_ERROR)) {
+		spin_lock_irqsave(&instance->crashdump_lock, flags);
+		megasas_free_host_crash_buffer(instance);
+		spin_unlock_irqrestore(&instance->crashdump_lock, flags);
+		if (val == COPY_ERROR)
+			dev_info(&instance->pdev->dev, "application failed to "
+				"copy Firmware crash dump\n");
+		else
+			dev_info(&instance->pdev->dev, "Firmware crash dump "
+				"copied successfully\n");
+	}
+	return strlen(buf);
+}
+
+static ssize_t
+megasas_fw_crash_state_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	struct Scsi_Host *shost = class_to_shost(cdev);
+	struct megasas_instance *instance =
+		(struct megasas_instance *) shost->hostdata;
+	return snprintf(buf, PAGE_SIZE, "%d\n", instance->fw_crash_state);
+}
+
+static ssize_t
+megasas_page_size_show(struct device *cdev,
+	struct device_attribute *attr, char *buf)
+{
+	return snprintf(buf, PAGE_SIZE, "%ld\n", (unsigned long)PAGE_SIZE - 1);
+}
+
+static DEVICE_ATTR(fw_crash_buffer, S_IRUGO | S_IWUSR,
+	megasas_fw_crash_buffer_show, megasas_fw_crash_buffer_store);
+static DEVICE_ATTR(fw_crash_buffer_size, S_IRUGO,
+	megasas_fw_crash_buffer_size_show, NULL);
+static DEVICE_ATTR(fw_crash_state, S_IRUGO | S_IWUSR,
+	megasas_fw_crash_state_show, megasas_fw_crash_state_store);
+static DEVICE_ATTR(page_size, S_IRUGO,
+	megasas_page_size_show, NULL);
+
+struct device_attribute *megaraid_host_attrs[] = {
+	&dev_attr_fw_crash_buffer_size,
+	&dev_attr_fw_crash_buffer,
+	&dev_attr_fw_crash_state,
+	&dev_attr_page_size,
+	NULL,
+};
+
 /*
  * Scsi host template for megaraid_sas driver
  */
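Taken together, the attributes above let an application drain the firmware crash dump in chunks: read fw_crash_buffer_size (in pages), write a byte offset to fw_crash_buffer and read back up to a page of dump data, then acknowledge by writing COPIED (3) or COPY_ERROR (4) to fw_crash_state so the driver frees the host buffer. A rough user-space sketch with error handling elided, assuming the attributes appear under /sys/class/scsi_host/hostN/ as shost_attrs usually do and a 4 KB page (the real value is exported via page_size):

#include <stdio.h>

int main(void)
{
	const char *d = "/sys/class/scsi_host/host0";
	char path[128], page[4096];
	unsigned long pages, i;
	size_t n;
	FILE *f;

	snprintf(path, sizeof(path), "%s/fw_crash_buffer_size", d);
	f = fopen(path, "r");
	fscanf(f, "%lu", &pages);
	fclose(f);

	snprintf(path, sizeof(path), "%s/fw_crash_buffer", d);
	for (i = 0; i < pages; i++) {
		f = fopen(path, "w");		/* select byte offset */
		fprintf(f, "%lu", i * 4096UL);
		fclose(f);
		f = fopen(path, "r");		/* read the chunk there */
		n = fread(page, 1, sizeof(page), f);
		fclose(f);
		fwrite(page, 1, n, stdout);	/* append to the dump */
	}

	snprintf(path, sizeof(path), "%s/fw_crash_state", d);
	f = fopen(path, "w");
	fprintf(f, "3");			/* COPIED */
	fclose(f);
	return 0;
}
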
@@ -2506,6 +2761,7 @@
 	.eh_bus_reset_handler = megasas_reset_bus_host,
 	.eh_host_reset_handler = megasas_reset_bus_host,
 	.eh_timed_out = megasas_reset_timer,
+	.shost_attrs = megaraid_host_attrs,
 	.bios_param = megasas_bios_param,
 	.use_clustering = ENABLE_CLUSTERING,
 	.change_queue_depth = megasas_change_queue_depth,
@@ -2688,7 +2944,8 @@
 					       "failed, status = 0x%x.\n",
 					       cmd->frame->hdr.cmd_status);
 				else {
-					megasas_return_cmd(instance, cmd);
+					megasas_return_mfi_mpt_pthr(instance,
+						cmd, cmd->mpt_pthr_cmd_blocked);
 					spin_unlock_irqrestore(
 						instance->host->host_lock,
 						flags);
@@ -2696,7 +2953,8 @@
 				}
 			} else
 				instance->map_id++;
-			megasas_return_cmd(instance, cmd);
+			megasas_return_mfi_mpt_pthr(instance, cmd,
+				cmd->mpt_pthr_cmd_blocked);
 
 			/*
 			 * Set fast path IO to ZERO.
@@ -2852,7 +3110,7 @@
 	unsigned long flags;
 
 	defer_index     = 0;
-	spin_lock_irqsave(&instance->cmd_pool_lock, flags);
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
 		if (cmd->sync_cmd == 1 || cmd->scmd) {
@@ -2873,7 +3131,7 @@
 				&instance->internal_reset_pending_q);
 		}
 	}
-	spin_unlock_irqrestore(&instance->cmd_pool_lock, flags);
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
 }
 
 
@@ -3438,7 +3696,9 @@
 	int j;
 	u32 max_cmd;
 	struct megasas_cmd *cmd;
+	struct fusion_context *fusion;
 
+	fusion = instance->ctrl_context;
 	max_cmd = instance->max_mfi_cmds;
 
 	/*
@@ -3471,13 +3731,11 @@
 		}
 	}
 
-	/*
-	 * Add all the commands to command pool (instance->cmd_pool)
-	 */
 	for (i = 0; i < max_cmd; i++) {
 		cmd = instance->cmd_list[i];
 		memset(cmd, 0, sizeof(struct megasas_cmd));
 		cmd->index = i;
+		atomic_set(&cmd->mfi_mpt_pthr, MFI_LIST_ADDED);
 		cmd->scmd = NULL;
 		cmd->instance = instance;
 
@@ -3548,11 +3806,11 @@
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST));
 
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
-	} else {
-		ret = -1;
-	}
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
 
 	/*
 	* the following function will get the instance PD LIST.
@@ -3584,7 +3842,12 @@
 	pci_free_consistent(instance->pdev,
 				MEGASAS_MAX_PD * sizeof(struct MR_PD_LIST),
 				ci, ci_h);
-	megasas_return_cmd(instance, cmd);
+
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return ret;
 }
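The PD-list DCMD above now chooses between sleeping and polling submission, and the same test is repeated below for the LD list, target-ID list, controller info, and crash dump parameters: fusion adapters with interrupts live wait on the completion for up to the new 60-second MEGASAS_BLOCKED_CMD_TIMEOUT, everything else polls. A hypothetical helper capturing the repeated call-site test (the driver itself open-codes it):

static int megasas_issue_dcmd_sync(struct megasas_instance *instance,
				   struct megasas_cmd *cmd)
{
	/* sleep on the completion when the adapter can interrupt us */
	if (instance->ctrl_context && !instance->mask_interrupts)
		return megasas_issue_blocked_cmd(instance, cmd,
						 MEGASAS_BLOCKED_CMD_TIMEOUT);
	/* otherwise spin on the frame status */
	return megasas_issue_polled(instance, cmd);
}
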
@@ -3630,6 +3893,8 @@
 	memset(ci, 0, sizeof(*ci));
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 
+	if (instance->supportmax256vd)
+		dcmd->mbox.b[0] = 1;
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
@@ -3641,18 +3906,19 @@
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_LIST));
 	dcmd->pad_0  = 0;
 
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
-	} else {
-		ret = -1;
-	}
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
 
 	ld_count = le32_to_cpu(ci->ldCount);
 
 	/* the following function will get the instance PD LIST */
 
-	if ((ret == 0) && (ld_count <= MAX_LOGICAL_DRIVES)) {
-		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
+	if ((ret == 0) && (ld_count <= instance->fw_supported_vd_count)) {
+		memset(instance->ld_ids, 0xff, MAX_LOGICAL_DRIVES_EXT);
 
 		for (ld_index = 0; ld_index < ld_count; ld_index++) {
 			if (ci->ldList[ld_index].state != 0) {
@@ -3668,7 +3934,11 @@
 				ci,
 				ci_h);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 	return ret;
 }
 
@@ -3715,6 +3985,8 @@
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
 
 	dcmd->mbox.b[0] = query_type;
+	if (instance->supportmax256vd)
+		dcmd->mbox.b[2] = 1;
 
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
@@ -3727,16 +3999,15 @@
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct MR_LD_TARGETID_LIST));
 	dcmd->pad_0  = 0;
 
-	if (!megasas_issue_polled(instance, cmd) && !dcmd->cmd_status) {
-		ret = 0;
-	} else {
-		/* On failure, call older LD list DCMD */
-		ret = 1;
-	}
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
 
 	tgtid_count = le32_to_cpu(ci->count);
 
-	if ((ret == 0) && (tgtid_count <= (MAX_LOGICAL_DRIVES))) {
+	if ((ret == 0) && (tgtid_count <= (instance->fw_supported_vd_count))) {
 		memset(instance->ld_ids, 0xff, MEGASAS_MAX_LD_IDS);
 		for (ld_index = 0; ld_index < tgtid_count; ld_index++) {
 			ids = ci->targetId[ld_index];
@@ -3748,7 +4019,11 @@
 	pci_free_consistent(instance->pdev, sizeof(struct MR_LD_TARGETID_LIST),
 			    ci, ci_h);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return ret;
 }
@@ -3762,7 +4037,7 @@
  * This information is mainly used to find out the maximum IO transfer per
  * command supported by the FW.
  */
-static int
+int
 megasas_get_ctrl_info(struct megasas_instance *instance,
 		      struct megasas_ctrl_info *ctrl_info)
 {
@@ -3803,18 +4078,84 @@
 	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_GET_INFO);
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(sizeof(struct megasas_ctrl_info));
+	dcmd->mbox.b[0] = 1;
 
-	if (!megasas_issue_polled(instance, cmd)) {
-		ret = 0;
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	if (!ret)
 		memcpy(ctrl_info, ci, sizeof(struct megasas_ctrl_info));
-	} else {
-		ret = -1;
-	}
 
 	pci_free_consistent(instance->pdev, sizeof(struct megasas_ctrl_info),
 			    ci, ci_h);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
+	return ret;
+}
+
+/**
+ * megasas_set_crash_dump_params -	Sends address of crash dump DMA buffer
+ *					to firmware
+ * @instance:		Adapter soft state
+ * @crash_buf_state:	tell FW to turn ON/OFF crash dump feature
+ *			MR_CRASH_BUF_TURN_OFF = 0
+ *			MR_CRASH_BUF_TURN_ON = 1
+ *
+ * Issues an internal command (DCMD) to set parameters for the crash dump
+ * feature. The driver sends the address of the crash dump DMA buffer and
+ * sets the mailbox to tell the FW that the driver supports crash dump.
+ * This DCMD is sent only if the crash dump feature is supported by the FW.
+ *
+ * Return: 0 on success, non-zero on failure.
+ */
+int megasas_set_crash_dump_params(struct megasas_instance *instance,
+	u8 crash_buf_state)
+{
+	int ret = 0;
+	struct megasas_cmd *cmd;
+	struct megasas_dcmd_frame *dcmd;
+
+	cmd = megasas_get_cmd(instance);
+
+	if (!cmd) {
+		dev_err(&instance->pdev->dev, "Failed to get a free cmd\n");
+		return -ENOMEM;
+	}
+
+
+	dcmd = &cmd->frame->dcmd;
+
+	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
+	dcmd->mbox.b[0] = crash_buf_state;
+	dcmd->cmd = MFI_CMD_DCMD;
+	dcmd->cmd_status = 0xFF;
+	dcmd->sge_count = 1;
+	dcmd->flags = cpu_to_le16(MFI_FRAME_DIR_NONE);
+	dcmd->timeout = 0;
+	dcmd->pad_0 = 0;
+	dcmd->data_xfer_len = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+	dcmd->opcode = cpu_to_le32(MR_DCMD_CTRL_SET_CRASH_DUMP_PARAMS);
+	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(instance->crash_dump_h);
+	dcmd->sgl.sge32[0].length = cpu_to_le32(CRASH_DMA_BUF_SIZE);
+
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
+
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 	return ret;
 }
 
@@ -3948,6 +4289,13 @@
 	if (megasas_issue_init_mfi(instance))
 		goto fail_fw_init;
 
+	if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+		dev_err(&instance->pdev->dev, "(%d): Could not get controller "
+			"info. Fail from %s %d\n", instance->unique_id,
+			__func__, __LINE__);
+		goto fail_fw_init;
+	}
+
 	instance->fw_support_ieee = 0;
 	instance->fw_support_ieee =
 		(instance->instancet->read_fw_status_reg(reg_set) &
@@ -3986,7 +4334,7 @@
 	u32 tmp_sectors, msix_enable, scratch_pad_2;
 	resource_size_t base_addr;
 	struct megasas_register_set __iomem *reg_set;
-	struct megasas_ctrl_info *ctrl_info;
+	struct megasas_ctrl_info *ctrl_info = NULL;
 	unsigned long bar_list;
 	int i, loop, fw_msix_count = 0;
 	struct IOV_111 *iovPtr;
@@ -4103,17 +4451,11 @@
 					     (unsigned int)num_online_cpus());
 		for (i = 0; i < instance->msix_vectors; i++)
 			instance->msixentry[i].entry = i;
-		i = pci_enable_msix(instance->pdev, instance->msixentry,
-				    instance->msix_vectors);
-		if (i >= 0) {
-			if (i) {
-				if (!pci_enable_msix(instance->pdev,
-						     instance->msixentry, i))
-					instance->msix_vectors = i;
-				else
-					instance->msix_vectors = 0;
-			}
-		} else
+		i = pci_enable_msix_range(instance->pdev, instance->msixentry,
+					  1, instance->msix_vectors);
+		if (i > 0)
+			instance->msix_vectors = i;
+		else
 			instance->msix_vectors = 0;
 
 		dev_info(&instance->pdev->dev, "[scsi%d]: FW supports"
@@ -4123,6 +4465,17 @@
 			instance->msix_vectors);
 	}
 
+	instance->ctrl_info = kzalloc(sizeof(struct megasas_ctrl_info),
+				GFP_KERNEL);
+	if (instance->ctrl_info == NULL)
+		goto fail_init_adapter;
+
+	/*
+	 * Below are the default values for legacy firmware
+	 * (non-fusion based controllers).
+	 */
+	instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+	instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
 	/* Get operational params, sge flags, send init cmd to controller */
 	if (instance->instancet->init_adapter(instance))
 		goto fail_init_adapter;
@@ -4145,8 +4498,6 @@
 				  MR_LD_QUERY_TYPE_EXPOSED_TO_HOST))
 		megasas_get_ld_list(instance);
 
-	ctrl_info = kmalloc(sizeof(struct megasas_ctrl_info), GFP_KERNEL);
-
 	/*
 	 * Compute the max allowed sectors per IO: The controller info has two
 	 * limits on max sectors. Driver should use the minimum of these two.
@@ -4157,58 +4508,79 @@
 	 * to calculate max_sectors_1. So the number ended up as zero always.
 	 */
 	tmp_sectors = 0;
-	if (ctrl_info && !megasas_get_ctrl_info(instance, ctrl_info)) {
+	ctrl_info = instance->ctrl_info;
 
-		max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
-			le16_to_cpu(ctrl_info->max_strips_per_io);
-		max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
+	max_sectors_1 = (1 << ctrl_info->stripe_sz_ops.min) *
+		le16_to_cpu(ctrl_info->max_strips_per_io);
+	max_sectors_2 = le32_to_cpu(ctrl_info->max_request_size);
 
-		tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
+	tmp_sectors = min_t(u32, max_sectors_1 , max_sectors_2);
 
-		/*Check whether controller is iMR or MR */
-		if (ctrl_info->memory_size) {
-			instance->is_imr = 0;
-			dev_info(&instance->pdev->dev, "Controller type: MR,"
-				"Memory size is: %dMB\n",
-				le16_to_cpu(ctrl_info->memory_size));
-		} else {
-			instance->is_imr = 1;
-			dev_info(&instance->pdev->dev,
-				"Controller type: iMR\n");
+	/*Check whether controller is iMR or MR */
+	if (ctrl_info->memory_size) {
+		instance->is_imr = 0;
+		dev_info(&instance->pdev->dev, "Controller type: MR,"
+			"Memory size is: %dMB\n",
+			le16_to_cpu(ctrl_info->memory_size));
+	} else {
+		instance->is_imr = 1;
+		dev_info(&instance->pdev->dev,
+			"Controller type: iMR\n");
+	}
+	/* OnOffProperties are converted to CPU byte order */
+	le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
+	instance->disableOnlineCtrlReset =
+	ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
+	/* adapterOperations2 is converted to CPU byte order */
+	le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
+	instance->mpio = ctrl_info->adapterOperations2.mpio;
+	instance->UnevenSpanSupport =
+		ctrl_info->adapterOperations2.supportUnevenSpans;
+	if (instance->UnevenSpanSupport) {
+		struct fusion_context *fusion = instance->ctrl_context;
+
+		dev_info(&instance->pdev->dev, "FW supports: "
+		"UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
+		if (MR_ValidateMapInfo(instance))
+			fusion->fast_path_io = 1;
+		else
+			fusion->fast_path_io = 0;
+
+	}
+	if (ctrl_info->host_interface.SRIOV) {
+		if (!ctrl_info->adapterOperations2.activePassive)
+			instance->PlasmaFW111 = 1;
+
+		if (!instance->PlasmaFW111)
+			instance->requestorId =
+				ctrl_info->iov.requestorId;
+		else {
+			iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
+			instance->requestorId = iovPtr->requestorId;
 		}
-		/* OnOffProperties are converted into CPU arch*/
-		le32_to_cpus((u32 *)&ctrl_info->properties.OnOffProperties);
-		instance->disableOnlineCtrlReset =
-		ctrl_info->properties.OnOffProperties.disableOnlineCtrlReset;
-		/* adapterOperations2 are converted into CPU arch*/
-		le32_to_cpus((u32 *)&ctrl_info->adapterOperations2);
-		instance->mpio = ctrl_info->adapterOperations2.mpio;
-		instance->UnevenSpanSupport =
-			ctrl_info->adapterOperations2.supportUnevenSpans;
-		if (instance->UnevenSpanSupport) {
-			struct fusion_context *fusion = instance->ctrl_context;
-			dev_info(&instance->pdev->dev, "FW supports: "
-			"UnevenSpanSupport=%x\n", instance->UnevenSpanSupport);
-			if (MR_ValidateMapInfo(instance))
-				fusion->fast_path_io = 1;
-			else
-				fusion->fast_path_io = 0;
+		dev_warn(&instance->pdev->dev, "I am VF "
+		       "requestorId %d\n", instance->requestorId);
+	}
 
-		}
-		if (ctrl_info->host_interface.SRIOV) {
-			if (!ctrl_info->adapterOperations2.activePassive)
-				instance->PlasmaFW111 = 1;
+	le32_to_cpus((u32 *)&ctrl_info->adapterOperations3);
+	instance->crash_dump_fw_support =
+		ctrl_info->adapterOperations3.supportCrashDump;
+	instance->crash_dump_drv_support =
+		(instance->crash_dump_fw_support &&
+		instance->crash_dump_buf);
+	if (instance->crash_dump_drv_support) {
+		dev_info(&instance->pdev->dev, "Firmware Crash dump "
+			"feature is supported\n");
+		megasas_set_crash_dump_params(instance,
+			MR_CRASH_BUF_TURN_OFF);
 
-			if (!instance->PlasmaFW111)
-				instance->requestorId =
-					ctrl_info->iov.requestorId;
-			else {
-				iovPtr = (struct IOV_111 *)((unsigned char *)ctrl_info + IOV_111_OFFSET);
-				instance->requestorId = iovPtr->requestorId;
-			}
-			printk(KERN_WARNING "megaraid_sas: I am VF "
-			       "requestorId %d\n", instance->requestorId);
-		}
+	} else {
+		if (instance->crash_dump_buf)
+			pci_free_consistent(instance->pdev,
+				CRASH_DMA_BUF_SIZE,
+				instance->crash_dump_buf,
+				instance->crash_dump_h);
+		instance->crash_dump_buf = NULL;
 	}
 	instance->max_sectors_per_req = instance->max_num_sge *
 						PAGE_SIZE / 512;
@@ -4256,6 +4628,8 @@
 
 fail_init_adapter:
 fail_ready_state:
+	kfree(instance->ctrl_info);
+	instance->ctrl_info = NULL;
 	iounmap(instance->reg_set);
 
       fail_ioremap:
@@ -4351,7 +4725,11 @@
 	pci_free_consistent(instance->pdev, sizeof(struct megasas_evt_log_info),
 			    el_info, el_info_h);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return 0;
 }
@@ -4634,6 +5012,7 @@
 	struct Scsi_Host *host;
 	struct megasas_instance *instance;
 	u16 control = 0;
+	struct fusion_context *fusion = NULL;
 
 	/* Reset MSI-X in the kdump kernel */
 	if (reset_devices) {
@@ -4694,10 +5073,10 @@
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 	{
-		struct fusion_context *fusion;
-
-		instance->ctrl_context =
-			kzalloc(sizeof(struct fusion_context), GFP_KERNEL);
+		instance->ctrl_context_pages =
+			get_order(sizeof(struct fusion_context));
+		instance->ctrl_context = (void *)__get_free_pages(GFP_KERNEL,
+				instance->ctrl_context_pages);
 		if (!instance->ctrl_context) {
 			printk(KERN_DEBUG "megasas: Failed to allocate "
 			       "memory for Fusion context info\n");
@@ -4705,7 +5084,9 @@
 		}
 		fusion = instance->ctrl_context;
 		INIT_LIST_HEAD(&fusion->cmd_pool);
-		spin_lock_init(&fusion->cmd_pool_lock);
+		spin_lock_init(&fusion->mpt_pool_lock);
+		memset(fusion->load_balance_info, 0,
+			sizeof(struct LD_LOAD_BALANCE_INFO) * MAX_LOGICAL_DRIVES_EXT);
 	}
 	break;
 	default: /* For all other supported controllers */
@@ -4728,13 +5109,29 @@
 		break;
 	}
 
+	/* Crash dump feature related initialisation */
+	instance->drv_buf_index = 0;
+	instance->drv_buf_alloc = 0;
+	instance->crash_dump_fw_support = 0;
+	instance->crash_dump_app_support = 0;
+	instance->fw_crash_state = UNAVAILABLE;
+	spin_lock_init(&instance->crashdump_lock);
+	instance->crash_dump_buf = NULL;
+
+	if (!reset_devices)
+		instance->crash_dump_buf = pci_alloc_consistent(pdev,
+						CRASH_DMA_BUF_SIZE,
+						&instance->crash_dump_h);
+	if (!instance->crash_dump_buf)
+		dev_err(&instance->pdev->dev, "Can't allocate Firmware "
+			"crash dump DMA buffer\n");
+
 	megasas_poll_wait_aen = 0;
 	instance->flag_ieee = 0;
 	instance->ev = NULL;
 	instance->issuepend_done = 1;
 	instance->adprecovery = MEGASAS_HBA_OPERATIONAL;
 	instance->is_imr = 0;
-	megasas_poll_wait_aen = 0;
 
 	instance->evt_detail = pci_alloc_consistent(pdev,
 						    sizeof(struct
@@ -4758,7 +5155,7 @@
 	init_waitqueue_head(&instance->int_cmd_wait_q);
 	init_waitqueue_head(&instance->abort_cmd_wait_q);
 
-	spin_lock_init(&instance->cmd_pool_lock);
+	spin_lock_init(&instance->mfi_pool_lock);
 	spin_lock_init(&instance->hba_lock);
 	spin_lock_init(&instance->completion_lock);
 
@@ -4771,13 +5168,14 @@
 	instance->host = host;
 	instance->unique_id = pdev->bus->number << 8 | pdev->devfn;
 	instance->init_id = MEGASAS_DEFAULT_INIT_ID;
+	instance->ctrl_info = NULL;
 
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0073SKINNY) ||
 		(instance->pdev->device == PCI_DEVICE_ID_LSI_SAS0071SKINNY)) {
 		instance->flag_ieee = 1;
 		sema_init(&instance->ioctl_sem, MEGASAS_SKINNY_INT_CMDS);
 	} else
-		sema_init(&instance->ioctl_sem, MEGASAS_INT_CMDS);
+		sema_init(&instance->ioctl_sem, (MEGASAS_INT_CMDS - 5));
 
 	megasas_dbg_lvl = 0;
 	instance->flag = 0;
@@ -4789,9 +5187,10 @@
 	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_FUSION) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_PLASMA) ||
 	    (instance->pdev->device == PCI_DEVICE_ID_LSI_INVADER) ||
-	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY))
+	    (instance->pdev->device == PCI_DEVICE_ID_LSI_FURY)) {
 		INIT_WORK(&instance->work_init, megasas_fusion_ocr_wq);
-	else
+		INIT_WORK(&instance->crash_init, megasas_fusion_crash_dump_wq);
+	} else
 		INIT_WORK(&instance->work_init, process_fw_state_change_wq);
 
 	/*
@@ -4836,8 +5235,9 @@
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
 				for (j = 0; j < i; j++) {
-					irq_set_affinity_hint(
-						instance->msixentry[j].vector, NULL);
+					if (smp_affinity_enable)
+						irq_set_affinity_hint(
+							instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
@@ -4846,11 +5246,14 @@
 				instance->msix_vectors = 0;
 				goto retry_irq_register;
 			}
-			if (irq_set_affinity_hint(instance->msixentry[i].vector,
-				get_cpu_mask(cpu)))
-				dev_err(&instance->pdev->dev, "Error setting"
-					"affinity hint for cpu %d\n", cpu);
-			cpu = cpumask_next(cpu, cpu_online_mask);
+			if (smp_affinity_enable) {
+				if (irq_set_affinity_hint(instance->msixentry[i].vector,
+					get_cpu_mask(cpu)))
+					dev_err(&instance->pdev->dev,
+						"Error setting affinity hint "
+						"for cpu %d\n", cpu);
+				cpu = cpumask_next(cpu, cpu_online_mask);
+			}
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -4894,6 +5297,10 @@
 		goto fail_start_aen;
 	}
 
+	/* Get current SR-IOV LD/VF affiliation */
+	if (instance->requestorId)
+		megasas_get_ld_vf_affiliation(instance, 1);
+
 	return 0;
 
       fail_start_aen:
@@ -4905,8 +5312,9 @@
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
-			irq_set_affinity_hint(
-				instance->msixentry[i].vector, NULL);
+			if (smp_affinity_enable)
+				irq_set_affinity_hint(
+					instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
 		}
@@ -4979,7 +5387,11 @@
 		dev_err(&instance->pdev->dev, "Command timedout"
 			" from %s\n", __func__);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return;
 }
@@ -5026,7 +5438,11 @@
 		dev_err(&instance->pdev->dev, "Command timedout"
 			"from %s\n", __func__);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return;
 }
@@ -5069,8 +5485,9 @@
 
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
-			irq_set_affinity_hint(
-				instance->msixentry[i].vector, NULL);
+			if (smp_affinity_enable)
+				irq_set_affinity_hint(
+					instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
 		}
@@ -5132,9 +5549,10 @@
 		goto fail_ready_state;
 
 	/* Now re-enable MSI-X */
-	if (instance->msix_vectors)
-		pci_enable_msix(instance->pdev, instance->msixentry,
-				instance->msix_vectors);
+	if (instance->msix_vectors &&
+	    pci_enable_msix_exact(instance->pdev, instance->msixentry,
+				  instance->msix_vectors))
+		goto fail_reenable_msix;
 
 	switch (instance->pdev->device) {
 	case PCI_DEVICE_ID_LSI_FUSION:
@@ -5178,8 +5596,9 @@
 				printk(KERN_DEBUG "megasas: Failed to "
 				       "register IRQ for vector %d.\n", i);
 				for (j = 0; j < i; j++) {
-					irq_set_affinity_hint(
-						instance->msixentry[j].vector, NULL);
+					if (smp_affinity_enable)
+						irq_set_affinity_hint(
+							instance->msixentry[j].vector, NULL);
 					free_irq(
 						instance->msixentry[j].vector,
 						&instance->irq_context[j]);
@@ -5187,11 +5606,14 @@
 				goto fail_irq;
 			}
 
-			if (irq_set_affinity_hint(instance->msixentry[i].vector,
-				get_cpu_mask(cpu)))
-				dev_err(&instance->pdev->dev, "Error setting"
-					"affinity hint for cpu %d\n", cpu);
-			cpu = cpumask_next(cpu, cpu_online_mask);
+			if (smp_affinity_enable) {
+				if (irq_set_affinity_hint(instance->msixentry[i].vector,
+					get_cpu_mask(cpu)))
+					dev_err(&instance->pdev->dev, "Error "
+						"setting affinity hint for cpu "
+						"%d\n", cpu);
+				cpu = cpumask_next(cpu, cpu_online_mask);
+			}
 		}
 	} else {
 		instance->irq_context[0].instance = instance;
@@ -5243,6 +5665,7 @@
 
 fail_set_dma_mask:
 fail_ready_state:
+fail_reenable_msix:
 
 	pci_disable_device(pdev);
 
@@ -5273,6 +5696,8 @@
 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
 		del_timer_sync(&instance->sriov_heartbeat_timer);
 
+	if (instance->fw_crash_state != UNAVAILABLE)
+		megasas_free_host_crash_buffer(instance);
 	scsi_remove_host(instance->host);
 	megasas_flush_cache(instance);
 	megasas_shutdown_controller(instance, MR_DCMD_CTRL_SHUTDOWN);
@@ -5306,8 +5731,9 @@
 
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
-			irq_set_affinity_hint(
-				instance->msixentry[i].vector, NULL);
+			if (smp_affinity_enable)
+				irq_set_affinity_hint(
+					instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
 		}
@@ -5322,14 +5748,18 @@
 	case PCI_DEVICE_ID_LSI_INVADER:
 	case PCI_DEVICE_ID_LSI_FURY:
 		megasas_release_fusion(instance);
-		for (i = 0; i < 2 ; i++)
+		for (i = 0; i < 2 ; i++) {
 			if (fusion->ld_map[i])
 				dma_free_coherent(&instance->pdev->dev,
-						  fusion->map_sz,
+						  fusion->max_map_sz,
 						  fusion->ld_map[i],
-						  fusion->
-						  ld_map_phys[i]);
-		kfree(instance->ctrl_context);
+						  fusion->ld_map_phys[i]);
+			if (fusion->ld_drv_map[i])
+				free_pages((ulong)fusion->ld_drv_map[i],
+					fusion->drv_map_pages);
+		}
+		free_pages((ulong)instance->ctrl_context,
+			instance->ctrl_context_pages);
 		break;
 	default:
 		megasas_release_mfi(instance);
@@ -5342,6 +5772,8 @@
 		break;
 	}
 
+	kfree(instance->ctrl_info);
+
 	if (instance->evt_detail)
 		pci_free_consistent(pdev, sizeof(struct megasas_evt_detail),
 				instance->evt_detail, instance->evt_detail_h);
@@ -5363,6 +5795,10 @@
 				    instance->hb_host_mem,
 				    instance->hb_host_mem_h);
 
+	if (instance->crash_dump_buf)
+		pci_free_consistent(pdev, CRASH_DMA_BUF_SIZE,
+			    instance->crash_dump_buf, instance->crash_dump_h);
+
 	scsi_host_put(host);
 
 	pci_disable_device(pdev);
@@ -5385,8 +5821,9 @@
 	instance->instancet->disable_intr(instance);
 	if (instance->msix_vectors)
 		for (i = 0; i < instance->msix_vectors; i++) {
-			irq_set_affinity_hint(
-				instance->msixentry[i].vector, NULL);
+			if (smp_affinity_enable)
+				irq_set_affinity_hint(
+					instance->msixentry[i].vector, NULL);
 			free_irq(instance->msixentry[i].vector,
 				 &instance->irq_context[i]);
 		}
@@ -5448,12 +5885,53 @@
 	spin_lock_irqsave(&poll_aen_lock, flags);
 	if (megasas_poll_wait_aen)
 		mask =   (POLLIN | POLLRDNORM);
+
 	else
 		mask = 0;
+	megasas_poll_wait_aen = 0;
 	spin_unlock_irqrestore(&poll_aen_lock, flags);
 	return mask;
 }
 
+/*
+ * megasas_set_crash_dump_params_ioctl:
+ *		Send CRASH_DUMP_MODE DCMD to all controllers
+ * @cmd:	MFI command frame
+ */
+
+static int megasas_set_crash_dump_params_ioctl(
+	struct megasas_cmd *cmd)
+{
+	struct megasas_instance *local_instance;
+	int i, error = 0;
+	int crash_support;
+
+	crash_support = cmd->frame->dcmd.mbox.w[0];
+
+	for (i = 0; i < megasas_mgmt_info.max_index; i++) {
+		local_instance = megasas_mgmt_info.instance[i];
+		if (local_instance && local_instance->crash_dump_drv_support) {
+			if ((local_instance->adprecovery ==
+				MEGASAS_HBA_OPERATIONAL) &&
+				!megasas_set_crash_dump_params(local_instance,
+					crash_support)) {
+				local_instance->crash_dump_app_support =
+					crash_support;
+				dev_info(&local_instance->pdev->dev,
+					"Application firmware crash "
+					"dump mode set successfully\n");
+				error = 0;
+			} else {
+				dev_info(&local_instance->pdev->dev,
+					"Application firmware crash "
+					"dump mode set failed\n");
+				error = -1;
+			}
+		}
+	}
+	return error;
+}
+
 /**
  * megasas_mgmt_fw_ioctl -	Issues management ioctls to FW
  * @instance:			Adapter soft state
@@ -5500,6 +5978,12 @@
 					       MFI_FRAME_SGL64 |
 					       MFI_FRAME_SENSE64));
 
+	if (cmd->frame->dcmd.opcode == MR_DRIVER_SET_APP_CRASHDUMP_MODE) {
+		error = megasas_set_crash_dump_params_ioctl(cmd);
+		megasas_return_cmd(instance, cmd);
+		return error;
+	}
+
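From user space this mode is toggled through the existing MFI pass-through ioctl: the application builds a DCMD frame with opcode MR_DRIVER_SET_APP_CRASHDUMP_MODE and the requested mode in mbox.w[0], which megasas_set_crash_dump_params_ioctl() reads as crash_support. A hypothetical application-side frame setup, sketched for illustration only (the mode value and surrounding plumbing are assumptions, not part of the patch):

	/* Hypothetical ioctl-side DCMD setup; fields follow
	 * struct megasas_dcmd_frame, mode value is an assumption */
	dcmd->cmd = MFI_CMD_DCMD;
	dcmd->opcode = MR_DRIVER_SET_APP_CRASHDUMP_MODE;
	dcmd->mbox.w[0] = 1;	/* 1 = enable FW crash dump, 0 = disable */
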
 	/*
 	 * The management interface between applications and the fw uses
 	 * MFI frames. E.g, RAID configuration changes, LD property changes
@@ -5619,9 +6103,14 @@
 					  le32_to_cpu(kern_sge32[i].length),
 					  kbuff_arr[i],
 					  le32_to_cpu(kern_sge32[i].phys_addr));
+			kbuff_arr[i] = NULL;
 	}
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 	return error;
 }
 
diff --git a/drivers/scsi/megaraid/megaraid_sas_fp.c b/drivers/scsi/megaraid/megaraid_sas_fp.c
index 081bfff..685e6f3 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fp.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fp.c
@@ -55,6 +55,13 @@
 #include "megaraid_sas.h"
 #include <asm/div64.h>
 
+#define LB_PENDING_CMDS_DEFAULT 4
+static unsigned int lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+module_param(lb_pending_cmds, int, S_IRUGO);
+MODULE_PARM_DESC(lb_pending_cmds, "Change raid-1 load balancing outstanding "
+	"threshold. Valid values are 1-128. Default: 4");
+
+
 #define ABS_DIFF(a, b)   (((a) > (b)) ? ((a) - (b)) : ((b) - (a)))
 #define MR_LD_STATE_OPTIMAL 3
 #define FALSE 0
@@ -66,16 +73,13 @@
 #define SPAN_INVALID  0xff
 
 /* Prototypes */
-void mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
-	struct LD_LOAD_BALANCE_INFO *lbInfo);
-
-static void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
+static void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
 	PLD_SPAN_INFO ldSpanInfo);
 static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 	u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
-	struct RAID_CONTEXT *pRAID_Context, struct MR_FW_RAID_MAP_ALL *map);
+	struct RAID_CONTEXT *pRAID_Context, struct MR_DRV_RAID_MAP_ALL *map);
 static u64 get_row_from_strip(struct megasas_instance *instance, u32 ld,
-	u64 strip, struct MR_FW_RAID_MAP_ALL *map);
+	u64 strip, struct MR_DRV_RAID_MAP_ALL *map);
 
 u32 mega_mod64(u64 dividend, u32 divisor)
 {
@@ -109,94 +113,183 @@
 	return d;
 }
 
-struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+struct MR_LD_RAID *MR_LdRaidGet(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return &map->raidMap.ldSpanMap[ld].ldRaid;
 }
 
 static struct MR_SPAN_BLOCK_INFO *MR_LdSpanInfoGet(u32 ld,
-						   struct MR_FW_RAID_MAP_ALL
+						   struct MR_DRV_RAID_MAP_ALL
 						   *map)
 {
 	return &map->raidMap.ldSpanMap[ld].spanBlock[0];
 }
 
-static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_FW_RAID_MAP_ALL *map)
+static u8 MR_LdDataArmGet(u32 ld, u32 armIdx, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return map->raidMap.ldSpanMap[ld].dataArmMap[armIdx];
 }
 
-u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_ArPdGet(u32 ar, u32 arm, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return le16_to_cpu(map->raidMap.arMapInfo[ar].pd[arm]);
 }
 
-u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_LdSpanArrayGet(u32 ld, u32 span, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return le16_to_cpu(map->raidMap.ldSpanMap[ld].spanBlock[span].span.arrayRef);
 }
 
-u16 MR_PdDevHandleGet(u32 pd, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_PdDevHandleGet(u32 pd, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return map->raidMap.devHndlInfo[pd].curDevHdl;
 }
 
-u16 MR_GetLDTgtId(u32 ld, struct MR_FW_RAID_MAP_ALL *map)
+u16 MR_GetLDTgtId(u32 ld, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return le16_to_cpu(map->raidMap.ldSpanMap[ld].ldRaid.targetId);
 }
 
-u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_FW_RAID_MAP_ALL *map)
+u8 MR_TargetIdToLdGet(u32 ldTgtId, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return map->raidMap.ldTgtIdToLd[ldTgtId];
 }
 
 static struct MR_LD_SPAN *MR_LdSpanPtrGet(u32 ld, u32 span,
-					  struct MR_FW_RAID_MAP_ALL *map)
+					  struct MR_DRV_RAID_MAP_ALL *map)
 {
 	return &map->raidMap.ldSpanMap[ld].spanBlock[span].span;
 }
 
 /*
+ * This function populates the driver raid map using the firmware raid map
+ */
+void MR_PopulateDrvRaidMap(struct megasas_instance *instance)
+{
+	struct fusion_context *fusion = instance->ctrl_context;
+	struct MR_FW_RAID_MAP_ALL     *fw_map_old    = NULL;
+	struct MR_FW_RAID_MAP         *pFwRaidMap    = NULL;
+	int i;
+
+
+	struct MR_DRV_RAID_MAP_ALL *drv_map =
+			fusion->ld_drv_map[(instance->map_id & 1)];
+	struct MR_DRV_RAID_MAP *pDrvRaidMap = &drv_map->raidMap;
+
+	if (instance->supportmax256vd) {
+		memcpy(fusion->ld_drv_map[instance->map_id & 1],
+			fusion->ld_map[instance->map_id & 1],
+			fusion->current_map_sz);
+		/* New Raid map will not set totalSize, so keep expected value
+		 * for legacy code in ValidateMapInfo
+		 */
+		pDrvRaidMap->totalSize = sizeof(struct MR_FW_RAID_MAP_EXT);
+	} else {
+		fw_map_old = (struct MR_FW_RAID_MAP_ALL *)
+			fusion->ld_map[(instance->map_id & 1)];
+		pFwRaidMap = &fw_map_old->raidMap;
+
+#if VD_EXT_DEBUG
+		for (i = 0; i < pFwRaidMap->ldCount; i++) {
+			dev_dbg(&instance->pdev->dev, "(%d): Index 0x%x "
+				"Target Id 0x%x Seq Num 0x%x Size 0x%llx\n",
+				instance->unique_id, i,
+				fw_map_old->raidMap.ldSpanMap[i].ldRaid.targetId,
+				fw_map_old->raidMap.ldSpanMap[i].ldRaid.seqNum,
+				fw_map_old->raidMap.ldSpanMap[i].ldRaid.size);
+		}
+#endif
+
+		memset(drv_map, 0, fusion->drv_map_sz);
+		pDrvRaidMap->totalSize = pFwRaidMap->totalSize;
+		pDrvRaidMap->ldCount = pFwRaidMap->ldCount;
+		pDrvRaidMap->fpPdIoTimeoutSec = pFwRaidMap->fpPdIoTimeoutSec;
+		for (i = 0; i < MAX_RAIDMAP_LOGICAL_DRIVES + MAX_RAIDMAP_VIEWS; i++)
+			pDrvRaidMap->ldTgtIdToLd[i] =
+				(u8)pFwRaidMap->ldTgtIdToLd[i];
+		for (i = 0; i < pDrvRaidMap->ldCount; i++) {
+			pDrvRaidMap->ldSpanMap[i] = pFwRaidMap->ldSpanMap[i];
+#if VD_EXT_DEBUG
+			dev_dbg(&instance->pdev->dev,
+				"pFwRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+				"pFwRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+				"size 0x%x\n", i, i,
+				pFwRaidMap->ldSpanMap[i].ldRaid.targetId,
+				pFwRaidMap->ldSpanMap[i].ldRaid.seqNum,
+				(u32)pFwRaidMap->ldSpanMap[i].ldRaid.rowSize);
+			dev_dbg(&instance->pdev->dev,
+				"pDrvRaidMap->ldSpanMap[%d].ldRaid.targetId 0x%x "
+				"pDrvRaidMap->ldSpanMap[%d].ldRaid.seqNum 0x%x "
+				"size 0x%x\n", i, i,
+				pDrvRaidMap->ldSpanMap[i].ldRaid.targetId,
+				pDrvRaidMap->ldSpanMap[i].ldRaid.seqNum,
+				(u32)pDrvRaidMap->ldSpanMap[i].ldRaid.rowSize);
+			dev_dbg(&instance->pdev->dev, "Driver raid map all %p "
+				"raid map %p LD RAID MAP %p/%p\n", drv_map,
+				pDrvRaidMap, &pFwRaidMap->ldSpanMap[i].ldRaid,
+				&pDrvRaidMap->ldSpanMap[i].ldRaid);
+#endif
+		}
+		memcpy(pDrvRaidMap->arMapInfo, pFwRaidMap->arMapInfo,
+			sizeof(struct MR_ARRAY_INFO) * MAX_RAIDMAP_ARRAYS);
+		memcpy(pDrvRaidMap->devHndlInfo, pFwRaidMap->devHndlInfo,
+			sizeof(struct MR_DEV_HANDLE_INFO) *
+			MAX_RAIDMAP_PHYSICAL_DEVICES);
+	}
+}
+
+/*
  * This function will validate Map info data provided by FW
  */
 u8 MR_ValidateMapInfo(struct megasas_instance *instance)
 {
-	struct fusion_context *fusion = instance->ctrl_context;
-	struct MR_FW_RAID_MAP_ALL *map = fusion->ld_map[(instance->map_id & 1)];
-	struct LD_LOAD_BALANCE_INFO *lbInfo = fusion->load_balance_info;
-	PLD_SPAN_INFO ldSpanInfo = fusion->log_to_span;
-	struct MR_FW_RAID_MAP *pFwRaidMap = &map->raidMap;
+	struct fusion_context *fusion;
+	struct MR_DRV_RAID_MAP_ALL *drv_map;
+	struct MR_DRV_RAID_MAP *pDrvRaidMap;
+	struct LD_LOAD_BALANCE_INFO *lbInfo;
+	PLD_SPAN_INFO ldSpanInfo;
 	struct MR_LD_RAID         *raid;
 	int ldCount, num_lds;
 	u16 ld;
+	u32 expected_size;
 
 
-	if (le32_to_cpu(pFwRaidMap->totalSize) !=
-	    (sizeof(struct MR_FW_RAID_MAP) -sizeof(struct MR_LD_SPAN_MAP) +
-	     (sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pFwRaidMap->ldCount)))) {
-		printk(KERN_ERR "megasas: map info structure size 0x%x is not matching with ld count\n",
-		       (unsigned int)((sizeof(struct MR_FW_RAID_MAP) -
-				       sizeof(struct MR_LD_SPAN_MAP)) +
-				      (sizeof(struct MR_LD_SPAN_MAP) *
-					le32_to_cpu(pFwRaidMap->ldCount))));
-		printk(KERN_ERR "megasas: span map %x, pFwRaidMap->totalSize "
-		       ": %x\n", (unsigned int)sizeof(struct MR_LD_SPAN_MAP),
-			le32_to_cpu(pFwRaidMap->totalSize));
+	MR_PopulateDrvRaidMap(instance);
+
+	fusion = instance->ctrl_context;
+	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+	pDrvRaidMap = &drv_map->raidMap;
+
+	lbInfo = fusion->load_balance_info;
+	ldSpanInfo = fusion->log_to_span;
+
+	if (instance->supportmax256vd)
+		expected_size = sizeof(struct MR_FW_RAID_MAP_EXT);
+	else
+		expected_size =
+			(sizeof(struct MR_FW_RAID_MAP) - sizeof(struct MR_LD_SPAN_MAP) +
+			(sizeof(struct MR_LD_SPAN_MAP) * le32_to_cpu(pDrvRaidMap->ldCount)));
+
+	if (le32_to_cpu(pDrvRaidMap->totalSize) != expected_size) {
+		dev_err(&instance->pdev->dev, "map info structure size 0x%x does not match ld count\n",
+		       (unsigned int) expected_size);
+		dev_err(&instance->pdev->dev, "span map %x, pDrvRaidMap->totalSize: %x\n",
+			(unsigned int)sizeof(struct MR_LD_SPAN_MAP),
+			le32_to_cpu(pDrvRaidMap->totalSize));
 		return 0;
 	}
 
 	if (instance->UnevenSpanSupport)
-		mr_update_span_set(map, ldSpanInfo);
+		mr_update_span_set(drv_map, ldSpanInfo);
 
-	mr_update_load_balance_params(map, lbInfo);
+	mr_update_load_balance_params(drv_map, lbInfo);
 
-	num_lds = le32_to_cpu(map->raidMap.ldCount);
+	num_lds = le32_to_cpu(drv_map->raidMap.ldCount);
 
 	/*Convert Raid capability values to CPU arch */
 	for (ldCount = 0; ldCount < num_lds; ldCount++) {
-		ld = MR_TargetIdToLdGet(ldCount, map);
-		raid = MR_LdRaidGet(ld, map);
+		ld = MR_TargetIdToLdGet(ldCount, drv_map);
+		raid = MR_LdRaidGet(ld, drv_map);
 		le32_to_cpus((u32 *)&raid->capability);
 	}
 
@@ -204,7 +297,7 @@
 }
 
 u32 MR_GetSpanBlock(u32 ld, u64 row, u64 *span_blk,
-		    struct MR_FW_RAID_MAP_ALL *map)
+		    struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_SPAN_BLOCK_INFO *pSpanBlock = MR_LdSpanInfoGet(ld, map);
 	struct MR_QUAD_ELEMENT    *quad;
@@ -246,7 +339,8 @@
 * ldSpanInfo - ldSpanInfo per HBA instance
 */
 #if SPAN_DEBUG
-static int getSpanInfo(struct MR_FW_RAID_MAP_ALL *map, PLD_SPAN_INFO ldSpanInfo)
+static int getSpanInfo(struct MR_DRV_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo)
 {
 
 	u8   span;
@@ -257,9 +351,9 @@
 	int ldCount;
 	u16 ld;
 
-	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
 		ld = MR_TargetIdToLdGet(ldCount, map);
-			if (ld >= MAX_LOGICAL_DRIVES)
+			if (ld >= MAX_LOGICAL_DRIVES_EXT)
 				continue;
 		raid = MR_LdRaidGet(ld, map);
 		dev_dbg(&instance->pdev->dev, "LD %x: span_depth=%x\n",
@@ -339,7 +433,7 @@
 */
 
 u32 mr_spanset_get_span_block(struct megasas_instance *instance,
-		u32 ld, u64 row, u64 *span_blk, struct MR_FW_RAID_MAP_ALL *map)
+		u32 ld, u64 row, u64 *span_blk, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct fusion_context *fusion = instance->ctrl_context;
 	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
@@ -402,7 +496,7 @@
 */
 
 static u64  get_row_from_strip(struct megasas_instance *instance,
-	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct fusion_context *fusion = instance->ctrl_context;
 	struct MR_LD_RAID	*raid = MR_LdRaidGet(ld, map);
@@ -471,7 +565,7 @@
 */
 
 static u64 get_strip_from_row(struct megasas_instance *instance,
-		u32 ld, u64 row, struct MR_FW_RAID_MAP_ALL *map)
+		u32 ld, u64 row, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct fusion_context *fusion = instance->ctrl_context;
 	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
@@ -532,7 +626,7 @@
 */
 
 static u32 get_arm_from_strip(struct megasas_instance *instance,
-	u32 ld, u64 strip, struct MR_FW_RAID_MAP_ALL *map)
+	u32 ld, u64 strip, struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct fusion_context *fusion = instance->ctrl_context;
 	struct MR_LD_RAID         *raid = MR_LdRaidGet(ld, map);
@@ -580,7 +674,7 @@
 
 /* This Function will return Phys arm */
 u8 get_arm(struct megasas_instance *instance, u32 ld, u8 span, u64 stripe,
-		struct MR_FW_RAID_MAP_ALL *map)
+		struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
 	/* Need to check correct default value */
@@ -624,7 +718,7 @@
 static u8 mr_spanset_get_phy_params(struct megasas_instance *instance, u32 ld,
 		u64 stripRow, u16 stripRef, struct IO_REQUEST_INFO *io_info,
 		struct RAID_CONTEXT *pRAID_Context,
-		struct MR_FW_RAID_MAP_ALL *map)
+		struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
 	u32     pd, arRef;
@@ -682,6 +776,7 @@
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
 	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
 					physArm;
+	io_info->span_arm = pRAID_Context->spanArm;
 	return retval;
 }
 
@@ -705,7 +800,7 @@
 u8 MR_GetPhyParams(struct megasas_instance *instance, u32 ld, u64 stripRow,
 		u16 stripRef, struct IO_REQUEST_INFO *io_info,
 		struct RAID_CONTEXT *pRAID_Context,
-		struct MR_FW_RAID_MAP_ALL *map)
+		struct MR_DRV_RAID_MAP_ALL *map)
 {
 	struct MR_LD_RAID  *raid = MR_LdRaidGet(ld, map);
 	u32         pd, arRef;
@@ -778,6 +873,7 @@
 	*pdBlock += stripRef + le64_to_cpu(MR_LdSpanPtrGet(ld, span, map)->startBlk);
 	pRAID_Context->spanArm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) |
 		physArm;
+	io_info->span_arm = pRAID_Context->spanArm;
 	return retval;
 }
 
@@ -794,7 +890,7 @@
 MR_BuildRaidContext(struct megasas_instance *instance,
 		    struct IO_REQUEST_INFO *io_info,
 		    struct RAID_CONTEXT *pRAID_Context,
-		    struct MR_FW_RAID_MAP_ALL *map, u8 **raidLUN)
+		    struct MR_DRV_RAID_MAP_ALL *map, u8 **raidLUN)
 {
 	struct MR_LD_RAID  *raid;
 	u32         ld, stripSize, stripe_mask;
@@ -1043,8 +1139,8 @@
 * ldSpanInfo - ldSpanInfo per HBA instance
 *
 */
-void mr_update_span_set(struct MR_FW_RAID_MAP_ALL *map,
-			PLD_SPAN_INFO ldSpanInfo)
+void mr_update_span_set(struct MR_DRV_RAID_MAP_ALL *map,
+	PLD_SPAN_INFO ldSpanInfo)
 {
 	u8   span, count;
 	u32  element, span_row_width;
@@ -1056,9 +1152,9 @@
 	u16 ld;
 
 
-	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
 		ld = MR_TargetIdToLdGet(ldCount, map);
-		if (ld >= MAX_LOGICAL_DRIVES)
+		if (ld >= MAX_LOGICAL_DRIVES_EXT)
 			continue;
 		raid = MR_LdRaidGet(ld, map);
 		for (element = 0; element < MAX_QUAD_DEPTH; element++) {
@@ -1152,90 +1248,105 @@
 
 }
 
-void
-mr_update_load_balance_params(struct MR_FW_RAID_MAP_ALL *map,
-			      struct LD_LOAD_BALANCE_INFO *lbInfo)
+void mr_update_load_balance_params(struct MR_DRV_RAID_MAP_ALL *drv_map,
+	struct LD_LOAD_BALANCE_INFO *lbInfo)
 {
 	int ldCount;
 	u16 ld;
 	struct MR_LD_RAID *raid;
 
-	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES; ldCount++) {
-		ld = MR_TargetIdToLdGet(ldCount, map);
-		if (ld >= MAX_LOGICAL_DRIVES) {
+	if (lb_pending_cmds > 128 || lb_pending_cmds < 1)
+		lb_pending_cmds = LB_PENDING_CMDS_DEFAULT;
+
+	for (ldCount = 0; ldCount < MAX_LOGICAL_DRIVES_EXT; ldCount++) {
+		ld = MR_TargetIdToLdGet(ldCount, drv_map);
+		if (ld >= MAX_LOGICAL_DRIVES_EXT) {
 			lbInfo[ldCount].loadBalanceFlag = 0;
 			continue;
 		}
 
-		raid = MR_LdRaidGet(ld, map);
-
-		/* Two drive Optimal RAID 1 */
-		if ((raid->level == 1)  &&  (raid->rowSize == 2) &&
-		    (raid->spanDepth == 1) && raid->ldState ==
-		    MR_LD_STATE_OPTIMAL) {
-			u32 pd, arRef;
-
-			lbInfo[ldCount].loadBalanceFlag = 1;
-
-			/* Get the array on which this span is present */
-			arRef = MR_LdSpanArrayGet(ld, 0, map);
-
-			/* Get the Pd */
-			pd = MR_ArPdGet(arRef, 0, map);
-			/* Get dev handle from Pd */
-			lbInfo[ldCount].raid1DevHandle[0] =
-				MR_PdDevHandleGet(pd, map);
-			/* Get the Pd */
-			pd = MR_ArPdGet(arRef, 1, map);
-
-			/* Get the dev handle from Pd */
-			lbInfo[ldCount].raid1DevHandle[1] =
-				MR_PdDevHandleGet(pd, map);
-		} else
+		raid = MR_LdRaidGet(ld, drv_map);
+		if ((raid->level != 1) ||
+			(raid->ldState != MR_LD_STATE_OPTIMAL)) {
 			lbInfo[ldCount].loadBalanceFlag = 0;
+			continue;
+		}
+		lbInfo[ldCount].loadBalanceFlag = 1;
 	}
 }
 
-u8 megasas_get_best_arm(struct LD_LOAD_BALANCE_INFO *lbInfo, u8 arm, u64 block,
-			u32 count)
+u8 megasas_get_best_arm_pd(struct megasas_instance *instance,
+	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
 {
-	u16     pend0, pend1;
+	struct fusion_context *fusion;
+	struct MR_LD_RAID  *raid;
+	struct MR_DRV_RAID_MAP_ALL *drv_map;
+	u16     pend0, pend1, ld;
 	u64     diff0, diff1;
-	u8      bestArm;
+	u8      bestArm, pd0, pd1, span, arm;
+	u32     arRef, span_row_size;
+
+	u64 block = io_info->ldStartBlock;
+	u32 count = io_info->numBlocks;
+
+	span = ((io_info->span_arm & RAID_CTX_SPANARM_SPAN_MASK)
+			>> RAID_CTX_SPANARM_SPAN_SHIFT);
+	arm = (io_info->span_arm & RAID_CTX_SPANARM_ARM_MASK);
+
+
+	fusion = instance->ctrl_context;
+	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
+	ld = MR_TargetIdToLdGet(io_info->ldTgtId, drv_map);
+	raid = MR_LdRaidGet(ld, drv_map);
+	span_row_size = instance->UnevenSpanSupport ?
+			SPAN_ROW_SIZE(drv_map, ld, span) : raid->rowSize;
+
+	arRef = MR_LdSpanArrayGet(ld, span, drv_map);
+	pd0 = MR_ArPdGet(arRef, arm, drv_map);
+	pd1 = MR_ArPdGet(arRef, (arm + 1) >= span_row_size ?
+		(arm + 1 - span_row_size) : arm + 1, drv_map);
 
 	/* get the pending cmds for the data and mirror arms */
-	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[0]);
-	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[1]);
+	pend0 = atomic_read(&lbInfo->scsi_pending_cmds[pd0]);
+	pend1 = atomic_read(&lbInfo->scsi_pending_cmds[pd1]);
 
 	/* Determine the disk whose head is nearer to the req. block */
-	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[0]);
-	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[1]);
-	bestArm = (diff0 <= diff1 ? 0 : 1);
+	diff0 = ABS_DIFF(block, lbInfo->last_accessed_block[pd0]);
+	diff1 = ABS_DIFF(block, lbInfo->last_accessed_block[pd1]);
+	bestArm = (diff0 <= diff1 ? arm : arm ^ 1);
 
-	/*Make balance count from 16 to 4 to keep driver in sync with Firmware*/
-	if ((bestArm == arm && pend0 > pend1 + 4)  ||
-	    (bestArm != arm && pend1 > pend0 + 4))
+	if ((bestArm == arm && pend0 > pend1 + lb_pending_cmds)  ||
+			(bestArm != arm && pend1 > pend0 + lb_pending_cmds))
 		bestArm ^= 1;
 
 	/* Update the last accessed block on the correct pd */
-	lbInfo->last_accessed_block[bestArm] = block + count - 1;
-
-	return bestArm;
+	io_info->pd_after_lb = (bestArm == arm) ? pd0 : pd1;
+	lbInfo->last_accessed_block[io_info->pd_after_lb] = block + count - 1;
+	io_info->span_arm = (span << RAID_CTX_SPANARM_SPAN_SHIFT) | bestArm;
+#if SPAN_DEBUG
+	if (arm != bestArm)
+		dev_dbg(&instance->pdev->dev, "LSI Debug R1 Load balance "
+			"occurred - span 0x%x arm 0x%x bestArm 0x%x "
+			"io_info->span_arm 0x%x\n",
+			span, arm, bestArm, io_info->span_arm);
+#endif
+	return io_info->pd_after_lb;
 }
 
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
-			   struct IO_REQUEST_INFO *io_info)
+u16 get_updated_dev_handle(struct megasas_instance *instance,
+	struct LD_LOAD_BALANCE_INFO *lbInfo, struct IO_REQUEST_INFO *io_info)
 {
-	u8 arm, old_arm;
+	u8 arm_pd;
 	u16 devHandle;
+	struct fusion_context *fusion;
+	struct MR_DRV_RAID_MAP_ALL *drv_map;
 
-	old_arm = lbInfo->raid1DevHandle[0] == io_info->devHandle ? 0 : 1;
+	fusion = instance->ctrl_context;
+	drv_map = fusion->ld_drv_map[(instance->map_id & 1)];
 
-	/* get best new arm */
-	arm  = megasas_get_best_arm(lbInfo, old_arm, io_info->ldStartBlock,
-				    io_info->numBlocks);
-	devHandle = lbInfo->raid1DevHandle[arm];
-	atomic_inc(&lbInfo->scsi_pending_cmds[arm]);
-
+	/* get best new arm (PD ID) */
+	arm_pd  = megasas_get_best_arm_pd(instance, lbInfo, io_info);
+	devHandle = MR_PdDevHandleGet(arm_pd, drv_map);
+	atomic_inc(&lbInfo->scsi_pending_cmds[arm_pd]);
 	return devHandle;
 }
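
Taken together, megasas_get_best_arm_pd and get_updated_dev_handle replace the old fixed two-arm bookkeeping with per-PD state: the arm whose head is nearer to the requested block wins, unless its pending-command count exceeds the other arm's by more than lb_pending_cmds (a module parameter clamped to 1-128 in mr_update_load_balance_params). A condensed sketch of that heuristic, with plain scalars standing in for the per-PD scsi_pending_cmds[] and last_accessed_block[] fields of lbInfo; a sketch only, not code from the patch:

	static u8 pick_best_arm(u64 block, u8 arm, u16 pend0, u16 pend1,
				u64 last0, u64 last1, unsigned int threshold)
	{
		u64 diff0 = (last0 > block) ? last0 - block : block - last0;
		u64 diff1 = (last1 > block) ? last1 - block : block - last1;
		u8 best = (diff0 <= diff1) ? arm : arm ^ 1; /* nearer head wins */

		/* ...unless the chosen arm is overloaded past the threshold */
		if ((best == arm && pend0 > pend1 + threshold) ||
		    (best != arm && pend1 > pend0 + threshold))
			best ^= 1;
		return best;
	}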
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.c b/drivers/scsi/megaraid/megaraid_sas_fusion.c
index 3ed03df..f37eed6 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.c
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.c
@@ -50,6 +50,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_dbg.h>
 
 #include "megaraid_sas_fusion.h"
 #include "megaraid_sas.h"
@@ -76,8 +77,6 @@
 void
 megasas_check_and_restore_queue_depth(struct megasas_instance *instance);
 
-u16 get_updated_dev_handle(struct LD_LOAD_BALANCE_INFO *lbInfo,
-			   struct IO_REQUEST_INFO *in_info);
 int megasas_transition_to_ready(struct megasas_instance *instance, int ocr);
 void megaraid_sas_kill_hba(struct megasas_instance *instance);
 
@@ -91,6 +90,8 @@
 extern struct megasas_mgmt_info megasas_mgmt_info;
 extern int resetwaittime;
 
+
+
 /**
  * megasas_enable_intr_fusion -	Enables interrupts
  * @regs:			MFI register set
@@ -163,7 +164,7 @@
 		(struct fusion_context *)instance->ctrl_context;
 	struct megasas_cmd_fusion *cmd = NULL;
 
-	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+	spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
 
 	if (!list_empty(&fusion->cmd_pool)) {
 		cmd = list_entry((&fusion->cmd_pool)->next,
@@ -173,7 +174,7 @@
 		printk(KERN_ERR "megasas: Command pool (fusion) empty!\n");
 	}
 
-	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+	spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
 	return cmd;
 }
 
@@ -182,21 +183,47 @@
  * @instance:		Adapter soft state
  * @cmd:		Command packet to be returned to free command pool
  */
-static inline void
-megasas_return_cmd_fusion(struct megasas_instance *instance,
-			  struct megasas_cmd_fusion *cmd)
+inline void megasas_return_cmd_fusion(struct megasas_instance *instance,
+	struct megasas_cmd_fusion *cmd)
 {
 	unsigned long flags;
 	struct fusion_context *fusion =
 		(struct fusion_context *)instance->ctrl_context;
 
-	spin_lock_irqsave(&fusion->cmd_pool_lock, flags);
+	spin_lock_irqsave(&fusion->mpt_pool_lock, flags);
 
 	cmd->scmd = NULL;
 	cmd->sync_cmd_idx = (u32)ULONG_MAX;
-	list_add_tail(&cmd->list, &fusion->cmd_pool);
+	list_add(&cmd->list, (&fusion->cmd_pool)->next);
 
-	spin_unlock_irqrestore(&fusion->cmd_pool_lock, flags);
+	spin_unlock_irqrestore(&fusion->mpt_pool_lock, flags);
+}
+
+/**
+ * megasas_return_mfi_mpt_pthr - Return a mfi and mpt to free command pool
+ * @instance:		Adapter soft state
+ * @cmd_mfi:		MFI Command packet to be returned to free command pool
+ * @cmd_mpt:		MPT Command packet to be returned to free command pool
+ */
+inline void megasas_return_mfi_mpt_pthr(struct megasas_instance *instance,
+		struct megasas_cmd *cmd_mfi,
+		struct megasas_cmd_fusion *cmd_fusion)
+{
+	unsigned long flags;
+
+	/*
+	 * TO DO: optimize this code to use only one lock instead of the
+	 * two currently used; mpt_pool_lock is acquired inside
+	 * mfi_pool_lock
+	 */
+	spin_lock_irqsave(&instance->mfi_pool_lock, flags);
+	megasas_return_cmd_fusion(instance, cmd_fusion);
+	if (atomic_read(&cmd_mfi->mfi_mpt_pthr) != MFI_MPT_ATTACHED)
+		dev_err(&instance->pdev->dev, "Possible bug from %s %d\n",
+			__func__, __LINE__);
+	atomic_set(&cmd_mfi->mfi_mpt_pthr, MFI_MPT_DETACHED);
+	__megasas_return_cmd(instance, cmd_mfi);
+	spin_unlock_irqrestore(&instance->mfi_pool_lock, flags);
 }
 
 /**
@@ -562,9 +589,11 @@
 {
 	int i;
 	struct megasas_header *frame_hdr = &cmd->frame->hdr;
+	struct fusion_context *fusion;
 
 	u32 msecs = seconds * 1000;
 
+	fusion = instance->ctrl_context;
 	/*
 	 * Wait for cmd_status to change
 	 */
@@ -573,8 +602,12 @@
 		msleep(20);
 	}
 
-	if (frame_hdr->cmd_status == 0xff)
+	if (frame_hdr->cmd_status == 0xff) {
+		if (fusion)
+			megasas_return_mfi_mpt_pthr(instance, cmd,
+				cmd->mpt_pthr_cmd_blocked);
 		return -ETIME;
+	}
 
 	return 0;
 }
@@ -650,6 +683,10 @@
 	/* driver supports HA / Remote LUN over Fast Path interface */
 	init_frame->driver_operations.mfi_capabilities.support_fp_remote_lun
 		= 1;
+	init_frame->driver_operations.mfi_capabilities.support_max_255lds
+		= 1;
+	init_frame->driver_operations.mfi_capabilities.support_ndrive_r1_lb
+		= 1;
 	/* Convert capability to LE32 */
 	cpu_to_le32s((u32 *)&init_frame->driver_operations.mfi_capabilities);
 
@@ -709,6 +746,13 @@
  * Issues an internal command (DCMD) to get the FW's controller PD
  * list structure.  This information is mainly used to find out SYSTEM
  * supported by the FW.
+ * dcmd.mbox value setting for MR_DCMD_LD_MAP_GET_INFO
+ * dcmd.mbox.b[0]	- number of LDs being sync'd
+ * dcmd.mbox.b[1]	- 0 - complete command immediately.
+ *			- 1 - pend till config change
+ * dcmd.mbox.b[2]	- 0 - supports max 64 lds and uses legacy MR_FW_RAID_MAP
+ *			- 1 - supports max MAX_LOGICAL_DRIVES_EXT lds and
+ *				uses extended struct MR_FW_RAID_MAP_EXT
  */
 static int
 megasas_get_ld_map_info(struct megasas_instance *instance)
@@ -716,7 +760,7 @@
 	int ret = 0;
 	struct megasas_cmd *cmd;
 	struct megasas_dcmd_frame *dcmd;
-	struct MR_FW_RAID_MAP_ALL *ci;
+	void *ci;
 	dma_addr_t ci_h = 0;
 	u32 size_map_info;
 	struct fusion_context *fusion;
@@ -737,10 +781,9 @@
 
 	dcmd = &cmd->frame->dcmd;
 
-	size_map_info = sizeof(struct MR_FW_RAID_MAP) +
-		(sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+	size_map_info = fusion->current_map_sz;
 
-	ci = fusion->ld_map[(instance->map_id & 1)];
+	ci = (void *) fusion->ld_map[(instance->map_id & 1)];
 	ci_h = fusion->ld_map_phys[(instance->map_id & 1)];
 
 	if (!ci) {
@@ -749,9 +792,13 @@
 		return -ENOMEM;
 	}
 
-	memset(ci, 0, sizeof(*ci));
+	memset(ci, 0, fusion->max_map_sz);
 	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
-
+#if VD_EXT_DEBUG
+	dev_dbg(&instance->pdev->dev,
+		"%s sending MR_DCMD_LD_MAP_GET_INFO with size %d\n",
+		__func__, cpu_to_le32(size_map_info));
+#endif
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
 	dcmd->sge_count = 1;
@@ -763,14 +810,17 @@
 	dcmd->sgl.sge32[0].phys_addr = cpu_to_le32(ci_h);
 	dcmd->sgl.sge32[0].length = cpu_to_le32(size_map_info);
 
-	if (!megasas_issue_polled(instance, cmd))
-		ret = 0;
-	else {
-		printk(KERN_ERR "megasas: Get LD Map Info Failed\n");
-		ret = -1;
-	}
+	if (instance->ctrl_context && !instance->mask_interrupts)
+		ret = megasas_issue_blocked_cmd(instance, cmd,
+			MEGASAS_BLOCKED_CMD_TIMEOUT);
+	else
+		ret = megasas_issue_polled(instance, cmd);
 
-	megasas_return_cmd(instance, cmd);
+	if (instance->ctrl_context && cmd->mpt_pthr_cmd_blocked)
+		megasas_return_mfi_mpt_pthr(instance, cmd,
+			cmd->mpt_pthr_cmd_blocked);
+	else
+		megasas_return_cmd(instance, cmd);
 
 	return ret;
 }
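
Given the dcmd.mbox byte semantics documented in the comment above, a caller requesting an immediate (non-pended) fetch of the extended map would fill the mailbox roughly as below; a sketch under that documented layout, with num_lds a hypothetical count, not code from the patch:

	/* Sketch: mailbox bytes per the MR_DCMD_LD_MAP_GET_INFO comment */
	memset(dcmd->mbox.b, 0, MFI_MBOX_SIZE);
	dcmd->mbox.b[0] = num_lds;	/* number of LDs being sync'd */
	dcmd->mbox.b[1] = 0;		/* 0: complete immediately, 1: pend */
	dcmd->mbox.b[2] = 1;		/* 1: use extended MR_FW_RAID_MAP_EXT */
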
@@ -807,7 +857,7 @@
 	u32 size_sync_info, num_lds;
 	struct fusion_context *fusion;
 	struct MR_LD_TARGET_SYNC *ci = NULL;
-	struct MR_FW_RAID_MAP_ALL *map;
+	struct MR_DRV_RAID_MAP_ALL *map;
 	struct MR_LD_RAID  *raid;
 	struct MR_LD_TARGET_SYNC *ld_sync;
 	dma_addr_t ci_h = 0;
@@ -828,7 +878,7 @@
 		return 1;
 	}
 
-	map = fusion->ld_map[instance->map_id & 1];
+	map = fusion->ld_drv_map[instance->map_id & 1];
 
 	num_lds = le32_to_cpu(map->raidMap.ldCount);
 
@@ -840,7 +890,7 @@
 
 	ci = (struct MR_LD_TARGET_SYNC *)
 	  fusion->ld_map[(instance->map_id - 1) & 1];
-	memset(ci, 0, sizeof(struct MR_FW_RAID_MAP_ALL));
+	memset(ci, 0, fusion->max_map_sz);
 
 	ci_h = fusion->ld_map_phys[(instance->map_id - 1) & 1];
 
@@ -852,8 +902,7 @@
 		ld_sync->seqNum = raid->seqNum;
 	}
 
-	size_map_info = sizeof(struct MR_FW_RAID_MAP) +
-		(sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
+	size_map_info = fusion->current_map_sz;
 
 	dcmd->cmd = MFI_CMD_DCMD;
 	dcmd->cmd_status = 0xFF;
@@ -971,7 +1020,7 @@
 
 	max_cmd = instance->max_fw_cmds;
 
-	fusion->reply_q_depth = ((max_cmd + 1 + 15)/16)*16;
+	fusion->reply_q_depth = 2 * (((max_cmd + 1 + 15)/16)*16);
 
 	fusion->request_alloc_sz =
 		sizeof(union MEGASAS_REQUEST_DESCRIPTOR_UNION) *max_cmd;
@@ -988,8 +1037,8 @@
 	fusion->max_sge_in_chain =
 		MEGASAS_MAX_SZ_CHAIN_FRAME / sizeof(union MPI2_SGE_IO_UNION);
 
-	instance->max_num_sge = fusion->max_sge_in_main_msg +
-		fusion->max_sge_in_chain - 2;
+	instance->max_num_sge = rounddown_pow_of_two(
+		fusion->max_sge_in_main_msg + fusion->max_sge_in_chain - 2);
 
 	/* Used for pass thru MFI frame (DCMD) */
 	fusion->chain_offset_mfi_pthru =
@@ -1016,17 +1065,75 @@
 		goto fail_ioc_init;
 
 	megasas_display_intel_branding(instance);
+	if (megasas_get_ctrl_info(instance, instance->ctrl_info)) {
+		dev_err(&instance->pdev->dev,
+			"Could not get controller info. Fail from %s %d\n",
+			__func__, __LINE__);
+		goto fail_ioc_init;
+	}
+
+	instance->supportmax256vd =
+		instance->ctrl_info->adapterOperations3.supportMaxExtLDs;
+	/* Below is additional check to address future FW enhancement */
+	if (instance->ctrl_info->max_lds > 64)
+		instance->supportmax256vd = 1;
+	instance->drv_supported_vd_count = MEGASAS_MAX_LD_CHANNELS
+					* MEGASAS_MAX_DEV_PER_CHANNEL;
+	instance->drv_supported_pd_count = MEGASAS_MAX_PD_CHANNELS
+					* MEGASAS_MAX_DEV_PER_CHANNEL;
+	if (instance->supportmax256vd) {
+		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES_EXT;
+		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+	} else {
+		instance->fw_supported_vd_count = MAX_LOGICAL_DRIVES;
+		instance->fw_supported_pd_count = MAX_PHYSICAL_DEVICES;
+	}
+	dev_info(&instance->pdev->dev, "Firmware supports %d VDs %d PDs\n"
+		"Driver supports %d VDs %d PDs\n",
+		instance->fw_supported_vd_count,
+		instance->fw_supported_pd_count,
+		instance->drv_supported_vd_count,
+		instance->drv_supported_pd_count);
 
 	instance->flag_ieee = 1;
-
-	fusion->map_sz =  sizeof(struct MR_FW_RAID_MAP) +
-	  (sizeof(struct MR_LD_SPAN_MAP) *(MAX_LOGICAL_DRIVES - 1));
-
 	fusion->fast_path_io = 0;
 
+	fusion->old_map_sz =
+		sizeof(struct MR_FW_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+		(instance->fw_supported_vd_count - 1));
+	fusion->new_map_sz =
+		sizeof(struct MR_FW_RAID_MAP_EXT);
+	fusion->drv_map_sz =
+		sizeof(struct MR_DRV_RAID_MAP) + (sizeof(struct MR_LD_SPAN_MAP) *
+		(instance->drv_supported_vd_count - 1));
+
+	fusion->drv_map_pages = get_order(fusion->drv_map_sz);
+	for (i = 0; i < 2; i++) {
+		fusion->ld_map[i] = NULL;
+		fusion->ld_drv_map[i] = (void *)__get_free_pages(GFP_KERNEL,
+			fusion->drv_map_pages);
+		if (!fusion->ld_drv_map[i]) {
+			dev_err(&instance->pdev->dev, "Could not allocate "
+				"memory for local map info for %d pages\n",
+				fusion->drv_map_pages);
+			if (i == 1)
+				free_pages((ulong)fusion->ld_drv_map[0],
+					fusion->drv_map_pages);
+			goto fail_ioc_init;
+		}
+	}
+
+	fusion->max_map_sz = max(fusion->old_map_sz, fusion->new_map_sz);
+
+	if (instance->supportmax256vd)
+		fusion->current_map_sz = fusion->new_map_sz;
+	else
+		fusion->current_map_sz = fusion->old_map_sz;
+
+
 	for (i = 0; i < 2; i++) {
 		fusion->ld_map[i] = dma_alloc_coherent(&instance->pdev->dev,
-						       fusion->map_sz,
+						       fusion->max_map_sz,
 						       &fusion->ld_map_phys[i],
 						       GFP_KERNEL);
 		if (!fusion->ld_map[i]) {
@@ -1043,7 +1150,7 @@
 
 fail_map_info:
 	if (i == 1)
-		dma_free_coherent(&instance->pdev->dev, fusion->map_sz,
+		dma_free_coherent(&instance->pdev->dev, fusion->max_map_sz,
 				  fusion->ld_map[0], fusion->ld_map_phys[0]);
 fail_ioc_init:
 	megasas_free_cmds_fusion(instance);
@@ -1065,6 +1172,11 @@
 			u32 req_desc_hi,
 			struct megasas_register_set __iomem *regs)
 {
+#if defined(writeq) && defined(CONFIG_64BIT)
+	u64 req_data = (((u64)req_desc_hi << 32) | (u32)req_desc_lo);
+
+	writeq(le64_to_cpu(req_data), &(regs)->inbound_low_queue_port);
+#else
 	unsigned long flags;
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
@@ -1072,6 +1184,7 @@
 	writel(le32_to_cpu(req_desc_lo), &(regs)->inbound_low_queue_port);
 	writel(le32_to_cpu(req_desc_hi), &(regs)->inbound_high_queue_port);
 	spin_unlock_irqrestore(&instance->hba_lock, flags);
+#endif
 }
 
 /**
@@ -1224,7 +1337,7 @@
 void
 megasas_set_pd_lba(struct MPI2_RAID_SCSI_IO_REQUEST *io_request, u8 cdb_len,
 		   struct IO_REQUEST_INFO *io_info, struct scsi_cmnd *scp,
-		   struct MR_FW_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
+		   struct MR_DRV_RAID_MAP_ALL *local_map_ptr, u32 ref_tag)
 {
 	struct MR_LD_RAID *raid;
 	u32 ld;
@@ -1409,7 +1522,7 @@
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 	struct IO_REQUEST_INFO io_info;
 	struct fusion_context *fusion;
-	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
 	u8 *raidLUN;
 
 	device_id = MEGASAS_DEV_INDEX(instance, scp);
@@ -1486,10 +1599,10 @@
 	if (scp->sc_data_direction == PCI_DMA_FROMDEVICE)
 		io_info.isRead = 1;
 
-	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
 
 	if ((MR_TargetIdToLdGet(device_id, local_map_ptr) >=
-	     MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io)) {
+		instance->fw_supported_vd_count) || (!fusion->fast_path_io)) {
 		io_request->RaidContext.regLockFlags  = 0;
 		fp_possible = 0;
 	} else {
@@ -1529,10 +1642,11 @@
 		if ((fusion->load_balance_info[device_id].loadBalanceFlag) &&
 		    (io_info.isRead)) {
 			io_info.devHandle =
-				get_updated_dev_handle(
+				get_updated_dev_handle(instance,
 					&fusion->load_balance_info[device_id],
 					&io_info);
 			scp->SCp.Status |= MEGASAS_LOAD_BALANCE_FLAG;
+			cmd->pd_r1_lb = io_info.pd_after_lb;
 		} else
 			scp->SCp.Status &= ~MEGASAS_LOAD_BALANCE_FLAG;
 		cmd->request_desc->SCSIIO.DevHandle = io_info.devHandle;
@@ -1579,7 +1693,7 @@
 	u32 device_id;
 	struct MPI2_RAID_SCSI_IO_REQUEST *io_request;
 	u16 pd_index = 0;
-	struct MR_FW_RAID_MAP_ALL *local_map_ptr;
+	struct MR_DRV_RAID_MAP_ALL *local_map_ptr;
 	struct fusion_context *fusion = instance->ctrl_context;
 	u8                          span, physArm;
 	u16                         devHandle;
@@ -1591,7 +1705,7 @@
 	device_id = MEGASAS_DEV_INDEX(instance, scmd);
 	pd_index = (scmd->device->channel * MEGASAS_MAX_DEV_PER_CHANNEL)
 		+scmd->device->id;
-	local_map_ptr = fusion->ld_map[(instance->map_id & 1)];
+	local_map_ptr = fusion->ld_drv_map[(instance->map_id & 1)];
 
 	io_request->DataLength = cpu_to_le32(scsi_bufflen(scmd));
 
@@ -1639,7 +1753,8 @@
 			goto NonFastPath;
 
 		ld = MR_TargetIdToLdGet(device_id, local_map_ptr);
-		if ((ld >= MAX_LOGICAL_DRIVES) || (!fusion->fast_path_io))
+		if ((ld >= instance->fw_supported_vd_count) ||
+			(!fusion->fast_path_io))
 			goto NonFastPath;
 
 		raid = MR_LdRaidGet(ld, local_map_ptr);
@@ -1864,10 +1979,11 @@
 	struct megasas_cmd *cmd_mfi;
 	struct megasas_cmd_fusion *cmd_fusion;
 	u16 smid, num_completed;
-	u8 reply_descript_type, arm;
+	u8 reply_descript_type;
 	u32 status, extStatus, device_id;
 	union desc_value d_val;
 	struct LD_LOAD_BALANCE_INFO *lbinfo;
+	int threshold_reply_count = 0;
 
 	fusion = instance->ctrl_context;
 
@@ -1914,10 +2030,7 @@
 			lbinfo = &fusion->load_balance_info[device_id];
 			if (cmd_fusion->scmd->SCp.Status &
 			    MEGASAS_LOAD_BALANCE_FLAG) {
-				arm = lbinfo->raid1DevHandle[0] ==
-					cmd_fusion->io_request->DevHandle ? 0 :
-					1;
-				atomic_dec(&lbinfo->scsi_pending_cmds[arm]);
+				atomic_dec(&lbinfo->scsi_pending_cmds[cmd_fusion->pd_r1_lb]);
 				cmd_fusion->scmd->SCp.Status &=
 					~MEGASAS_LOAD_BALANCE_FLAG;
 			}
@@ -1941,10 +2054,19 @@
 			break;
 		case MEGASAS_MPI2_FUNCTION_PASSTHRU_IO_REQUEST: /*MFI command */
 			cmd_mfi = instance->cmd_list[cmd_fusion->sync_cmd_idx];
+
+			if (!cmd_mfi->mpt_pthr_cmd_blocked) {
+				if (megasas_dbg_lvl == 5)
+					dev_info(&instance->pdev->dev,
+						"freeing mfi/mpt pass-through "
+						"from %s %d\n",
+						 __func__, __LINE__);
+				megasas_return_mfi_mpt_pthr(instance, cmd_mfi,
+					cmd_fusion);
+			}
+
 			megasas_complete_cmd(instance, cmd_mfi, DID_OK);
 			cmd_fusion->flags = 0;
-			megasas_return_cmd_fusion(instance, cmd_fusion);
-
 			break;
 		}
 
@@ -1955,6 +2077,7 @@
 
 		desc->Words = ULLONG_MAX;
 		num_completed++;
+		threshold_reply_count++;
 
 		/* Get the next reply descriptor */
 		if (!fusion->last_reply_idx[MSIxIndex])
@@ -1974,6 +2097,25 @@
 
 		if (reply_descript_type == MPI2_RPY_DESCRIPT_FLAGS_UNUSED)
 			break;
+		/*
+		 * Write to the reply post host index register after completing a
+		 * threshold number of replies while more replies are still
+		 * pending in the reply queue
+		 */
+		if (threshold_reply_count >= THRESHOLD_REPLY_COUNT) {
+			if ((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_INVADER) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_FURY))
+				writel(((MSIxIndex & 0x7) << 24) |
+					fusion->last_reply_idx[MSIxIndex],
+					instance->reply_post_host_index_addr[MSIxIndex/8]);
+			else
+				writel((MSIxIndex << 24) |
+					fusion->last_reply_idx[MSIxIndex],
+					instance->reply_post_host_index_addr[0]);
+			threshold_reply_count = 0;
+		}
 	}
 
 	if (!num_completed)
@@ -2028,7 +2170,7 @@
 {
 	struct megasas_irq_context *irq_context = devp;
 	struct megasas_instance *instance = irq_context->instance;
-	u32 mfiStatus, fw_state;
+	u32 mfiStatus, fw_state, dma_state;
 
 	if (instance->mask_interrupts)
 		return IRQ_NONE;
@@ -2050,7 +2192,16 @@
 		/* If we didn't complete any commands, check for FW fault */
 		fw_state = instance->instancet->read_fw_status_reg(
 			instance->reg_set) & MFI_STATE_MASK;
-		if (fw_state == MFI_STATE_FAULT) {
+		dma_state = instance->instancet->read_fw_status_reg
+			(instance->reg_set) & MFI_STATE_DMADONE;
+		if (instance->crash_dump_drv_support &&
+			instance->crash_dump_app_support) {
+			/* Start collecting crash, if DMA bit is done */
+			if ((fw_state == MFI_STATE_FAULT) && dma_state)
+				schedule_work(&instance->crash_init);
+			else if (fw_state == MFI_STATE_FAULT)
+				schedule_work(&instance->work_init);
+		} else if (fw_state == MFI_STATE_FAULT) {
 			printk(KERN_WARNING "megaraid_sas: Iop2SysDoorbellInt"
 			       "for scsi%d\n", instance->host->host_no);
 			schedule_work(&instance->work_init);
@@ -2075,6 +2226,7 @@
 	struct megasas_cmd_fusion *cmd;
 	struct fusion_context *fusion;
 	struct megasas_header *frame_hdr = &mfi_cmd->frame->hdr;
+	u32 opcode;
 
 	cmd = megasas_get_cmd_fusion(instance);
 	if (!cmd)
@@ -2082,9 +2234,20 @@
 
 	/*  Save the smid. To be used for returning the cmd */
 	mfi_cmd->context.smid = cmd->index;
-
 	cmd->sync_cmd_idx = mfi_cmd->index;
 
+	/* Set this only for Blocked commands */
+	opcode = le32_to_cpu(mfi_cmd->frame->dcmd.opcode);
+	if ((opcode == MR_DCMD_LD_MAP_GET_INFO)
+		&& (mfi_cmd->frame->dcmd.mbox.b[1] == 1))
+		mfi_cmd->is_wait_event = 1;
+
+	if (opcode == MR_DCMD_CTRL_EVENT_WAIT)
+		mfi_cmd->is_wait_event = 1;
+
+	if (mfi_cmd->is_wait_event)
+		mfi_cmd->mpt_pthr_cmd_blocked = cmd;
+
 	/*
 	 * For cmds where the flag is set, store the flag and check
 	 * on completion. For cmds with this flag, don't call
@@ -2173,6 +2336,7 @@
 		printk(KERN_ERR "Couldn't issue MFI pass thru cmd\n");
 		return;
 	}
+	atomic_set(&cmd->mfi_mpt_pthr, MFI_MPT_ATTACHED);
 	instance->instancet->fire_cmd(instance, req_desc->u.low,
 				      req_desc->u.high, instance->reg_set);
 }
@@ -2203,6 +2367,49 @@
 }
 
 /**
+ * megasas_alloc_host_crash_buffer -	Allocate host buffers for FW crash dump collection
+ * @instance:				Controller's soft instance
+ * return:			        None; the number of allocated buffers is stored in instance->drv_buf_alloc
+ */
+static void
+megasas_alloc_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+
+	instance->crash_buf_pages = get_order(CRASH_DMA_BUF_SIZE);
+	for (i = 0; i < MAX_CRASH_DUMP_SIZE; i++) {
+		instance->crash_buf[i] = (void	*)__get_free_pages(GFP_KERNEL,
+				instance->crash_buf_pages);
+		if (!instance->crash_buf[i]) {
+			dev_info(&instance->pdev->dev, "Firmware crash dump "
+				"memory allocation failed at index %d\n", i);
+			break;
+		}
+	}
+	instance->drv_buf_alloc = i;
+}
+
+/**
+ * megasas_free_host_crash_buffer -	Free host buffers allocated for FW crash dump collection
+ * @instance:				Controller's soft instance
+ */
+void
+megasas_free_host_crash_buffer(struct megasas_instance *instance)
+{
+	unsigned int i;
+	for (i = 0; i < instance->drv_buf_alloc; i++) {
+		if (instance->crash_buf[i])
+			free_pages((ulong)instance->crash_buf[i],
+					instance->crash_buf_pages);
+	}
+	instance->drv_buf_index = 0;
+	instance->drv_buf_alloc = 0;
+	instance->fw_crash_state = UNAVAILABLE;
+	instance->fw_crash_buffer_size = 0;
+}
+
+/**
  * megasas_adp_reset_fusion -	For controller reset
  * @regs:				MFI register set
  */
@@ -2345,6 +2552,7 @@
 	struct megasas_cmd *cmd_mfi;
 	union MEGASAS_REQUEST_DESCRIPTOR_UNION *req_desc;
 	u32 host_diag, abs_state, status_reg, reset_adapter;
+	u32 io_timeout_in_crash_mode = 0;
 
 	instance = (struct megasas_instance *)shost->hostdata;
 	fusion = instance->ctrl_context;
@@ -2355,8 +2563,45 @@
 		printk(KERN_WARNING "megaraid_sas: Hardware critical error, "
 		       "returning FAILED for scsi%d.\n",
 			instance->host->host_no);
+		mutex_unlock(&instance->reset_mutex);
 		return FAILED;
 	}
+	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+	abs_state = status_reg & MFI_STATE_MASK;
+
+	/* IO timeout detected, forcibly put FW in FAULT state */
+	if (abs_state != MFI_STATE_FAULT && instance->crash_dump_buf &&
+		instance->crash_dump_app_support && iotimeout) {
+		dev_info(&instance->pdev->dev, "IO timeout detected, "
+			"forcing Firmware into FAULT state\n");
+		instance->adprecovery = MEGASAS_ADPRESET_SM_INFAULT;
+		status_reg = readl(&instance->reg_set->doorbell);
+		writel(status_reg | MFI_STATE_FORCE_OCR,
+			&instance->reg_set->doorbell);
+		readl(&instance->reg_set->doorbell);
+		mutex_unlock(&instance->reset_mutex);
+		do {
+			ssleep(3);
+			io_timeout_in_crash_mode++;
+			dev_dbg(&instance->pdev->dev, "waiting for [%d] "
+				"seconds for crash dump collection and OCR "
+				"to be done\n", (io_timeout_in_crash_mode * 3));
+		} while ((instance->adprecovery != MEGASAS_HBA_OPERATIONAL) &&
+			(io_timeout_in_crash_mode < 80));
+
+		if (instance->adprecovery == MEGASAS_HBA_OPERATIONAL) {
+			dev_info(&instance->pdev->dev, "OCR done for IO "
+				"timeout case\n");
+			retval = SUCCESS;
+		} else {
+			dev_info(&instance->pdev->dev, "Controller is not "
+				"operational after 240 seconds wait for IO "
+				"timeout case in FW crash dump mode; doing "
+				"OCR/kill adapter\n");
+			retval = megasas_reset_fusion(shost, 0);
+		}
+		return retval;
+	}
 
 	if (instance->requestorId && !instance->skip_heartbeat_timer_del)
 		del_timer_sync(&instance->sriov_heartbeat_timer);
@@ -2563,10 +2808,7 @@
 					cmd_list[cmd_fusion->sync_cmd_idx];
 					if (cmd_mfi->frame->dcmd.opcode ==
 					    cpu_to_le32(MR_DCMD_LD_MAP_GET_INFO)) {
-						megasas_return_cmd(instance,
-								   cmd_mfi);
-						megasas_return_cmd_fusion(
-							instance, cmd_fusion);
+						megasas_return_mfi_mpt_pthr(instance, cmd_mfi, cmd_fusion);
 					} else  {
 						req_desc =
 						megasas_get_request_descriptor(
@@ -2603,7 +2845,7 @@
 			/* Reset load balance info */
 			memset(fusion->load_balance_info, 0,
 			       sizeof(struct LD_LOAD_BALANCE_INFO)
-			       *MAX_LOGICAL_DRIVES);
+			       *MAX_LOGICAL_DRIVES_EXT);
 
 			if (!megasas_get_map_info(instance))
 				megasas_sync_map_info(instance);
@@ -2623,6 +2865,15 @@
 			printk(KERN_WARNING "megaraid_sas: Reset "
 			       "successful for scsi%d.\n",
 				instance->host->host_no);
+
+			if (instance->crash_dump_drv_support) {
+				if (instance->crash_dump_app_support)
+					megasas_set_crash_dump_params(instance,
+						MR_CRASH_BUF_TURN_ON);
+				else
+					megasas_set_crash_dump_params(instance,
+						MR_CRASH_BUF_TURN_OFF);
+			}
 			retval = SUCCESS;
 			goto out;
 		}
@@ -2651,6 +2902,74 @@
 	return retval;
 }
 
+/* Fusion Crash dump collection work queue */
+void  megasas_fusion_crash_dump_wq(struct work_struct *work)
+{
+	struct megasas_instance *instance =
+		container_of(work, struct megasas_instance, crash_init);
+	u32 status_reg;
+	u8 partial_copy = 0;
+
+
+	status_reg = instance->instancet->read_fw_status_reg(instance->reg_set);
+
+	/*
+	 * Allocate host crash buffers to copy data from 1 MB DMA crash buffer
+	 * to host crash buffers
+	 */
+	if (instance->drv_buf_index == 0) {
+		/* Buffer is already allocated for old Crash dump.
+		 * Do OCR and do not wait for crash dump collection
+		 */
+		if (instance->drv_buf_alloc) {
+			dev_info(&instance->pdev->dev, "earlier crash dump has "
+				"not yet been copied by the application, ignoring "
+				"this crash dump and initiating OCR\n");
+			status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+			writel(status_reg,
+				&instance->reg_set->outbound_scratch_pad);
+			readl(&instance->reg_set->outbound_scratch_pad);
+			return;
+		}
+		megasas_alloc_host_crash_buffer(instance);
+		dev_info(&instance->pdev->dev, "Number of host crash buffers "
+			"allocated: %d\n", instance->drv_buf_alloc);
+	}
+
+	/*
+	 * If the driver has already allocated the maximum number of
+	 * host buffers and FW has more crash dump data, the remaining
+	 * data is ignored.
+	 */
+	if (instance->drv_buf_index >= (instance->drv_buf_alloc)) {
+		dev_info(&instance->pdev->dev, "Driver is done copying "
+			"the buffer: %d\n", instance->drv_buf_alloc);
+		status_reg |= MFI_STATE_CRASH_DUMP_DONE;
+		partial_copy = 1;
+	} else {
+		memcpy(instance->crash_buf[instance->drv_buf_index],
+			instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
+		instance->drv_buf_index++;
+		status_reg &= ~MFI_STATE_DMADONE;
+	}
+
+	if (status_reg & MFI_STATE_CRASH_DUMP_DONE) {
+		dev_info(&instance->pdev->dev, "Crash Dump is available, number "
+			"of copied buffers: %d\n", instance->drv_buf_index);
+		instance->fw_crash_buffer_size =  instance->drv_buf_index;
+		instance->fw_crash_state = AVAILABLE;
+		instance->drv_buf_index = 0;
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+		readl(&instance->reg_set->outbound_scratch_pad);
+		if (!partial_copy)
+			megasas_reset_fusion(instance->host, 0);
+	} else {
+		writel(status_reg, &instance->reg_set->outbound_scratch_pad);
+		readl(&instance->reg_set->outbound_scratch_pad);
+	}
+}
+
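Each doorbell interrupt that arrives with MFI_STATE_DMADONE set schedules one pass of megasas_fusion_crash_dump_wq(), and each pass drains one CRASH_DMA_BUF_SIZE chunk from the DMA buffer into the next host page block; clearing MFI_STATE_DMADONE asks FW for the next chunk. The per-pass contract, condensed from the function above (control flow simplified):

	/* one simplified pass of the crash-dump copy loop */
	if (instance->drv_buf_index < instance->drv_buf_alloc) {
		memcpy(instance->crash_buf[instance->drv_buf_index++],
		       instance->crash_dump_buf, CRASH_DMA_BUF_SIZE);
		status_reg &= ~MFI_STATE_DMADONE;	/* request next chunk */
	} else {
		status_reg |= MFI_STATE_CRASH_DUMP_DONE; /* out of buffers */
	}
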
+
 /* Fusion OCR work queue */
 void megasas_fusion_ocr_wq(struct work_struct *work)
 {
diff --git a/drivers/scsi/megaraid/megaraid_sas_fusion.h b/drivers/scsi/megaraid/megaraid_sas_fusion.h
index e76af54..0d183d5 100644
--- a/drivers/scsi/megaraid/megaraid_sas_fusion.h
+++ b/drivers/scsi/megaraid/megaraid_sas_fusion.h
@@ -86,6 +86,7 @@
 
 #define MEGASAS_FP_CMD_LEN	16
 #define MEGASAS_FUSION_IN_RESET 0
+#define THRESHOLD_REPLY_COUNT 50
 
 /*
  * Raid Context structure which describes MegaRAID specific IO Parameters
@@ -478,10 +479,13 @@
 #define MAX_ROW_SIZE 32
 #define MAX_RAIDMAP_ROW_SIZE (MAX_ROW_SIZE)
 #define MAX_LOGICAL_DRIVES 64
+#define MAX_LOGICAL_DRIVES_EXT 256
 #define MAX_RAIDMAP_LOGICAL_DRIVES (MAX_LOGICAL_DRIVES)
 #define MAX_RAIDMAP_VIEWS (MAX_LOGICAL_DRIVES)
 #define MAX_ARRAYS 128
 #define MAX_RAIDMAP_ARRAYS (MAX_ARRAYS)
+#define MAX_ARRAYS_EXT	256
+#define MAX_API_ARRAYS_EXT (MAX_ARRAYS_EXT)
 #define MAX_PHYSICAL_DEVICES 256
 #define MAX_RAIDMAP_PHYSICAL_DEVICES (MAX_PHYSICAL_DEVICES)
 #define MR_DCMD_LD_MAP_GET_INFO             0x0300e101
@@ -601,7 +605,6 @@
 			u32         maxArrays;
 		} validationInfo;
 		u32             version[5];
-		u32             reserved1[5];
 	};
 
 	u32                 ldCount;
@@ -627,6 +630,8 @@
 	u8 start_span;
 	u8 reserved;
 	u64 start_row;
+	u8  span_arm;	/* span[7:5], arm[4:0] */
+	u8  pd_after_lb;
 };
 
 struct MR_LD_TARGET_SYNC {
@@ -678,14 +683,14 @@
 	u32 sync_cmd_idx;
 	u32 index;
 	u8 flags;
+	u8 pd_r1_lb;
 };
 
 struct LD_LOAD_BALANCE_INFO {
 	u8	loadBalanceFlag;
 	u8	reserved1;
-	u16     raid1DevHandle[2];
-	atomic_t     scsi_pending_cmds[2];
-	u64     last_accessed_block[2];
+	atomic_t     scsi_pending_cmds[MAX_PHYSICAL_DEVICES];
+	u64     last_accessed_block[MAX_PHYSICAL_DEVICES];
 };
 
 /* SPAN_SET is info calculated from span info from Raid map per LD */
@@ -713,11 +718,86 @@
 	struct MR_LD_SPAN_MAP ldSpanMap[MAX_LOGICAL_DRIVES - 1];
 } __attribute__ ((packed));
 
+struct MR_DRV_RAID_MAP {
+	/* total size of this structure, including this field.
+	 * This field will be manipulated by the driver for the ext raid map;
+	 * otherwise the value is picked from the firmware raid map.
+	 */
+	u32                 totalSize;
+
+	union {
+	struct {
+		u32         maxLd;
+		u32         maxSpanDepth;
+		u32         maxRowSize;
+		u32         maxPdCount;
+		u32         maxArrays;
+	} validationInfo;
+	u32             version[5];
+	};
+
+	/* timeout value used by driver in FP IOs*/
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+
+	u16                 ldCount;
+	u16                 arCount;
+	u16                 spanCount;
+	u16                 reserve3;
+
+	struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+	u8                  ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+	struct MR_ARRAY_INFO       arMapInfo[MAX_API_ARRAYS_EXT];
+	struct MR_LD_SPAN_MAP      ldSpanMap[1];
+
+};
+
+/* Driver raid map size is the same as the ext raid map.
+ * MR_DRV_RAID_MAP_ALL is created to stay in sync with the old raid
+ * map layout and is mainly for code re-use.
+ */
+struct MR_DRV_RAID_MAP_ALL {
+
+	struct MR_DRV_RAID_MAP raidMap;
+	struct MR_LD_SPAN_MAP      ldSpanMap[MAX_LOGICAL_DRIVES_EXT - 1];
+} __packed;
+
+
+
+struct MR_FW_RAID_MAP_EXT {
+	/* Not used in new map */
+	u32                 reserved;
+
+	union {
+	struct {
+		u32         maxLd;
+		u32         maxSpanDepth;
+		u32         maxRowSize;
+		u32         maxPdCount;
+		u32         maxArrays;
+	} validationInfo;
+	u32             version[5];
+	};
+
+	u8                  fpPdIoTimeoutSec;
+	u8                  reserved2[7];
+
+	u16                 ldCount;
+	u16                 arCount;
+	u16                 spanCount;
+	u16                 reserve3;
+
+	struct MR_DEV_HANDLE_INFO  devHndlInfo[MAX_RAIDMAP_PHYSICAL_DEVICES];
+	u8                  ldTgtIdToLd[MAX_LOGICAL_DRIVES_EXT];
+	struct MR_ARRAY_INFO       arMapInfo[MAX_API_ARRAYS_EXT];
+	struct MR_LD_SPAN_MAP      ldSpanMap[MAX_LOGICAL_DRIVES_EXT];
+};
+
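The three layouts above give three candidate sizes, which the fusion init code combines roughly as sketched below; a sketch only, with MAX_LOGICAL_DRIVES standing in for instance->fw_supported_vd_count (64 or 256 depending on supportmax256vd):

	/* Sketch: map sizing as done during fusion init */
	u32 old_map_sz = sizeof(struct MR_FW_RAID_MAP) +
		sizeof(struct MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES - 1);
	u32 new_map_sz = sizeof(struct MR_FW_RAID_MAP_EXT);
	u32 drv_map_sz = sizeof(struct MR_DRV_RAID_MAP) +
		sizeof(struct MR_LD_SPAN_MAP) * (MAX_LOGICAL_DRIVES_EXT - 1);
	u32 max_map_sz = max(old_map_sz, new_map_sz);	/* DMA alloc size */
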
 struct fusion_context {
 	struct megasas_cmd_fusion **cmd_list;
 	struct list_head cmd_pool;
 
-	spinlock_t cmd_pool_lock;
+	spinlock_t mpt_pool_lock;
 
 	dma_addr_t req_frames_desc_phys;
 	u8 *req_frames_desc;
@@ -749,10 +829,18 @@
 	struct MR_FW_RAID_MAP_ALL *ld_map[2];
 	dma_addr_t ld_map_phys[2];
 
-	u32 map_sz;
+	/*Non dma-able memory. Driver local copy.*/
+	struct MR_DRV_RAID_MAP_ALL *ld_drv_map[2];
+
+	u32 max_map_sz;
+	u32 current_map_sz;
+	u32 old_map_sz;
+	u32 new_map_sz;
+	u32 drv_map_sz;
+	u32 drv_map_pages;
 	u8 fast_path_io;
-	struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES];
-	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES];
+	struct LD_LOAD_BALANCE_INFO load_balance_info[MAX_LOGICAL_DRIVES_EXT];
+	LD_SPAN_INFO log_to_span[MAX_LOGICAL_DRIVES_EXT];
 };
 
 union desc_value {
@@ -763,4 +851,5 @@
 	} u;
 };
 
+
 #endif /* _MEGARAID_SAS_FUSION_H_ */
diff --git a/drivers/scsi/mpt2sas/Kconfig b/drivers/scsi/mpt2sas/Kconfig
index 39f08dd..657b45c 100644
--- a/drivers/scsi/mpt2sas/Kconfig
+++ b/drivers/scsi/mpt2sas/Kconfig
@@ -2,7 +2,7 @@
 # Kernel configuration file for the MPT2SAS
 #
 # This code is based on drivers/scsi/mpt2sas/Kconfig
-# Copyright (C) 2007-2012  LSI Corporation
+# Copyright (C) 2007-2014  LSI Corporation
 #  (mailto:DL-MPTFusionLinux@lsi.com)
 
 # This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2.h b/drivers/scsi/mpt2sas/mpi/mpi2.h
index 7b14a01..088eefa 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                  scatter/gather formats.
  *  Creation Date:  June 21, 2006
  *
- *  mpi2.h Version:  02.00.28
+ *  mpi2.h Version:  02.00.32
  *
  *  Version History
  *  ---------------
@@ -78,6 +78,11 @@
  *  07-10-12  02.00.26  Bumped MPI2_HEADER_VERSION_UNIT.
  *  07-26-12  02.00.27  Bumped MPI2_HEADER_VERSION_UNIT.
  *  11-27-12  02.00.28  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  12-20-12  02.00.29  Bumped MPI2_HEADER_VERSION_UNIT.
+ *			Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ *  04-09-13  02.00.30  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  04-17-13  02.00.31  Bumped MPI2_HEADER_VERSION_UNIT.
+ *  08-19-13  02.00.32  Bumped MPI2_HEADER_VERSION_UNIT.
  *  --------------------------------------------------------------------------
  */
 
@@ -103,7 +108,7 @@
 #define MPI2_VERSION_02_00                  (0x0200)
 
 /* versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x1C)
+#define MPI2_HEADER_VERSION_UNIT            (0x20)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
@@ -263,6 +268,7 @@
 #define MPI2_REPLY_POST_HOST_INDEX_MASK         (0x00FFFFFF)
 #define MPI2_RPHI_MSIX_INDEX_MASK               (0xFF000000)
 #define MPI2_RPHI_MSIX_INDEX_SHIFT              (24)
+#define MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET  (0x0000030C) /* MPI v2.5 only */
 
 /*
  * Defines for the HCBSize and address
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
index 88cb7f8..510ef0d 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_cnfg.h
  *          Title:  MPI Configuration messages and pages
  *  Creation Date:  November 10, 2006
  *
- *    mpi2_cnfg.h Version:  02.00.23
+ *    mpi2_cnfg.h Version:  02.00.26
  *
  *  Version History
  *  ---------------
@@ -150,7 +150,13 @@
  *                      Added UEFIVersion field to BIOS Page 1 and defined new
  *                      BiosOptions bits.
  *  11-27-12  02.00.23  Added MPI2_MANPAGE7_FLAG_EVENTREPLAY_SLOT_ORDER.
- *			 Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ *			Added MPI2_BIOSPAGE1_OPTIONS_MASK_OEM_ID.
+ *  12-20-12  02.00.24  Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
+ *			obsolete for MPI v2.5 and later.
+ *			Added some defines for 12G SAS speeds.
+ *  04-09-13  02.00.25  Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ *			Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ *			match the specification.
  *  --------------------------------------------------------------------------
  */
 
@@ -773,6 +779,7 @@
 #define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
 
 /* IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
 #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
 #define MPI2_IOUNITPAGE1_MASK_SATA_WRITE_CACHE          (0x00000600)
 #define MPI2_IOUNITPAGE1_SATA_WRITE_CACHE_SHIFT         (9)
@@ -844,7 +851,7 @@
 #define MPI2_IOUNITPAGE5_PAGEVERSION                    (0x00)
 
 /* defines for IO Unit Page 5 DmaEngineCapabilities field */
-#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS      (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS      (0xFFFF0000)
 #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS     (16)
 
 #define MPI2_IOUNITPAGE5_DMA_CAP_EEDP                   (0x0008)
@@ -885,13 +892,17 @@
     U16                     IOCTemperature;                         /* 0x10 */
     U8                      IOCTemperatureUnits;                    /* 0x12 */
     U8                      IOCSpeed;                               /* 0x13 */
-	U16                     BoardTemperature;              /* 0x14 */
-	U8                      BoardTemperatureUnits;         /* 0x16 */
-	U8                      Reserved3;                     /* 0x17 */
+	U16                     BoardTemperature;		    /* 0x14 */
+	U8                      BoardTemperatureUnits;		    /* 0x16 */
+	U8                      Reserved3;			    /* 0x17 */
+	U32                     Reserved4;                          /* 0x18 */
+	U32                     Reserved5;                          /* 0x1C */
+	U32                     Reserved6;                          /* 0x20 */
+	U32                     Reserved7;                          /* 0x24 */
 } MPI2_CONFIG_PAGE_IO_UNIT_7, MPI2_POINTER PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
   Mpi2IOUnitPage7_t, MPI2_POINTER pMpi2IOUnitPage7_t;
 
-#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x02)
+#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x04)
 
 /* defines for IO Unit Page 7 PCIeWidth field */
 #define MPI2_IOUNITPAGE7_PCIE_WIDTH_X1              (0x01)
@@ -1801,6 +1812,7 @@
 #define MPI2_SAS_PRATE_MAX_RATE_1_5                     (0x80)
 #define MPI2_SAS_PRATE_MAX_RATE_3_0                     (0x90)
 #define MPI2_SAS_PRATE_MAX_RATE_6_0                     (0xA0)
+#define MPI25_SAS_PRATE_MAX_RATE_12_0                   (0xB0)
 #define MPI2_SAS_PRATE_MIN_RATE_MASK                    (0x0F)
 #define MPI2_SAS_PRATE_MIN_RATE_NOT_PROGRAMMABLE        (0x00)
 #define MPI2_SAS_PRATE_MIN_RATE_1_5                     (0x08)
@@ -1813,6 +1825,7 @@
 #define MPI2_SAS_HWRATE_MAX_RATE_1_5                    (0x80)
 #define MPI2_SAS_HWRATE_MAX_RATE_3_0                    (0x90)
 #define MPI2_SAS_HWRATE_MAX_RATE_6_0                    (0xA0)
+#define MPI25_SAS_HWRATE_MAX_RATE_12_0                  (0xB0)
 #define MPI2_SAS_HWRATE_MIN_RATE_MASK                   (0x0F)
 #define MPI2_SAS_HWRATE_MIN_RATE_1_5                    (0x08)
 #define MPI2_SAS_HWRATE_MIN_RATE_3_0                    (0x09)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_init.h b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
index 9d284da..eea1a16 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_init.h
  *          Title:  MPI SCSI initiator mode messages and structures
  *  Creation Date:  June 23, 2006
  *
- *    mpi2_init.h Version:  02.00.14
+ *    mpi2_init.h Version:  02.00.15
  *
  *  Version History
  *  ---------------
@@ -37,6 +37,8 @@
  *  02-06-12  02.00.13  Added alternate defines for Task Priority / Command
  *                      Priority to match SAM-4.
  *  07-10-12  02.00.14  Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ *  04-09-13  02.00.15  Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ *			replacing the Reserved4 field.
  *  --------------------------------------------------------------------------
  */
 
@@ -234,7 +236,7 @@
     U32                     SenseCount;                     /* 0x18 */
     U32                     ResponseInfo;                   /* 0x1C */
     U16                     TaskTag;                        /* 0x20 */
-    U16                     Reserved4;                      /* 0x22 */
+	U16                     SCSIStatusQualifier;	     /* 0x22 */
     U32                     BidirectionalTransferCount;     /* 0x24 */
     U32                     Reserved5;                      /* 0x28 */
     U32                     Reserved6;                      /* 0x2C */
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
index d159c5f..2c3b0f2 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_ioc.h
  *          Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  *  Creation Date:  October 11, 2006
  *
- *  mpi2_ioc.h Version:  02.00.22
+ *  mpi2_ioc.h Version:  02.00.23
  *
  *  Version History
  *  ---------------
@@ -121,6 +121,11 @@
  *  07-26-12  02.00.22  Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
  *                      Added ElapsedSeconds field to
  *                      MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ *  08-19-13  02.00.23  For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ *                      and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ *                      Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ *                      Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ *                      Added Encrypted Hash Extended Image.
  *  --------------------------------------------------------------------------
  */
 
@@ -177,6 +182,9 @@
 #define MPI2_WHOINIT_HOST_DRIVER                (0x04)
 #define MPI2_WHOINIT_MANUFACTURER               (0x05)
 
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE    (0x01)
+
 /* MsgVersion */
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK      (0xFF00)
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT     (8)
@@ -189,9 +197,17 @@
 #define MPI2_IOCINIT_HDRVERSION_DEV_MASK        (0x00FF)
 #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT       (0)
 
-/* minimum depth for the Reply Descriptor Post Queue */
+/* minimum depth for a Reply Descriptor Post Queue */
 #define MPI2_RDPQ_DEPTH_MIN                     (16)
 
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+	U64                 RDPQBaseAddress;                    /* 0x00 */
+	U32                 Reserved1;                          /* 0x08 */
+	U32                 Reserved2;                          /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+MPI2_POINTER PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, MPI2_POINTER pMpi2IOCInitRDPQArrayEntry;
 
 /* IOCInit Reply message */
 typedef struct _MPI2_IOC_INIT_REPLY
@@ -307,6 +323,7 @@
 /* ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /* IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE     (0x00040000)
 #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY   (0x00010000)
 #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX            (0x00008000)
 #define MPI2_IOCFACTS_CAPABILITY_RAID_ACCELERATOR       (0x00004000)
@@ -1153,6 +1170,7 @@
 #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID             (0x09)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE             (0x0A)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK    (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY           (0x0C)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
 
 /* FWDownload TransactionContext Element */
@@ -1379,14 +1397,15 @@
 #define MPI2_EXT_IMAGE_HEADER_SIZE              (0x40)
 
 /* defines for the ImageType field */
-#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED				(0x00)
-#define MPI2_EXT_IMAGE_TYPE_FW						(0x01)
-#define MPI2_EXT_IMAGE_TYPE_NVDATA					(0x03)
-#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER				(0x04)
-#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION			(0x05)
-#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT			(0x06)
-#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES		(0x07)
-#define MPI2_EXT_IMAGE_TYPE_MEGARAID				(0x08)
+#define MPI2_EXT_IMAGE_TYPE_UNSPECIFIED             (0x00)
+#define MPI2_EXT_IMAGE_TYPE_FW                      (0x01)
+#define MPI2_EXT_IMAGE_TYPE_NVDATA                  (0x03)
+#define MPI2_EXT_IMAGE_TYPE_BOOTLOADER              (0x04)
+#define MPI2_EXT_IMAGE_TYPE_INITIALIZATION          (0x05)
+#define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT            (0x06)
+#define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES       (0x07)
+#define MPI2_EXT_IMAGE_TYPE_MEGARAID                (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH          (0x09)
 #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
 #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
 #define MPI2_EXT_IMAGE_TYPE_MAX                   \
@@ -1555,6 +1574,39 @@
 #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET      (0x14)
 
 
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+	U8                  HashImageType;          /* 0x00 */
+	U8                  HashAlgorithm;          /* 0x01 */
+	U8                  EncryptionAlgorithm;    /* 0x02 */
+	U8                  Reserved1;              /* 0x03 */
+	U32                 Reserved2;              /* 0x04 */
+	U32                 EncryptedHash[1];       /* 0x08 */
+} MPI25_ENCRYPTED_HASH_ENTRY, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, MPI2_POINTER pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED            (0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE          (0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED             (0x00)
+#define MPI25_HASH_ALGORITHM_SHA256             (0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED             (0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256             (0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+	U8                              ImageVersion;           /* 0x00 */
+	U8                              NumHash;                /* 0x01 */
+	U16                             Reserved1;              /* 0x02 */
+	U32                             Reserved2;              /* 0x04 */
+	MPI25_ENCRYPTED_HASH_ENTRY      EncryptedHashEntry[1];  /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, MPI2_POINTER PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, MPI2_POINTER pMpi25EncryptedHashData_t;
+
 /****************************************************************************
 *  PowerManagementControl message
 ****************************************************************************/
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
index 0d202a2..7efa58f 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_raid.h
  *          Title:  MPI Integrated RAID messages and structures
  *  Creation Date:  April 26, 2007
  *
- *    mpi2_raid.h Version:  02.00.09
+ *    mpi2_raid.h Version:  02.00.10
  *
  *  Version History
  *  ---------------
@@ -29,6 +29,7 @@
  *  02-06-12  02.00.08  Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
  *  07-26-12  02.00.09  Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
  *                      Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ *  04-17-13  02.00.10  Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
  *  --------------------------------------------------------------------------
  */
 
@@ -45,6 +46,9 @@
 *  RAID Action messages
 ****************************************************************************/
 
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI            (0x80000000)
+
 /* ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
 #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0            (0x00000000)
 #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0            (0x00000001)
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
index 50b39cc..45b6fa1 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_sas.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_sas.h
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
index 11b2ac4..9be03ed 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_tool.h
  *          Title:  MPI diagnostic tool structures and definitions
  *  Creation Date:  March 26, 2007
  *
- *    mpi2_tool.h Version:  02.00.10
+ *    mpi2_tool.h Version:  02.00.11
  *
  *  Version History
  *  ---------------
@@ -29,6 +29,7 @@
  *                      MPI2_TOOLBOX_ISTWI_READ_WRITE_REQUEST.
  *  07-26-12  02.00.10  Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
  *			it uses MPI Chain SGE as well as MPI Simple SGE.
+ *  08-19-13  02.00.11  Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
  *  --------------------------------------------------------------------------
  */
 
@@ -48,6 +49,7 @@
 #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL          (0x03)
 #define MPI2_TOOLBOX_BEACON_TOOL                    (0x05)
 #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL            (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL              (0x07)
 
 
 /****************************************************************************
@@ -321,6 +323,44 @@
   MPI2_POINTER pMpi2ToolboxDiagnosticCliReply_t;
 
 
+/****************************************************************************
+*  Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+	U8                      Tool;               /* 0x00 */
+	U8                      Reserved1;          /* 0x01 */
+	U8                      ChainOffset;        /* 0x02 */
+	U8                      Function;           /* 0x03 */
+	U16                     Reserved2;          /* 0x04 */
+	U8                      Reserved3;          /* 0x06 */
+	U8                      MsgFlags;           /* 0x07 */
+	U8                      VP_ID;              /* 0x08 */
+	U8                      VF_ID;              /* 0x09 */
+	U16                     Reserved4;          /* 0x0A */
+	U8                      Console;            /* 0x0C */
+	U8                      Flags;              /* 0x0D */
+	U16                     Reserved6;          /* 0x0E */
+	U8                      TextToDisplay[4];   /* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+MPI2_POINTER PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+MPI2_POINTER pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK          (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT       (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART          (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET      (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK        (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP     (0x01)
+
+
 /*****************************************************************************
 *
 *       Diagnostic Buffer Messages
diff --git a/drivers/scsi/mpt2sas/mpi/mpi2_type.h b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
index 0b128b6..6b0dcdd 100644
--- a/drivers/scsi/mpt2sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt2sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_type.h
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 2f262be..58e4521 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -80,6 +80,10 @@
 module_param(msix_disable, int, 0);
 MODULE_PARM_DESC(msix_disable, " disable msix routed interrupts (default=0)");
 
+static int max_msix_vectors = -1;
+module_param(max_msix_vectors, int, 0);
+MODULE_PARM_DESC(max_msix_vectors, " max msix vectors ");
+
 static int mpt2sas_fwfault_debug;
 MODULE_PARM_DESC(mpt2sas_fwfault_debug, " enable detection of firmware fault "
 	"and halt firmware - (default=0)");
@@ -88,6 +92,12 @@
 module_param(disable_discovery, int, 0);
 MODULE_PARM_DESC(disable_discovery, " disable discovery ");
 
+static int
+_base_get_ioc_facts(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
+static int
+_base_diag_reset(struct MPT2SAS_ADAPTER *ioc, int sleep_flag);
+
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
  *
@@ -1175,17 +1185,22 @@
 _base_config_dma_addressing(struct MPT2SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
 	struct sysinfo s;
-	char *desc = NULL;
+	u64 consistent_dma_mask;
+
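+	/* ioc->dma_mask is zero on first bring-up, so the coherent mask
+	 * starts at 32 bits (keeping early pool allocations below 4 GB)
+	 * and is widened to 64 bits once the pools are allocated.
+	 */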
+	if (ioc->dma_mask)
+		consistent_dma_mask = DMA_BIT_MASK(64);
+	else
+		consistent_dma_mask = DMA_BIT_MASK(32);
 
 	if (sizeof(dma_addr_t) > 4) {
 		const uint64_t required_mask =
 		    dma_get_required_mask(&pdev->dev);
-		if ((required_mask > DMA_BIT_MASK(32)) && !pci_set_dma_mask(pdev,
-		    DMA_BIT_MASK(64)) && !pci_set_consistent_dma_mask(pdev,
-		    DMA_BIT_MASK(64))) {
+		if ((required_mask > DMA_BIT_MASK(32)) &&
+		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
+		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
 			ioc->base_add_sg_single = &_base_add_sg_single_64;
 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-			desc = "64";
+			ioc->dma_mask = 64;
 			goto out;
 		}
 	}
@@ -1194,18 +1209,29 @@
 	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		ioc->base_add_sg_single = &_base_add_sg_single_32;
 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-		desc = "32";
+		ioc->dma_mask = 32;
 	} else
 		return -ENODEV;
 
  out:
 	si_meminfo(&s);
-	printk(MPT2SAS_INFO_FMT "%s BIT PCI BUS DMA ADDRESSING SUPPORTED, "
-	    "total mem (%ld kB)\n", ioc->name, desc, convert_to_kb(s.totalram));
+	printk(MPT2SAS_INFO_FMT
+	    "%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+	    ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
 
 	return 0;
 }
 
+static int
+_base_change_consistent_dma_mask(struct MPT2SAS_ADAPTER *ioc,
+				  struct pci_dev *pdev)
+{
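+	/* Prefer a 64-bit coherent DMA mask; fall back to 32 bits if the
+	 * platform rejects it.
+	 */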
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			return -ENODEV;
+	}
+	return 0;
+}
+
 /**
 * _base_check_enable_msix - checks MSIX capability.
  * @ioc: per adapter object
@@ -1402,6 +1428,20 @@
 	ioc->reply_queue_count = min_t(int, ioc->cpu_count,
 	    ioc->msix_vector_count);
 
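+	/* Without RDPQ array support, cap the default at 8 MSI-X vectors;
+	 * this bounds the single contiguous reply post allocation.
+	 */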
+	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+		max_msix_vectors = 8;
+
+	if (max_msix_vectors > 0) {
+		ioc->reply_queue_count = min_t(int, max_msix_vectors,
+		    ioc->reply_queue_count);
+		ioc->msix_vector_count = ioc->reply_queue_count;
+	} else if (max_msix_vectors == 0)
+		goto try_ioapic;
+
+	printk(MPT2SAS_INFO_FMT
+	"MSI-X vectors supported: %d, no of cores: %d, max_msix_vectors: %d\n",
+	 ioc->name, ioc->msix_vector_count, ioc->cpu_count, max_msix_vectors);
+
 	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
 	    GFP_KERNEL);
 	if (!entries) {
@@ -1414,10 +1454,10 @@
 	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
 		a->entry = i;
 
-	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+	r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
 	if (r) {
-		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT "pci_enable_msix "
-		    "failed (r=%d) !!!\n", ioc->name, r));
+		dfailprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "pci_enable_msix_exact failed (r=%d) !!!\n", ioc->name, r));
 		kfree(entries);
 		goto try_ioapic;
 	}
@@ -1439,6 +1479,7 @@
 /* fallback to io_apic interrupt routing */
  try_ioapic:
 
+	ioc->reply_queue_count = 1;
 	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
 
 	return r;
@@ -1520,6 +1561,16 @@
 	}
 
 	_base_mask_interrupts(ioc);
+
+	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	if (r)
+		goto out_fail;
+
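+	/* Latch the RDPQ mode only once, on first bring-up, so that later
+	 * resets reuse the mode the reply queues were allocated for.
+	 */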
+	if (!ioc->rdpq_array_enable_assigned) {
+		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+		ioc->rdpq_array_enable_assigned = 1;
+	}
+
 	r = _base_enable_msix(ioc);
 	if (r)
 		goto out_fail;
@@ -2317,7 +2368,8 @@
 static void
 _base_release_memory_pools(struct MPT2SAS_ADAPTER *ioc)
 {
-	int i;
+	int i = 0;
+	struct reply_post_struct *rps;
 
 	dexitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -2358,15 +2410,25 @@
 		ioc->reply_free = NULL;
 	}
 
-	if (ioc->reply_post_free) {
-		pci_pool_free(ioc->reply_post_free_dma_pool,
-		    ioc->reply_post_free, ioc->reply_post_free_dma);
+	if (ioc->reply_post) {
+		do {
+			rps = &ioc->reply_post[i];
+			if (rps->reply_post_free) {
+				pci_pool_free(
+				    ioc->reply_post_free_dma_pool,
+				    rps->reply_post_free,
+				    rps->reply_post_free_dma);
+				dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
+				    "reply_post_free_pool(0x%p): free\n",
+				    ioc->name, rps->reply_post_free));
+				rps->reply_post_free = NULL;
+			}
+		} while (ioc->rdpq_array_enable &&
+			   (++i < ioc->reply_queue_count));
+
 		if (ioc->reply_post_free_dma_pool)
 			pci_pool_destroy(ioc->reply_post_free_dma_pool);
-		dexitprintk(ioc, printk(MPT2SAS_INFO_FMT
-		    "reply_post_free_pool(0x%p): free\n", ioc->name,
-		    ioc->reply_post_free));
-		ioc->reply_post_free = NULL;
+		kfree(ioc->reply_post);
 	}
 
 	if (ioc->config_page) {
@@ -2509,6 +2571,65 @@
 	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
 	    ioc->chains_needed_per_io));
 
+	/* reply post queue, 16 byte align */
+	reply_post_free_sz = ioc->reply_post_queue_depth *
+	    sizeof(Mpi2DefaultReplyDescriptor_t);
+
+	sz = reply_post_free_sz;
+	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+		sz *= ioc->reply_queue_count;
+
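+	/* One reply_post_struct per reply queue in RDPQ mode, otherwise a
+	 * single entry backing one contiguous region.
+	 */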
+	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+	    (ioc->reply_queue_count):1,
+	    sizeof(struct reply_post_struct), GFP_KERNEL);
+
+	if (!ioc->reply_post) {
+		printk(MPT2SAS_ERR_FMT "reply_post_free pool: kcalloc failed\n",
+			ioc->name);
+		goto out;
+	}
+	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+	    ioc->pdev, sz, 16, 0);
+	if (!ioc->reply_post_free_dma_pool) {
+		printk(MPT2SAS_ERR_FMT
+		 "reply_post_free pool: pci_pool_create failed\n",
+		 ioc->name);
+		goto out;
+	}
+	i = 0;
+	do {
+		ioc->reply_post[i].reply_post_free =
+		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
+		    GFP_KERNEL,
+		    &ioc->reply_post[i].reply_post_free_dma);
+		if (!ioc->reply_post[i].reply_post_free) {
+			printk(MPT2SAS_ERR_FMT
+			"reply_post_free pool: pci_pool_alloc failed\n",
+			ioc->name);
+			goto out;
+		}
+		memset(ioc->reply_post[i].reply_post_free, 0, sz);
+		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "reply post free pool (0x%p): depth(%d), "
+		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
+		    ioc->reply_post[i].reply_post_free,
+		    ioc->reply_post_queue_depth, 8, sz/1024));
+		dinitprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
+		    (unsigned long long)
+		    ioc->reply_post[i].reply_post_free_dma));
+		total_sz += sz;
+	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
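+	/* The reply post pools above were allocated under the initial
+	 * 32-bit coherent mask; widen it back to 64 bits now that they
+	 * are in place.
+	 */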
+	if (ioc->dma_mask == 64) {
+		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+			printk(MPT2SAS_WARN_FMT
+			    "no suitable consistent DMA mask for %s\n",
+			    ioc->name, pci_name(ioc->pdev));
+			goto out;
+		}
+	}
+
 	ioc->scsiio_depth = ioc->hba_queue_depth -
 	    ioc->hi_priority_depth - ioc->internal_depth;
 
@@ -2720,37 +2841,6 @@
 	    "(0x%llx)\n", ioc->name, (unsigned long long)ioc->reply_free_dma));
 	total_sz += sz;
 
-	/* reply post queue, 16 byte align */
-	reply_post_free_sz = ioc->reply_post_queue_depth *
-	    sizeof(Mpi2DefaultReplyDescriptor_t);
-	if (_base_is_controller_msix_enabled(ioc))
-		sz = reply_post_free_sz * ioc->reply_queue_count;
-	else
-		sz = reply_post_free_sz;
-	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
-	    ioc->pdev, sz, 16, 0);
-	if (!ioc->reply_post_free_dma_pool) {
-		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_create "
-		    "failed\n", ioc->name);
-		goto out;
-	}
-	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
-	    GFP_KERNEL, &ioc->reply_post_free_dma);
-	if (!ioc->reply_post_free) {
-		printk(MPT2SAS_ERR_FMT "reply_post_free pool: pci_pool_alloc "
-		    "failed\n", ioc->name);
-		goto out;
-	}
-	memset(ioc->reply_post_free, 0, sz);
-	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply post free pool"
-	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
-	    sz/1024));
-	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "reply_post_free_dma = "
-	    "(0x%llx)\n", ioc->name, (unsigned long long)
-	    ioc->reply_post_free_dma));
-	total_sz += sz;
-
 	ioc->config_page_sz = 512;
 	ioc->config_page = pci_alloc_consistent(ioc->pdev,
 	    ioc->config_page_sz, &ioc->config_page_dma);
@@ -3374,6 +3464,64 @@
 }
 
 /**
+ * _base_wait_for_iocstate - Wait until the card is in READY or OPERATIONAL
+ * @ioc: per adapter object
+ * @timeout:
+ * @sleep_flag: CAN_SLEEP or NO_SLEEP
+ *
+ * Returns 0 for success, non-zero for failure.
+ */
+static int
+_base_wait_for_iocstate(struct MPT2SAS_ADAPTER *ioc, int timeout,
+	int sleep_flag)
+{
+	u32 ioc_state, doorbell;
+	int rc;
+
+	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
+	    __func__));
+
+	if (ioc->pci_error_recovery)
+		return 0;
+
+	doorbell = mpt2sas_base_get_iocstate(ioc, 0);
+	ioc_state = doorbell & MPI2_IOC_STATE_MASK;
+	dhsprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: ioc_state(0x%08x)\n",
+	    ioc->name, __func__, ioc_state));
+
+	switch (ioc_state) {
+	case MPI2_IOC_STATE_READY:
+	case MPI2_IOC_STATE_OPERATIONAL:
+		return 0;
+	}
+
+	if (doorbell & MPI2_DOORBELL_USED) {
+		dhsprintk(ioc, printk(MPT2SAS_INFO_FMT
+		    "unexpected doorbell active!\n", ioc->name));
+		goto issue_diag_reset;
+	}
+
+	if (ioc_state == MPI2_IOC_STATE_FAULT) {
+		mpt2sas_base_fault_info(ioc, doorbell &
+		    MPI2_DOORBELL_DATA_MASK);
+		goto issue_diag_reset;
+	}
+
+	ioc_state = _base_wait_on_iocstate(ioc, MPI2_IOC_STATE_READY,
+	    timeout, sleep_flag);
+	if (ioc_state) {
+		printk(MPT2SAS_ERR_FMT
+		    "%s: failed going to ready state (ioc_state=0x%x)\n",
+		    ioc->name, __func__, ioc_state);
+		return -EFAULT;
+	}
+
+ issue_diag_reset:
+	rc = _base_diag_reset(ioc, sleep_flag);
+	return rc;
+}
+
+/**
  * _base_get_ioc_facts - obtain ioc facts reply and save in ioc
  * @ioc: per adapter object
  * @sleep_flag: CAN_SLEEP or NO_SLEEP
@@ -3391,6 +3539,13 @@
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
 
+	r = _base_wait_for_iocstate(ioc, 10, sleep_flag);
+	if (r) {
+		printk(MPT2SAS_ERR_FMT "%s: failed getting to correct state\n",
+			ioc->name, __func__);
+		return r;
+	}
+
 	mpi_reply_sz = sizeof(Mpi2IOCFactsReply_t);
 	mpi_request_sz = sizeof(Mpi2IOCFactsRequest_t);
 	memset(&mpi_request, 0, mpi_request_sz);
@@ -3422,6 +3577,9 @@
 	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
 	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
 		ioc->ir_firmware = 1;
+	if ((facts->IOCCapabilities &
+	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+		ioc->rdpq_array_capable = 1;
 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
 	facts->IOCRequestFrameSize =
 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3457,9 +3615,12 @@
 {
 	Mpi2IOCInitRequest_t mpi_request;
 	Mpi2IOCInitReply_t mpi_reply;
-	int r;
+	int i, r = 0;
 	struct timeval current_time;
 	u16 ioc_status;
+	u32 reply_post_free_array_sz = 0;
+	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+	dma_addr_t reply_post_free_array_dma;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -3488,9 +3649,31 @@
 	    cpu_to_le64((u64)ioc->request_dma);
 	mpi_request.ReplyFreeQueueAddress =
 	    cpu_to_le64((u64)ioc->reply_free_dma);
-	mpi_request.ReplyDescriptorPostQueueAddress =
-	    cpu_to_le64((u64)ioc->reply_post_free_dma);
 
+	if (ioc->rdpq_array_enable) {
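+		/* RDPQ mode: hand firmware an array of per-queue base
+		 * addresses instead of a single queue address.
+		 */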
+		reply_post_free_array_sz = ioc->reply_queue_count *
+		    sizeof(Mpi2IOCInitRDPQArrayEntry);
+		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+			reply_post_free_array_sz, &reply_post_free_array_dma);
+		if (!reply_post_free_array) {
+			printk(MPT2SAS_ERR_FMT
+			"reply_post_free_array: pci_alloc_consistent failed\n",
+			ioc->name);
+			r = -ENOMEM;
+			goto out;
+		}
+		memset(reply_post_free_array, 0, reply_post_free_array_sz);
+		for (i = 0; i < ioc->reply_queue_count; i++)
+			reply_post_free_array[i].RDPQBaseAddress =
+			    cpu_to_le64(
+				(u64)ioc->reply_post[i].reply_post_free_dma);
+		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+		mpi_request.ReplyDescriptorPostQueueAddress =
+		    cpu_to_le64((u64)reply_post_free_array_dma);
+	} else {
+		mpi_request.ReplyDescriptorPostQueueAddress =
+		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+	}
 
 	/* This time stamp specifies number of milliseconds
 	 * since epoch ~ midnight January 1, 1970.
@@ -3518,7 +3701,7 @@
 	if (r != 0) {
 		printk(MPT2SAS_ERR_FMT "%s: handshake failed (r=%d)\n",
 		    ioc->name, __func__, r);
-		return r;
+		goto out;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3528,7 +3711,12 @@
 		r = -EIO;
 	}
 
-	return 0;
+out:
+	if (reply_post_free_array)
+		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+				    reply_post_free_array,
+				    reply_post_free_array_dma);
+	return r;
 }
 
 /**
@@ -4061,7 +4249,7 @@
 	u8 hide_flag;
 	struct adapter_reply_queue *reply_q;
 	long reply_post_free;
-	u32 reply_post_free_sz;
+	u32 reply_post_free_sz, index = 0;
 
 	dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "%s\n", ioc->name,
 	    __func__));
@@ -4132,19 +4320,27 @@
 		_base_assign_reply_queues(ioc);
 
 	/* initialize Reply Post Free Queue */
-	reply_post_free = (long)ioc->reply_post_free;
 	reply_post_free_sz = ioc->reply_post_queue_depth *
 	    sizeof(Mpi2DefaultReplyDescriptor_t);
+	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
 		reply_q->reply_post_host_index = 0;
 		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
 		    reply_post_free;
 		for (i = 0; i < ioc->reply_post_queue_depth; i++)
 			reply_q->reply_post_free[i].Words =
-							cpu_to_le64(ULLONG_MAX);
+						     cpu_to_le64(ULLONG_MAX);
 		if (!_base_is_controller_msix_enabled(ioc))
 			goto skip_init_reply_post_free_queue;
-		reply_post_free += reply_post_free_sz;
+		/*
+		 * If RDPQ is enabled, switch to the next allocation.
+		 * Otherwise advance within the contiguous region.
+		 */
+		if (ioc->rdpq_array_enable)
+			reply_post_free = (long)
+			    ioc->reply_post[++index].reply_post_free;
+		else
+			reply_post_free += reply_post_free_sz;
 	}
  skip_init_reply_post_free_queue:
 
@@ -4272,6 +4468,8 @@
 		}
 	}
 
+	ioc->rdpq_array_enable_assigned = 0;
+	ioc->dma_mask = 0;
 	r = mpt2sas_base_map_resources(ioc);
 	if (r)
 		goto out_free_resources;
@@ -4633,6 +4831,16 @@
 		r = -EFAULT;
 		goto out;
 	}
+
+	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	if (r)
+		goto out;
+
+	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+		panic("%s: Issue occurred with flashing controller firmware. "
+		      "Please reboot the system and ensure that the correct"
+		      " firmware version is running\n", ioc->name);
+
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT2_IOC_DONE_RESET);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.h b/drivers/scsi/mpt2sas/mpt2sas_base.h
index 0ac5815..239f169 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.h
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.h
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -69,8 +69,8 @@
 #define MPT2SAS_DRIVER_NAME		"mpt2sas"
 #define MPT2SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT2SAS_DESCRIPTION	"LSI MPT Fusion SAS 2.0 Device Driver"
-#define MPT2SAS_DRIVER_VERSION		"16.100.00.00"
-#define MPT2SAS_MAJOR_VERSION		16
+#define MPT2SAS_DRIVER_VERSION		"18.100.00.00"
+#define MPT2SAS_MAJOR_VERSION		18
 #define MPT2SAS_MINOR_VERSION		100
 #define MPT2SAS_BUILD_VERSION		00
 #define MPT2SAS_RELEASE_VERSION		00
@@ -355,6 +355,7 @@
 * @slot: slot number
  * @phy: phy identifier provided in sas device page 0
  * @responding: used in _scsih_sas_device_mark_responding
+ * @pfa_led_on: flag for PFA LED status
  */
 struct _sas_device {
 	struct list_head list;
@@ -373,6 +374,7 @@
 	u16	slot;
 	u8	phy;
 	u8	responding;
+	u8	pfa_led_on;
 };
 
 /**
@@ -634,6 +636,11 @@
 	u16			MaxPostedCmdBuffers;
 };
 
+struct reply_post_struct {
+	Mpi2ReplyDescriptorsUnion_t	*reply_post_free;
+	dma_addr_t			reply_post_free_dma;
+};
+
 /**
  * enum mutex_type - task management mutex type
 * @TM_MUTEX_OFF: mutex is not required because calling function is acquiring it
@@ -661,6 +668,7 @@
  * @ir_firmware: IR firmware present
  * @bars: bitmask of BAR's that must be configured
  * @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
  * @fault_reset_work_q_name: fw fault work queue
  * @fault_reset_work_q: ""
  * @fault_reset_work: ""
@@ -777,8 +785,11 @@
  * @reply_free_dma_pool:
 * @reply_free_host_index: tail index in pool to insert free replies
  * @reply_post_queue_depth: reply post queue depth
- * @reply_post_free: pool for reply post (64bit descriptor)
- * @reply_post_free_dma:
+ * @reply_post_struct: struct for reply_post_free physical & virt address
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ *				is assigned only once
 * @reply_queue_count: number of reply queues
 * @reply_queue_list: linked list containing the reply queue info
  * @reply_post_host_index: head index in the pool where FW completes IO
@@ -800,6 +811,7 @@
 	u8		ir_firmware;
 	int		bars;
 	u8		mask_interrupts;
+	int		dma_mask;
 
 	/* fw fault handler */
 	char		fault_reset_work_q_name[20];
@@ -970,8 +982,10 @@
 
 	/* reply post queue */
 	u16 		reply_post_queue_depth;
-	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
-	dma_addr_t	reply_post_free_dma;
+	struct reply_post_struct *reply_post;
+	u8		rdpq_array_capable;
+	u8		rdpq_array_enable;
+	u8		rdpq_array_enable_assigned;
 	struct dma_pool *reply_post_free_dma_pool;
 	u8		reply_queue_count;
 	struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt2sas/mpt2sas_config.c b/drivers/scsi/mpt2sas/mpt2sas_config.c
index 0c47425..c72a2ff 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_config.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_config.c
@@ -2,7 +2,7 @@
  * This module provides common API for accessing firmware configuration pages
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_base.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.c b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
index 62df8f9..ca4e563 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.c
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_ctl.h b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
index 8b2ac18..fa0567c 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_ctl.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_ctl.h
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_ctl.h
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_debug.h b/drivers/scsi/mpt2sas/mpt2sas_debug.h
index a9021cbd..cc57ef3 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_debug.h
+++ b/drivers/scsi/mpt2sas/mpt2sas_debug.h
@@ -2,7 +2,7 @@
  * Logging Support for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_debug.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
index dd46101..c80ed04 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_scsih.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c
@@ -2,7 +2,7 @@
  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_scsih.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -55,6 +55,8 @@
 #include <linux/raid_class.h>
 #include <linux/slab.h>
 
+#include <asm/unaligned.h>
+
 #include "mpt2sas_base.h"
 
 MODULE_AUTHOR(MPT2SAS_AUTHOR);
@@ -145,7 +147,7 @@
 };
 
 
-#define MPT2SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT2SAS_TURN_ON_PFA_LED (0xFFFC)
 #define MPT2SAS_PORT_ENABLE_COMPLETE (0xFFFD)
 #define MPT2SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
 /**
@@ -3858,85 +3860,46 @@
 	struct _raid_device *raid_device, Mpi2SCSIIORequest_t *mpi_request,
 	u16 smid)
 {
-	u32 v_lba, p_lba, stripe_off, stripe_unit, column, io_size;
+	sector_t v_lba, p_lba, stripe_off, column, io_size;
 	u32 stripe_sz, stripe_exp;
-	u8 num_pds, *cdb_ptr, i;
-	u8 cdb0 = scmd->cmnd[0];
-	u64 v_llba;
+	u8 num_pds, cmd = scmd->cmnd[0];
 
-	/*
-	 * Try Direct I/O to RAID memeber disks
-	 */
-	if (cdb0 == READ_16 || cdb0 == READ_10 ||
-	    cdb0 == WRITE_16 || cdb0 == WRITE_10) {
-		cdb_ptr = mpi_request->CDB.CDB32;
+	if (cmd != READ_10 && cmd != WRITE_10 &&
+	    cmd != READ_16 && cmd != WRITE_16)
+		return;
 
-		if ((cdb0 < READ_16) || !(cdb_ptr[2] | cdb_ptr[3] | cdb_ptr[4]
-			| cdb_ptr[5])) {
-			io_size = scsi_bufflen(scmd) >>
-			    raid_device->block_exponent;
-			i = (cdb0 < READ_16) ? 2 : 6;
-			/* get virtual lba */
-			v_lba = be32_to_cpu(*(__be32 *)(&cdb_ptr[i]));
+	if (cmd == READ_10 || cmd == WRITE_10)
+		v_lba = get_unaligned_be32(&mpi_request->CDB.CDB32[2]);
+	else
+		v_lba = get_unaligned_be64(&mpi_request->CDB.CDB32[2]);
 
-			if (((u64)v_lba + (u64)io_size - 1) <=
-			    (u32)raid_device->max_lba) {
-				stripe_sz = raid_device->stripe_sz;
-				stripe_exp = raid_device->stripe_exponent;
-				stripe_off = v_lba & (stripe_sz - 1);
+	io_size = scsi_bufflen(scmd) >> raid_device->block_exponent;
 
-				/* Check whether IO falls within a stripe */
-				if ((stripe_off + io_size) <= stripe_sz) {
-					num_pds = raid_device->num_pds;
-					p_lba = v_lba >> stripe_exp;
-					stripe_unit = p_lba / num_pds;
-					column = p_lba % num_pds;
-					p_lba = (stripe_unit << stripe_exp) +
-					    stripe_off;
-					mpi_request->DevHandle =
-						cpu_to_le16(raid_device->
-						    pd_handle[column]);
-					(*(__be32 *)(&cdb_ptr[i])) =
-						cpu_to_be32(p_lba);
-					/*
-					* WD: To indicate this I/O is directI/O
-					*/
-					_scsih_scsi_direct_io_set(ioc, smid, 1);
-				}
-			}
-		} else {
-			io_size = scsi_bufflen(scmd) >>
-			    raid_device->block_exponent;
-			/* get virtual lba */
-			v_llba = be64_to_cpu(*(__be64 *)(&cdb_ptr[2]));
+	if (v_lba + io_size - 1 > raid_device->max_lba)
+		return;
 
-			if ((v_llba + (u64)io_size - 1) <=
-			    raid_device->max_lba) {
-				stripe_sz = raid_device->stripe_sz;
-				stripe_exp = raid_device->stripe_exponent;
-				stripe_off = (u32) (v_llba & (stripe_sz - 1));
+	stripe_sz = raid_device->stripe_sz;
+	stripe_exp = raid_device->stripe_exponent;
+	stripe_off = v_lba & (stripe_sz - 1);
 
-				/* Check whether IO falls within a stripe */
-				if ((stripe_off + io_size) <= stripe_sz) {
-					num_pds = raid_device->num_pds;
-					p_lba = (u32)(v_llba >> stripe_exp);
-					stripe_unit = p_lba / num_pds;
-					column = p_lba % num_pds;
-					p_lba = (stripe_unit << stripe_exp) +
-					    stripe_off;
-					mpi_request->DevHandle =
-						cpu_to_le16(raid_device->
-						    pd_handle[column]);
-					(*(__be64 *)(&cdb_ptr[2])) =
-					    cpu_to_be64((u64)p_lba);
-					/*
-					* WD: To indicate this I/O is directI/O
-					*/
-					_scsih_scsi_direct_io_set(ioc, smid, 1);
-				}
-			}
-		}
-	}
+	/* Return unless IO falls within a stripe */
+	if (stripe_off + io_size > stripe_sz)
+		return;
+
+	num_pds = raid_device->num_pds;
+	p_lba = v_lba >> stripe_exp;
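+	/* sector_div() divides p_lba in place and returns the remainder:
+	 * the remainder picks the member-disk column, the quotient is the
+	 * stripe unit.
+	 */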
+	column = sector_div(p_lba, num_pds);
+	p_lba = (p_lba << stripe_exp) + stripe_off;
+
+	mpi_request->DevHandle = cpu_to_le16(raid_device->pd_handle[column]);
+
+	if (cmd == READ_10 || cmd == WRITE_10)
+		put_unaligned_be32(lower_32_bits(p_lba),
+				   &mpi_request->CDB.CDB32[2]);
+	else
+		put_unaligned_be64(p_lba, &mpi_request->CDB.CDB32[2]);
+
+	_scsih_scsi_direct_io_set(ioc, smid, 1);
 }
 
 /**
@@ -4308,7 +4271,7 @@
 #endif
 
 /**
- * _scsih_turn_on_fault_led - illuminate Fault LED
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
  * @ioc: per adapter object
  * @handle: device handle
  * Context: process
@@ -4316,10 +4279,15 @@
  * Return nothing.
  */
 static void
-_scsih_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
 	Mpi2SepReply_t mpi_reply;
 	Mpi2SepRequest_t mpi_request;
+	struct _sas_device *sas_device;
+
+	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (!sas_device)
+		return;
 
 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -4334,6 +4302,47 @@
 		__FILE__, __LINE__, __func__);
 		return;
 	}
+	sas_device->pfa_led_on = 1;
+
+	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT
+		 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+		 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+		 le32_to_cpu(mpi_reply.IOCLogInfo)));
+		return;
+	}
+}
+
+/**
+ * _scsih_turn_off_pfa_led - turn off PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to be turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT2SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	Mpi2SepReply_t mpi_reply;
+	Mpi2SepRequest_t mpi_request;
+
+	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+	mpi_request.SlotStatus = 0;
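+	/* A zero SlotStatus clears the predicted-fault indication and
+	 * turns the LED off.
+	 */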
+	mpi_request.Slot = cpu_to_le16(sas_device->slot);
+	mpi_request.DevHandle = 0;
+	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+	if ((mpt2sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+	    &mpi_request)) != 0) {
+		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n", ioc->name,
+		__FILE__, __LINE__, __func__);
+		return;
+	}
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "enclosure_processor: "
@@ -4345,7 +4354,7 @@
 }
 
 /**
- * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
  * @ioc: per adapter object
  * @handle: device handle
  * Context: interrupt.
@@ -4353,14 +4362,14 @@
  * Return nothing.
  */
 static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT2SAS_ADAPTER *ioc, u16 handle)
 {
 	struct fw_event_work *fw_event;
 
 	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
 	if (!fw_event)
 		return;
-	fw_event->event = MPT2SAS_TURN_ON_FAULT_LED;
+	fw_event->event = MPT2SAS_TURN_ON_PFA_LED;
 	fw_event->device_handle = handle;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
@@ -4404,7 +4413,7 @@
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
-		_scsih_send_event_to_turn_on_fault_led(ioc, handle);
+		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
 
 	/* insert into event log */
 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -5325,6 +5334,12 @@
 {
 	struct MPT2SAS_TARGET *sas_target_priv_data;
 
+	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+		(sas_device->pfa_led_on)) {
+		_scsih_turn_off_pfa_led(ioc, sas_device);
+		sas_device->pfa_led_on = 0;
+	}
+
 	dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "%s: enter: "
 	    "handle(0x%04x), sas_addr(0x%016llx)\n", ioc->name, __func__,
 		sas_device->handle, (unsigned long long)
@@ -7441,8 +7456,8 @@
 		dewtprintk(ioc, printk(MPT2SAS_INFO_FMT "port enable: complete "
 		    "from worker thread\n", ioc->name));
 		break;
-	case MPT2SAS_TURN_ON_FAULT_LED:
-		_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+	case MPT2SAS_TURN_ON_PFA_LED:
+		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
 		break;
 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 		_scsih_sas_topology_change_event(ioc, fw_event);
@@ -8132,6 +8147,7 @@
 {
 	struct MPT2SAS_ADAPTER *ioc;
 	struct Scsi_Host *shost;
+	int rv;
 
 	shost = scsi_host_alloc(&scsih_driver_template,
 	    sizeof(struct MPT2SAS_ADAPTER));
@@ -8227,6 +8243,7 @@
 	if (!ioc->firmware_event_thread) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
+		rv = -ENODEV;
 		goto out_thread_fail;
 	}
 
@@ -8234,6 +8251,7 @@
 	if ((mpt2sas_base_attach(ioc))) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
+		rv = -ENODEV;
 		goto out_attach_fail;
 	}
 
@@ -8251,7 +8269,8 @@
 	} else
 		ioc->hide_drives = 0;
 
-	if ((scsi_add_host(shost, &pdev->dev))) {
+	rv = scsi_add_host(shost, &pdev->dev);
+	if (rv) {
 		printk(MPT2SAS_ERR_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
 		goto out_add_shost_fail;
@@ -8268,7 +8287,7 @@
  out_thread_fail:
 	list_del(&ioc->list);
 	scsi_host_put(shost);
-	return -ENODEV;
+	return rv;
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 410f4a3..0d1d064 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -2,7 +2,7 @@
  * SAS Transport Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt2sas/mpt2_transport.c
- * Copyright (C) 2007-2013  LSI Corporation
+ * Copyright (C) 2007-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/Kconfig b/drivers/scsi/mpt3sas/Kconfig
index d53e1b0..4d235dd 100644
--- a/drivers/scsi/mpt3sas/Kconfig
+++ b/drivers/scsi/mpt3sas/Kconfig
@@ -2,7 +2,7 @@
 # Kernel configuration file for the MPT3SAS
 #
 # This code is based on drivers/scsi/mpt3sas/Kconfig
-# Copyright (C) 2012-2013  LSI Corporation
+# Copyright (C) 2012-2014  LSI Corporation
 #  (mailto:DL-MPTFusionLinux@lsi.com)
 
 # This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2.h b/drivers/scsi/mpt3sas/mpi/mpi2.h
index 20da8f9..c34c115 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2.h
@@ -8,7 +8,7 @@
  *                 scatter/gather formats.
  * Creation Date:  June 21, 2006
  *
- * mpi2.h Version:  02.00.29
+ * mpi2.h Version:  02.00.31
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -86,6 +86,8 @@
  * 11-27-12  02.00.28  Bumped MPI2_HEADER_VERSION_UNIT.
  * 12-20-12  02.00.29  Bumped MPI2_HEADER_VERSION_UNIT.
  *                     Added MPI25_SUP_REPLY_POST_HOST_INDEX_OFFSET.
+ * 04-09-13  02.00.30  Bumped MPI2_HEADER_VERSION_UNIT.
+ * 04-17-13  02.00.31  Bumped MPI2_HEADER_VERSION_UNIT.
  * --------------------------------------------------------------------------
  */
 
@@ -119,7 +121,7 @@
 #define MPI2_VERSION_02_05                  (0x0205)
 
 /*Unit and Dev versioning for this MPI header set */
-#define MPI2_HEADER_VERSION_UNIT            (0x1D)
+#define MPI2_HEADER_VERSION_UNIT            (0x1F)
 #define MPI2_HEADER_VERSION_DEV             (0x00)
 #define MPI2_HEADER_VERSION_UNIT_MASK       (0xFF00)
 #define MPI2_HEADER_VERSION_UNIT_SHIFT      (8)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
index 889aa70..e261a31 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_cnfg.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_cnfg.h
  *         Title:  MPI Configuration messages and pages
  * Creation Date:  November 10, 2006
  *
- *   mpi2_cnfg.h Version:  02.00.24
+ *   mpi2_cnfg.h Version:  02.00.26
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -160,6 +160,11 @@
  * 12-20-12  02.00.24  Marked MPI2_SASIOUNIT1_CONTROL_CLEAR_AFFILIATION as
  *                     obsolete for MPI v2.5 and later.
  *                     Added some defines for 12G SAS speeds.
+ * 04-09-13  02.00.25  Added MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK.
+ *                     Fixed MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS to
+ *                     match the specification.
+ * 08-19-13  02.00.26  Added reserved words to MPI2_CONFIG_PAGE_IO_UNIT_7 for
+ *			future use.
  * --------------------------------------------------------------------------
  */
 
@@ -792,6 +797,7 @@
 #define MPI2_IOUNITPAGE1_PAGEVERSION                    (0x04)
 
 /*IO Unit Page 1 Flags defines */
+#define MPI2_IOUNITPAGE1_ATA_SECURITY_FREEZE_LOCK       (0x00004000)
 #define MPI25_IOUNITPAGE1_NEW_DEVICE_FAST_PATH_DISABLE  (0x00002000)
 #define MPI25_IOUNITPAGE1_DISABLE_FAST_PATH             (0x00001000)
 #define MPI2_IOUNITPAGE1_ENABLE_HOST_BASED_DISCOVERY    (0x00000800)
@@ -870,7 +876,7 @@
 #define MPI2_IOUNITPAGE5_PAGEVERSION                    (0x00)
 
 /*defines for IO Unit Page 5 DmaEngineCapabilities field */
-#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS      (0xFF00)
+#define MPI2_IOUNITPAGE5_DMA_CAP_MASK_MAX_REQUESTS      (0xFFFF0000)
 #define MPI2_IOUNITPAGE5_DMA_CAP_SHIFT_MAX_REQUESTS     (16)
 
 #define MPI2_IOUNITPAGE5_DMA_CAP_EEDP                   (0x0008)
@@ -920,11 +926,15 @@
 	U8
 		BoardTemperatureUnits;                  /*0x16 */
 	U8                      Reserved3;              /*0x17 */
+	U32			Reserved4;		/* 0x18 */
+	U32			Reserved5;		/* 0x1C */
+	U32			Reserved6;		/* 0x20 */
+	U32			Reserved7;		/* 0x24 */
 } MPI2_CONFIG_PAGE_IO_UNIT_7,
 	*PTR_MPI2_CONFIG_PAGE_IO_UNIT_7,
 	Mpi2IOUnitPage7_t, *pMpi2IOUnitPage7_t;
 
-#define MPI2_IOUNITPAGE7_PAGEVERSION                    (0x02)
+#define MPI2_IOUNITPAGE7_PAGEVERSION			(0x04)
 
 /*defines for IO Unit Page 7 CurrentPowerMode and PreviousPowerMode fields */
 #define MPI25_IOUNITPAGE7_PM_INIT_MASK              (0xC0)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_init.h b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
index f7928bf..068c98e 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_init.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_init.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_init.h
  *         Title:  MPI SCSI initiator mode messages and structures
  * Creation Date:  June 23, 2006
  *
- * mpi2_init.h Version:  02.00.14
+ * mpi2_init.h Version:  02.00.15
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -44,6 +44,8 @@
  *                     Priority to match SAM-4.
  *                     Added EEDPErrorOffset to MPI2_SCSI_IO_REPLY.
  * 07-10-12  02.00.14  Added MPI2_SCSIIO_CONTROL_SHIFT_DATADIRECTION.
+ * 04-09-13  02.00.15  Added SCSIStatusQualifier field to MPI2_SCSI_IO_REPLY,
+ *                     replacing the Reserved4 field.
  * --------------------------------------------------------------------------
  */
 
@@ -347,7 +349,7 @@
 	U32 SenseCount;		/*0x18 */
 	U32 ResponseInfo;	/*0x1C */
 	U16 TaskTag;		/*0x20 */
-	U16 Reserved4;		/*0x22 */
+	U16 SCSIStatusQualifier; /* 0x22 */
 	U32 BidirectionalTransferCount;	/*0x24 */
 	U32 EEDPErrorOffset;	/*0x28 *//*MPI 2.5 only; Reserved in MPI 2.0*/
 	U32 Reserved6;		/*0x2C */
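
A usage sketch (not part of the patch): a consumer of the SCSI IO reply can
now read the qualifier where Reserved4 used to sit; mpi_reply is a
hypothetical pointer to a completed MPI2_SCSI_IO_REPLY:

	u16 qualifier = le16_to_cpu(mpi_reply->SCSIStatusQualifier);
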
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
index e2bb821..4908309 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_ioc.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_ioc.h
  *         Title:  MPI IOC, Port, Event, FW Download, and FW Upload messages
  * Creation Date:  October 11, 2006
  *
- * mpi2_ioc.h Version:  02.00.22
+ * mpi2_ioc.h Version:  02.00.23
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -127,6 +127,11 @@
  * 07-26-12  02.00.22  Added MPI2_IOCFACTS_EXCEPT_PARTIAL_MEMORY_FAILURE.
  *                     Added ElapsedSeconds field to
  *                     MPI2_EVENT_DATA_IR_OPERATION_STATUS.
+ * 08-19-13  02.00.23  For IOCInit, added MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE
+ *			and MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY.
+ *			Added MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE.
+ *			Added MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY.
+ *			Added Encrypted Hash Extended Image.
  * --------------------------------------------------------------------------
  */
 
@@ -182,6 +187,10 @@
 #define MPI2_WHOINIT_HOST_DRIVER                (0x04)
 #define MPI2_WHOINIT_MANUFACTURER               (0x05)
 
+/* MsgFlags */
+#define MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE    (0x01)
+
+
 /*MsgVersion */
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_MASK      (0xFF00)
 #define MPI2_IOCINIT_MSGVERSION_MAJOR_SHIFT     (8)
@@ -194,9 +203,19 @@
 #define MPI2_IOCINIT_HDRVERSION_DEV_MASK        (0x00FF)
 #define MPI2_IOCINIT_HDRVERSION_DEV_SHIFT       (0)
 
-/*minimum depth for the Reply Descriptor Post Queue */
+/*minimum depth for a Reply Descriptor Post Queue */
 #define MPI2_RDPQ_DEPTH_MIN                     (16)
 
+/* Reply Descriptor Post Queue Array Entry */
+typedef struct _MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY {
+	U64                 RDPQBaseAddress;                    /* 0x00 */
+	U32                 Reserved1;                          /* 0x08 */
+	U32                 Reserved2;                          /* 0x0C */
+} MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+*PTR_MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY,
+Mpi2IOCInitRDPQArrayEntry, *pMpi2IOCInitRDPQArrayEntry;
+
+
 /*IOCInit Reply message */
 typedef struct _MPI2_IOC_INIT_REPLY {
 	U8 WhoInit;		/*0x00 */
@@ -306,6 +325,7 @@
 /*ProductID field uses MPI2_FW_HEADER_PID_ */
 
 /*IOCCapabilities */
+#define MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE     (0x00040000)
 #define MPI25_IOCFACTS_CAPABILITY_FAST_PATH_CAPABLE     (0x00020000)
 #define MPI2_IOCFACTS_CAPABILITY_HOST_BASED_DISCOVERY   (0x00010000)
 #define MPI2_IOCFACTS_CAPABILITY_MSI_X_INDEX            (0x00008000)
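
Tying the new RDPQ definitions together: an IOCInit issuer that sees
MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE may set
MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE and hand the IOC a DMA-able array of
MPI2_IOC_INIT_RDPQ_ARRAY_ENTRY, one entry per reply queue. A minimal sketch;
n_queues, queue_dma[] and array_dma are hypothetical:

	Mpi2IOCInitRDPQArrayEntry *e;	/* n_queues entries, DMA-coherent */
	int i;

	for (i = 0; i < n_queues; i++)
		e[i].RDPQBaseAddress = cpu_to_le64(queue_dma[i]);
	mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
	mpi_request.ReplyDescriptorPostQueueAddress = cpu_to_le64(array_dma);
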
@@ -1140,6 +1160,7 @@
 #define MPI2_FW_DOWNLOAD_ITYPE_MEGARAID             (0x09)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMPLETE             (0x0A)
 #define MPI2_FW_DOWNLOAD_ITYPE_COMMON_BOOT_BLOCK    (0x0B)
+#define MPI2_FW_DOWNLOAD_ITYPE_PUBLIC_KEY           (0x0C)
 #define MPI2_FW_DOWNLOAD_ITYPE_MIN_PRODUCT_SPECIFIC (0xF0)
 
 /*MPI v2.0 FWDownload TransactionContext Element */
@@ -1404,6 +1425,7 @@
 #define MPI2_EXT_IMAGE_TYPE_FLASH_LAYOUT            (0x06)
 #define MPI2_EXT_IMAGE_TYPE_SUPPORTED_DEVICES       (0x07)
 #define MPI2_EXT_IMAGE_TYPE_MEGARAID                (0x08)
+#define MPI2_EXT_IMAGE_TYPE_ENCRYPTED_HASH          (0x09)
 #define MPI2_EXT_IMAGE_TYPE_MIN_PRODUCT_SPECIFIC    (0x80)
 #define MPI2_EXT_IMAGE_TYPE_MAX_PRODUCT_SPECIFIC    (0xFF)
 
@@ -1560,6 +1582,42 @@
 /*defines for the ResetVector field */
 #define MPI2_INIT_IMAGE_RESETVECTOR_OFFSET      (0x14)
 
+
+/* Encrypted Hash Extended Image Data */
+
+typedef struct _MPI25_ENCRYPTED_HASH_ENTRY {
+	U8		HashImageType;		/* 0x00 */
+	U8		HashAlgorithm;		/* 0x01 */
+	U8		EncryptionAlgorithm;	/* 0x02 */
+	U8		Reserved1;		/* 0x03 */
+	U32		Reserved2;		/* 0x04 */
+	U32		EncryptedHash[1];	/* 0x08 */ /* variable length */
+} MPI25_ENCRYPTED_HASH_ENTRY, *PTR_MPI25_ENCRYPTED_HASH_ENTRY,
+Mpi25EncryptedHashEntry_t, *pMpi25EncryptedHashEntry_t;
+
+/* values for HashImageType */
+#define MPI25_HASH_IMAGE_TYPE_UNUSED		(0x00)
+#define MPI25_HASH_IMAGE_TYPE_FIRMWARE		(0x01)
+
+/* values for HashAlgorithm */
+#define MPI25_HASH_ALGORITHM_UNUSED		(0x00)
+#define MPI25_HASH_ALGORITHM_SHA256		(0x01)
+
+/* values for EncryptionAlgorithm */
+#define MPI25_ENCRYPTION_ALG_UNUSED		(0x00)
+#define MPI25_ENCRYPTION_ALG_RSA256		(0x01)
+
+typedef struct _MPI25_ENCRYPTED_HASH_DATA {
+	U8				ImageVersion;		/* 0x00 */
+	U8				NumHash;		/* 0x01 */
+	U16				Reserved1;		/* 0x02 */
+	U32				Reserved2;		/* 0x04 */
+	MPI25_ENCRYPTED_HASH_ENTRY	EncryptedHashEntry[1];  /* 0x08 */
+} MPI25_ENCRYPTED_HASH_DATA, *PTR_MPI25_ENCRYPTED_HASH_DATA,
+Mpi25EncryptedHashData_t, *pMpi25EncryptedHashData_t;
+
+
+
 /****************************************************************************
 * PowerManagementControl message
 ****************************************************************************/
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
index 7176523..13d93ca 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_raid.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_raid.h
  *         Title:  MPI Integrated RAID messages and structures
  * Creation Date:  April 26, 2007
  *
- *   mpi2_raid.h Version:  02.00.09
+ *   mpi2_raid.h Version:  02.00.10
  *
  * Version History
  * ---------------
@@ -30,6 +30,7 @@
  * 02-06-12  02.00.08  Added MPI2_RAID_ACTION_PHYSDISK_HIDDEN.
  * 07-26-12  02.00.09  Added ElapsedSeconds field to MPI2_RAID_VOL_INDICATOR.
  *                     Added MPI2_RAID_VOL_FLAGS_ELAPSED_SECONDS_VALID define.
+ * 04-17-13  02.00.10  Added MPI25_RAID_ACTION_ADATA_ALLOW_PI.
  * --------------------------------------------------------------------------
  */
 
@@ -46,6 +47,9 @@
 * RAID Action messages
 ****************************************************************************/
 
+/* ActionDataWord defines for use with MPI2_RAID_ACTION_CREATE_VOLUME action */
+#define MPI25_RAID_ACTION_ADATA_ALLOW_PI            (0x80000000)
+
 /*ActionDataWord defines for use with MPI2_RAID_ACTION_DELETE_VOLUME action */
 #define MPI2_RAID_ACTION_ADATA_KEEP_LBA0            (0x00000000)
 #define MPI2_RAID_ACTION_ADATA_ZERO_LBA0            (0x00000001)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
index cba046f..156e305 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_sas.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_sas.h
  *         Title:  MPI Serial Attached SCSI structures and definitions
  * Creation Date:  February 9, 2007
  *
- * mpi2_sas.h Version:  02.00.07
+ * mpi2_sas.h Version:  02.00.08
  *
  * NOTE: Names (typedefs, defines, etc.) beginning with an MPI25 or Mpi25
  *       prefix are for use only on MPI v2.5 products, and must not be used
@@ -30,6 +30,8 @@
  * 11-18-11  02.00.06  Incorporating additions for MPI v2.5.
  * 07-10-12  02.00.07  Added MPI2_SATA_PT_SGE_UNION for use in the SATA
  *                     Passthrough Request message.
+ * 08-19-13  02.00.08  Made MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL obsolete
+ *			for anything newer than MPI v2.0.
  * --------------------------------------------------------------------------
  */
 
@@ -251,7 +253,7 @@
 #define MPI2_SAS_OP_PHY_CLEAR_ERROR_LOG         (0x08)
 #define MPI2_SAS_OP_SEND_PRIMITIVE              (0x0A)
 #define MPI2_SAS_OP_FORCE_FULL_DISCOVERY        (0x0B)
-#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C)
+#define MPI2_SAS_OP_TRANSMIT_PORT_SELECT_SIGNAL (0x0C) /* MPI v2.0 only */
 #define MPI2_SAS_OP_REMOVE_DEVICE               (0x0D)
 #define MPI2_SAS_OP_LOOKUP_MAPPING              (0x0E)
 #define MPI2_SAS_OP_SET_IOC_PARAMETER           (0x0F)
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
index 34e9a7b..904910d 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_tool.h
@@ -1,12 +1,12 @@
 /*
- * Copyright (c) 2000-2013 LSI Corporation.
+ * Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *          Name:  mpi2_tool.h
  *         Title:  MPI diagnostic tool structures and definitions
  * Creation Date:  March 26, 2007
  *
- *   mpi2_tool.h Version:  02.00.10
+ *   mpi2_tool.h Version:  02.00.11
  *
  * Version History
  * ---------------
@@ -32,6 +32,7 @@
  *                     message.
  * 07-26-12  02.00.10  Modified MPI2_TOOLBOX_DIAGNOSTIC_CLI_REQUEST so that
  *                     it uses MPI Chain SGE as well as MPI Simple SGE.
+ * 08-19-13  02.00.11  Added MPI2_TOOLBOX_TEXT_DISPLAY_TOOL and related info.
  * --------------------------------------------------------------------------
  */
 
@@ -51,6 +52,7 @@
 #define MPI2_TOOLBOX_ISTWI_READ_WRITE_TOOL          (0x03)
 #define MPI2_TOOLBOX_BEACON_TOOL                    (0x05)
 #define MPI2_TOOLBOX_DIAGNOSTIC_CLI_TOOL            (0x06)
+#define MPI2_TOOLBOX_TEXT_DISPLAY_TOOL              (0x07)
 
 /****************************************************************************
 * Toolbox reply
@@ -331,6 +333,45 @@
 	Mpi2ToolboxDiagnosticCliReply_t,
 	*pMpi2ToolboxDiagnosticCliReply_t;
 
+
+/****************************************************************************
+*  Toolbox Console Text Display Tool
+****************************************************************************/
+
+/* Toolbox Console Text Display Tool request message */
+typedef struct _MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST {
+	U8			Tool;			/* 0x00 */
+	U8			Reserved1;		/* 0x01 */
+	U8			ChainOffset;		/* 0x02 */
+	U8			Function;		/* 0x03 */
+	U16			Reserved2;		/* 0x04 */
+	U8			Reserved3;		/* 0x06 */
+	U8			MsgFlags;		/* 0x07 */
+	U8			VP_ID;			/* 0x08 */
+	U8			VF_ID;			/* 0x09 */
+	U16			Reserved4;		/* 0x0A */
+	U8			Console;		/* 0x0C */
+	U8			Flags;			/* 0x0D */
+	U16			Reserved6;		/* 0x0E */
+	U8			TextToDisplay[4];	/* 0x10 */
+} MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+*PTR_MPI2_TOOLBOX_TEXT_DISPLAY_REQUEST,
+Mpi2ToolboxTextDisplayRequest_t,
+*pMpi2ToolboxTextDisplayRequest_t;
+
+/* defines for the Console field */
+#define MPI2_TOOLBOX_CONSOLE_TYPE_MASK          (0xF0)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_DEFAULT       (0x00)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_UART          (0x10)
+#define MPI2_TOOLBOX_CONSOLE_TYPE_ETHERNET      (0x20)
+
+#define MPI2_TOOLBOX_CONSOLE_NUMBER_MASK        (0x0F)
+
+/* defines for the Flags field */
+#define MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP     (0x01)
+
+
+
 /*****************************************************************************
 *
 *      Diagnostic Buffer Messages
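
A hedged sketch of driving the new text display tool above; req is a
hypothetical, zeroed request frame, and MPI2_FUNCTION_TOOLBOX comes from
mpi2.h:

	req->Function = MPI2_FUNCTION_TOOLBOX;
	req->Tool = MPI2_TOOLBOX_TEXT_DISPLAY_TOOL;
	req->Console = MPI2_TOOLBOX_CONSOLE_TYPE_UART;	/* console number 0 */
	req->Flags = MPI2_TOOLBOX_CONSOLE_FLAG_TIMESTAMP;
	memcpy(req->TextToDisplay, "ok\n", 4);		/* includes the NUL */
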
diff --git a/drivers/scsi/mpt3sas/mpi/mpi2_type.h b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
index ba1fed5..99ab093 100644
--- a/drivers/scsi/mpt3sas/mpi/mpi2_type.h
+++ b/drivers/scsi/mpt3sas/mpi/mpi2_type.h
@@ -1,5 +1,5 @@
 /*
- *  Copyright (c) 2000-2013 LSI Corporation.
+ *  Copyright (c) 2000-2014 LSI Corporation.
  *
  *
  *           Name:  mpi2_type.h
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.c b/drivers/scsi/mpt3sas/mpt3sas_base.c
index 93ce2b2..1560115 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.c
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -91,6 +91,8 @@
 MODULE_PARM_DESC(mpt3sas_fwfault_debug,
 	" enable detection of firmware fault and halt firmware - (default=0)");
 
+static int
+_base_get_ioc_facts(struct MPT3SAS_ADAPTER *ioc, int sleep_flag);
 
 /**
  * _scsih_set_fwfault_debug - global setting of ioc->fwfault_debug.
@@ -1482,17 +1484,22 @@
 _base_config_dma_addressing(struct MPT3SAS_ADAPTER *ioc, struct pci_dev *pdev)
 {
 	struct sysinfo s;
-	char *desc = NULL;
+	u64 consistent_dma_mask;
+
+	if (ioc->dma_mask)
+		consistent_dma_mask = DMA_BIT_MASK(64);
+	else
+		consistent_dma_mask = DMA_BIT_MASK(32);
 
 	if (sizeof(dma_addr_t) > 4) {
 		const uint64_t required_mask =
 		    dma_get_required_mask(&pdev->dev);
 		if ((required_mask > DMA_BIT_MASK(32)) &&
 		    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) &&
-		    !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		    !pci_set_consistent_dma_mask(pdev, consistent_dma_mask)) {
 			ioc->base_add_sg_single = &_base_add_sg_single_64;
 			ioc->sge_size = sizeof(Mpi2SGESimple64_t);
-			desc = "64";
+			ioc->dma_mask = 64;
 			goto out;
 		}
 	}
@@ -1501,19 +1508,30 @@
 	    && !pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32))) {
 		ioc->base_add_sg_single = &_base_add_sg_single_32;
 		ioc->sge_size = sizeof(Mpi2SGESimple32_t);
-		desc = "32";
+		ioc->dma_mask = 32;
 	} else
 		return -ENODEV;
 
  out:
 	si_meminfo(&s);
 	pr_info(MPT3SAS_FMT
-		"%s BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
-		ioc->name, desc, convert_to_kb(s.totalram));
+		"%d BIT PCI BUS DMA ADDRESSING SUPPORTED, total mem (%ld kB)\n",
+		ioc->name, ioc->dma_mask, convert_to_kb(s.totalram));
 
 	return 0;
 }
 
+static int
+_base_change_consistent_dma_mask(struct MPT3SAS_ADAPTER *ioc,
+				      struct pci_dev *pdev)
+{
+	if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)))
+			return -ENODEV;
+	}
+	return 0;
+}
+
 /**
  * _base_check_enable_msix - checks MSIX capability.
  * @ioc: per adapter object
@@ -1698,11 +1716,15 @@
 	  ": %d, max_msix_vectors: %d\n", ioc->name, ioc->msix_vector_count,
 	  ioc->cpu_count, max_msix_vectors);
 
+	if (!ioc->rdpq_array_enable && max_msix_vectors == -1)
+		max_msix_vectors = 8;
+
 	if (max_msix_vectors > 0) {
 		ioc->reply_queue_count = min_t(int, max_msix_vectors,
 			ioc->reply_queue_count);
 		ioc->msix_vector_count = ioc->reply_queue_count;
-	}
+	} else if (max_msix_vectors == 0)
+		goto try_ioapic;
 
 	entries = kcalloc(ioc->reply_queue_count, sizeof(struct msix_entry),
 	    GFP_KERNEL);
@@ -1716,10 +1738,10 @@
 	for (i = 0, a = entries; i < ioc->reply_queue_count; i++, a++)
 		a->entry = i;
 
-	r = pci_enable_msix(ioc->pdev, entries, ioc->reply_queue_count);
+	r = pci_enable_msix_exact(ioc->pdev, entries, ioc->reply_queue_count);
 	if (r) {
 		dfailprintk(ioc, pr_info(MPT3SAS_FMT
-			"pci_enable_msix failed (r=%d) !!!\n",
+			"pci_enable_msix_exact failed (r=%d) !!!\n",
 			ioc->name, r));
 		kfree(entries);
 		goto try_ioapic;
@@ -1742,6 +1764,7 @@
 /* failback to io_apic interrupt routing */
  try_ioapic:
 
+	ioc->reply_queue_count = 1;
 	r = _base_request_irq(ioc, 0, ioc->pdev->irq);
 
 	return r;
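
Unlike the old pci_enable_msix(), which could return a positive count and
oblige the caller to retry with fewer vectors, pci_enable_msix_exact() either
grants exactly the requested number or fails with a negative errno, so one
check suffices before the IO-APIC fallback (nvec stands in for
ioc->reply_queue_count):

	rc = pci_enable_msix_exact(ioc->pdev, entries, nvec);
	if (rc)			/* no partial grants to handle */
		goto try_ioapic;
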
@@ -1821,6 +1844,16 @@
 	}
 
 	_base_mask_interrupts(ioc);
+
+	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
+	if (r)
+		goto out_fail;
+
+	if (!ioc->rdpq_array_enable_assigned) {
+		ioc->rdpq_array_enable = ioc->rdpq_array_capable;
+		ioc->rdpq_array_enable_assigned = 1;
+	}
+
 	r = _base_enable_msix(ioc);
 	if (r)
 		goto out_fail;
@@ -2185,6 +2218,53 @@
 	    &ioc->scsi_lookup_lock);
 }
 
+/**
+ * _base_display_intel_branding - Display branding string
+ * @ioc: per adapter object
+ *
+ * Return nothing.
+ */
+static void
+_base_display_intel_branding(struct MPT3SAS_ADAPTER *ioc)
+{
+	if (ioc->pdev->subsystem_vendor != PCI_VENDOR_ID_INTEL)
+		return;
+
+	switch (ioc->pdev->device) {
+	case MPI25_MFGPAGE_DEVID_SAS3008:
+		switch (ioc->pdev->subsystem_device) {
+		case MPT3SAS_INTEL_RMS3JC080_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_INTEL_RMS3JC080_BRANDING);
+			break;
+
+		case MPT3SAS_INTEL_RS3GC008_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_INTEL_RS3GC008_BRANDING);
+			break;
+		case MPT3SAS_INTEL_RS3FC044_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_INTEL_RS3FC044_BRANDING);
+			break;
+		case MPT3SAS_INTEL_RS3UC080_SSDID:
+			pr_info(MPT3SAS_FMT "%s\n", ioc->name,
+				MPT3SAS_INTEL_RS3UC080_BRANDING);
+			break;
+		default:
+			pr_info(MPT3SAS_FMT
+				"Intel(R) Controller: Subsystem ID: 0x%X\n",
+				ioc->name, ioc->pdev->subsystem_device);
+			break;
+		}
+		break;
+	default:
+		pr_info(MPT3SAS_FMT
+			"Intel(R) Controller: Subsystem ID: 0x%X\n",
+			ioc->name, ioc->pdev->subsystem_device);
+		break;
+	}
+}
+
 
 
 /**
@@ -2216,6 +2296,8 @@
 	   (bios_version & 0x0000FF00) >> 8,
 	    bios_version & 0x000000FF);
 
+	_base_display_intel_branding(ioc);
+
 	pr_info(MPT3SAS_FMT "Protocol=(", ioc->name);
 
 	if (ioc->facts.ProtocolFlags & MPI2_IOCFACTS_PROTOCOL_SCSI_INITIATOR) {
@@ -2447,7 +2529,8 @@
 static void
 _base_release_memory_pools(struct MPT3SAS_ADAPTER *ioc)
 {
-	int i;
+	int i = 0;
+	struct reply_post_struct *rps;
 
 	dexitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -2492,15 +2575,25 @@
 		ioc->reply_free = NULL;
 	}
 
-	if (ioc->reply_post_free) {
-		pci_pool_free(ioc->reply_post_free_dma_pool,
-		    ioc->reply_post_free, ioc->reply_post_free_dma);
+	if (ioc->reply_post) {
+		do {
+			rps = &ioc->reply_post[i];
+			if (rps->reply_post_free) {
+				pci_pool_free(
+				    ioc->reply_post_free_dma_pool,
+				    rps->reply_post_free,
+				    rps->reply_post_free_dma);
+				dexitprintk(ioc, pr_info(MPT3SAS_FMT
+				    "reply_post_free_pool(0x%p): free\n",
+				    ioc->name, rps->reply_post_free));
+				rps->reply_post_free = NULL;
+			}
+		} while (ioc->rdpq_array_enable &&
+			   (++i < ioc->reply_queue_count));
+
 		if (ioc->reply_post_free_dma_pool)
 			pci_pool_destroy(ioc->reply_post_free_dma_pool);
-		dexitprintk(ioc, pr_info(MPT3SAS_FMT
-		    "reply_post_free_pool(0x%p): free\n", ioc->name,
-		    ioc->reply_post_free));
-		ioc->reply_post_free = NULL;
+		kfree(ioc->reply_post);
 	}
 
 	if (ioc->config_page) {
@@ -2647,6 +2740,65 @@
 	    ioc->max_sges_in_chain_message, ioc->shost->sg_tablesize,
 	    ioc->chains_needed_per_io));
 
+	/* reply post queue, 16 byte align */
+	reply_post_free_sz = ioc->reply_post_queue_depth *
+	    sizeof(Mpi2DefaultReplyDescriptor_t);
+
+	sz = reply_post_free_sz;
+	if (_base_is_controller_msix_enabled(ioc) && !ioc->rdpq_array_enable)
+		sz *= ioc->reply_queue_count;
+
+	ioc->reply_post = kcalloc((ioc->rdpq_array_enable) ?
+	    (ioc->reply_queue_count):1,
+	    sizeof(struct reply_post_struct), GFP_KERNEL);
+
+	if (!ioc->reply_post) {
+		pr_err(MPT3SAS_FMT "reply_post_free pool: kcalloc failed\n",
+			ioc->name);
+		goto out;
+	}
+	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
+	    ioc->pdev, sz, 16, 0);
+	if (!ioc->reply_post_free_dma_pool) {
+		pr_err(MPT3SAS_FMT
+		 "reply_post_free pool: pci_pool_create failed\n",
+		 ioc->name);
+		goto out;
+	}
+	i = 0;
+	do {
+		ioc->reply_post[i].reply_post_free =
+		    pci_pool_alloc(ioc->reply_post_free_dma_pool,
+		    GFP_KERNEL,
+		    &ioc->reply_post[i].reply_post_free_dma);
+		if (!ioc->reply_post[i].reply_post_free) {
+			pr_err(MPT3SAS_FMT
+			"reply_post_free pool: pci_pool_alloc failed\n",
+			ioc->name);
+			goto out;
+		}
+		memset(ioc->reply_post[i].reply_post_free, 0, sz);
+		dinitprintk(ioc, pr_info(MPT3SAS_FMT
+		    "reply post free pool (0x%p): depth(%d), "
+		    "element_size(%d), pool_size(%d kB)\n", ioc->name,
+		    ioc->reply_post[i].reply_post_free,
+		    ioc->reply_post_queue_depth, 8, sz/1024));
+		dinitprintk(ioc, pr_info(MPT3SAS_FMT
+		    "reply_post_free_dma = (0x%llx)\n", ioc->name,
+		    (unsigned long long)
+		    ioc->reply_post[i].reply_post_free_dma));
+		total_sz += sz;
+	} while (ioc->rdpq_array_enable && (++i < ioc->reply_queue_count));
+
+	if (ioc->dma_mask == 64) {
+		if (_base_change_consistent_dma_mask(ioc, ioc->pdev) != 0) {
+			pr_warn(MPT3SAS_FMT
+			    "no suitable consistent DMA mask for %s\n",
+			    ioc->name, pci_name(ioc->pdev));
+			goto out;
+		}
+	}
+
 	ioc->scsiio_depth = ioc->hba_queue_depth -
 	    ioc->hi_priority_depth - ioc->internal_depth;
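
A worked example of the sizing above, under assumed numbers (queue depth 254,
8 reply queues, and the 8-byte Mpi2DefaultReplyDescriptor_t that the
dinitprintk already reports):

	/* RDPQ enabled:  sz = 254 * 8 = 2032 bytes, one pool alloc per queue
	 * RDPQ disabled: sz = 254 * 8 * 8 = 16256 bytes, one contiguous alloc
	 */
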
 
@@ -2861,40 +3013,6 @@
 		ioc->name, (unsigned long long)ioc->reply_free_dma));
 	total_sz += sz;
 
-	/* reply post queue, 16 byte align */
-	reply_post_free_sz = ioc->reply_post_queue_depth *
-	    sizeof(Mpi2DefaultReplyDescriptor_t);
-	if (_base_is_controller_msix_enabled(ioc))
-		sz = reply_post_free_sz * ioc->reply_queue_count;
-	else
-		sz = reply_post_free_sz;
-	ioc->reply_post_free_dma_pool = pci_pool_create("reply_post_free pool",
-	    ioc->pdev, sz, 16, 0);
-	if (!ioc->reply_post_free_dma_pool) {
-		pr_err(MPT3SAS_FMT
-			"reply_post_free pool: pci_pool_create failed\n",
-			ioc->name);
-		goto out;
-	}
-	ioc->reply_post_free = pci_pool_alloc(ioc->reply_post_free_dma_pool ,
-	    GFP_KERNEL, &ioc->reply_post_free_dma);
-	if (!ioc->reply_post_free) {
-		pr_err(MPT3SAS_FMT
-			"reply_post_free pool: pci_pool_alloc failed\n",
-			ioc->name);
-		goto out;
-	}
-	memset(ioc->reply_post_free, 0, sz);
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT "reply post free pool" \
-	    "(0x%p): depth(%d), element_size(%d), pool_size(%d kB)\n",
-	    ioc->name, ioc->reply_post_free, ioc->reply_post_queue_depth, 8,
-	    sz/1024));
-	dinitprintk(ioc, pr_info(MPT3SAS_FMT
-		"reply_post_free_dma = (0x%llx)\n",
-		ioc->name, (unsigned long long)
-	    ioc->reply_post_free_dma));
-	total_sz += sz;
-
 	ioc->config_page_sz = 512;
 	ioc->config_page = pci_alloc_consistent(ioc->pdev,
 	    ioc->config_page_sz, &ioc->config_page_dma);
@@ -3577,6 +3695,9 @@
 	facts->IOCCapabilities = le32_to_cpu(mpi_reply.IOCCapabilities);
 	if ((facts->IOCCapabilities & MPI2_IOCFACTS_CAPABILITY_INTEGRATED_RAID))
 		ioc->ir_firmware = 1;
+	if ((facts->IOCCapabilities &
+	      MPI2_IOCFACTS_CAPABILITY_RDPQ_ARRAY_CAPABLE))
+		ioc->rdpq_array_capable = 1;
 	facts->FWVersion.Word = le32_to_cpu(mpi_reply.FWVersion.Word);
 	facts->IOCRequestFrameSize =
 	    le16_to_cpu(mpi_reply.IOCRequestFrameSize);
@@ -3613,9 +3734,12 @@
 {
 	Mpi2IOCInitRequest_t mpi_request;
 	Mpi2IOCInitReply_t mpi_reply;
-	int r;
+	int i, r = 0;
 	struct timeval current_time;
 	u16 ioc_status;
+	u32 reply_post_free_array_sz = 0;
+	Mpi2IOCInitRDPQArrayEntry *reply_post_free_array = NULL;
+	dma_addr_t reply_post_free_array_dma;
 
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -3644,9 +3768,31 @@
 	    cpu_to_le64((u64)ioc->request_dma);
 	mpi_request.ReplyFreeQueueAddress =
 	    cpu_to_le64((u64)ioc->reply_free_dma);
-	mpi_request.ReplyDescriptorPostQueueAddress =
-	    cpu_to_le64((u64)ioc->reply_post_free_dma);
 
+	if (ioc->rdpq_array_enable) {
+		reply_post_free_array_sz = ioc->reply_queue_count *
+		    sizeof(Mpi2IOCInitRDPQArrayEntry);
+		reply_post_free_array = pci_alloc_consistent(ioc->pdev,
+			reply_post_free_array_sz, &reply_post_free_array_dma);
+		if (!reply_post_free_array) {
+			pr_err(MPT3SAS_FMT
+			"reply_post_free_array: pci_alloc_consistent failed\n",
+			ioc->name);
+			r = -ENOMEM;
+			goto out;
+		}
+		memset(reply_post_free_array, 0, reply_post_free_array_sz);
+		for (i = 0; i < ioc->reply_queue_count; i++)
+			reply_post_free_array[i].RDPQBaseAddress =
+			    cpu_to_le64(
+				(u64)ioc->reply_post[i].reply_post_free_dma);
+		mpi_request.MsgFlags = MPI2_IOCINIT_MSGFLAG_RDPQ_ARRAY_MODE;
+		mpi_request.ReplyDescriptorPostQueueAddress =
+		    cpu_to_le64((u64)reply_post_free_array_dma);
+	} else {
+		mpi_request.ReplyDescriptorPostQueueAddress =
+		    cpu_to_le64((u64)ioc->reply_post[0].reply_post_free_dma);
+	}
 
 	/* This time stamp specifies number of milliseconds
 	 * since epoch ~ midnight January 1, 1970.
@@ -3674,7 +3820,7 @@
 	if (r != 0) {
 		pr_err(MPT3SAS_FMT "%s: handshake failed (r=%d)\n",
 		    ioc->name, __func__, r);
-		return r;
+		goto out;
 	}
 
 	ioc_status = le16_to_cpu(mpi_reply.IOCStatus) & MPI2_IOCSTATUS_MASK;
@@ -3684,7 +3830,12 @@
 		r = -EIO;
 	}
 
-	return 0;
+out:
+	if (reply_post_free_array)
+		pci_free_consistent(ioc->pdev, reply_post_free_array_sz,
+				    reply_post_free_array,
+				    reply_post_free_array_dma);
+	return r;
 }
 
 /**
@@ -4234,7 +4385,7 @@
 	struct _tr_list *delayed_tr, *delayed_tr_next;
 	struct adapter_reply_queue *reply_q;
 	long reply_post_free;
-	u32 reply_post_free_sz;
+	u32 reply_post_free_sz, index = 0;
 
 	dinitprintk(ioc, pr_info(MPT3SAS_FMT "%s\n", ioc->name,
 	    __func__));
@@ -4305,9 +4456,9 @@
 		_base_assign_reply_queues(ioc);
 
 	/* initialize Reply Post Free Queue */
-	reply_post_free = (long)ioc->reply_post_free;
 	reply_post_free_sz = ioc->reply_post_queue_depth *
 	    sizeof(Mpi2DefaultReplyDescriptor_t);
+	reply_post_free = (long)ioc->reply_post[index].reply_post_free;
 	list_for_each_entry(reply_q, &ioc->reply_queue_list, list) {
 		reply_q->reply_post_host_index = 0;
 		reply_q->reply_post_free = (Mpi2ReplyDescriptorsUnion_t *)
@@ -4317,7 +4468,15 @@
 			    cpu_to_le64(ULLONG_MAX);
 		if (!_base_is_controller_msix_enabled(ioc))
 			goto skip_init_reply_post_free_queue;
-		reply_post_free += reply_post_free_sz;
+		/*
+		 * If RDPQ is enabled, switch to the next allocation.
+		 * Otherwise advance within the contiguous region.
+		 */
+		if (ioc->rdpq_array_enable)
+			reply_post_free = (long)
+			    ioc->reply_post[++index].reply_post_free;
+		else
+			reply_post_free += reply_post_free_sz;
 	}
  skip_init_reply_post_free_queue:
 
@@ -4428,6 +4587,8 @@
 		goto out_free_resources;
 	}
 
+	ioc->rdpq_array_enable_assigned = 0;
+	ioc->dma_mask = 0;
 	r = mpt3sas_base_map_resources(ioc);
 	if (r)
 		goto out_free_resources;
@@ -4804,6 +4965,12 @@
 	r = _base_get_ioc_facts(ioc, CAN_SLEEP);
 	if (r)
 		goto out;
+
+	if (ioc->rdpq_array_enable && !ioc->rdpq_array_capable)
+		panic("%s: Issue occurred with flashing controller firmware. "
+		      "Please reboot the system and ensure that the correct"
+		      " firmware version is running\n", ioc->name);
+
 	r = _base_make_ioc_operational(ioc, sleep_flag);
 	if (!r)
 		_base_reset_handler(ioc, MPT3_IOC_DONE_RESET);
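
Read across the hunks, the ordering is deliberate: ioc->dma_mask starts at 0,
so the first pass through _base_config_dma_addressing() keeps the coherent
mask at 32 bits and the reply descriptor pools land in memory the IOC can
address during IOCInit; only once those pools exist is the coherent mask
widened to 64 bits. A condensed restatement of the flow, not new code:

	ioc->dma_mask = 0;		 /* coherent mask capped at 32 bit */
	mpt3sas_base_map_resources(ioc); /* streaming mask may still be 64 bit */
	/* _base_allocate_memory_pools() places the reply post rings below
	 * 4 GB and then, if ioc->dma_mask == 64, calls
	 * _base_change_consistent_dma_mask() to lift the coherent mask. */
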
diff --git a/drivers/scsi/mpt3sas/mpt3sas_base.h b/drivers/scsi/mpt3sas/mpt3sas_base.h
index 9b90a6f..40926aa 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_base.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_base.h
@@ -3,7 +3,7 @@
  * for access to MPT (Message Passing Technology) firmware.
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -70,8 +70,8 @@
 #define MPT3SAS_DRIVER_NAME		"mpt3sas"
 #define MPT3SAS_AUTHOR	"LSI Corporation <DL-MPTFusionLinux@lsi.com>"
 #define MPT3SAS_DESCRIPTION	"LSI MPT Fusion SAS 3.0 Device Driver"
-#define MPT3SAS_DRIVER_VERSION		"02.100.00.00"
-#define MPT3SAS_MAJOR_VERSION		2
+#define MPT3SAS_DRIVER_VERSION		"04.100.00.00"
+#define MPT3SAS_MAJOR_VERSION		4
 #define MPT3SAS_MINOR_VERSION		100
 #define MPT3SAS_BUILD_VERSION		0
 #define MPT3SAS_RELEASE_VERSION	00
@@ -130,7 +130,25 @@
 #define MPT_TARGET_FLAGS_DELETED	0x04
 #define MPT_TARGET_FASTPATH_IO		0x08
 
+/*
+ * Intel HBA branding
+ */
+#define MPT3SAS_INTEL_RMS3JC080_BRANDING       \
+	"Intel(R) Integrated RAID Module RMS3JC080"
+#define MPT3SAS_INTEL_RS3GC008_BRANDING       \
+	"Intel(R) RAID Controller RS3GC008"
+#define MPT3SAS_INTEL_RS3FC044_BRANDING       \
+	"Intel(R) RAID Controller RS3FC044"
+#define MPT3SAS_INTEL_RS3UC080_BRANDING       \
+	"Intel(R) RAID Controller RS3UC080"
 
+/*
+ * Intel HBA SSDIDs
+ */
+#define MPT3SAS_INTEL_RMS3JC080_SSDID	0x3521
+#define MPT3SAS_INTEL_RS3GC008_SSDID	0x3522
+#define MPT3SAS_INTEL_RS3FC044_SSDID	0x3523
+#define MPT3SAS_INTEL_RS3UC080_SSDID	0x3524
 
 /*
  * status bits for ioc->diag_buffer_status
@@ -272,8 +290,10 @@
  * @channel: target channel
  * @slot: slot number
  * @phy: phy identifier provided in sas device page 0
- * @fast_path: fast path feature enable bit
  * @responding: used in _scsih_sas_device_mark_responding
+ * @fast_path: fast path feature enable bit
+ * @pfa_led_on: flag for PFA LED status
+ *
  */
 struct _sas_device {
 	struct list_head list;
@@ -293,6 +313,7 @@
 	u8	phy;
 	u8	responding;
 	u8	fast_path;
+	u8	pfa_led_on;
 };
 
 /**
@@ -548,6 +569,11 @@
 	u16			MaxPostedCmdBuffers;
 };
 
+struct reply_post_struct {
+	Mpi2ReplyDescriptorsUnion_t	*reply_post_free;
+	dma_addr_t			reply_post_free_dma;
+};
+
 /**
  * enum mutex_type - task management mutex type
  * @TM_MUTEX_OFF: mutex is not required because the calling function is acquiring it
@@ -576,6 +602,7 @@
  * @ir_firmware: IR firmware present
  * @bars: bitmask of BAR's that must be configured
  * @mask_interrupts: ignore interrupt
+ * @dma_mask: used to set the consistent dma mask
  * @fault_reset_work_q_name: fw fault work queue
  * @fault_reset_work_q: ""
  * @fault_reset_work: ""
@@ -691,8 +718,11 @@
  * @reply_free_dma_pool:
  * @reply_free_host_index: tail index in pool to insert free replies
  * @reply_post_queue_depth: reply post queue depth
- * @reply_post_free: pool for reply post (64bit descriptor)
- * @reply_post_free_dma:
+ * @reply_post: array of reply_post_struct holding reply_post_free physical & virt addresses
+ * @rdpq_array_capable: FW supports multiple reply queue addresses in ioc_init
+ * @rdpq_array_enable: rdpq_array support is enabled in the driver
+ * @rdpq_array_enable_assigned: this ensures that rdpq_array_enable flag
+ *				is assigned only once
  * @reply_queue_count: number of reply queues
  * @reply_queue_list: linked list containing the reply queue info
  * @reply_post_host_index: head index in the pool where FW completes IO
@@ -714,6 +744,7 @@
 	u8		ir_firmware;
 	int		bars;
 	u8		mask_interrupts;
+	int		dma_mask;
 
 	/* fw fault handler */
 	char		fault_reset_work_q_name[20];
@@ -893,8 +924,10 @@
 
 	/* reply post queue */
 	u16		reply_post_queue_depth;
-	Mpi2ReplyDescriptorsUnion_t *reply_post_free;
-	dma_addr_t	reply_post_free_dma;
+	struct reply_post_struct *reply_post;
+	u8		rdpq_array_capable;
+	u8		rdpq_array_enable;
+	u8		rdpq_array_enable_assigned;
 	struct dma_pool *reply_post_free_dma_pool;
 	u8		reply_queue_count;
 	struct list_head reply_queue_list;
diff --git a/drivers/scsi/mpt3sas/mpt3sas_config.c b/drivers/scsi/mpt3sas/mpt3sas_config.c
index 936ec03..4472c2a 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_config.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_config.c
@@ -2,7 +2,7 @@
  * This module provides common API for accessing firmware configuration pages
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.c b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
index ba9cbe5..dca1487 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.c
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_ctl.h b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
index 53b0c48..5f3d7fd 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_ctl.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_ctl.h
@@ -3,7 +3,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_ctl.h
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_debug.h b/drivers/scsi/mpt3sas/mpt3sas_debug.h
index 545b22d..4778e7d 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_debug.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_debug.h
@@ -2,7 +2,7 @@
  * Logging Support for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_debug.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_scsih.c b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
index 135f12c..857276b 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_scsih.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_scsih.c
@@ -2,7 +2,7 @@
  * Scsi Host Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_scsih.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
@@ -159,7 +159,7 @@
 };
 
 #define MPT3SAS_PROCESS_TRIGGER_DIAG (0xFFFB)
-#define MPT3SAS_TURN_ON_FAULT_LED (0xFFFC)
+#define MPT3SAS_TURN_ON_PFA_LED (0xFFFC)
 #define MPT3SAS_PORT_ENABLE_COMPLETE (0xFFFD)
 #define MPT3SAS_ABRT_TASK_SET (0xFFFE)
 #define MPT3SAS_REMOVE_UNRESPONDING_DEVICES (0xFFFF)
@@ -3885,7 +3885,7 @@
 #endif
 
 /**
- * _scsih_turn_on_fault_led - illuminate Fault LED
+ * _scsih_turn_on_pfa_led - illuminate PFA LED
  * @ioc: per adapter object
  * @handle: device handle
  * Context: process
@@ -3893,10 +3893,15 @@
  * Return nothing.
  */
 static void
-_scsih_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 {
 	Mpi2SepReply_t mpi_reply;
 	Mpi2SepRequest_t mpi_request;
+	struct _sas_device *sas_device;
+
+	sas_device = _scsih_sas_device_find_by_handle(ioc, handle);
+	if (!sas_device)
+		return;
 
 	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
 	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
@@ -3911,6 +3916,7 @@
 		__FILE__, __LINE__, __func__);
 		return;
 	}
+	sas_device->pfa_led_on = 1;
 
 	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
 		dewtprintk(ioc, pr_info(MPT3SAS_FMT
@@ -3920,9 +3926,46 @@
 		return;
 	}
 }
-
 /**
- * _scsih_send_event_to_turn_on_fault_led - fire delayed event
+ * _scsih_turn_off_pfa_led - turn off PFA LED
+ * @ioc: per adapter object
+ * @sas_device: sas device whose PFA LED has to be turned off
+ * Context: process
+ *
+ * Return nothing.
+ */
+static void
+_scsih_turn_off_pfa_led(struct MPT3SAS_ADAPTER *ioc,
+	struct _sas_device *sas_device)
+{
+	Mpi2SepReply_t mpi_reply;
+	Mpi2SepRequest_t mpi_request;
+
+	memset(&mpi_request, 0, sizeof(Mpi2SepRequest_t));
+	mpi_request.Function = MPI2_FUNCTION_SCSI_ENCLOSURE_PROCESSOR;
+	mpi_request.Action = MPI2_SEP_REQ_ACTION_WRITE_STATUS;
+	mpi_request.SlotStatus = 0;
+	mpi_request.Slot = cpu_to_le16(sas_device->slot);
+	mpi_request.DevHandle = 0;
+	mpi_request.EnclosureHandle = cpu_to_le16(sas_device->enclosure_handle);
+	mpi_request.Flags = MPI2_SEP_REQ_FLAGS_ENCLOSURE_SLOT_ADDRESS;
+	if ((mpt3sas_base_scsi_enclosure_processor(ioc, &mpi_reply,
+		&mpi_request)) != 0) {
+		printk(MPT3SAS_FMT "failure at %s:%d/%s()!\n", ioc->name,
+		__FILE__, __LINE__, __func__);
+		return;
+	}
+
+	if (mpi_reply.IOCStatus || mpi_reply.IOCLogInfo) {
+		dewtprintk(ioc, printk(MPT3SAS_FMT
+		 "enclosure_processor: ioc_status (0x%04x), loginfo(0x%08x)\n",
+		 ioc->name, le16_to_cpu(mpi_reply.IOCStatus),
+		 le32_to_cpu(mpi_reply.IOCLogInfo)));
+		return;
+	}
+}
+/**
+ * _scsih_send_event_to_turn_on_pfa_led - fire delayed event
  * @ioc: per adapter object
  * @handle: device handle
  * Context: interrupt.
@@ -3930,14 +3973,14 @@
  * Return nothing.
  */
 static void
-_scsih_send_event_to_turn_on_fault_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
+_scsih_send_event_to_turn_on_pfa_led(struct MPT3SAS_ADAPTER *ioc, u16 handle)
 {
 	struct fw_event_work *fw_event;
 
 	fw_event = kzalloc(sizeof(struct fw_event_work), GFP_ATOMIC);
 	if (!fw_event)
 		return;
-	fw_event->event = MPT3SAS_TURN_ON_FAULT_LED;
+	fw_event->event = MPT3SAS_TURN_ON_PFA_LED;
 	fw_event->device_handle = handle;
 	fw_event->ioc = ioc;
 	_scsih_fw_event_add(ioc, fw_event);
@@ -3981,7 +4024,7 @@
 	spin_unlock_irqrestore(&ioc->sas_device_lock, flags);
 
 	if (ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM)
-		_scsih_send_event_to_turn_on_fault_led(ioc, handle);
+		_scsih_send_event_to_turn_on_pfa_led(ioc, handle);
 
 	/* insert into event log */
 	sz = offsetof(Mpi2EventNotificationReply_t, EventData) +
@@ -4911,7 +4954,11 @@
 {
 	struct MPT3SAS_TARGET *sas_target_priv_data;
 
-
+	if ((ioc->pdev->subsystem_vendor == PCI_VENDOR_ID_IBM) &&
+	     (sas_device->pfa_led_on)) {
+		_scsih_turn_off_pfa_led(ioc, sas_device);
+		sas_device->pfa_led_on = 0;
+	}
 	dewtprintk(ioc, pr_info(MPT3SAS_FMT
 		"%s: enter: handle(0x%04x), sas_addr(0x%016llx)\n",
 		ioc->name, __func__,
@@ -7065,8 +7112,8 @@
 			"port enable: complete from worker thread\n",
 			ioc->name));
 		break;
-	case MPT3SAS_TURN_ON_FAULT_LED:
-		_scsih_turn_on_fault_led(ioc, fw_event->device_handle);
+	case MPT3SAS_TURN_ON_PFA_LED:
+		_scsih_turn_on_pfa_led(ioc, fw_event->device_handle);
 		break;
 	case MPI2_EVENT_SAS_TOPOLOGY_CHANGE_LIST:
 		_scsih_sas_topology_change_event(ioc, fw_event);
@@ -7734,6 +7781,7 @@
 {
 	struct MPT3SAS_ADAPTER *ioc;
 	struct Scsi_Host *shost;
+	int rv;
 
 	shost = scsi_host_alloc(&scsih_driver_template,
 	    sizeof(struct MPT3SAS_ADAPTER));
@@ -7826,6 +7874,7 @@
 	if (!ioc->firmware_event_thread) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
+		rv = -ENODEV;
 		goto out_thread_fail;
 	}
 
@@ -7833,12 +7882,13 @@
 	if ((mpt3sas_base_attach(ioc))) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
+		rv = -ENODEV;
 		goto out_attach_fail;
 	}
-	if ((scsi_add_host(shost, &pdev->dev))) {
+	rv = scsi_add_host(shost, &pdev->dev);
+	if (rv) {
 		pr_err(MPT3SAS_FMT "failure at %s:%d/%s()!\n",
 		    ioc->name, __FILE__, __LINE__, __func__);
-		list_del(&ioc->list);
 		goto out_add_shost_fail;
 	}
 
@@ -7851,7 +7901,7 @@
  out_thread_fail:
 	list_del(&ioc->list);
 	scsi_host_put(shost);
-	return -ENODEV;
+	return rv;
 }
 
 #ifdef CONFIG_PM
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index 65170cb..d4bafaa 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -2,7 +2,7 @@
  * SAS Transport Layer for MPT (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_transport.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
index f6533ab..8a2dd11 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
@@ -3,7 +3,7 @@
  * (Message Passing Technology) based controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_trigger_diag.c
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
index bb69392..f681db5 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
+++ b/drivers/scsi/mpt3sas/mpt3sas_trigger_diag.h
@@ -4,7 +4,7 @@
  * controllers
  *
  * This code is based on drivers/scsi/mpt3sas/mpt3sas_base.h
- * Copyright (C) 2012-2013  LSI Corporation
+ * Copyright (C) 2012-2014  LSI Corporation
  *  (mailto:DL-MPTFusionLinux@lsi.com)
  *
  * This program is free software; you can redistribute it and/or
diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c
index 53284eb..90abb03 100644
--- a/drivers/scsi/nsp32.c
+++ b/drivers/scsi/nsp32.c
@@ -915,7 +915,7 @@
 	int ret;
 
 	nsp32_dbg(NSP32_DEBUG_QUEUECOMMAND,
-		  "enter. target: 0x%x LUN: 0x%llu cmnd: 0x%x cmndlen: 0x%x "
+		  "enter. target: 0x%x LUN: 0x%llx cmnd: 0x%x cmndlen: 0x%x "
 		  "use_sg: 0x%x reqbuf: 0x%lx reqlen: 0x%x",
 		  SCpnt->device->id, SCpnt->device->lun, SCpnt->cmnd[0], SCpnt->cmd_len,
 		  scsi_sg_count(SCpnt), scsi_sglist(SCpnt), scsi_bufflen(SCpnt));
diff --git a/drivers/scsi/pm8001/pm8001_ctl.c b/drivers/scsi/pm8001/pm8001_ctl.c
index 7abbf28..be8269c 100644
--- a/drivers/scsi/pm8001/pm8001_ctl.c
+++ b/drivers/scsi/pm8001/pm8001_ctl.c
@@ -385,7 +385,6 @@
 	struct sas_ha_struct *sha = SHOST_TO_SAS_HA(shost);
 	struct pm8001_hba_info *pm8001_ha = sha->lldd_ha;
 	char *str = buf;
-	void *virt_addr;
 	int bios_index;
 	DECLARE_COMPLETION_ONSTACK(completion);
 	struct pm8001_ioctl_payload payload;
@@ -402,11 +401,10 @@
 		return -ENOMEM;
 	}
 	wait_for_completion(&completion);
-	virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
 	for (bios_index = BIOSOFFSET; bios_index < BIOS_OFFSET_LIMIT;
 		bios_index++)
 		str += sprintf(str, "%c",
-			*((u8 *)((u8 *)virt_addr+bios_index)));
+			*(payload.func_specific+bios_index));
 	kfree(payload.func_specific);
 	return str - buf;
 }
diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c
index dd12c6f..933f214 100644
--- a/drivers/scsi/pm8001/pm8001_hwi.c
+++ b/drivers/scsi/pm8001/pm8001_hwi.c
@@ -3132,6 +3132,7 @@
 void
 pm8001_mpi_get_nvmd_resp(struct pm8001_hba_info *pm8001_ha, void *piomb)
 {
+	struct fw_control_ex    *fw_control_context;
 	struct get_nvm_data_resp *pPayload =
 		(struct get_nvm_data_resp *)(piomb + 4);
 	u32 tag = le32_to_cpu(pPayload->tag);
@@ -3140,6 +3141,7 @@
 	u32 ir_tds_bn_dps_das_nvm =
 		le32_to_cpu(pPayload->ir_tda_bn_dps_das_nvm);
 	void *virt_addr = pm8001_ha->memoryMap.region[NVMD].virt_ptr;
+	fw_control_context = ccb->fw_control_context;
 
 	PM8001_MSG_DBG(pm8001_ha, pm8001_printk("Get nvm data complete!\n"));
 	if ((dlen_status & NVMD_STAT) != 0) {
@@ -3180,6 +3182,12 @@
 			pm8001_printk("Get NVMD success, IR=0, dataLen=%d\n",
 			(dlen_status & NVMD_LEN) >> 24));
 	}
+	/* Though fw_control_context is freed below, usrAddr still needs
+	 * to be updated first, as it holds the response for the requesting function
+	 */
+	memcpy(fw_control_context->usrAddr,
+		pm8001_ha->memoryMap.region[NVMD].virt_ptr,
+		fw_control_context->len);
 	kfree(ccb->fw_control_context);
 	ccb->task = NULL;
 	ccb->ccb_tag = 0xFFFFFFFF;
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 6f3275d..bcb64eb 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -4698,19 +4698,10 @@
 		for (i = 0; i < PMCRAID_NUM_MSIX_VECTORS; i++)
 			entries[i].entry = i;
 
-		rc = pci_enable_msix(pdev, entries, num_hrrq);
-		if (rc < 0)
+		num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
+		if (num_hrrq < 0)
 			goto pmcraid_isr_legacy;
 
-		/* Check how many MSIX vectors are allocated and register
-		 * msi-x handlers for each of them giving appropriate buffer
-		 */
-		if (rc > 0) {
-			num_hrrq = rc;
-			if (pci_enable_msix(pdev, entries, num_hrrq))
-				goto pmcraid_isr_legacy;
-		}
-
 		for (i = 0; i < num_hrrq; i++) {
 			pinstance->hrrq_vector[i].hrrq_id = i;
 			pinstance->hrrq_vector[i].drv_inst = pinstance;
@@ -4746,7 +4737,6 @@
 	pinstance->hrrq_vector[0].drv_inst = pinstance;
 	pinstance->hrrq_vector[0].vector = pdev->irq;
 	pinstance->num_hrrq = 1;
-	rc = 0;
 
 	rc = request_irq(pdev->irq, pmcraid_isr, IRQF_SHARED,
 			 PMCRAID_DRIVER_NAME, &pinstance->hrrq_vector[0]);
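
pci_enable_msix_range() folds the old two-step negotiation into one call: it
tries vector counts from maxvec down to minvec and returns the count actually
granted, or a negative errno. That is why the second pci_enable_msix() retry
with the reduced count could be dropped; the single check

	num_hrrq = pci_enable_msix_range(pdev, entries, 1, num_hrrq);
	if (num_hrrq < 0)
		goto pmcraid_isr_legacy;

covers both the full and the partial grant.
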
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 16fe519..82b92c4 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -484,7 +484,8 @@
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha)
-			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+			|| IS_CNA_CAPABLE(ha) || IS_QLA2031(ha)
+			|| IS_QLA27XX(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7065,
@@ -987,6 +988,8 @@
 			continue;
 		if (iter->is4GBp_only == 3 && !(IS_CNA_CAPABLE(vha->hw)))
 			continue;
+		if (iter->is4GBp_only == 0x27 && !IS_QLA27XX(vha->hw))
+			continue;
 
 		sysfs_remove_bin_file(&host->shost_gendev.kobj,
 		    iter->attr);
@@ -1014,7 +1017,7 @@
 	char fw_str[128];
 
 	return scnprintf(buf, PAGE_SIZE, "%s\n",
-	    ha->isp_ops->fw_version_str(vha, fw_str));
+	    ha->isp_ops->fw_version_str(vha, fw_str, sizeof(fw_str)));
 }
 
 static ssize_t
@@ -1440,7 +1443,7 @@
 {
 	scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 	int rval = QLA_FUNCTION_FAILED;
-	uint16_t state[5];
+	uint16_t state[6];
 	uint32_t pstate;
 
 	if (IS_QLAFX00(vha->hw)) {
@@ -1456,8 +1459,8 @@
 	if (rval != QLA_SUCCESS)
 		memset(state, -1, sizeof(state));
 
-	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
-	    state[1], state[2], state[3], state[4]);
+	return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n",
+	    state[0], state[1], state[2], state[3], state[4], state[5]);
 }
 
 static ssize_t
@@ -1924,7 +1927,8 @@
 {
 	scsi_qla_host_t *vha = shost_priv(shost);
 
-	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost));
+	qla2x00_get_sym_node_name(vha, fc_host_symbolic_name(shost),
+	    sizeof(fc_host_symbolic_name(shost)));
 }
 
 static void
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index 524f9eb..2e2bb6f 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1390,7 +1390,7 @@
 		    start == (ha->flt_region_fw * 4))
 			valid = 1;
 		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
-		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha))
+		    IS_CNA_CAPABLE(ha) || IS_QLA2031(ha) || IS_QLA27XX(ha))
 			valid = 1;
 		if (!valid) {
 			ql_log(ql_log_warn, vha, 0x7058,
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index c72ee97b..d77fe43 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,19 +11,15 @@
  * ----------------------------------------------------------------------
  * |             Level            |   Last Value Used  |     Holes	|
  * ----------------------------------------------------------------------
- * | Module Init and Probe        |       0x017d       | 0x004b,0x0141	|
- * |                              |                    | 0x0144,0x0146	|
+ * | Module Init and Probe        |       0x017d       | 0x0144,0x0146	|
  * |                              |                    | 0x015b-0x0160	|
  * |                              |                    | 0x016e-0x0170	|
- * | Mailbox commands             |       0x118d       | 0x1018-0x1019	|
- * |                              |                    | 0x10ca         |
- * |                              |                    | 0x1115-0x1116  |
- * |                              |                    | 0x111a-0x111b	|
- * |                              |                    | 0x1155-0x1158  |
- * | Device Discovery             |       0x2095       | 0x2020-0x2022, |
+ * | Mailbox commands             |       0x118d       | 0x1115-0x1116	|
+ * |                              |                    | 0x111a-0x111b  |
+ * | Device Discovery             |       0x2016       | 0x2020-0x2022, |
  * |                              |                    | 0x2011-0x2012, |
- * |                              |                    | 0x2016         |
- * | Queue Command and IO tracing |       0x3059       | 0x3006-0x300b  |
+ * |                              |                    | 0x2099-0x20a4  |
+ * | Queue Command and IO tracing |       0x3059       | 0x300b         |
  * |                              |                    | 0x3027-0x3028  |
  * |                              |                    | 0x303d-0x3041  |
  * |                              |                    | 0x302d,0x3033  |
@@ -31,10 +27,10 @@
  * |                              |                    | 0x303a		|
  * | DPC Thread                   |       0x4023       | 0x4002,0x4013  |
  * | Async Events                 |       0x5087       | 0x502b-0x502f  |
- * |                              |                    | 0x5047,0x5052  |
+ * |                              |                    | 0x5047		|
  * |                              |                    | 0x5084,0x5075	|
  * |                              |                    | 0x503d,0x5044  |
- * |                              |                    | 0x507b		|
+ * |                              |                    | 0x507b,0x505f	|
  * | Timer Routines               |       0x6012       |                |
  * | User Space Interactions      |       0x70e2       | 0x7018,0x702e  |
  * |				  |		       | 0x7020,0x7024  |
@@ -64,13 +60,15 @@
  * |                              |                    | 0xb13c-0xb140  |
  * |                              |                    | 0xb149		|
  * | MultiQ                       |       0xc00c       |		|
- * | Misc                         |       0xd212       | 0xd017-0xd019	|
- * |                              |                    | 0xd020		|
- * |                              |                    | 0xd030-0xd0ff	|
+ * | Misc                         |       0xd213       | 0xd011-0xd017	|
+ * |                              |                    | 0xd021,0xd024	|
+ * |                              |                    | 0xd025,0xd029	|
+ * |                              |                    | 0xd02a,0xd02e	|
+ * |                              |                    | 0xd031-0xd0ff	|
  * |                              |                    | 0xd101-0xd1fe	|
- * |                              |                    | 0xd213-0xd2fe	|
- * | Target Mode		  |	  0xe078       |		|
- * | Target Mode Management	  |	  0xf072       | 0xf002-0xf003	|
+ * |                              |                    | 0xd214-0xd2fe	|
+ * | Target Mode		  |	  0xe079       |		|
+ * | Target Mode Management	  |	  0xf072       | 0xf002		|
  * |                              |                    | 0xf046-0xf049  |
  * | Target Mode Task Management  |	  0x1000b      |		|
  * ----------------------------------------------------------------------
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index b643991..5f6b296 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -191,6 +191,11 @@
 					     * reset-recovery completion is
 					     * second
 					     */
+/* ISP2031: Values for laser on/off */
+#define PORT_0_2031	0x00201340
+#define PORT_1_2031	0x00201350
+#define LASER_ON_2031	0x01800100
+#define LASER_OFF_2031	0x01800180
 
 /*
  * The ISP2312 v2 chip cannot access the FLASH/GPIO registers via MMIO in an
@@ -261,6 +266,7 @@
 #define REQUEST_ENTRY_CNT_2100		128	/* Number of request entries. */
 #define REQUEST_ENTRY_CNT_2200		2048	/* Number of request entries. */
 #define REQUEST_ENTRY_CNT_24XX		2048	/* Number of request entries. */
+#define REQUEST_ENTRY_CNT_83XX		8192	/* Number of request entries. */
 #define RESPONSE_ENTRY_CNT_2100		64	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_2300		512	/* Number of response entries.*/
 #define RESPONSE_ENTRY_CNT_MQ		128	/* Number of response entries.*/
@@ -803,6 +809,7 @@
 #define MBA_FW_RESTART_CMPLT	0x8060	/* Firmware restart complete */
 #define MBA_INIT_REQUIRED	0x8061	/* Initialization required */
 #define MBA_SHUTDOWN_REQUESTED	0x8062	/* Shutdown Requested */
+#define MBA_DPORT_DIAGNOSTICS	0x8080	/* D-port Diagnostics */
 #define MBA_FW_INIT_FAILURE	0x8401	/* Firmware initialization failure */
 #define MBA_MIRROR_LUN_CHANGE	0x8402	/* Mirror LUN State Change
 					   Notification */
@@ -948,6 +955,7 @@
 #define MBC_WRITE_SFP			0x30	/* Write SFP Data. */
 #define MBC_READ_SFP			0x31	/* Read SFP Data. */
 #define MBC_SET_TIMEOUT_PARAMS		0x32	/* Set FW timeouts. */
+#define MBC_DPORT_DIAGNOSTICS		0x47	/* D-Port Diagnostics */
 #define MBC_MID_INITIALIZE_FIRMWARE	0x48	/* MID Initialize firmware. */
 #define MBC_MID_GET_VP_DATABASE		0x49	/* MID Get VP Database. */
 #define MBC_MID_GET_VP_ENTRY		0x4a	/* MID Get VP Entry. */
@@ -2016,6 +2024,8 @@
 	unsigned long last_ramp_up;
 
 	uint16_t port_id;
+
+	unsigned long retry_delay_timestamp;
 } fc_port_t;
 
 #include "qla_mr.h"
@@ -2056,10 +2066,21 @@
 
 #define CT_REJECT_RESPONSE	0x8001
 #define CT_ACCEPT_RESPONSE	0x8002
-#define CT_REASON_INVALID_COMMAND_CODE	0x01
-#define CT_REASON_CANNOT_PERFORM	0x09
-#define CT_REASON_COMMAND_UNSUPPORTED	0x0b
-#define CT_EXPL_ALREADY_REGISTERED	0x10
+#define CT_REASON_INVALID_COMMAND_CODE		0x01
+#define CT_REASON_CANNOT_PERFORM		0x09
+#define CT_REASON_COMMAND_UNSUPPORTED		0x0b
+#define CT_EXPL_ALREADY_REGISTERED		0x10
+#define CT_EXPL_HBA_ATTR_NOT_REGISTERED		0x11
+#define CT_EXPL_MULTIPLE_HBA_ATTR		0x12
+#define CT_EXPL_INVALID_HBA_BLOCK_LENGTH	0x13
+#define CT_EXPL_MISSING_REQ_HBA_ATTR		0x14
+#define CT_EXPL_PORT_NOT_REGISTERED_		0x15
+#define CT_EXPL_MISSING_HBA_ID_PORT_LIST	0x16
+#define CT_EXPL_HBA_NOT_REGISTERED		0x17
+#define CT_EXPL_PORT_ATTR_NOT_REGISTERED	0x20
+#define CT_EXPL_PORT_NOT_REGISTERED		0x21
+#define CT_EXPL_MULTIPLE_PORT_ATTR		0x22
+#define CT_EXPL_INVALID_PORT_BLOCK_LENGTH	0x23
 
 #define NS_N_PORT_TYPE	0x01
 #define NS_NL_PORT_TYPE	0x02
@@ -2116,33 +2137,40 @@
  * HBA attribute types.
  */
 #define FDMI_HBA_ATTR_COUNT			9
-#define FDMI_HBA_NODE_NAME			1
-#define FDMI_HBA_MANUFACTURER			2
-#define FDMI_HBA_SERIAL_NUMBER			3
-#define FDMI_HBA_MODEL				4
-#define FDMI_HBA_MODEL_DESCRIPTION		5
-#define FDMI_HBA_HARDWARE_VERSION		6
-#define FDMI_HBA_DRIVER_VERSION			7
-#define FDMI_HBA_OPTION_ROM_VERSION		8
-#define FDMI_HBA_FIRMWARE_VERSION		9
+#define FDMIV2_HBA_ATTR_COUNT			17
+#define FDMI_HBA_NODE_NAME			0x1
+#define FDMI_HBA_MANUFACTURER			0x2
+#define FDMI_HBA_SERIAL_NUMBER			0x3
+#define FDMI_HBA_MODEL				0x4
+#define FDMI_HBA_MODEL_DESCRIPTION		0x5
+#define FDMI_HBA_HARDWARE_VERSION		0x6
+#define FDMI_HBA_DRIVER_VERSION			0x7
+#define FDMI_HBA_OPTION_ROM_VERSION		0x8
+#define FDMI_HBA_FIRMWARE_VERSION		0x9
 #define FDMI_HBA_OS_NAME_AND_VERSION		0xa
 #define FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH	0xb
+#define FDMI_HBA_NODE_SYMBOLIC_NAME		0xc
+#define FDMI_HBA_VENDOR_ID			0xd
+#define FDMI_HBA_NUM_PORTS			0xe
+#define FDMI_HBA_FABRIC_NAME			0xf
+#define FDMI_HBA_BOOT_BIOS_NAME			0x10
+#define FDMI_HBA_TYPE_VENDOR_IDENTIFIER		0xe0
 
 struct ct_fdmi_hba_attr {
 	uint16_t type;
 	uint16_t len;
 	union {
 		uint8_t node_name[WWN_SIZE];
-		uint8_t manufacturer[32];
-		uint8_t serial_num[8];
+		uint8_t manufacturer[64];
+		uint8_t serial_num[32];
 		uint8_t model[16];
 		uint8_t model_desc[80];
-		uint8_t hw_version[16];
+		uint8_t hw_version[32];
 		uint8_t driver_version[32];
 		uint8_t orom_version[16];
-		uint8_t fw_version[16];
+		uint8_t fw_version[32];
 		uint8_t os_version[128];
-		uint8_t max_ct_len[4];
+		uint32_t max_ct_len;
 	} a;
 };
 
@@ -2151,16 +2179,56 @@
 	struct ct_fdmi_hba_attr entry[FDMI_HBA_ATTR_COUNT];
 };
 
+struct ct_fdmiv2_hba_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t node_name[WWN_SIZE];
+		uint8_t manufacturer[32];
+		uint8_t serial_num[32];
+		uint8_t model[16];
+		uint8_t model_desc[80];
+		uint8_t hw_version[16];
+		uint8_t driver_version[32];
+		uint8_t orom_version[16];
+		uint8_t fw_version[32];
+		uint8_t os_version[128];
+		uint32_t max_ct_len;
+		uint8_t sym_name[256];
+		uint32_t vendor_id;
+		uint32_t num_ports;
+		uint8_t fabric_name[WWN_SIZE];
+		uint8_t bios_name[32];
+		uint8_t vendor_indentifer[8];
+	} a;
+};
+
+struct ct_fdmiv2_hba_attributes {
+	uint32_t count;
+	struct ct_fdmiv2_hba_attr entry[FDMIV2_HBA_ATTR_COUNT];
+};
+
 /*
  * Port attribute types.
  */
 #define FDMI_PORT_ATTR_COUNT		6
-#define FDMI_PORT_FC4_TYPES		1
-#define FDMI_PORT_SUPPORT_SPEED		2
-#define FDMI_PORT_CURRENT_SPEED		3
-#define FDMI_PORT_MAX_FRAME_SIZE	4
-#define FDMI_PORT_OS_DEVICE_NAME	5
-#define FDMI_PORT_HOST_NAME		6
+#define FDMIV2_PORT_ATTR_COUNT		16
+#define FDMI_PORT_FC4_TYPES		0x1
+#define FDMI_PORT_SUPPORT_SPEED		0x2
+#define FDMI_PORT_CURRENT_SPEED		0x3
+#define FDMI_PORT_MAX_FRAME_SIZE	0x4
+#define FDMI_PORT_OS_DEVICE_NAME	0x5
+#define FDMI_PORT_HOST_NAME		0x6
+#define FDMI_PORT_NODE_NAME		0x7
+#define FDMI_PORT_NAME			0x8
+#define FDMI_PORT_SYM_NAME		0x9
+#define FDMI_PORT_TYPE			0xa
+#define FDMI_PORT_SUPP_COS		0xb
+#define FDMI_PORT_FABRIC_NAME		0xc
+#define FDMI_PORT_FC4_TYPE		0xd
+#define FDMI_PORT_STATE			0x101
+#define FDMI_PORT_COUNT			0x102
+#define FDMI_PORT_ID			0x103
 
 #define FDMI_PORT_SPEED_1GB		0x1
 #define FDMI_PORT_SPEED_2GB		0x2
@@ -2171,6 +2239,41 @@
 #define FDMI_PORT_SPEED_32GB		0x40
 #define FDMI_PORT_SPEED_UNKNOWN		0x8000
 
+#define FC_CLASS_2	0x04
+#define FC_CLASS_3	0x08
+#define FC_CLASS_2_3	0x0C
+
+struct ct_fdmiv2_port_attr {
+	uint16_t type;
+	uint16_t len;
+	union {
+		uint8_t fc4_types[32];
+		uint32_t sup_speed;
+		uint32_t cur_speed;
+		uint32_t max_frame_size;
+		uint8_t os_dev_name[32];
+		uint8_t host_name[32];
+		uint8_t node_name[WWN_SIZE];
+		uint8_t port_name[WWN_SIZE];
+		uint8_t port_sym_name[128];
+		uint32_t port_type;
+		uint32_t port_supported_cos;
+		uint8_t fabric_name[WWN_SIZE];
+		uint8_t port_fc4_type[32];
+		uint32_t port_state;
+		uint32_t num_ports;
+		uint32_t port_id;
+	} a;
+};
+
+/*
+ * Port Attribute Block.
+ */
+struct ct_fdmiv2_port_attributes {
+	uint32_t count;
+	struct ct_fdmiv2_port_attr entry[FDMIV2_PORT_ATTR_COUNT];
+};
+
 struct ct_fdmi_port_attr {
 	uint16_t type;
 	uint16_t len;
@@ -2184,9 +2287,6 @@
 	} a;
 };
 
-/*
- * Port Attribute Block.
- */
 struct ct_fdmi_port_attributes {
 	uint32_t count;
 	struct ct_fdmi_port_attr entry[FDMI_PORT_ATTR_COUNT];
@@ -2286,6 +2386,13 @@
 
 		struct {
 			uint8_t hba_identifier[8];
+			uint32_t entry_count;
+			uint8_t port_name[8];
+			struct ct_fdmiv2_hba_attributes attrs;
+		} rhba2;
+
+		struct {
+			uint8_t hba_identifier[8];
 			struct ct_fdmi_hba_attributes attrs;
 		} rhat;
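
A minimal standalone sketch (C11; WWN_SIZE assumed to be 8 as in the
driver) mirroring the fixed prefix of the new rhba2 request, so the
"size = 2 * WWN_SIZE + 4 + 4" seed used later in qla2x00_fdmiv2_rhba()
can be checked at compile time:

#include <stdint.h>

#define WWN_SIZE 8	/* assumption: matches the driver's definition */

struct rhba2_prefix {	/* hypothetical mirror of the rhba2 layout */
	uint8_t  hba_identifier[WWN_SIZE];
	uint32_t entry_count;		/* registered-port entries */
	uint8_t  port_name[WWN_SIZE];
	uint32_t attr_count;		/* attribute-block count word */
};

_Static_assert(sizeof(struct rhba2_prefix) == 2 * WWN_SIZE + 4 + 4,
	"prefix size matches the driver's size seed");

int main(void) { return 0; }
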
 
@@ -2296,6 +2403,11 @@
 
 		struct {
 			uint8_t port_name[8];
+			struct ct_fdmiv2_port_attributes attrs;
+		} rpa2;
+
+		struct {
+			uint8_t port_name[8];
 		} dhba;
 
 		struct {
@@ -2522,7 +2634,7 @@
 	int (*load_risc) (struct scsi_qla_host *, uint32_t *);
 
 	char * (*pci_info_str) (struct scsi_qla_host *, char *);
-	char * (*fw_version_str) (struct scsi_qla_host *, char *);
+	char * (*fw_version_str)(struct scsi_qla_host *, char *, size_t);
 
 	irq_handler_t intr_handler;
 	void (*enable_intrs) (struct qla_hw_data *);
@@ -2664,6 +2776,9 @@
 	uint32_t control_requests;
 
 	uint64_t jiffies_at_last_reset;
+	uint32_t stat_max_pend_cmds;
+	uint32_t stat_max_qfull_cmds_alloc;
+	uint32_t stat_max_qfull_cmds_dropped;
 };
 
 struct bidi_statistics {
@@ -2786,8 +2901,22 @@
 	uint8_t saved_add_firmware_options[2];
 
 	uint8_t tgt_node_name[WWN_SIZE];
+
+	struct list_head q_full_list;
+	uint32_t num_pend_cmds;
+	uint32_t num_qfull_cmds_alloc;
+	uint32_t num_qfull_cmds_dropped;
+	spinlock_t q_full_lock;
+	uint32_t leak_exchg_thresh_hold;
 };
 
+#define MAX_QFULL_CMDS_ALLOC	8192
+#define Q_FULL_THRESH_HOLD_PERCENT 90
+#define Q_FULL_THRESH_HOLD(ha) \
+	((ha->fw_xcb_count/100) * Q_FULL_THRESH_HOLD_PERCENT)
+
+#define LEAK_EXCHG_THRESH_HOLD_PERCENT 75	/* 75 percent */
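
A quick arithmetic check on Q_FULL_THRESH_HOLD() above (sketch only;
2048 is an illustrative exchange count): the integer division happens
before the scaling, so the threshold lands a little below a true 90%.

#include <stdio.h>

#define Q_FULL_THRESH_HOLD_PERCENT 90
#define Q_FULL_THRESH_HOLD(xcb) \
	(((xcb) / 100) * Q_FULL_THRESH_HOLD_PERCENT)

int main(void)
{
	/* (2048 / 100) * 90 = 20 * 90 = 1800; exact 90% would be 1843 */
	printf("%d\n", Q_FULL_THRESH_HOLD(2048));
	return 0;
}
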
+
 /*
  * Qlogic host adapter specific data structure.
 */
@@ -2834,7 +2963,8 @@
 
 		uint32_t        mr_reset_hdlr_active:1;
 		uint32_t        mr_intr_valid:1;
-		/* 34 bits */
+		uint32_t	fawwpn_enabled:1;
+		/* 35 bits */
 	} flags;
 
 	/* This spinlock is used to protect "io transactions", you must
@@ -3032,6 +3162,7 @@
 #define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha))
 #define IS_TGT_MODE_CAPABLE(ha)	(ha->tgt.atio_q_length)
 #define IS_SHADOW_REG_CAPABLE(ha)  (IS_QLA27XX(ha))
+#define IS_DPORT_CAPABLE(ha)  (IS_QLA83XX(ha) || IS_QLA27XX(ha))
 
 	/* HBA serial number */
 	uint8_t		serial0;
@@ -3333,6 +3464,7 @@
 	struct work_struct board_disable;
 
 	struct mr_data_fx00 mr;
+	uint32_t chip_reset;
 
 	struct qlt_hw_data tgt;
 	int	allow_cna_fw_dump;
@@ -3402,6 +3534,11 @@
 #define FX00_CRITEMP_RECOVERY	25
 #define FX00_HOST_INFO_RESEND	26
 
+	unsigned long	pci_flags;
+#define PFLG_DISCONNECTED	0	/* PCI device removed */
+#define PFLG_DRIVER_REMOVING	1	/* PCI driver .remove */
+#define PFLG_DRIVER_PROBING	2	/* PCI driver .probe */
+
 	uint32_t	device_flags;
 #define SWITCH_FOUND		BIT_0
 #define DFLG_NO_CABLE		BIT_1
diff --git a/drivers/scsi/qla2xxx/qla_fw.h b/drivers/scsi/qla2xxx/qla_fw.h
index eb8f572..42bb357 100644
--- a/drivers/scsi/qla2xxx/qla_fw.h
+++ b/drivers/scsi/qla2xxx/qla_fw.h
@@ -91,7 +91,7 @@
 	/* Firmware Initialization Control Block. */
 	uint16_t version;
 	uint16_t reserved_1;
-	uint16_t frame_payload_size;
+	__le16 frame_payload_size;
 	uint16_t execution_throttle;
 	uint16_t exchange_count;
 	uint16_t hard_address;
@@ -317,8 +317,8 @@
 	 * BIT 3  = Reserved
 	 * BIT 4  = Enable Target Mode
 	 * BIT 5  = Disable Initiator Mode
-	 * BIT 6  = Reserved
-	 * BIT 7  = Reserved
+	 * BIT 6  = Acquire FA-WWN
+	 * BIT 7  = Enable D-port Diagnostics
 	 *
 	 * BIT 8  = Reserved
 	 * BIT 9  = Non Participating LIP
@@ -567,7 +567,7 @@
 #define SF_TRANSFERRED_DATA	BIT_11
 #define SF_FCP_RSP_DMA		BIT_0
 
-	uint16_t reserved_2;
+	uint16_t retry_delay;
 	uint16_t scsi_status;		/* SCSI status. */
 #define SS_CONFIRMATION_REQ		BIT_12
 
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index d646540..b1865a7 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -72,6 +72,7 @@
 extern void qla2x00_async_adisc_done(struct scsi_qla_host *, fc_port_t *,
     uint16_t *);
 extern void *qla2x00_alloc_iocbs(struct scsi_qla_host *, srb_t *);
+extern void *qla2x00_alloc_iocbs_ready(struct scsi_qla_host *, srb_t *);
 extern int qla24xx_update_fcport_fcp_prio(scsi_qla_host_t *, fc_port_t *);
 
 extern fc_port_t *
@@ -475,7 +476,8 @@
 extern int qla25xx_write_nvram_data(scsi_qla_host_t *, uint8_t *, uint32_t,
 				    uint32_t);
 extern int qla2x00_is_a_vp_did(scsi_qla_host_t *, uint32_t);
-bool qla2x00_check_reg_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *, uint32_t);
+bool qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *, uint16_t);
 
 extern int qla2x00_beacon_on(struct scsi_qla_host *);
 extern int qla2x00_beacon_off(struct scsi_qla_host *);
@@ -561,7 +563,7 @@
 extern int qla2x00_fdmi_register(scsi_qla_host_t *);
 extern int qla2x00_gfpn_id(scsi_qla_host_t *, sw_info_t *);
 extern int qla2x00_gpsc(scsi_qla_host_t *, sw_info_t *);
-extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
+extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *, size_t);
 
 /*
  * Global Function Prototypes in qla_attr.c source file.
@@ -613,7 +615,7 @@
 extern int qlafx00_chip_diag(scsi_qla_host_t *);
 extern void qlafx00_config_rings(struct scsi_qla_host *);
 extern char *qlafx00_pci_info_str(struct scsi_qla_host *, char *);
-extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *);
+extern char *qlafx00_fw_version_str(struct scsi_qla_host *, char *, size_t);
 extern irqreturn_t qlafx00_intr_handler(int, void *);
 extern void qlafx00_enable_intrs(struct qla_hw_data *);
 extern void qlafx00_disable_intrs(struct qla_hw_data *);
@@ -765,4 +767,5 @@
 extern int qla8044_abort_isp(scsi_qla_host_t *);
 extern int qla8044_check_fw_alive(struct scsi_qla_host *);
 
+extern void qlt_host_reset_handler(struct qla_hw_data *ha);
 #endif /* _QLA_GBL_H */
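
The prototype changes above share one pattern: every string-formatting
helper now receives the destination size, so the callee can bound its
writes with snprintf() rather than trusting sprintf().  A hedged
userspace sketch of the idea (names hypothetical, not the driver's):

#include <stdio.h>

static void fmt_fw_version(char *buf, size_t size, int major, int minor)
{
	/* snprintf() truncates at size and always NUL-terminates */
	snprintf(buf, size, "FW:v%d.%02d", major, minor);
}

int main(void)
{
	char small[8];

	fmt_fw_version(small, sizeof(small), 8, 7);
	puts(small);	/* "FW:v8.0" -- truncated safely, no overflow */
	return 0;
}
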
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index a0df3b1..dccc4dc 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -6,6 +6,7 @@
  */
 #include "qla_def.h"
 #include "qla_target.h"
+#include <linux/utsname.h>
 
 static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
 static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -143,10 +144,10 @@
 			if (ct_rsp->header.response !=
 			    __constant_cpu_to_be16(CT_ACCEPT_RESPONSE)) {
 				ql_dbg(ql_dbg_disc + ql_dbg_buffer, vha, 0x2077,
-				    "%s failed rejected request on port_id: "
-				    "%02x%02x%02x.\n", routine,
-				    vha->d_id.b.domain, vha->d_id.b.area,
-				    vha->d_id.b.al_pa);
+				    "%s failed rejected request on port_id: %02x%02x%02x Compeltion status 0x%x, response 0x%x\n",
+				    routine, vha->d_id.b.domain,
+				    vha->d_id.b.area, vha->d_id.b.al_pa,
+				    comp_status, ct_rsp->header.response);
 				ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha,
 				    0x2078, (uint8_t *)&ct_rsp->header,
 				    sizeof(struct ct_rsp_hdr));
@@ -622,15 +623,16 @@
 }
 
 void
-qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn)
+qla2x00_get_sym_node_name(scsi_qla_host_t *vha, uint8_t *snn, size_t size)
 {
 	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_QLAFX00(ha))
-		sprintf(snn, "%s FW:v%s DVR:v%s", ha->model_number,
+		snprintf(snn, size, "%s FW:v%s DVR:v%s", ha->model_number,
 		    ha->mr.fw_version, qla2x00_version_str);
 	else
-		sprintf(snn, "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
+		snprintf(snn, size,
+		    "%s FW:v%d.%02d.%02d DVR:v%s", ha->model_number,
 		    ha->fw_major_version, ha->fw_minor_version,
 		    ha->fw_subminor_version, qla2x00_version_str);
 }
@@ -670,7 +672,8 @@
 	memcpy(ct_req->req.rsnn_nn.node_name, vha->node_name, WWN_SIZE);
 
 	/* Prepare the Symbolic Node Name */
-	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name);
+	qla2x00_get_sym_node_name(vha, ct_req->req.rsnn_nn.sym_node_name,
+	    sizeof(ct_req->req.rsnn_nn.sym_node_name));
 
 	/* Calculate SNN length */
 	ct_req->req.rsnn_nn.name_len =
@@ -1263,7 +1266,7 @@
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
-	uint8_t *entries;
+	void *entries;
 	struct ct_fdmi_hba_attr *eiter;
 	struct qla_hw_data *ha = vha->hw;
 
@@ -1288,7 +1291,7 @@
 	entries = ct_req->req.rhba.hba_identifier;
 
 	/* Nodename. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_NODE_NAME);
 	eiter->len = __constant_cpu_to_be16(4 + WWN_SIZE);
 	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
@@ -1298,11 +1301,12 @@
 	    "NodeName = %8phN.\n", eiter->a.node_name);
 
 	/* Manufacturer. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MANUFACTURER);
 	alen = strlen(QLA2XXX_MANUFACTURER);
-	strncpy(eiter->a.manufacturer, QLA2XXX_MANUFACTURER, alen + 1);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+	    "%s", "QLogic Corporation");
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
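
A sanity check on the padding idiom introduced above (userspace sketch,
not driver code): FDMI attributes are TLVs whose payloads must end on a
4-byte boundary, and "alen += 4 - (alen & 3)" rounds up to the next
multiple of 4, adding a full word when the length is already aligned so
there is always room for the NUL terminator.

#include <stdio.h>

static unsigned int fdmi_pad(unsigned int alen)
{
	return alen + (4 - (alen & 3));	/* the driver's arithmetic */
}

int main(void)
{
	/* prints "8 8 8": 4 gains a whole word, 5 and 7 round up to 8 */
	printf("%u %u %u\n", fdmi_pad(4), fdmi_pad(5), fdmi_pad(7));
	return 0;
}
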
 
@@ -1310,12 +1314,19 @@
 	    "Manufacturer = %s.\n", eiter->a.manufacturer);
 
 	/* Serial number. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
-	sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
-	sprintf(eiter->a.serial_num, "%c%05d", 'A' + sn / 100000, sn % 100000);
+	if (IS_FWI2_CAPABLE(ha))
+		qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+		    sizeof(eiter->a.serial_num));
+	else {
+		sn = ((ha->serial0 & 0x1f) << 16) |
+			(ha->serial2 << 8) | ha->serial1;
+		snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+		    "%c%05d", 'A' + sn / 100000, sn % 100000);
+	}
 	alen = strlen(eiter->a.serial_num);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1323,11 +1334,12 @@
 	    "Serial no. = %s.\n", eiter->a.serial_num);
 
 	/* Model name. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL);
-	strcpy(eiter->a.model, ha->model_number);
+	snprintf(eiter->a.model, sizeof(eiter->a.model),
+	    "%s", ha->model_number);
 	alen = strlen(eiter->a.model);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1335,11 +1347,12 @@
 	    "Model Name = %s.\n", eiter->a.model);
 
 	/* Model description. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
-	strncpy(eiter->a.model_desc, ha->model_desc, 80);
+	snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+	    "%s", ha->model_desc);
 	alen = strlen(eiter->a.model_desc);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1347,11 +1360,23 @@
 	    "Model Desc = %s.\n", eiter->a.model_desc);
 
 	/* Hardware version. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
-	strcpy(eiter->a.hw_version, ha->adapter_id);
+	if (!IS_FWI2_CAPABLE(ha)) {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	} else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	}
 	alen = strlen(eiter->a.hw_version);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1359,11 +1384,12 @@
 	    "Hardware ver = %s.\n", eiter->a.hw_version);
 
 	/* Driver version. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
-	strcpy(eiter->a.driver_version, qla2x00_version_str);
+	snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+	    "%s", qla2x00_version_str);
 	alen = strlen(eiter->a.driver_version);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1371,11 +1397,12 @@
 	    "Driver ver = %s.\n", eiter->a.driver_version);
 
 	/* Option ROM version. */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
-	strcpy(eiter->a.orom_version, "0.00");
+	snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
 	alen = strlen(eiter->a.orom_version);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1383,11 +1410,12 @@
 	    "Optrom vers = %s.\n", eiter->a.orom_version);
 
 	/* Firmware version */
-	eiter = (struct ct_fdmi_hba_attr *) (entries + size);
+	eiter = entries + size;
 	eiter->type = __constant_cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
-	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+	    sizeof(eiter->a.fw_version));
 	alen = strlen(eiter->a.fw_version);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
@@ -1419,6 +1447,11 @@
 			ql_dbg(ql_dbg_disc, vha, 0x2034,
 			    "HBA already registered.\n");
 			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x20ad,
+			    "RHBA FDMI registration failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
 		}
 	} else {
 		ql_dbg(ql_dbg_disc, vha, 0x2035,
@@ -1429,6 +1462,534 @@
 }
 
 /**
+ * qla2x00_fdmi_rpa() - Register Port Attributes (RPA) with the management server
+ * @vha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size;
+	struct qla_hw_data *ha = vha->hw;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmi_port_attr *eiter;
+	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
+
+	/* Issue RPA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RPA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RPA_CMD,
+	    RPA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+	size = WWN_SIZE + 4;
+
+	/* Attributes */
+	ct_req->req.rpa.attrs.count = cpu_to_be32(FDMI_PORT_ATTR_COUNT);
+	entries = ct_req->req.rpa.port_name;
+
+	/* FC4 types. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+	eiter->len = cpu_to_be16(4 + 32);
+	eiter->a.fc4_types[2] = 0x01;
+	size += 4 + 32;
+
+	ql_dbg(ql_dbg_disc, vha, 0x2039,
+	    "FC4_TYPES=%02x %02x.\n",
+	    eiter->a.fc4_types[2],
+	    eiter->a.fc4_types[1]);
+
+	/* Supported speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	if (IS_CNA_CAPABLE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_10GB);
+	else if (IS_QLA27XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB);
+	else if (IS_QLA2031(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB);
+	else if (IS_QLA25XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA24XX_TYPE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA23XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_1GB);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203a,
+	    "Supported_Speed=%x.\n", eiter->a.sup_speed);
+
+	/* Current speed. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
+	switch (ha->link_data_rate) {
+	case PORT_SPEED_1GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_1GB);
+		break;
+	case PORT_SPEED_2GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_2GB);
+		break;
+	case PORT_SPEED_4GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_4GB);
+		break;
+	case PORT_SPEED_8GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_8GB);
+		break;
+	case PORT_SPEED_10GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_10GB);
+		break;
+	case PORT_SPEED_16GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_16GB);
+		break;
+	case PORT_SPEED_32GB:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		break;
+	default:
+		eiter->a.cur_speed =
+		    cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+		break;
+	}
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203b,
+	    "Current_Speed=%x.\n", eiter->a.cur_speed);
+
+	/* Max frame size. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+	eiter->len = cpu_to_be16(4 + 4);
+	eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
+	    le16_to_cpu(icb24->frame_payload_size) :
+	    le16_to_cpu(ha->init_cb->frame_payload_size);
+	eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203c,
+	    "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+
+	/* OS device name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+	snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+	alen = strlen(eiter->a.os_dev_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x204b,
+	    "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+
+	/* Hostname. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", p_sysid->nodename);
+	} else {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", fc_host_system_hostname(vha->host));
+	}
+	alen = strlen(eiter->a.host_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203d, "HostName=%s.\n", eiter->a.host_name);
+
+	/* Update MS request size. */
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x203e,
+	    "RPA portname  %016llx, size = %d.\n",
+	    wwn_to_u64(ct_req->req.rpa.port_name), size);
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x2040,
+		    "RPA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20cd,
+			    "RPA already registered.\n");
+			rval = QLA_ALREADY_REGISTERED;
+		}
+
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x2041,
+		    "RPA exiting normally.\n");
+	}
+
+	return rval;
+}
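
Both the v1 and v2 builders append attributes the same way: a cursor
into the request buffer plus a running size offset, advanced by 4
(type/len header) plus the padded payload after each attribute.  A
compact standalone model of the walk (the driver does the pointer
arithmetic on void *, a GCC extension; uint8_t * is used here to stay
in standard C):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct tlv {
	uint16_t type;
	uint16_t len;
	uint8_t  value[64];
};

int main(void)
{
	uint8_t buf[256];
	uint8_t *entries = buf;
	uint32_t size = 0;
	struct tlv *eiter;

	eiter = (struct tlv *)(entries + size);	/* "eiter = entries + size" */
	eiter->type = 1;
	memcpy(eiter->value, "QLGC", 4);
	eiter->len = 4 + 8;	/* header + payload padded from 4 up to 8 */
	size += 4 + 8;

	printf("next attribute lands at offset %u\n", size);
	return 0;
}
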
+
+/**
+ * qla2x00_fdmiv2_rhba() -
+ * @ha: HA context
+ *
+ * Returns 0 on success.
+ */
+static int
+qla2x00_fdmiv2_rhba(scsi_qla_host_t *vha)
+{
+	int rval, alen;
+	uint32_t size, sn;
+	ms_iocb_entry_t *ms_pkt;
+	struct ct_sns_req *ct_req;
+	struct ct_sns_rsp *ct_rsp;
+	void *entries;
+	struct ct_fdmiv2_hba_attr *eiter;
+	struct qla_hw_data *ha = vha->hw;
+	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
+
+	/* Issue RHBA */
+	/* Prepare common MS IOCB */
+	/*   Request size adjusted after CT preparation */
+	ms_pkt = ha->isp_ops->prep_ms_fdmi_iocb(vha, 0, RHBA_RSP_SIZE);
+
+	/* Prepare CT request */
+	ct_req = qla2x00_prep_ct_fdmi_req(ha->ct_sns, RHBA_CMD,
+	    RHBA_RSP_SIZE);
+	ct_rsp = &ha->ct_sns->p.rsp;
+
+	/* Prepare FDMI command arguments -- attribute block, attributes. */
+	memcpy(ct_req->req.rhba2.hba_identifier, vha->port_name, WWN_SIZE);
+	ct_req->req.rhba2.entry_count = cpu_to_be32(1);
+	memcpy(ct_req->req.rhba2.port_name, vha->port_name, WWN_SIZE);
+	size = 2 * WWN_SIZE + 4 + 4;
+
+	/* Attributes */
+	ct_req->req.rhba2.attrs.count = cpu_to_be32(FDMIV2_HBA_ATTR_COUNT);
+	entries = ct_req->req.rhba2.hba_identifier;
+
+	/* Nodename. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_NAME);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x207d,
+	    "NodeName = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+	/* Manufacturer. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MANUFACTURER);
+	snprintf(eiter->a.manufacturer, sizeof(eiter->a.manufacturer),
+	    "%s", "QLogic Corporation");
+	alen = strlen(eiter->a.manufacturer);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a5,
+	    "Manufacturer = %s.\n", eiter->a.manufacturer);
+
+	/* Serial number. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_SERIAL_NUMBER);
+	if (IS_FWI2_CAPABLE(ha))
+		qla2xxx_get_vpd_field(vha, "SN", eiter->a.serial_num,
+		    sizeof(eiter->a.serial_num));
+	else {
+		sn = ((ha->serial0 & 0x1f) << 16) |
+			(ha->serial2 << 8) | ha->serial1;
+		snprintf(eiter->a.serial_num, sizeof(eiter->a.serial_num),
+		    "%c%05d", 'A' + sn / 100000, sn % 100000);
+	}
+	alen = strlen(eiter->a.serial_num);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a6,
+	    "Serial no. = %s.\n", eiter->a.serial_num);
+
+	/* Model name. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL);
+	snprintf(eiter->a.model, sizeof(eiter->a.model),
+	    "%s", ha->model_number);
+	alen = strlen(eiter->a.model);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a7,
+	    "Model Name = %s.\n", eiter->a.model);
+
+	/* Model description. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MODEL_DESCRIPTION);
+	snprintf(eiter->a.model_desc, sizeof(eiter->a.model_desc),
+	    "%s", ha->model_desc);
+	alen = strlen(eiter->a.model_desc);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a8,
+	    "Model Desc = %s.\n", eiter->a.model_desc);
+
+	/* Hardware version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_HARDWARE_VERSION);
+	if (!IS_FWI2_CAPABLE(ha)) {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	} else if (qla2xxx_get_vpd_field(vha, "MN", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else if (qla2xxx_get_vpd_field(vha, "EC", eiter->a.hw_version,
+		    sizeof(eiter->a.hw_version))) {
+		;
+	} else {
+		snprintf(eiter->a.hw_version, sizeof(eiter->a.hw_version),
+		    "HW:%s", ha->adapter_id);
+	}
+	alen = strlen(eiter->a.hw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20a9,
+	    "Hardware ver = %s.\n", eiter->a.hw_version);
+
+	/* Driver version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_DRIVER_VERSION);
+	snprintf(eiter->a.driver_version, sizeof(eiter->a.driver_version),
+	    "%s", qla2x00_version_str);
+	alen = strlen(eiter->a.driver_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20aa,
+	    "Driver ver = %s.\n", eiter->a.driver_version);
+
+	/* Option ROM version. */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_OPTION_ROM_VERSION);
+	snprintf(eiter->a.orom_version, sizeof(eiter->a.orom_version),
+	    "%d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+	alen = strlen(eiter->a.orom_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ab,
+	    "Optrom version = %s.\n", eiter->a.orom_version);
+
+	/* Firmware version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_FIRMWARE_VERSION);
+	ha->isp_ops->fw_version_str(vha, eiter->a.fw_version,
+	    sizeof(eiter->a.fw_version));
+	alen = strlen(eiter->a.fw_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ac,
+	    "Firmware vers = %s.\n", eiter->a.fw_version);
+
+	/* OS Name and Version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_OS_NAME_AND_VERSION);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+		    "%s %s %s",
+		    p_sysid->sysname, p_sysid->release, p_sysid->version);
+	} else {
+		snprintf(eiter->a.os_version, sizeof(eiter->a.os_version),
+		    "%s %s", "Linux", fc_host_system_hostname(vha->host));
+	}
+	alen = strlen(eiter->a.os_version);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20ae,
+	    "OS Name and Version = %s.\n", eiter->a.os_version);
+
+	/* MAX CT Payload Length */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_MAXIMUM_CT_PAYLOAD_LENGTH);
+	eiter->a.max_ct_len = IS_FWI2_CAPABLE(ha) ?
+	    le16_to_cpu(icb24->frame_payload_size) :
+	    le16_to_cpu(ha->init_cb->frame_payload_size);
+	eiter->a.max_ct_len = cpu_to_be32(eiter->a.max_ct_len);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20af,
+	    "CT Payload Length = 0x%x.\n", eiter->a.max_ct_len);
+
+	/* Node Symbolic Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NODE_SYMBOLIC_NAME);
+	qla2x00_get_sym_node_name(vha, eiter->a.sym_name,
+	    sizeof(eiter->a.sym_name));
+	alen = strlen(eiter->a.sym_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b0,
+	    "Symbolic Name = %s.\n", eiter->a.sym_name);
+
+	/* Vendor Id */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_VENDOR_ID);
+	eiter->a.vendor_id = cpu_to_be32(0x1077);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b1,
+	    "Vendor Id = %x.\n", eiter->a.vendor_id);
+
+	/* Num Ports */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_NUM_PORTS);
+	eiter->a.num_ports = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b2,
+	    "Port Num = %x.\n", eiter->a.num_ports);
+
+	/* Fabric Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_FABRIC_NAME);
+	memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b3,
+	    "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+	/* BIOS Version */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_BOOT_BIOS_NAME);
+	snprintf(eiter->a.bios_name, sizeof(eiter->a.bios_name),
+	    "BIOS %d.%02d", ha->bios_revision[1], ha->bios_revision[0]);
+	alen = strlen(eiter->a.bios_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b4,
+	    "BIOS Name = %s\n", eiter->a.bios_name);
+
+	/* Vendor Identifier */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_HBA_TYPE_VENDOR_IDENTIFIER);
+	snprintf(eiter->a.vendor_identifier, sizeof(eiter->a.vendor_identifier),
+	    "%s", "QLGC");
+	alen = strlen(eiter->a.vendor_identifier);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b1,
+	    "Vendor Identifier = %s.\n", eiter->a.vendor_indentifer);
+
+	/* Update MS request size. */
+	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
+
+	ql_dbg(ql_dbg_disc, vha, 0x20b5,
+	    "RHBA identifier = %016llx.\n",
+	    wwn_to_u64(ct_req->req.rhba2.hba_identifier));
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20b6,
+	    entries, size);
+
+	/* Execute MS IOCB */
+	rval = qla2x00_issue_iocb(vha, ha->ms_iocb, ha->ms_iocb_dma,
+	    sizeof(ms_iocb_entry_t));
+	if (rval != QLA_SUCCESS) {
+		/*EMPTY*/
+		ql_dbg(ql_dbg_disc, vha, 0x20b7,
+		    "RHBA issue IOCB failed (%d).\n", rval);
+	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RHBA") !=
+	    QLA_SUCCESS) {
+		rval = QLA_FUNCTION_FAILED;
+
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20b8,
+			    "HBA already registered.\n");
+			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x2016,
+			    "RHBA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
+		}
+	} else {
+		ql_dbg(ql_dbg_disc, vha, 0x20b9,
+		    "RHBA FDMI V2 exiting normally.\n");
+	}
+
+	return rval;
+}
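
The hardware-version logic above is a fallback chain over VPD fields.
A hedged model (get_vpd_field() is a stub standing in for
qla2xxx_get_vpd_field(); the empty-statement branches mirror the
driver's style): try "MN", then "EC", then synthesize a value.

#include <stdio.h>
#include <string.h>

static int get_vpd_field(const char *key, char *buf, size_t len)
{
	if (strcmp(key, "EC") == 0) {	/* pretend only "EC" is present */
		snprintf(buf, len, "EC:A12345");
		return 1;
	}
	return 0;
}

int main(void)
{
	char hw_version[32];

	if (get_vpd_field("MN", hw_version, sizeof(hw_version)))
		;	/* manufacturer number found */
	else if (get_vpd_field("EC", hw_version, sizeof(hw_version)))
		;	/* engineering change level found */
	else
		snprintf(hw_version, sizeof(hw_version), "HW:%s", "unknown");

	puts(hw_version);	/* prints "EC:A12345" with this stub */
	return 0;
}
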
+
+/**
  * qla2x00_fdmi_dhba() -
  * @ha: HA context
  *
@@ -1477,23 +2038,24 @@
 }
 
 /**
- * qla2x00_fdmi_rpa() -
+ * qla2x00_fdmiv2_rpa() - Register Port Attributes (RPA, FDMI v2) with the management server
  * @ha: HA context
  *
  * Returns 0 on success.
  */
 static int
-qla2x00_fdmi_rpa(scsi_qla_host_t *vha)
+qla2x00_fdmiv2_rpa(scsi_qla_host_t *vha)
 {
 	int rval, alen;
-	uint32_t size, max_frame_size;
+	uint32_t size;
 	struct qla_hw_data *ha = vha->hw;
 	ms_iocb_entry_t *ms_pkt;
 	struct ct_sns_req *ct_req;
 	struct ct_sns_rsp *ct_rsp;
-	uint8_t *entries;
-	struct ct_fdmi_port_attr *eiter;
+	void *entries;
+	struct ct_fdmiv2_port_attr *eiter;
 	struct init_cb_24xx *icb24 = (struct init_cb_24xx *)ha->init_cb;
+	struct new_utsname *p_sysid = NULL;
 
 	/* Issue RPA */
 	/* Prepare common MS IOCB */
@@ -1505,147 +2067,258 @@
 	ct_rsp = &ha->ct_sns->p.rsp;
 
 	/* Prepare FDMI command arguments -- attribute block, attributes. */
-	memcpy(ct_req->req.rpa.port_name, vha->port_name, WWN_SIZE);
+	memcpy(ct_req->req.rpa2.port_name, vha->port_name, WWN_SIZE);
 	size = WWN_SIZE + 4;
 
 	/* Attributes */
-	ct_req->req.rpa.attrs.count =
-	    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT - 1);
-	entries = ct_req->req.rpa.port_name;
+	ct_req->req.rpa2.attrs.count = cpu_to_be32(FDMIV2_PORT_ATTR_COUNT);
+	entries = ct_req->req.rpa2.port_name;
 
 	/* FC4 types. */
-	eiter = (struct ct_fdmi_port_attr *) (entries + size);
-	eiter->type = __constant_cpu_to_be16(FDMI_PORT_FC4_TYPES);
-	eiter->len = __constant_cpu_to_be16(4 + 32);
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPES);
+	eiter->len = cpu_to_be16(4 + 32);
 	eiter->a.fc4_types[2] = 0x01;
 	size += 4 + 32;
 
-	ql_dbg(ql_dbg_disc, vha, 0x2039,
+	ql_dbg(ql_dbg_disc, vha, 0x20ba,
 	    "FC4_TYPES=%02x %02x.\n",
 	    eiter->a.fc4_types[2],
 	    eiter->a.fc4_types[1]);
 
 	/* Supported speed. */
-	eiter = (struct ct_fdmi_port_attr *) (entries + size);
-	eiter->type = __constant_cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
-	eiter->len = __constant_cpu_to_be16(4 + 4);
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPPORT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
 	if (IS_CNA_CAPABLE(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(
+		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_10GB);
 	else if (IS_QLA27XX(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(
-		    FDMI_PORT_SPEED_32GB|FDMI_PORT_SPEED_16GB|
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_32GB|
+		    FDMI_PORT_SPEED_16GB|
 		    FDMI_PORT_SPEED_8GB);
-	else if (IS_QLA25XX(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(
-		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
-		    FDMI_PORT_SPEED_4GB|FDMI_PORT_SPEED_8GB);
-	else if (IS_QLA24XX_TYPE(ha))
-		eiter->a.sup_speed = __constant_cpu_to_be32(
-		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB|
+	else if (IS_QLA2031(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_16GB|
+		    FDMI_PORT_SPEED_8GB|
 		    FDMI_PORT_SPEED_4GB);
+	else if (IS_QLA25XX(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_8GB|
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
+	else if (IS_QLA24XX_TYPE(ha))
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_4GB|
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
 	else if (IS_QLA23XX(ha))
-		eiter->a.sup_speed =__constant_cpu_to_be32(
-		    FDMI_PORT_SPEED_1GB|FDMI_PORT_SPEED_2GB);
+		eiter->a.sup_speed = cpu_to_be32(
+		    FDMI_PORT_SPEED_2GB|
+		    FDMI_PORT_SPEED_1GB);
 	else
-		eiter->a.sup_speed = __constant_cpu_to_be32(
+		eiter->a.sup_speed = cpu_to_be32(
 		    FDMI_PORT_SPEED_1GB);
 	size += 4 + 4;
 
-	ql_dbg(ql_dbg_disc, vha, 0x203a,
-	    "Supported_Speed=%x.\n", eiter->a.sup_speed);
+	ql_dbg(ql_dbg_disc, vha, 0x20bb,
+	    "Supported Port Speed = %x.\n", eiter->a.sup_speed);
 
 	/* Current speed. */
-	eiter = (struct ct_fdmi_port_attr *) (entries + size);
-	eiter->type = __constant_cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
-	eiter->len = __constant_cpu_to_be16(4 + 4);
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_CURRENT_SPEED);
+	eiter->len = cpu_to_be16(4 + 4);
 	switch (ha->link_data_rate) {
 	case PORT_SPEED_1GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_1GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_1GB);
 		break;
 	case PORT_SPEED_2GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_2GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_2GB);
 		break;
 	case PORT_SPEED_4GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_4GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_4GB);
 		break;
 	case PORT_SPEED_8GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_8GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_8GB);
 		break;
 	case PORT_SPEED_10GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_10GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_10GB);
 		break;
 	case PORT_SPEED_16GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_16GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_16GB);
 		break;
 	case PORT_SPEED_32GB:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_32GB);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_32GB);
 		break;
 	default:
-		eiter->a.cur_speed =
-		    __constant_cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
+		eiter->a.cur_speed = cpu_to_be32(FDMI_PORT_SPEED_UNKNOWN);
 		break;
 	}
 	size += 4 + 4;
 
-	ql_dbg(ql_dbg_disc, vha, 0x203b,
-	    "Current_Speed=%x.\n", eiter->a.cur_speed);
+	ql_dbg(ql_dbg_disc, vha, 0x20bc,
+	    "Current_Speed = %x.\n", eiter->a.cur_speed);
 
 	/* Max frame size. */
-	eiter = (struct ct_fdmi_port_attr *) (entries + size);
-	eiter->type = __constant_cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
-	eiter->len = __constant_cpu_to_be16(4 + 4);
-	max_frame_size = IS_FWI2_CAPABLE(ha) ?
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_MAX_FRAME_SIZE);
+	eiter->len = cpu_to_be16(4 + 4);
+	eiter->a.max_frame_size = IS_FWI2_CAPABLE(ha) ?
 	    le16_to_cpu(icb24->frame_payload_size):
 	    le16_to_cpu(ha->init_cb->frame_payload_size);
-	eiter->a.max_frame_size = cpu_to_be32(max_frame_size);
+	eiter->a.max_frame_size = cpu_to_be32(eiter->a.max_frame_size);
 	size += 4 + 4;
 
-	ql_dbg(ql_dbg_disc, vha, 0x203c,
-	    "Max_Frame_Size=%x.\n", eiter->a.max_frame_size);
+	ql_dbg(ql_dbg_disc, vha, 0x20bc,
+	    "Max_Frame_Size = %x.\n", eiter->a.max_frame_size);
 
 	/* OS device name. */
-	eiter = (struct ct_fdmi_port_attr *) (entries + size);
-	eiter->type = __constant_cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_OS_DEVICE_NAME);
-	alen = strlen(QLA2XXX_DRIVER_NAME);
-	strncpy(eiter->a.os_dev_name, QLA2XXX_DRIVER_NAME, alen + 1);
-	alen += (alen & 3) ? (4 - (alen & 3)) : 4;
+	snprintf(eiter->a.os_dev_name, sizeof(eiter->a.os_dev_name),
+	    "%s:host%lu", QLA2XXX_DRIVER_NAME, vha->host_no);
+	alen = strlen(eiter->a.os_dev_name);
+	alen += 4 - (alen & 3);
 	eiter->len = cpu_to_be16(4 + alen);
 	size += 4 + alen;
 
-	ql_dbg(ql_dbg_disc, vha, 0x204b,
-	    "OS_Device_Name=%s.\n", eiter->a.os_dev_name);
+	ql_dbg(ql_dbg_disc, vha, 0x20be,
+	    "OS_Device_Name = %s.\n", eiter->a.os_dev_name);
 
 	/* Hostname. */
-	if (strlen(fc_host_system_hostname(vha->host))) {
-		ct_req->req.rpa.attrs.count =
-		    __constant_cpu_to_be32(FDMI_PORT_ATTR_COUNT);
-		eiter = (struct ct_fdmi_port_attr *) (entries + size);
-		eiter->type = __constant_cpu_to_be16(FDMI_PORT_HOST_NAME);
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_HOST_NAME);
+	p_sysid = utsname();
+	if (p_sysid) {
+		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
+		    "%s", p_sysid->nodename);
+	} else {
 		snprintf(eiter->a.host_name, sizeof(eiter->a.host_name),
 		    "%s", fc_host_system_hostname(vha->host));
-		alen = strlen(eiter->a.host_name);
-		alen += (alen & 3) ? (4 - (alen & 3)) : 4;
-		eiter->len = cpu_to_be16(4 + alen);
-		size += 4 + alen;
-
-		ql_dbg(ql_dbg_disc, vha, 0x203d,
-		    "HostName=%s.\n", eiter->a.host_name);
 	}
+	alen = strlen(eiter->a.host_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x203d,
+	    "HostName=%s.\n", eiter->a.host_name);
+
+	/* Node Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_NODE_NAME);
+	memcpy(eiter->a.node_name, vha->node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c0,
+	    "Node Name = %016llx.\n", wwn_to_u64(eiter->a.node_name));
+
+	/* Port Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_NAME);
+	memcpy(eiter->a.port_name, vha->port_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c1,
+	    "Port Name = %016llx.\n", wwn_to_u64(eiter->a.port_name));
+
+	/* Port Symbolic Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SYM_NAME);
+	qla2x00_get_sym_node_name(vha, eiter->a.port_sym_name,
+	    sizeof(eiter->a.port_sym_name));
+	alen = strlen(eiter->a.port_sym_name);
+	alen += 4 - (alen & 3);
+	eiter->len = cpu_to_be16(4 + alen);
+	size += 4 + alen;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c2,
+	    "port symbolic name = %s\n", eiter->a.port_sym_name);
+
+	/* Port Type */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_TYPE);
+	eiter->a.port_type = cpu_to_be32(NS_NX_PORT_TYPE);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c3,
+	    "Port Type = %x.\n", eiter->a.port_type);
+
+	/* Class of Service  */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_SUPP_COS);
+	eiter->a.port_supported_cos = cpu_to_be32(FC_CLASS_3);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c4,
+	    "Supported COS = %08x\n", eiter->a.port_supported_cos);
+
+	/* Port Fabric Name */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FABRIC_NAME);
+	memcpy(eiter->a.fabric_name, vha->fabric_node_name, WWN_SIZE);
+	eiter->len = cpu_to_be16(4 + WWN_SIZE);
+	size += 4 + WWN_SIZE;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c5,
+	    "Fabric Name = %016llx.\n", wwn_to_u64(eiter->a.fabric_name));
+
+	/* FC4_type */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_FC4_TYPE);
+	eiter->a.port_fc4_type[0] = 0;
+	eiter->a.port_fc4_type[1] = 0;
+	eiter->a.port_fc4_type[2] = 1;
+	eiter->a.port_fc4_type[3] = 0;
+	eiter->len = cpu_to_be16(4 + 32);
+	size += 4 + 32;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c6,
+	    "Port Active FC4 Type = %02x %02x.\n",
+	    eiter->a.port_fc4_type[2], eiter->a.port_fc4_type[1]);
+
+	/* Port State */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_STATE);
+	eiter->a.port_state = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c7,
+	    "Port State = %x.\n", eiter->a.port_state);
+
+	/* Number of Ports */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_COUNT);
+	eiter->a.num_ports = cpu_to_be32(1);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c8,
+	    "Number of ports = %x.\n", eiter->a.num_ports);
+
+	/* Port Id */
+	eiter = entries + size;
+	eiter->type = cpu_to_be16(FDMI_PORT_ID);
+	eiter->a.port_id = cpu_to_be32(vha->d_id.b24);
+	eiter->len = cpu_to_be16(4 + 4);
+	size += 4 + 4;
+
+	ql_dbg(ql_dbg_disc, vha, 0x20c8,
+	    "Port Id = %x.\n", eiter->a.port_id);
 
 	/* Update MS request size. */
 	qla2x00_update_ms_fdmi_iocb(vha, size + 16);
 
 	ql_dbg(ql_dbg_disc, vha, 0x203e,
 	    "RPA portname= %8phN size=%d.\n", ct_req->req.rpa.port_name, size);
-	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x2079,
+	ql_dump_buffer(ql_dbg_disc + ql_dbg_buffer, vha, 0x20ca,
 	    entries, size);
 
 	/* Execute MS IOCB */
@@ -1653,14 +2326,26 @@
 	    sizeof(ms_iocb_entry_t));
 	if (rval != QLA_SUCCESS) {
 		/*EMPTY*/
-		ql_dbg(ql_dbg_disc, vha, 0x2040,
-		    "RPA issue IOCB failed (%d).\n", rval);
+		ql_dbg(ql_dbg_disc, vha, 0x20cb,
+		    "RPA FDMI v2 issue IOCB failed (%d).\n", rval);
 	} else if (qla2x00_chk_ms_status(vha, ms_pkt, ct_rsp, "RPA") !=
 	    QLA_SUCCESS) {
 		rval = QLA_FUNCTION_FAILED;
+		if (ct_rsp->header.reason_code == CT_REASON_CANNOT_PERFORM &&
+		    ct_rsp->header.explanation_code ==
+		    CT_EXPL_ALREADY_REGISTERED) {
+			ql_dbg(ql_dbg_disc, vha, 0x20ce,
+			    "RPA FDMI v2 already registered\n");
+			rval = QLA_ALREADY_REGISTERED;
+		} else {
+			ql_dbg(ql_dbg_disc, vha, 0x2020,
+			    "RPA FDMI v2 failed, CT Reason code: 0x%x, CT Explanation 0x%x\n",
+			    ct_rsp->header.reason_code,
+			    ct_rsp->header.explanation_code);
+		}
 	} else {
-		ql_dbg(ql_dbg_disc, vha, 0x2041,
-		    "RPA exiting nornally.\n");
+		ql_dbg(ql_dbg_disc, vha, 0x20cc,
+		    "RPA FDMI V2 exiting normally.\n");
 	}
 
 	return rval;
@@ -1675,8 +2360,8 @@
 int
 qla2x00_fdmi_register(scsi_qla_host_t *vha)
 {
-	int rval;
-       struct qla_hw_data *ha = vha->hw;
+	int rval = QLA_FUNCTION_FAILED;
+	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_QLA2100(ha) || IS_QLA2200(ha) ||
 	    IS_QLAFX00(ha))
@@ -1686,6 +2371,26 @@
 	if (rval)
 		return rval;
 
+	rval = qla2x00_fdmiv2_rhba(vha);
+	if (rval) {
+		if (rval != QLA_ALREADY_REGISTERED)
+			goto try_fdmi;
+
+		rval = qla2x00_fdmi_dhba(vha);
+		if (rval)
+			goto try_fdmi;
+
+		rval = qla2x00_fdmiv2_rhba(vha);
+		if (rval)
+			goto try_fdmi;
+	}
+	rval = qla2x00_fdmiv2_rpa(vha);
+	if (rval)
+		goto try_fdmi;
+
+	goto out;
+
+try_fdmi:
 	rval = qla2x00_fdmi_rhba(vha);
 	if (rval) {
 		if (rval != QLA_ALREADY_REGISTERED)
@@ -1700,7 +2405,7 @@
 			return rval;
 	}
 	rval = qla2x00_fdmi_rpa(vha);
-
+out:
 	return rval;
 }
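
A userspace model of the registration flow qla2x00_fdmi_register() now
implements (stub return codes stand in for the QLA_* values; only the
control-flow shape is meant to match): prefer FDMI v2, recover from
"already registered" by deregistering and retrying once, and fall back
to the v1 path on any other failure.

#include <stdio.h>

enum { OK, FAILED, ALREADY };

static int calls;
static int rhba_v2(void) { return calls++ ? OK : ALREADY; }	/* stale once */
static int dhba(void)    { return OK; }
static int rpa_v2(void)  { return OK; }

int main(void)
{
	int rval = rhba_v2();

	if (rval == ALREADY) {
		/* drop the stale HBA registration, then try v2 again */
		if (dhba() != OK || rhba_v2() != OK) {
			puts("fall back to FDMI v1");
			return 1;
		}
	} else if (rval != OK) {
		puts("fall back to FDMI v1");
		return 1;
	}
	puts(rpa_v2() == OK ? "FDMI v2 registration complete"
			    : "fall back to FDMI v1");
	return 0;
}
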
 
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 46990f4..a4dde7e 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -1848,7 +1848,9 @@
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
 
-	if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
+	if (IS_QLA27XX(ha))
+		ha->flags.fac_supported = 1;
+	else if (rval == QLA_SUCCESS && IS_FAC_REQUIRED(ha)) {
 		uint32_t size;
 
 		rval = qla81xx_fac_get_sector_size(vha, &size);
@@ -2196,6 +2198,15 @@
 		mid_init_cb->options = __constant_cpu_to_le16(BIT_1);
 		mid_init_cb->init_cb.execution_throttle =
 		    cpu_to_le16(ha->fw_xcb_count);
+		/* D-Port Status */
+		if (IS_DPORT_CAPABLE(ha))
+			mid_init_cb->init_cb.firmware_options_1 |=
+			    cpu_to_le16(BIT_7);
+		/* Enable FA-WWPN */
+		ha->flags.fawwpn_enabled =
+		    (mid_init_cb->init_cb.firmware_options_1 & BIT_6) ? 1 : 0;
+		ql_dbg(ql_dbg_init, vha, 0x0141, "FA-WWPN Support: %s.\n",
+		    (ha->flags.fawwpn_enabled) ? "enabled" : "disabled");
 	}
 
 	rval = qla2x00_init_firmware(vha, ha->init_cb_size);
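
A bit-level restatement of the block above (sketch; the real code works
on the little-endian firmware_options_1 word): BIT_7 is set to request
D-port diagnostics on capable ISPs, and BIT_6 -- "Acquire FA-WWN" per
the firmware-options comment earlier in this patch -- is sampled from
the init control block to report FA-WWPN support.

#include <stdint.h>
#include <stdio.h>

#define BIT_6 (1u << 6)
#define BIT_7 (1u << 7)

int main(void)
{
	uint16_t fw_opts1 = BIT_6;	/* assume NVRAM enabled FA-WWN */

	fw_opts1 |= BIT_7;		/* enable D-port diagnostics */
	printf("FA-WWPN Support: %s.\n",
	    (fw_opts1 & BIT_6) ? "enabled" : "disabled");
	return 0;
}
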
@@ -2224,7 +2235,7 @@
 	unsigned long	wtime, mtime, cs84xx_time;
 	uint16_t	min_wait;	/* Minimum wait time if loop is down */
 	uint16_t	wait_time;	/* Wait time if loop is coming ready */
-	uint16_t	state[5];
+	uint16_t	state[6];
 	struct qla_hw_data *ha = vha->hw;
 
 	if (IS_QLAFX00(vha->hw))
@@ -2329,8 +2340,8 @@
 	} while (1);
 
 	ql_dbg(ql_dbg_taskm, vha, 0x803a,
-	    "fw_state=%x (%x, %x, %x, %x) " "curr time=%lx.\n", state[0],
-	    state[1], state[2], state[3], state[4], jiffies);
+	    "fw_state=%x (%x, %x, %x, %x %x) curr time=%lx.\n", state[0],
+	    state[1], state[2], state[3], state[4], state[5], jiffies);
 
 	if (rval && !(vha->device_flags & DFLG_NO_CABLE)) {
 		ql_log(ql_log_warn, vha, 0x803b,
@@ -2596,18 +2607,18 @@
 			nv->firmware_options[1] = BIT_7 | BIT_5;
 			nv->add_firmware_options[0] = BIT_5;
 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
-			nv->frame_payload_size = __constant_cpu_to_le16(2048);
+			nv->frame_payload_size = 2048;
 			nv->special_options[1] = BIT_7;
 		} else if (IS_QLA2200(ha)) {
 			nv->firmware_options[0] = BIT_2 | BIT_1;
 			nv->firmware_options[1] = BIT_7 | BIT_5;
 			nv->add_firmware_options[0] = BIT_5;
 			nv->add_firmware_options[1] = BIT_5 | BIT_4;
-			nv->frame_payload_size = __constant_cpu_to_le16(1024);
+			nv->frame_payload_size = 1024;
 		} else if (IS_QLA2100(ha)) {
 			nv->firmware_options[0] = BIT_3 | BIT_1;
 			nv->firmware_options[1] = BIT_5;
-			nv->frame_payload_size = __constant_cpu_to_le16(1024);
+			nv->frame_payload_size = 1024;
 		}
 
 		nv->max_iocb_allocation = __constant_cpu_to_le16(256);
@@ -2643,7 +2654,7 @@
 	 * are valid.
 	 */
 	if (ia64_platform_is("sn2")) {
-		nv->frame_payload_size = __constant_cpu_to_le16(2048);
+		nv->frame_payload_size = 2048;
 		if (IS_QLA23XX(ha))
 			nv->special_options[1] = BIT_7;
 	}
@@ -3192,7 +3203,7 @@
 qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
 	int rval;
-	uint16_t mb[4];
+	uint16_t mb[MAILBOX_REGISTER_COUNT];
 	struct qla_hw_data *ha = vha->hw;
 
 	if (!IS_IIDMA_CAPABLE(ha))
@@ -4564,6 +4575,10 @@
 		/* Requeue all commands in outstanding command list. */
 		qla2x00_abort_all_cmds(vha, DID_RESET << 16);
 	}
+
+	ha->chip_reset++;
+	/* memory barrier */
+	wmb();
 }
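
A sketch of how a reset generation counter like the new chip_reset
field is consumed (an assumption based on how generation counters are
typically compared, not a literal excerpt from this series): in-flight
work records the counter when created and is dropped if a reset
intervened; the wmb() orders the increment ahead of later stores.

#include <stdio.h>

static unsigned int chip_reset;		/* bumped on every ISP reset */

struct work { unsigned int reset_gen; };

int main(void)
{
	struct work w = { .reset_gen = chip_reset };

	chip_reset++;	/* the driver issues wmb() after this increment */

	puts(w.reset_gen == chip_reset ? "current: process"
				       : "stale: drop, do not touch hw");
	return 0;
}
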
 
 /*
@@ -4958,7 +4973,7 @@
 		memset(nv, 0, ha->nvram_size);
 		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
 		nv->version = __constant_cpu_to_le16(ICB_VERSION);
-		nv->frame_payload_size = __constant_cpu_to_le16(2048);
+		nv->frame_payload_size = 2048;
 		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 		nv->exchange_count = __constant_cpu_to_le16(0);
 		nv->hard_address = __constant_cpu_to_le16(124);
@@ -5225,7 +5240,7 @@
 				ql_log(ql_log_fatal, vha, 0x008f,
 				    "Failed to load segment %d of firmware.\n",
 				    fragment);
-				break;
+				return QLA_FUNCTION_FAILED;
 			}
 
 			faddr += dlen;
@@ -5528,7 +5543,7 @@
 				ql_log(ql_log_fatal, vha, 0x0098,
 				    "Failed to load segment %d of firmware.\n",
 				    fragment);
-				break;
+				return QLA_FUNCTION_FAILED;
 			}
 
 			fwcode += dlen;
@@ -5905,7 +5920,7 @@
 		memset(nv, 0, ha->nvram_size);
 		nv->nvram_version = __constant_cpu_to_le16(ICB_VERSION);
 		nv->version = __constant_cpu_to_le16(ICB_VERSION);
-		nv->frame_payload_size = __constant_cpu_to_le16(2048);
+		nv->frame_payload_size = 2048;
 		nv->execution_throttle = __constant_cpu_to_le16(0xFFFF);
 		nv->exchange_count = __constant_cpu_to_le16(0);
 		nv->port_name[0] = 0x21;
diff --git a/drivers/scsi/qla2xxx/qla_inline.h b/drivers/scsi/qla2xxx/qla_inline.h
index b3b1d6f..fee9eb7 100644
--- a/drivers/scsi/qla2xxx/qla_inline.h
+++ b/drivers/scsi/qla2xxx/qla_inline.h
@@ -279,3 +279,11 @@
 		complete(&ha->mbx_intr_comp);
 	}
 }
+
+static inline void
+qla2x00_set_retry_delay_timestamp(fc_port_t *fcport, uint16_t retry_delay)
+{
+	if (retry_delay)
+		fcport->retry_delay_timestamp = jiffies +
+		    (retry_delay * HZ / 10);
+}
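
A unit check for the helper above: the FCP_RSP retry-delay field counts
100 ms units, so the conversion to jiffies is units * HZ / 10 (the HZ
value below is only an assumption for illustration).

#include <stdio.h>

#define HZ 250	/* assumption: one common CONFIG_HZ setting */

int main(void)
{
	unsigned int retry_delay = 30;	/* 30 x 100 ms = 3 seconds */

	/* same conversion as qla2x00_set_retry_delay_timestamp() */
	printf("%u jiffies = %u ms\n", retry_delay * HZ / 10,
	    retry_delay * 100);
	return 0;
}
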
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 150529d..f0edb07 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1858,6 +1858,17 @@
 }
 
 /* Generic Control-SRB manipulation functions. */
+
+/* hardware_lock assumed to be held. */
+void *
+qla2x00_alloc_iocbs_ready(scsi_qla_host_t *vha, srb_t *sp)
+{
+	if (qla2x00_reset_active(vha))
+		return NULL;
+
+	return qla2x00_alloc_iocbs(vha, sp);
+}
+
 void *
 qla2x00_alloc_iocbs(scsi_qla_host_t *vha, srb_t *sp)
 {
@@ -1901,7 +1912,7 @@
 
 skip_cmd_array:
 	/* Check for room on request queue. */
-	if (req->cnt < req_cnt) {
+	if (req->cnt < req_cnt + 2) {
 		if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha))
 			cnt = RD_REG_DWORD(&reg->isp25mq.req_q_out);
 		else if (IS_P3P_TYPE(ha))
@@ -1920,7 +1931,7 @@
 			req->cnt = req->length -
 			    (req->ring_index - cnt);
 	}
-	if (req->cnt < req_cnt)
+	if (req->cnt < req_cnt + 2)
 		goto queuing_error;
 
 	/* Prep packet */
@@ -2648,7 +2659,7 @@
 	return QLA_FUNCTION_FAILED;
 }
 
-void
+static void
 qla24xx_abort_iocb(srb_t *sp, struct abort_entry_24xx *abt_iocb)
 {
 	struct srb_iocb *aio = &sp->u.iocb_cmd;
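
On the "req_cnt + 2" change in qla2x00_alloc_iocbs() above, a small
model of why the slack matters (a sketch, not the driver's ring code):
keeping two entries unused preserves a gap between producer and
consumer indices, so a completely full ring is never mistaken for a
completely empty one.

#include <stdio.h>

/* mirrors "if (req->cnt < req_cnt + 2) goto queuing_error;" */
static int has_room(unsigned int cnt, unsigned int req_cnt)
{
	return cnt >= req_cnt + 2;
}

int main(void)
{
	printf("%d\n", has_room(3, 2));	/* 0: three free, four required */
	printf("%d\n", has_room(4, 2));	/* 1: request fits with the gap */
	return 0;
}
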
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index 550a4a3..a04a1b1 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -56,16 +56,8 @@
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		hccr = RD_REG_WORD(&reg->hccr);
-		/* Check for PCI disconnection */
-		if (hccr == 0xffff) {
-			/*
-			 * Schedule this on the default system workqueue so that
-			 * all the adapter workqueues and the DPC thread can be
-			 * shutdown cleanly.
-			 */
-			schedule_work(&ha->board_disable);
+		if (qla2x00_check_reg16_for_disconnect(vha, hccr))
 			break;
-		}
 		if (hccr & HCCR_RISC_PAUSE) {
 			if (pci_channel_offline(ha->pdev))
 				break;
@@ -121,21 +113,31 @@
 }
 
 bool
-qla2x00_check_reg_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
+qla2x00_check_reg32_for_disconnect(scsi_qla_host_t *vha, uint32_t reg)
 {
 	/* Check for PCI disconnection */
 	if (reg == 0xffffffff) {
-		/*
-		 * Schedule this on the default system workqueue so that all the
-		 * adapter workqueues and the DPC thread can be shutdown
-		 * cleanly.
-		 */
-		schedule_work(&vha->hw->board_disable);
+		if (!test_and_set_bit(PFLG_DISCONNECTED, &vha->pci_flags) &&
+		    !test_bit(PFLG_DRIVER_REMOVING, &vha->pci_flags) &&
+		    !test_bit(PFLG_DRIVER_PROBING, &vha->pci_flags)) {
+			/*
+			 * Schedule this (only once) on the default system
+			 * workqueue so that all the adapter workqueues and the
+			 * DPC thread can be shutdown cleanly.
+			 */
+			schedule_work(&vha->hw->board_disable);
+		}
 		return true;
 	} else
 		return false;
 }
 
+bool
+qla2x00_check_reg16_for_disconnect(scsi_qla_host_t *vha, uint16_t reg)
+{
+	return qla2x00_check_reg32_for_disconnect(vha, 0xffff0000 | reg);
+}
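
Why the 16-bit wrapper just ORs in 0xffff0000 (quick userspace check):
a surprise-removed PCI device reads back all-ones, so a 16-bit register
value of 0xffff is widened to the 32-bit all-ones pattern the existing
test already recognizes.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t hccr = 0xffff;			/* what a dead device reads */
	uint32_t reg = 0xffff0000u | hccr;	/* the wrapper's widening */

	printf("%d\n", reg == 0xffffffffu);	/* 1: flagged as disconnect */
	return 0;
}
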
+
 /**
  * qla2300_intr_handler() - Process interrupts for the ISP23xx and ISP63xx.
  * @irq:
@@ -174,7 +176,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->u.isp2300.host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSR_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -573,8 +575,9 @@
 	struct device_reg_2xxx __iomem *reg = &ha->iobase->isp;
 	struct device_reg_24xx __iomem *reg24 = &ha->iobase->isp24;
 	struct device_reg_82xx __iomem *reg82 = &ha->iobase->isp82;
 	uint32_t	rscn_entry, host_pid;
 	unsigned long	flags;
+	fc_port_t	*fcport = NULL;
 
 	/* Setup to process RIO completion. */
 	handle_cnt = 0;
@@ -730,7 +733,7 @@
 		else
 			ha->link_data_rate = mb[1];
 
-		ql_dbg(ql_dbg_async, vha, 0x500a,
+		ql_log(ql_log_info, vha, 0x500a,
 		    "LOOP UP detected (%s Gbps).\n",
 		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
 
@@ -743,13 +746,23 @@
 			? RD_REG_WORD(&reg24->mailbox4) : 0;
 		mbx = (IS_P3P_TYPE(ha)) ? RD_REG_WORD(&reg82->mailbox_out[4])
 			: mbx;
-		ql_dbg(ql_dbg_async, vha, 0x500b,
+		ql_log(ql_log_info, vha, 0x500b,
 		    "LOOP DOWN detected (%x %x %x %x).\n",
 		    mb[1], mb[2], mb[3], mbx);
 
 		if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
 			atomic_set(&vha->loop_state, LOOP_DOWN);
 			atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
+			/*
+			 * In case of loop down, restore WWPN from
+			 * NVRAM in case of FA-WWPN capable ISP
+			 */
+			if (ha->flags.fawwpn_enabled) {
+				void *wwpn = ha->init_cb->port_name;
+
+				memcpy(vha->port_name, wwpn, WWN_SIZE);
+			}
+
 			vha->device_flags |= DFLG_NO_CABLE;
 			qla2x00_mark_all_devices_lost(vha, 1);
 		}
@@ -908,7 +921,8 @@
 		 * it.  Otherwise ignore it and Wait for RSCN to come in.
 		 */
 		atomic_set(&vha->loop_down_timer, 0);
-		if (mb[1] != 0xffff || (mb[2] != 0x6 && mb[2] != 0x4)) {
+		if (atomic_read(&vha->loop_state) != LOOP_DOWN &&
+		    atomic_read(&vha->loop_state) != LOOP_DEAD) {
 			ql_dbg(ql_dbg_async, vha, 0x5011,
 			    "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
 			    mb[1], mb[2], mb[3]);
@@ -920,9 +934,6 @@
 		ql_dbg(ql_dbg_async, vha, 0x5012,
 		    "Port database changed %04x %04x %04x.\n",
 		    mb[1], mb[2], mb[3]);
-		ql_log(ql_log_warn, vha, 0x505f,
-		    "Link is operational (%s Gbps).\n",
-		    qla2x00_get_link_speed_str(ha, ha->link_data_rate));
 
 		/*
 		 * Mark all devices as missing so we will login again.
@@ -969,6 +980,20 @@
 		if (qla2x00_is_a_vp_did(vha, rscn_entry))
 			break;
 
+		/*
+		 * Search for the rport related to this RSCN entry and mark it
+		 * as lost.
+		 */
+		list_for_each_entry(fcport, &vha->vp_fcports, list) {
+			if (atomic_read(&fcport->state) != FCS_ONLINE)
+				continue;
+			if (fcport->d_id.b24 == rscn_entry) {
+				qla2x00_mark_device_lost(vha, fcport, 0, 0);
+				break;
+			}
+		}
+
 		atomic_set(&vha->loop_down_timer, 0);
 		vha->flags.management_server_logged_in = 0;
 
@@ -1086,6 +1111,14 @@
 		qla83xx_handle_8200_aen(vha, mb);
 		break;
 
+	case MBA_DPORT_DIAGNOSTICS:
+		ql_dbg(ql_dbg_async, vha, 0x5052,
+		    "D-Port Diagnostics: %04x %04x=%s\n", mb[0], mb[1],
+		    mb[1] == 0 ? "start" :
+		    mb[1] == 1 ? "done (ok)" :
+		    mb[1] == 2 ? "done (error)" : "other");
+		break;
+
 	default:
 		ql_dbg(ql_dbg_async, vha, 0x5057,
 		    "Unknown AEN:%04x %04x %04x %04x\n",
@@ -1975,6 +2008,7 @@
 	int logit = 1;
 	int res = 0;
 	uint16_t state_flags = 0;
+	uint16_t retry_delay = 0;
 
 	sts = (sts_entry_t *) pkt;
 	sts24 = (struct sts_entry_24xx *) pkt;
@@ -2068,6 +2102,9 @@
 		host_to_fcp_swap(sts24->data, sizeof(sts24->data));
 		ox_id = le16_to_cpu(sts24->ox_id);
 		par_sense_len = sizeof(sts24->data);
+		/* Valid values of the retry delay timer are 0x1-0xffef */
+		if (sts24->retry_delay > 0 && sts24->retry_delay < 0xfff0)
+			retry_delay = sts24->retry_delay;
 	} else {
 		if (scsi_status & SS_SENSE_LEN_VALID)
 			sense_len = le16_to_cpu(sts->req_sense_length);
@@ -2102,6 +2139,14 @@
 		comp_status = CS_DATA_OVERRUN;
 
 	/*
+	 * Check retry_delay_timer value if we receive a busy or
+	 * queue full.
+	 */
+	if (lscsi_status == SAM_STAT_TASK_SET_FULL ||
+	    lscsi_status == SAM_STAT_BUSY)
+		qla2x00_set_retry_delay_timestamp(fcport, retry_delay);
+
+	/*
 	 * Based on Host and scsi status generate status code for Linux
 	 */
 	switch (comp_status) {
@@ -2633,7 +2678,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; ) {
 		stat = RD_REG_DWORD(&reg->host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2723,7 +2768,7 @@
 	 * we process the response queue.
 	 */
 	stat = RD_REG_DWORD(&reg->host_status);
-	if (qla2x00_check_reg_for_disconnect(vha, stat))
+	if (qla2x00_check_reg32_for_disconnect(vha, stat))
 		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	if (!ha->flags.disable_msix_handshake) {
@@ -2763,7 +2808,7 @@
 		hccr = RD_REG_DWORD_RELAXED(&reg->hccr);
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 	}
-	if (qla2x00_check_reg_for_disconnect(vha, hccr))
+	if (qla2x00_check_reg32_for_disconnect(vha, hccr))
 		goto out;
 	queue_work_on((int) (rsp->id - 1), ha->wq, &rsp->q_work);
 
@@ -2798,7 +2843,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		stat = RD_REG_DWORD(&reg->host_status);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		if (stat & HSRX_RISC_PAUSED) {
 			if (unlikely(pci_channel_offline(ha->pdev)))
@@ -2923,27 +2968,22 @@
 	for (i = 0; i < ha->msix_count; i++)
 		entries[i].entry = i;
 
-	ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
-	if (ret) {
-		if (ret < MIN_MSIX_COUNT)
-			goto msix_failed;
-
+	ret = pci_enable_msix_range(ha->pdev,
+				    entries, MIN_MSIX_COUNT, ha->msix_count);
+	if (ret < 0) {
+		ql_log(ql_log_fatal, vha, 0x00c7,
+		    "MSI-X: Failed to enable support, "
+		    "giving   up -- %d/%d.\n",
+		    ha->msix_count, ret);
+		goto msix_out;
+	} else if (ret < ha->msix_count) {
 		ql_log(ql_log_warn, vha, 0x00c6,
 		    "MSI-X: Failed to enable support "
 		    "-- %d/%d\n Retry with %d vectors.\n",
 		    ha->msix_count, ret, ret);
-		ha->msix_count = ret;
-		ret = pci_enable_msix(ha->pdev, entries, ha->msix_count);
-		if (ret) {
-msix_failed:
-			ql_log(ql_log_fatal, vha, 0x00c7,
-			    "MSI-X: Failed to enable support, "
-			    "giving   up -- %d/%d.\n",
-			    ha->msix_count, ret);
-			goto msix_out;
-		}
-		ha->max_rsp_queues = ha->msix_count - 1;
 	}
+	ha->msix_count = ret;
+	ha->max_rsp_queues = ha->msix_count - 1;
 	ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
 				ha->msix_count, GFP_KERNEL);
 	if (!ha->msix_entries) {
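The rework above is the pci_enable_msix_range() conversion: a single call asks
for anything between MIN_MSIX_COUNT and ha->msix_count vectors and returns
either the count actually granted or a negative errno, replacing the old
enable-fail-retry sequence. A standalone sketch of that calling convention,
with a stub standing in for the PCI core:

#include <errno.h>
#include <stdio.h>

#define MIN_MSIX_COUNT	2

/* Stand-in for pci_enable_msix_range(): pretend 3 vectors exist. */
static int fake_enable_msix_range(int minvec, int maxvec)
{
	int avail = 3;

	if (avail < minvec)
		return -ENOSPC;		/* cannot meet the minimum */
	return avail < maxvec ? avail : maxvec;
}

int main(void)
{
	int want = 8;
	int ret = fake_enable_msix_range(MIN_MSIX_COUNT, want);

	if (ret < 0) {
		printf("MSI-X: failed to enable, giving up (%d)\n", ret);
		return 1;
	}
	if (ret < want)
		printf("MSI-X: granted only %d of %d vectors\n", ret, want);
	want = ret;			/* mirrors ha->msix_count = ret */
	printf("using %d vectors, %d response queues\n", want, want - 1);
	return 0;
}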
@@ -3103,10 +3143,11 @@
 	}
 
 clear_risc_ints:
+	if (IS_FWI2_CAPABLE(ha) || IS_QLAFX00(ha))
+		goto fail;
 
 	spin_lock_irq(&ha->hardware_lock);
-	if (!IS_FWI2_CAPABLE(ha))
-		WRT_REG_WORD(&reg->isp.semaphore, 0);
+	WRT_REG_WORD(&reg->isp.semaphore, 0);
 	spin_unlock_irq(&ha->hardware_lock);
 
 fail:
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index d9aafc0..72971daa 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -117,7 +117,7 @@
 	command = mcp->mb[0];
 	mboxes = mcp->out_mb;
 
-	ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1111,
+	ql_dbg(ql_dbg_mbx, vha, 0x1111,
 	    "Mailbox registers (OUT):\n");
 	for (cnt = 0; cnt < ha->mbx_count; cnt++) {
 		if (IS_QLA2200(ha) && cnt == 8)
@@ -373,7 +373,7 @@
 
 mbx_done:
 	if (rval) {
-		ql_log(ql_log_warn, base_vha, 0x1020,
+		ql_dbg(ql_dbg_disc, base_vha, 0x1020,
 		    "**** Failed mbx[0]=%x, mb[1]=%x, mb[2]=%x, mb[3]=%x, cmd=%x ****.\n",
 		    mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3], command);
 	} else {
@@ -1085,6 +1085,8 @@
 	mcp->in_mb = MBX_9|MBX_7|MBX_6|MBX_3|MBX_2|MBX_1|MBX_0;
 	if (IS_CNA_CAPABLE(vha->hw))
 		mcp->in_mb |= MBX_13|MBX_12|MBX_11|MBX_10;
+	if (IS_FWI2_CAPABLE(vha->hw))
+		mcp->in_mb |= MBX_19|MBX_18|MBX_17|MBX_16;
 	mcp->tov = MBX_TOV_SECONDS;
 	mcp->flags = 0;
 	rval = qla2x00_mailbox_command(vha, mcp);
@@ -1118,6 +1120,22 @@
 			vha->fcoe_vn_port_mac[1] = mcp->mb[13] >> 8;
 			vha->fcoe_vn_port_mac[0] = mcp->mb[13] & 0xff;
 		}
+		/* If FA-WWN supported */
+		if (mcp->mb[7] & BIT_14) {
+			vha->port_name[0] = MSB(mcp->mb[16]);
+			vha->port_name[1] = LSB(mcp->mb[16]);
+			vha->port_name[2] = MSB(mcp->mb[17]);
+			vha->port_name[3] = LSB(mcp->mb[17]);
+			vha->port_name[4] = MSB(mcp->mb[18]);
+			vha->port_name[5] = LSB(mcp->mb[18]);
+			vha->port_name[6] = MSB(mcp->mb[19]);
+			vha->port_name[7] = LSB(mcp->mb[19]);
+			fc_host_port_name(vha->host) =
+			    wwn_to_u64(vha->port_name);
+			ql_dbg(ql_dbg_mbx, vha, 0x10ca,
+			    "FA-WWN acquired %016llx\n",
+			    wwn_to_u64(vha->port_name));
+		}
 	}
 
 	return rval;
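The FA-WWN block pulls the eight port-name bytes out of mailboxes 16-19
MSB-first before folding them into the 64-bit WWN for the FC transport class.
A standalone model of that unpacking (example register values; MSB/LSB and
wwn_to_u64 re-derived locally):

#include <stdint.h>
#include <stdio.h>

#define MSB(w)	((uint8_t)((w) >> 8))
#define LSB(w)	((uint8_t)((w) & 0xff))

static uint64_t wwn_to_u64(const uint8_t wwn[8])
{
	uint64_t v = 0;
	int i;

	for (i = 0; i < 8; i++)		/* big-endian byte fold */
		v = (v << 8) | wwn[i];
	return v;
}

int main(void)
{
	uint16_t mb[4] = { 0x2100, 0x0024, 0xff12, 0x3456 };	/* mb[16..19] */
	uint8_t wwpn[8];
	int i;

	for (i = 0; i < 4; i++) {
		wwpn[2 * i] = MSB(mb[i]);
		wwpn[2 * i + 1] = LSB(mb[i]);
	}
	printf("FA-WWN acquired %016llx\n",
	    (unsigned long long)wwn_to_u64(wwpn));
	return 0;
}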
@@ -1546,7 +1564,7 @@
 	mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
 	mcp->out_mb = MBX_0;
 	if (IS_FWI2_CAPABLE(vha->hw))
-		mcp->in_mb = MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
+		mcp->in_mb = MBX_6|MBX_5|MBX_4|MBX_3|MBX_2|MBX_1|MBX_0;
 	else
 		mcp->in_mb = MBX_1|MBX_0;
 	mcp->tov = MBX_TOV_SECONDS;
@@ -1560,6 +1578,7 @@
 		states[2] = mcp->mb[3];
 		states[3] = mcp->mb[4];
 		states[4] = mcp->mb[5];
+		states[5] = mcp->mb[6];  /* DPORT status */
 	}
 
 	if (rval != QLA_SUCCESS) {
@@ -3328,8 +3347,24 @@
 		    rptid_entry->port_id[2], rptid_entry->port_id[1],
 		    rptid_entry->port_id[0]);
 
+		/* FA-WWN is only for physical port */
+		if (!vp_idx) {
+			void *wwpn = ha->init_cb->port_name;
+
+			if (!MSB(stat) &&
+			    (rptid_entry->vp_idx_map[1] & BIT_6))
+				wwpn = rptid_entry->reserved_4 + 8;
+			memcpy(vha->port_name, wwpn, WWN_SIZE);
+			fc_host_port_name(vha->host) =
+			    wwn_to_u64(vha->port_name);
+			ql_dbg(ql_dbg_mbx, vha, 0x1018,
+			    "FA-WWN portname %016llx (%x)\n",
+			    fc_host_port_name(vha->host), MSB(stat));
+		}
+
 		vp = vha;
-		if (vp_idx == 0 && (MSB(stat) != 1))
+		if (vp_idx == 0)
 			goto reg_needed;
 
 		if (MSB(stat) != 0 && MSB(stat) != 2) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index 8999824..5c2e031 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -702,6 +702,7 @@
 	req->req_q_in = &reg->isp25mq.req_q_in;
 	req->req_q_out = &reg->isp25mq.req_q_out;
 	req->max_q_depth = ha->req_q_map[0]->max_q_depth;
+	req->out_ptr = (void *)(req->ring + req->length);
 	mutex_unlock(&ha->vport_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc004,
 	    "ring_ptr=%p ring_index=%d, "
@@ -811,6 +812,7 @@
 	reg = ISP_QUE_REG(ha, que_id);
 	rsp->rsp_q_in = &reg->isp25mq.rsp_q_in;
 	rsp->rsp_q_out = &reg->isp25mq.rsp_q_out;
+	rsp->in_ptr = (void *)(rsp->ring + rsp->length);
 	mutex_unlock(&ha->vport_lock);
 	ql_dbg(ql_dbg_multiq, base_vha, 0xc00b,
 	    "options=%x id=%d rsp_q_in=%p rsp_q_out=%p",
diff --git a/drivers/scsi/qla2xxx/qla_mr.c b/drivers/scsi/qla2xxx/qla_mr.c
index 4775baa..8086759 100644
--- a/drivers/scsi/qla2xxx/qla_mr.c
+++ b/drivers/scsi/qla2xxx/qla_mr.c
@@ -695,11 +695,11 @@
 }
 
 char *
-qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qlafx00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	sprintf(str, "%s", ha->mr.fw_version);
+	snprintf(str, size, "%s", ha->mr.fw_version);
 	return str;
 }
 
@@ -1551,7 +1551,10 @@
 			ha->mr.fw_reset_timer_tick =
 			    QLAFX00_MAX_RESET_INTERVAL;
 		}
-		ha->mr.old_aenmbx0_state = aenmbx0;
+		if (ha->mr.old_aenmbx0_state != aenmbx0) {
+			ha->mr.old_aenmbx0_state = aenmbx0;
+			ha->mr.fw_reset_timer_tick = QLAFX00_RESET_INTERVAL;
+		}
 		ha->mr.fw_reset_timer_tick--;
 	}
 	if (test_bit(FX00_CRITEMP_RECOVERY, &vha->dpc_flags)) {
@@ -1675,17 +1678,16 @@
 	fc_port_t	*fcport;
 
 	/* Check for matching device in remote port list. */
-	fcport = NULL;
 	list_for_each_entry(fcport, &vha->vp_fcports, list) {
 		if (fcport->tgt_id == tgt_id) {
 			ql_dbg(ql_dbg_async, vha, 0x5072,
 			    "Matching fcport(%p) found with TGT-ID: 0x%x "
 			    "and Remote TGT_ID: 0x%x\n",
 			    fcport, fcport->tgt_id, tgt_id);
-			break;
+			return fcport;
 		}
 	}
-	return fcport;
+	return NULL;
 }
 
 static void
@@ -2924,7 +2926,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 	for (iter = 50; iter--; clr_intr = 0) {
 		stat = QLAFX00_RD_INTR_REG(ha);
-		if (qla2x00_check_reg_for_disconnect(vha, stat))
+		if (qla2x00_check_reg32_for_disconnect(vha, stat))
 			break;
 		intr_stat = stat & QLAFX00_HST_INT_STS_BITS;
 		if (!intr_stat)
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index 58f3c91..54cb2ac 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -857,7 +857,7 @@
 			break;
 		if (timeout >= qla82xx_rom_lock_timeout) {
 			lock_owner = qla82xx_rd_32(ha, QLA82XX_ROM_LOCK_ID);
-			ql_log(ql_log_warn, vha, 0xb157,
+			ql_dbg(ql_dbg_p3p, vha, 0xb157,
 			    "%s: Simultaneous flash access by following ports, active port = %d: accessing port = %d",
 			    __func__, ha->portnum, lock_owner);
 			return -1;
@@ -2123,7 +2123,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 	do {
 		host_int = RD_REG_DWORD(&reg->host_int);
-		if (qla2x00_check_reg_for_disconnect(vha, host_int))
+		if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 			break;
 		if (host_int) {
 			stat = RD_REG_DWORD(&reg->host_status);
@@ -2184,7 +2184,7 @@
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	vha = pci_get_drvdata(ha->pdev);
 	host_int = RD_REG_DWORD(&reg->host_int);
-	if (qla2x00_check_reg_for_disconnect(vha, host_int))
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	qla24xx_process_response_queue(vha, rsp);
 	WRT_REG_DWORD(&reg->host_int, 0);
@@ -2219,7 +2219,7 @@
 	vha = pci_get_drvdata(ha->pdev);
 
 	host_int = RD_REG_DWORD(&reg->host_int);
-	if (qla2x00_check_reg_for_disconnect(vha, host_int))
+	if (qla2x00_check_reg32_for_disconnect(vha, host_int))
 		goto out;
 	if (host_int) {
 		stat = RD_REG_DWORD(&reg->host_status);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index be9698d..dabd254 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -105,7 +105,7 @@
 		"based on total number of SG elements.");
 
 int ql2xfdmienable=1;
-module_param(ql2xfdmienable, int, S_IRUGO);
+module_param(ql2xfdmienable, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(ql2xfdmienable,
 		"Enables FDMI registrations. "
 		"0 - no FDMI. Default is 1 - perform FDMI.");
@@ -238,7 +238,9 @@
 
 static int qla2x00_change_queue_depth(struct scsi_device *, int, int);
 static int qla2x00_change_queue_type(struct scsi_device *, int);
+static void qla2x00_clear_drv_active(struct qla_hw_data *);
 static void qla2x00_free_device(scsi_qla_host_t *);
+static void qla83xx_disable_laser(scsi_qla_host_t *vha);
 
 struct scsi_host_template qla2xxx_driver_template = {
 	.module			= THIS_MODULE,
@@ -547,14 +549,13 @@
 }
 
 static char *
-qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla2x00_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 {
 	char un_str[10];
 	struct qla_hw_data *ha = vha->hw;
 
-	sprintf(str, "%d.%02d.%02d ", ha->fw_major_version,
-	    ha->fw_minor_version,
-	    ha->fw_subminor_version);
+	snprintf(str, size, "%d.%02d.%02d ", ha->fw_major_version,
+	    ha->fw_minor_version, ha->fw_subminor_version);
 
 	if (ha->fw_attributes & BIT_9) {
 		strcat(str, "FLX");
@@ -586,11 +587,11 @@
 }
 
 static char *
-qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str)
+qla24xx_fw_version_str(struct scsi_qla_host *vha, char *str, size_t size)
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	sprintf(str, "%d.%02d.%02d (%x)", ha->fw_major_version,
+	snprintf(str, size, "%d.%02d.%02d (%x)", ha->fw_major_version,
 	    ha->fw_minor_version, ha->fw_subminor_version, ha->fw_attributes);
 	return str;
 }
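These conversions thread the destination size through the fw_version_str()
callbacks so a long firmware string can no longer overrun the caller's buffer;
snprintf() truncates where sprintf() would scribble. A minimal analogue of the
new signature:

#include <stdio.h>

static char *fw_version_str(char *str, size_t size)
{
	/* snprintf never writes more than size bytes, truncating instead */
	snprintf(str, size, "%d.%02d.%02d (%x)", 8, 7, 0, 0x81);
	return str;
}

int main(void)
{
	char fw_str[30];

	puts(fw_version_str(fw_str, sizeof(fw_str)));
	return 0;
}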
@@ -730,6 +731,15 @@
 		goto qc24_target_busy;
 	}
 
+	/*
+	 * Return target busy if we've received a non-zero retry_delay_timer
+	 * in a FCP_RSP.
+	 */
+	if (time_after(jiffies, fcport->retry_delay_timestamp))
+		fcport->retry_delay_timestamp = 0;
+	else
+		goto qc24_target_busy;
+
 	sp = qla2x00_get_sp(vha, fcport, GFP_ATOMIC);
 	if (!sp)
 		goto qc24_host_busy;
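This gate pairs with the qla2x00_set_retry_delay_timestamp() call in the
status path: a BUSY or TASK SET FULL response stamps a deadline on the fcport,
and queuecommand answers target-busy until it passes. A userspace model of
both halves (one abstract tick per FCP 100 ms unit here; the driver works in
jiffies, which this sketch only assumes):

#include <stdint.h>
#include <stdio.h>

static uint64_t jiffies;		/* stand-in for the kernel tick */

struct fcport {
	uint64_t retry_delay_timestamp;
};

/* status path: remember how long the target asked us to back off */
static void set_retry_delay(struct fcport *f, uint16_t retry_delay)
{
	if (retry_delay)
		f->retry_delay_timestamp = jiffies + retry_delay;
}

/* queuecommand path: 1 = bounce with SCSI_MLQUEUE_TARGET_BUSY */
static int target_busy(struct fcport *f)
{
	if (jiffies > f->retry_delay_timestamp) {
		f->retry_delay_timestamp = 0;	/* window expired */
		return 0;
	}
	return 1;
}

int main(void)
{
	struct fcport f = { 0 };

	set_retry_delay(&f, 5);		/* FCP_RSP asked for 5 ticks */
	for (jiffies = 1; jiffies <= 7; jiffies++)
		printf("tick %llu: %s\n", (unsigned long long)jiffies,
		    target_busy(&f) ? "target busy" : "dispatch");
	return 0;
}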
@@ -860,8 +870,10 @@
 {
 	struct qla_hw_data *ha = vha->hw;
 
-	while ((!(vha->flags.online) || ha->dpc_active ||
-	    ha->flags.mbox_busy))
+	while (qla2x00_reset_active(vha) || ha->dpc_active ||
+	    ha->flags.mbox_busy ||
+	    test_bit(FX00_RESET_RECOVERY, &vha->dpc_flags) ||
+	    test_bit(FX00_TARGET_SCAN, &vha->dpc_flags))
 		msleep(1000);
 }
 
@@ -1351,6 +1363,8 @@
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
 
+	qlt_host_reset_handler(ha);
+
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	for (que = 0; que < ha->max_req_queues; que++) {
 		req = ha->req_q_map[que];
@@ -2384,6 +2398,8 @@
 	    "Memory allocated for ha=%p.\n", ha);
 	ha->pdev = pdev;
 	ha->tgt.enable_class_2 = ql2xenableclass2;
+	INIT_LIST_HEAD(&ha->tgt.q_full_list);
+	spin_lock_init(&ha->tgt.q_full_lock);
 
 	/* Clear our data area */
 	ha->bars = bars;
@@ -2527,7 +2543,7 @@
 		ha->portnum = PCI_FUNC(ha->pdev->devfn);
 		ha->max_fibre_devices = MAX_FIBRE_DEVICES_2400;
 		ha->mbx_count = MAILBOX_REGISTER_COUNT;
-		req_length = REQUEST_ENTRY_CNT_24XX;
+		req_length = REQUEST_ENTRY_CNT_83XX;
 		rsp_length = RESPONSE_ENTRY_CNT_2300;
 		ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
 		ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
@@ -2631,6 +2647,7 @@
 	}
 
 	pci_set_drvdata(pdev, base_vha);
+	set_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
 
 	host = base_vha->host;
 	base_vha->req = req;
@@ -2923,10 +2940,11 @@
 	    pdev->device, ha->isp_ops->pci_info_str(base_vha, pci_info),
 	    pci_name(pdev), ha->flags.enable_64bit_addressing ? '+' : '-',
 	    base_vha->host_no,
-	    ha->isp_ops->fw_version_str(base_vha, fw_str));
+	    ha->isp_ops->fw_version_str(base_vha, fw_str, sizeof(fw_str)));
 
 	qlt_add_target(ha, base_vha);
 
+	clear_bit(PFLG_DRIVER_PROBING, &base_vha->pci_flags);
 	return 0;
 
 probe_init_failed:
@@ -2954,16 +2972,8 @@
 	scsi_host_put(base_vha->host);
 
 probe_hw_failed:
-	if (IS_QLA82XX(ha)) {
-		qla82xx_idc_lock(ha);
-		qla82xx_clear_drv_active(ha);
-		qla82xx_idc_unlock(ha);
-	}
-	if (IS_QLA8044(ha)) {
-		qla8044_idc_lock(ha);
-		qla8044_clear_drv_active(ha);
-		qla8044_idc_unlock(ha);
-	}
+	qla2x00_clear_drv_active(ha);
+
 iospace_config_failed:
 	if (IS_P3P_TYPE(ha)) {
 		if (!ha->nx_pcibase)
@@ -3026,6 +3036,9 @@
 	qla2x00_free_irqs(vha);
 
 	qla2x00_free_fw_dump(ha);
+
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
 }
 
 /* Deletes all the virtual ports for a given ha */
@@ -3119,10 +3132,8 @@
 }
 
 static void
-qla2x00_clear_drv_active(scsi_qla_host_t *vha)
+qla2x00_clear_drv_active(struct qla_hw_data *ha)
 {
-	struct qla_hw_data *ha = vha->hw;
-
 	if (IS_QLA8044(ha)) {
 		qla8044_idc_lock(ha);
 		qla8044_clear_drv_active(ha);
@@ -3140,16 +3151,26 @@
 	scsi_qla_host_t *base_vha;
 	struct qla_hw_data  *ha;
 
-	/*
-	 * If the PCI device is disabled that means that probe failed and any
-	 * resources should be have cleaned up on probe exit.
-	 */
-	if (!atomic_read(&pdev->enable_cnt))
-		return;
-
 	base_vha = pci_get_drvdata(pdev);
 	ha = base_vha->hw;
 
+	/* Indicate device removal to prevent future board_disable and wait
+	 * until any pending board_disable has completed. */
+	set_bit(PFLG_DRIVER_REMOVING, &base_vha->pci_flags);
+	cancel_work_sync(&ha->board_disable);
+
+	/*
+	 * If the PCI device is disabled then there was a PCI-disconnect and
+	 * qla2x00_disable_board_on_pci_error has taken care of most of the
+	 * resources.
+	 */
+	if (!atomic_read(&pdev->enable_cnt)) {
+		scsi_host_put(base_vha->host);
+		kfree(ha);
+		pci_set_drvdata(pdev, NULL);
+		return;
+	}
+
 	qla2x00_wait_for_hba_ready(base_vha);
 
 	set_bit(UNLOADING, &base_vha->dpc_flags);
@@ -3173,6 +3194,10 @@
 
 	qla84xx_put_chip(base_vha);
 
+	/* Laser should be disabled only for ISP2031 */
+	if (IS_QLA2031(ha))
+		qla83xx_disable_laser(base_vha);
+
 	/* Disable timer */
 	if (base_vha->timer_active)
 		qla2x00_stop_timer(base_vha);
@@ -3191,9 +3216,9 @@
 
 	qla2x00_free_device(base_vha);
 
-	scsi_host_put(base_vha->host);
+	qla2x00_clear_drv_active(ha);
 
-	qla2x00_clear_drv_active(base_vha);
+	scsi_host_put(base_vha->host);
 
 	qla2x00_unmap_iobases(ha);
 
@@ -4808,18 +4833,15 @@
 	qla82xx_md_free(base_vha);
 	qla2x00_free_queues(ha);
 
-	scsi_host_put(base_vha->host);
-
 	qla2x00_unmap_iobases(ha);
 
 	pci_release_selected_regions(ha->pdev, ha->bars);
-	kfree(ha);
-	ha = NULL;
-
 	pci_disable_pcie_error_reporting(pdev);
 	pci_disable_device(pdev);
-	pci_set_drvdata(pdev, NULL);
 
+	/*
+	 * Let qla2x00_remove_one cleanup qla_hw_data on device removal.
+	 */
 }
 
 /**************************************************************************
@@ -5192,13 +5214,7 @@
 	 */
 	if (!pci_channel_offline(ha->pdev)) {
 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
-		if (w == 0xffff)
-			/*
-			 * Schedule this on the default system workqueue so that
-			 * all the adapter workqueues and the DPC thread can be
-			 * shutdown cleanly.
-			 */
-			schedule_work(&ha->board_disable);
+		qla2x00_check_reg16_for_disconnect(vha, w);
 	}
 
 	/* Make sure qla82xx_watchdog is run only for physical port */
@@ -5706,6 +5722,32 @@
 	ha->flags.eeh_busy = 0;
 }
 
+static void
+qla83xx_disable_laser(scsi_qla_host_t *vha)
+{
+	uint32_t reg, data, fn;
+	struct qla_hw_data *ha = vha->hw;
+	struct device_reg_24xx __iomem *isp_reg = &ha->iobase->isp24;
+
+	/* pci func #/port # */
+	ql_dbg(ql_dbg_init, vha, 0x004b,
+	    "Disabling Laser for hba: %p\n", vha);
+
+	fn = (RD_REG_DWORD(&isp_reg->ctrl_status) &
+		(BIT_15|BIT_14|BIT_13|BIT_12));
+
+	fn = (fn >> 12);
+
+	if (fn & 1)
+		reg = PORT_1_2031;
+	else
+		reg = PORT_0_2031;
+
+	data = LASER_OFF_2031;
+
+	qla83xx_wr_reg(vha, reg, data);
+}
+
 static const struct pci_error_handlers qla2xxx_err_handler = {
 	.error_detected = qla2xxx_pci_error_detected,
 	.mmio_enabled = qla2xxx_pci_mmio_enabled,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index bca173e..b656a05 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -2580,7 +2580,8 @@
 	uint32_t faddr, left, burst;
 	struct qla_hw_data *ha = vha->hw;
 
-	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA27XX(ha))
+	if (IS_QLA25XX(ha) || IS_QLA81XX(ha) || IS_QLA83XX(ha) ||
+	    IS_QLA27XX(ha))
 		goto try_fast;
 	if (offset & 0xfff)
 		goto slow_read;
@@ -3091,7 +3092,7 @@
 		ha->fw_revision[2] = dcode[2];
 		ha->fw_revision[3] = dcode[3];
 		ql_dbg(ql_dbg_init, vha, 0x0060,
-		    "Firmware revision %d.%d.%d.%d.\n",
+		    "Firmware revision %d.%d.%d (%x).\n",
 		    ha->fw_revision[0], ha->fw_revision[1],
 		    ha->fw_revision[2], ha->fw_revision[3]);
 	}
@@ -3162,7 +3163,7 @@
 	}
 
 	if (pos < end - len && *pos != 0x78)
-		return snprintf(str, size, "%.*s", len, pos + 3);
+		return scnprintf(str, size, "%.*s", len, pos + 3);
 
 	return 0;
 }
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index e632e14..829752c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -42,6 +42,11 @@
 #include "qla_def.h"
 #include "qla_target.h"
 
+static int ql2xtgt_tape_enable;
+module_param(ql2xtgt_tape_enable, int, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(ql2xtgt_tape_enable,
+		"Enables Sequence level error recovery (aka FC Tape). Default is 0 - no SLER. 1 - Enable SLER.");
+
 static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
 module_param(qlini_mode, charp, S_IRUGO);
 MODULE_PARM_DESC(qlini_mode,
@@ -54,6 +59,8 @@
 
 int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
 
+static int temp_sam_status = SAM_STAT_BUSY;
+
 /*
  * From scsi/fc/fc_fcp.h
  */
@@ -101,6 +108,10 @@
 	*cmd, struct atio_from_isp *atio, int ha_locked);
 static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
 	struct qla_tgt_srr_imm *imm, int ha_lock);
+static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
+	struct qla_tgt_cmd *cmd);
+static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status, int qfull);
 /*
  * Global Variables
  */
@@ -178,6 +189,27 @@
 	return NULL;
 }
 
+static inline void qlt_incr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+
+	vha->hw->tgt.num_pend_cmds++;
+	if (vha->hw->tgt.num_pend_cmds > vha->hw->qla_stats.stat_max_pend_cmds)
+		vha->hw->qla_stats.stat_max_pend_cmds =
+			vha->hw->tgt.num_pend_cmds;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
+static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vha->hw->tgt.q_full_lock, flags);
+	vha->hw->tgt.num_pend_cmds--;
+	spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
+}
+
 void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio)
 {
@@ -1008,6 +1040,8 @@
 	    "qla_target(%d): Sending 24xx Notify Ack %d\n",
 	    vha->vp_idx, nack->u.isp24.status);
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 }
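The wmb() calls inserted ahead of qla2x00_start_iocbs() throughout this file
order the IOCB payload stores before the doorbell write that hands the slot to
firmware. A C11 analogue of the producer side, with a release fence standing
in for wmb():

#include <stdatomic.h>
#include <stdio.h>

static int ring[4];			/* IOCB slots */
static _Atomic int doorbell;		/* "in" pointer firmware polls */

static void post_iocb(int slot, int payload)
{
	ring[slot] = payload;				/* payload stores... */
	atomic_thread_fence(memory_order_release);	/* ...ordered... */
	atomic_store_explicit(&doorbell, slot + 1,	/* ...before the ring */
	    memory_order_relaxed);
}

int main(void)
{
	post_iocb(0, 0xc0ffee);
	printf("doorbell=%d payload=%#x\n",
	    atomic_load(&doorbell), ring[0]);
	return 0;
}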
 
@@ -1031,7 +1065,7 @@
 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
 		return;
 
-	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
 	if (!resp) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe04a,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -1085,6 +1119,8 @@
 
 	vha->vha_tgt.qla_tgt->abts_resp_expected++;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 }
 
@@ -1102,7 +1138,7 @@
 	if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
 		return;
 
-	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
+	ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs_ready(vha, NULL);
 	if (ctio == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe04b,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -1130,6 +1166,8 @@
 		CTIO7_FLAGS_TERMINATE);
 	ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 
 	qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
@@ -1178,6 +1216,7 @@
 
 	mcmd->sess = sess;
 	memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
+	mcmd->reset_count = vha->hw->chip_reset;
 
 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, TMR_ABORT_TASK,
 	    abts->exchange_addr_to_abort);
@@ -1300,6 +1339,8 @@
 	ctio->u.status1.response_len = __constant_cpu_to_le16(8);
 	ctio->u.status1.sense_data[0] = resp_code;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(ha, ha->req);
 }
 
@@ -1321,6 +1362,21 @@
 	    mcmd, mcmd->fc_tm_rsp, mcmd->flags);
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
+
+	if (qla2x00_reset_active(vha) || mcmd->reset_count != ha->chip_reset) {
+		/*
+		 * Either a chip reset is active or this request was from
+		 * previous life, just abort the processing.
+		 */
+		ql_dbg(ql_dbg_async, vha, 0xe100,
+			"RESET-TMR active/old-count/new-count = %d/%d/%d.\n",
+			qla2x00_reset_active(vha), mcmd->reset_count,
+			ha->chip_reset);
+		ha->tgt.tgt_ops->free_mcmd(mcmd);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return;
+	}
+
 	if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
 		qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
 		    0, 0, 0, 0, 0, 0);
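The same staleness test recurs below in the response and data-transfer paths:
each command or TMR snapshots hw->chip_reset at setup, and anything completing
from a previous chip life is freed instead of being replayed into freshly
initialized firmware. The pattern, modeled standalone with illustrative types:

#include <stdio.h>

struct hw { unsigned int chip_reset; };
struct cmd { unsigned int reset_count; };

static int stale(const struct hw *hw, const struct cmd *c)
{
	/* generation mismatch: request is from a previous chip life */
	return c->reset_count != hw->chip_reset;
}

int main(void)
{
	struct hw hw = { .chip_reset = 1 };
	struct cmd c = { .reset_count = 1 };	/* snapshot at allocation */

	hw.chip_reset++;			/* ISP abort happened */
	printf("%s\n", stale(&hw, &c) ? "drop stale request" : "process");
	return 0;
}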
@@ -1397,8 +1453,6 @@
 		}
 	}
 
-	ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
-	    prm->seg_cnt, prm->req_cnt);
 	return 0;
 
 out_err:
@@ -1431,17 +1485,12 @@
 static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
 	uint32_t req_cnt)
 {
-	struct qla_hw_data *ha = vha->hw;
-	device_reg_t __iomem *reg = ha->iobase;
-	uint32_t cnt;
+	uint32_t cnt, cnt_in;
 
 	if (vha->req->cnt < (req_cnt + 2)) {
-		cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
+		cnt = (uint16_t)RD_REG_DWORD(vha->req->req_q_out);
+		cnt_in = (uint16_t)RD_REG_DWORD(vha->req->req_q_in);
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe00a,
-		    "Request ring circled: cnt=%d, vha->->ring_index=%d, "
-		    "vha->req->cnt=%d, req_cnt=%d\n", cnt,
-		    vha->req->ring_index, vha->req->cnt, req_cnt);
 		if  (vha->req->ring_index < cnt)
 			vha->req->cnt = cnt - vha->req->ring_index;
 		else
@@ -1450,11 +1499,10 @@
 	}
 
 	if (unlikely(vha->req->cnt < (req_cnt + 2))) {
-		ql_dbg(ql_dbg_tgt, vha, 0xe00b,
-		    "qla_target(%d): There is no room in the "
-		    "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
-		    "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
-		    vha->req->cnt, req_cnt);
+		ql_dbg(ql_dbg_io, vha, 0x305a,
+		    "qla_target(%d): There is no room in the request ring: vha->req->ring_index=%d, vha->req->cnt=%d, req_cnt=%d Req-out=%d Req-in=%d Req-Length=%d\n",
+		    vha->vp_idx, vha->req->ring_index,
+		    vha->req->cnt, req_cnt, cnt, cnt_in, vha->req->length);
 		return -EAGAIN;
 	}
 	vha->req->cnt -= req_cnt;
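The rewrite above recomputes free request-ring slots from the hardware
out-pointer only when the cached count runs low, and now reads the queue's own
out-pointer rather than the base queue's register. The wrap-around arithmetic
as a standalone example (ring length 8 is arbitrary):

#include <stdio.h>

#define RING_LEN 8

static int free_slots(int ring_index, int out)
{
	if (ring_index < out)
		return out - ring_index;	/* gap up to the consumer */
	return RING_LEN - (ring_index - out);	/* free space wraps around */
}

int main(void)
{
	printf("%d\n", free_slots(2, 6));	/* 4 slots ahead */
	printf("%d\n", free_slots(6, 2));	/* 4 slots via wrap-around */
	return 0;
}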
@@ -1491,7 +1539,7 @@
 		if (h > DEFAULT_OUTSTANDING_COMMANDS)
 			h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
 		if (h == ha->tgt.current_handle) {
-			ql_dbg(ql_dbg_tgt, vha, 0xe04e,
+			ql_dbg(ql_dbg_io, vha, 0x305b,
 			    "qla_target(%d): Ran out of "
 			    "empty cmd slots in ha %p\n", vha->vp_idx, ha);
 			h = QLA_TGT_NULL_HANDLE;
@@ -1548,9 +1596,6 @@
 	pkt->u.status0.ox_id = cpu_to_le16(temp);
 	pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe00c,
-	    "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
-	    vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
 	return 0;
 }
 
@@ -1608,14 +1653,6 @@
 			}
 			*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
 
-			ql_dbg(ql_dbg_tgt, vha, 0xe00d,
-			    "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
-			    (long long unsigned int)
-			    pci_dma_hi32(sg_dma_address(prm->sg)),
-			    (long long unsigned int)
-			    pci_dma_lo32(sg_dma_address(prm->sg)),
-			    (int)sg_dma_len(prm->sg));
-
 			prm->sg = sg_next(prm->sg);
 		}
 	}
@@ -1633,11 +1670,6 @@
 	int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
 	struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe00e,
-	    "iocb->scsi_status=%x, iocb->flags=%x\n",
-	    le16_to_cpu(pkt24->u.status0.scsi_status),
-	    le16_to_cpu(pkt24->u.status0.flags));
-
 	pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
 
 	/* Setup packet address segment pointer */
@@ -1655,7 +1687,6 @@
 	}
 
 	/* If scatter gather */
-	ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
 
 	/* Load command entry data segments */
 	for (cnt = 0;
@@ -1670,14 +1701,6 @@
 		}
 		*dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe010,
-		    "S/G Segment phys_addr=%llx:%llx, len=%d\n",
-		    (long long unsigned int)pci_dma_hi32(sg_dma_address(
-		    prm->sg)),
-		    (long long unsigned int)pci_dma_lo32(sg_dma_address(
-		    prm->sg)),
-		    (int)sg_dma_len(prm->sg));
-
 		prm->sg = sg_next(prm->sg);
 	}
 
@@ -1708,6 +1731,7 @@
 		    se_cmd, cmd->tag);
 
 		cmd->state = QLA_TGT_STATE_ABORTED;
+		cmd->cmd_flags |= BIT_6;
 
 		qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
 
@@ -1715,10 +1739,6 @@
 		return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u ox_id %04x\n",
-		vha->vp_idx, cmd->tag,
-		be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
 	prm->cmd = cmd;
 	prm->tgt = tgt;
 	prm->rq_result = scsi_status;
@@ -1729,15 +1749,10 @@
 	prm->req_cnt = 1;
 	prm->add_status_pkt = 0;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
-	    prm->rq_result, xmit_type);
-
 	/* Send marker if required */
 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
 		return -EFAULT;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
-
 	if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
 		if  (qlt_pci_map_calc_cnt(prm) != 0)
 			return -EAGAIN;
@@ -1747,7 +1762,7 @@
 
 	if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
 		prm->residual = se_cmd->residual_count;
-		ql_dbg(ql_dbg_tgt, vha, 0xe014,
+		ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x305c,
 		    "Residual underflow: %d (tag %d, "
 		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
 		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1755,7 +1770,7 @@
 		prm->rq_result |= SS_RESIDUAL_UNDER;
 	} else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
 		prm->residual = se_cmd->residual_count;
-		ql_dbg(ql_dbg_tgt, vha, 0xe015,
+		ql_dbg(ql_dbg_io, vha, 0x305d,
 		    "Residual overflow: %d (tag %d, "
 		    "op %x, bufflen %d, rq_result %x)\n", prm->residual,
 		    cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
@@ -1778,10 +1793,6 @@
 		}
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe016,
-	    "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
-	    prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
-
 	return 0;
 }
 
@@ -2310,6 +2321,21 @@
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
+	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+		/*
+		 * Either a chip reset is active or this request was from
+		 * previous life, just abort the processing.
+		 */
+		cmd->state = QLA_TGT_STATE_PROCESSED;
+		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		ql_dbg(ql_dbg_async, vha, 0xe101,
+			"RESET-RSP active/old-count/new-count = %d/%d/%d.\n",
+			qla2x00_reset_active(vha), cmd->reset_count,
+			ha->chip_reset);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return 0;
+	}
+
 	/* Does F/W have an IOCBs for this request */
 	res = qlt_check_reserve_free_req(vha, full_req_cnt);
 	if (unlikely(res))
@@ -2358,8 +2384,9 @@
 			struct ctio7_to_24xx *ctio =
 				(struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
 
-			ql_dbg(ql_dbg_tgt, vha, 0xe019,
-			    "Building additional status packet\n");
+			ql_dbg(ql_dbg_io, vha, 0x305e,
+			    "Building additional status packet 0x%p.\n",
+			    ctio);
 
 			/*
 			 * T10Dif: ctio_crc2_to_fw overlay ontop of
@@ -2391,11 +2418,10 @@
 
 
 	cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
+	cmd->cmd_sent_to_fw = 1;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe01a,
-	    "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
-	    pkt, scsi_status);
-
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2430,17 +2456,27 @@
 	if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
 		return -EIO;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe01b,
-		"%s: CTIO_start: vha(%d) se_cmd %p ox_id %04x\n",
-		__func__, (int)vha->vp_idx, &cmd->se_cmd,
-		be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
-
 	/* Calculate number of entries and segments required */
 	if (qlt_pci_map_calc_cnt(&prm) != 0)
 		return -EAGAIN;
 
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 
+	if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) {
+		/*
+		 * Either a chip reset is active or this request was from
+		 * previous life, just abort the processing.
+		 */
+		cmd->state = QLA_TGT_STATE_NEED_DATA;
+		qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
+		ql_dbg(ql_dbg_async, vha, 0xe102,
+			"RESET-XFR active/old-count/new-count = %d/%d/%d.\n",
+			qla2x00_reset_active(vha), cmd->reset_count,
+			ha->chip_reset);
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
+		return 0;
+	}
+
 	/* Does F/W have an IOCBs for this request */
 	res = qlt_check_reserve_free_req(vha, prm.req_cnt);
 	if (res != 0)
@@ -2460,7 +2496,10 @@
 		qlt_load_data_segments(&prm, vha);
 
 	cmd->state = QLA_TGT_STATE_NEED_DATA;
+	cmd->cmd_sent_to_fw = 1;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
@@ -2503,7 +2542,7 @@
 	    "iocb(s) %p Returned STATUS.\n", sts);
 
 	ql_dbg(ql_dbg_tgt, vha, 0xf075,
-	    "dif check TGT cdb 0x%x lba 0x%llu: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
+	    "dif check TGT cdb 0x%x lba 0x%llx: [Actual|Expected] Ref Tag[0x%x|0x%x], App Tag [0x%x|0x%x], Guard [0x%x|0x%x]\n",
 	    cmd->atio.u.isp24.fcp_cmnd.cdb[0], lba,
 	    a_ref_tag, e_ref_tag, a_app_tag, e_app_tag, a_guard, e_guard);
 
@@ -2626,7 +2665,7 @@
 
 	ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
 
-	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
+	pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
 	if (pkt == NULL) {
 		ql_dbg(ql_dbg_tgt, vha, 0xe050,
 		    "qla_target(%d): %s failed: unable to allocate "
@@ -2669,6 +2708,8 @@
 	if (ctio24->u.status1.residual != 0)
 		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
 
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
 	return ret;
 }
@@ -2684,24 +2725,19 @@
 
 	if (ha_locked) {
 		rc = __qlt_send_term_exchange(vha, cmd, atio);
+		if (rc == -ENOMEM)
+			qlt_alloc_qfull_cmd(vha, atio, 0, 0);
 		goto done;
 	}
 	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
 	rc = __qlt_send_term_exchange(vha, cmd, atio);
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, atio, 0, 0);
 	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
 done:
-	/*
-	 * Terminate exchange will tell fw to release any active CTIO
-	 * that's in FW posession and cleanup the exchange.
-	 *
-	 * "cmd->state == QLA_TGT_STATE_ABORTED" means CTIO is still
-	 * down at FW.  Free the cmd later when CTIO comes back later
-	 * w/aborted(0x2) status.
-	 *
-	 * "cmd->state != QLA_TGT_STATE_ABORTED" means CTIO is already
-	 * back w/some err.  Free the cmd now.
-	 */
-	if ((rc == 1) && (cmd->state != QLA_TGT_STATE_ABORTED)) {
+	if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
+	    !cmd->cmd_sent_to_fw)) {
 		if (!ha_locked && !in_interrupt())
 			msleep(250); /* just in case */
 
@@ -2712,6 +2748,53 @@
 	return;
 }
 
+static void qlt_init_term_exchange(struct scsi_qla_host *vha)
+{
+	struct list_head free_list;
+	struct qla_tgt_cmd *cmd, *tcmd;
+
+	vha->hw->tgt.leak_exchg_thresh_hold =
+	    (vha->hw->fw_xcb_count/100) * LEAK_EXCHG_THRESH_HOLD_PERCENT;
+
+	cmd = tcmd = NULL;
+	if (!list_empty(&vha->hw->tgt.q_full_list)) {
+		INIT_LIST_HEAD(&free_list);
+		list_splice_init(&vha->hw->tgt.q_full_list, &free_list);
+
+		list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+			list_del(&cmd->cmd_list);
+			/* This cmd was never sent to TCM.  There is no need
+			 * to schedule free or call free_cmd
+			 */
+			qlt_free_cmd(cmd);
+			vha->hw->tgt.num_qfull_cmds_alloc--;
+		}
+	}
+	vha->hw->tgt.num_qfull_cmds_dropped = 0;
+}
+
+static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
+{
+	uint32_t total_leaked;
+
+	total_leaked = vha->hw->tgt.num_qfull_cmds_dropped;
+
+	if (vha->hw->tgt.leak_exchg_thresh_hold &&
+	    (total_leaked > vha->hw->tgt.leak_exchg_thresh_hold)) {
+
+		ql_dbg(ql_dbg_tgt, vha, 0xe079,
+		    "Chip reset due to exchange starvation: %d/%d.\n",
+		    total_leaked, vha->hw->fw_xcb_count);
+
+		if (IS_P3P_TYPE(vha->hw))
+			set_bit(FCOE_CTX_RESET_NEEDED, &vha->dpc_flags);
+		else
+			set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
+		qla2xxx_wake_dpc(vha);
+	}
+}
+
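qlt_chk_exch_leak_thresh_hold() escalates to a chip reset once dropped
exchanges cross a percentage of the firmware exchange pool. Worked standalone,
assuming LEAK_EXCHG_THRESH_HOLD_PERCENT is 75 (the constant's value is not
visible in this patch):

#include <stdio.h>

#define LEAK_EXCHG_THRESH_HOLD_PERCENT	75	/* assumed value */

int main(void)
{
	unsigned int fw_xcb_count = 2048;	/* example exchange pool */
	unsigned int thresh = (fw_xcb_count / 100) *
	    LEAK_EXCHG_THRESH_HOLD_PERCENT;

	/* 2048/100 = 20, * 75 = 1500 leaked exchanges trigger a reset */
	printf("reset once dropped > %u of %u exchanges\n",
	    thresh, fw_xcb_count);
	return 0;
}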
 void qlt_free_cmd(struct qla_tgt_cmd *cmd)
 {
 	struct qla_tgt_sess *sess = cmd->sess;
@@ -2721,7 +2804,13 @@
 	    __func__, &cmd->se_cmd,
 	    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
 
+	BUG_ON(cmd->cmd_in_wq);
+
+	if (!cmd->q_full)
+		qlt_decr_num_pend_cmds(cmd->vha);
+
 	BUG_ON(cmd->sg_mapped);
+	cmd->jiffies_at_free = get_jiffies_64();
 	if (unlikely(cmd->free_sg))
 		kfree(cmd->sg);
 
@@ -2742,6 +2832,7 @@
 	struct qla_tgt_srr_imm *imm;
 
 	tgt->ctio_srr_id++;
+	cmd->cmd_flags |= BIT_15;
 
 	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
 	    "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
@@ -2863,11 +2954,9 @@
 	    CTIO_INTERMEDIATE_HANDLE_MARK);
 
 	if (handle != QLA_TGT_NULL_HANDLE) {
-		if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
-			ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
-			    "SKIP_HANDLE CTIO\n");
+		if (unlikely(handle == QLA_TGT_SKIP_HANDLE))
 			return NULL;
-		}
+
 		/* handle-1 is actually used */
 		if (unlikely(handle > DEFAULT_OUTSTANDING_COMMANDS)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe052,
@@ -2894,6 +2983,81 @@
 	return cmd;
 }
 
+/* hardware_lock should be held by caller. */
+static void
+qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint32_t handle;
+
+	if (cmd->sg_mapped)
+		qlt_unmap_sg(vha, cmd);
+
+	handle = qlt_make_handle(vha);
+
+	/* TODO: fix debug message type and ids. */
+	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
+		ql_dbg(ql_dbg_io, vha, 0xff00,
+		    "HOST-ABORT: handle=%d, state=PROCESSED.\n", handle);
+	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
+		cmd->write_data_transferred = 0;
+		cmd->state = QLA_TGT_STATE_DATA_IN;
+
+		ql_dbg(ql_dbg_io, vha, 0xff01,
+		    "HOST-ABORT: handle=%d, state=DATA_IN.\n", handle);
+
+		ha->tgt.tgt_ops->handle_data(cmd);
+		return;
+	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
+		ql_dbg(ql_dbg_io, vha, 0xff02,
+		    "HOST-ABORT: handle=%d, state=ABORTED.\n", handle);
+	} else {
+		ql_dbg(ql_dbg_io, vha, 0xff03,
+		    "HOST-ABORT: handle=%d, state=BAD(%d).\n", handle,
+		    cmd->state);
+		dump_stack();
+	}
+
+	cmd->cmd_flags |= BIT_12;
+	ha->tgt.tgt_ops->free_cmd(cmd);
+}
+
+void
+qlt_host_reset_handler(struct qla_hw_data *ha)
+{
+	struct qla_tgt_cmd *cmd;
+	unsigned long flags;
+	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
+	scsi_qla_host_t *vha = NULL;
+	struct qla_tgt *tgt = base_vha->vha_tgt.qla_tgt;
+	uint32_t i;
+
+	if (!base_vha->hw->tgt.tgt_ops)
+		return;
+
+	if (!tgt || qla_ini_mode_enabled(base_vha)) {
+		ql_dbg(ql_dbg_tgt_mgt, base_vha, 0xf003,
+			"Target mode disabled\n");
+		return;
+	}
+
+	ql_dbg(ql_dbg_tgt_mgt, base_vha, 0xff10,
+	    "HOST-ABORT-HNDLR: base_vha->dpc_flags=%lx.\n",
+	    base_vha->dpc_flags);
+
+	spin_lock_irqsave(&ha->hardware_lock, flags);
+	for (i = 1; i < DEFAULT_OUTSTANDING_COMMANDS + 1; i++) {
+		cmd = qlt_get_cmd(base_vha, i);
+		if (!cmd)
+			continue;
+		/* ha->tgt.cmds entry is cleared by qlt_get_cmd. */
+		vha = cmd->vha;
+		qlt_abort_cmd_on_host_reset(vha, cmd);
+	}
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+}
+
 /*
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  */
@@ -2905,10 +3069,6 @@
 	struct target_core_fabric_ops *tfo;
 	struct qla_tgt_cmd *cmd;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe01e,
-	    "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
-	    vha->vp_idx, ctio, status, handle);
-
 	if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
 		/* That could happen only in case of an error/reset/abort */
 		if (status != CTIO_SUCCESS) {
@@ -2925,6 +3085,7 @@
 
 	se_cmd = &cmd->se_cmd;
 	tfo = se_cmd->se_tfo;
+	cmd->cmd_sent_to_fw = 0;
 
 	if (cmd->sg_mapped)
 		qlt_unmap_sg(vha, cmd);
@@ -3011,7 +3172,8 @@
 		 * level.
 		 */
 		if ((cmd->state != QLA_TGT_STATE_NEED_DATA) &&
-			(cmd->state != QLA_TGT_STATE_ABORTED)) {
+		    (cmd->state != QLA_TGT_STATE_ABORTED)) {
+			cmd->cmd_flags |= BIT_13;
 			if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
 				return;
 		}
@@ -3019,7 +3181,7 @@
 skip_term:
 
 	if (cmd->state == QLA_TGT_STATE_PROCESSED) {
-		ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
+		;
 	} else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 		int rx_status = 0;
 
@@ -3030,10 +3192,6 @@
 		else
 			cmd->write_data_transferred = 1;
 
-		ql_dbg(ql_dbg_tgt, vha, 0xe020,
-		    "Data received, context %x, rx_status %d\n",
-		    0x0, rx_status);
-
 		ha->tgt.tgt_ops->handle_data(cmd);
 		return;
 	} else if (cmd->state == QLA_TGT_STATE_ABORTED) {
@@ -3103,6 +3262,8 @@
 	uint32_t data_length;
 	int ret, fcp_task_attr, data_dir, bidi = 0;
 
+	cmd->cmd_in_wq = 0;
+	cmd->cmd_flags |= BIT_1;
 	if (tgt->tgt_stop)
 		goto out_term;
 
@@ -3128,11 +3289,6 @@
 	    &atio->u.isp24.fcp_cmnd.add_cdb[
 	    atio->u.isp24.fcp_cmnd.add_cdb_len]));
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe022,
-		"qla_target: START qla cmd: %p se_cmd %p lun: 0x%04x (tag %d) len(%d) ox_id %x\n",
-		cmd, &cmd->se_cmd, cmd->unpacked_lun, cmd->tag, data_length,
-		cmd->atio.u.isp24.fcp_hdr.ox_id);
-
 	ret = ha->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
 				          fcp_task_attr, data_dir, bidi);
 	if (ret != 0)
@@ -3146,13 +3302,16 @@
 	return;
 
 out_term:
-	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
+	ql_dbg(ql_dbg_io, vha, 0x3060, "Terminating work cmd %p\n", cmd);
 	/*
 	 * cmd has not sent to target yet, so pass NULL as the second
 	 * argument to qlt_send_term_exchange() and free the memory here.
 	 */
+	cmd->cmd_flags |= BIT_2;
 	spin_lock_irqsave(&ha->hardware_lock, flags);
 	qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+
+	qlt_decr_num_pend_cmds(vha);
 	percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
 	ha->tgt.tgt_ops->put_sess(sess);
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -3183,6 +3342,7 @@
 	memcpy(&cmd->atio, atio, sizeof(*atio));
 	cmd->state = QLA_TGT_STATE_NEW;
 	cmd->tgt = vha->vha_tgt.qla_tgt;
+	qlt_incr_num_pend_cmds(vha);
 	cmd->vha = vha;
 	cmd->se_cmd.map_tag = tag;
 	cmd->sess = sess;
@@ -3264,7 +3424,7 @@
 	struct qla_tgt_cmd *cmd;
 
 	if (unlikely(tgt->tgt_stop)) {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
+		ql_dbg(ql_dbg_io, vha, 0x3061,
 		    "New command while device %p is shutting down\n", tgt);
 		return -EFAULT;
 	}
@@ -3277,6 +3437,7 @@
 			return -ENOMEM;
 
 		memcpy(&op->atio, atio, sizeof(*atio));
+		op->vha = vha;
 		INIT_WORK(&op->work, qlt_create_sess_from_atio);
 		queue_work(qla_tgt_wq, &op->work);
 		return 0;
@@ -3288,12 +3449,19 @@
 
 	cmd = qlt_get_tag(vha, sess, atio);
 	if (!cmd) {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
+		ql_dbg(ql_dbg_io, vha, 0x3062,
 		    "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
 		ha->tgt.tgt_ops->put_sess(sess);
 		return -ENOMEM;
 	}
 
+	cmd->cmd_flags = 0;
+	cmd->jiffies_at_alloc = get_jiffies_64();
+
+	cmd->reset_count = vha->hw->chip_reset;
+
+	cmd->cmd_in_wq = 1;
+	cmd->cmd_flags |= BIT_0;
 	INIT_WORK(&cmd->work, qlt_do_work);
 	queue_work(qla_tgt_wq, &cmd->work);
 	return 0;
@@ -3327,6 +3495,7 @@
 	}
 	mcmd->tmr_func = fn;
 	mcmd->flags = flags;
+	mcmd->reset_count = vha->hw->chip_reset;
 
 	switch (fn) {
 	case QLA_TGT_CLEAR_ACA:
@@ -3462,6 +3631,7 @@
 
 	lun = a->u.isp24.fcp_cmnd.lun;
 	unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
+	mcmd->reset_count = vha->hw->chip_reset;
 
 	rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
 	    le16_to_cpu(iocb->u.isp2x.seq_id));
@@ -3753,8 +3923,10 @@
 			qlt_send_notify_ack(vha, ntfy,
 			    0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
 			spin_unlock_irqrestore(&ha->hardware_lock, flags);
-			if (xmit_type & QLA_TGT_XMIT_DATA)
+			if (xmit_type & QLA_TGT_XMIT_DATA) {
+				cmd->cmd_flags |= BIT_8;
 				qlt_rdy_to_xfer(cmd);
+			}
 		} else {
 			ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
 			    "qla_target(%d): SRR for out data for cmd "
@@ -3772,8 +3944,10 @@
 	}
 
 	/* Transmit response in case of status and data-in cases */
-	if (resp)
+	if (resp) {
+		cmd->cmd_flags |= BIT_7;
 		qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
+	}
 
 	return;
 
@@ -3786,8 +3960,10 @@
 	if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
 		cmd->state = QLA_TGT_STATE_DATA_IN;
 		dump_stack();
-	} else
+	} else {
+		cmd->cmd_flags |= BIT_9;
 		qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+	}
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 }
 
@@ -3901,7 +4077,7 @@
 
 	tgt->imm_srr_id++;
 
-	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
+	ql_log(ql_log_warn, vha, 0xf02d, "qla_target(%d): SRR received\n",
 	    vha->vp_idx);
 
 	imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
@@ -4121,7 +4297,7 @@
  * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
  * This function sends busy to ISP 2xxx or 24xx.
  */
-static void qlt_send_busy(struct scsi_qla_host *vha,
+static int __qlt_send_busy(struct scsi_qla_host *vha,
 	struct atio_from_isp *atio, uint16_t status)
 {
 	struct ctio7_to_24xx *ctio24;
@@ -4133,16 +4309,16 @@
 	    atio->u.isp24.fcp_hdr.s_id);
 	if (!sess) {
 		qlt_send_term_exchange(vha, NULL, atio, 1);
-		return;
+		return 0;
 	}
 	/* Sending marker isn't necessary, since we called from ISR */
 
 	pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
 	if (!pkt) {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
+		ql_dbg(ql_dbg_io, vha, 0x3063,
 		    "qla_target(%d): %s failed: unable to allocate "
 		    "request packet", vha->vp_idx, __func__);
-		return;
+		return -ENOMEM;
 	}
 
 	pkt->entry_count = 1;
@@ -4167,13 +4343,192 @@
 	 */
 	ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
 	ctio24->u.status1.scsi_status = cpu_to_le16(status);
-	ctio24->u.status1.residual = get_unaligned((uint32_t *)
-	    &atio->u.isp24.fcp_cmnd.add_cdb[
-	    atio->u.isp24.fcp_cmnd.add_cdb_len]);
-	if (ctio24->u.status1.residual != 0)
-		ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
-
+	/* Memory Barrier */
+	wmb();
 	qla2x00_start_iocbs(vha, vha->req);
+	return 0;
+}
+
+/*
+ * This routine is used to allocate a command for either a QFull condition
+ * (i.e. reply SAM_STAT_BUSY) or to terminate an exchange that did not go
+ * out previously.
+ */
+static void
+qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status, int qfull)
+{
+	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
+	struct qla_hw_data *ha = vha->hw;
+	struct qla_tgt_sess *sess;
+	struct se_session *se_sess;
+	struct qla_tgt_cmd *cmd;
+	int tag;
+
+	if (unlikely(tgt->tgt_stop)) {
+		ql_dbg(ql_dbg_io, vha, 0x300a,
+			"New command while device %p is shutting down\n", tgt);
+		return;
+	}
+
+	if ((vha->hw->tgt.num_qfull_cmds_alloc + 1) > MAX_QFULL_CMDS_ALLOC) {
+		vha->hw->tgt.num_qfull_cmds_dropped++;
+		if (vha->hw->tgt.num_qfull_cmds_dropped >
+			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+				vha->hw->tgt.num_qfull_cmds_dropped;
+
+		ql_dbg(ql_dbg_io, vha, 0x3068,
+			"qla_target(%d): %s: QFull CMD dropped[%d]\n",
+			vha->vp_idx, __func__,
+			vha->hw->tgt.num_qfull_cmds_dropped);
+
+		qlt_chk_exch_leak_thresh_hold(vha);
+		return;
+	}
+
+	sess = ha->tgt.tgt_ops->find_sess_by_s_id
+		(vha, atio->u.isp24.fcp_hdr.s_id);
+	if (!sess)
+		return;
+
+	se_sess = sess->se_sess;
+
+	tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
+	if (tag < 0)
+		return;
+
+	cmd = &((struct qla_tgt_cmd *)se_sess->sess_cmd_map)[tag];
+	if (!cmd) {
+		ql_dbg(ql_dbg_io, vha, 0x3009,
+			"qla_target(%d): %s: Allocation of cmd failed\n",
+			vha->vp_idx, __func__);
+
+		vha->hw->tgt.num_qfull_cmds_dropped++;
+		if (vha->hw->tgt.num_qfull_cmds_dropped >
+			vha->hw->qla_stats.stat_max_qfull_cmds_dropped)
+			vha->hw->qla_stats.stat_max_qfull_cmds_dropped =
+				vha->hw->tgt.num_qfull_cmds_dropped;
+
+		qlt_chk_exch_leak_thresh_hold(vha);
+		return;
+	}
+
+	memset(cmd, 0, sizeof(struct qla_tgt_cmd));
+
+	qlt_incr_num_pend_cmds(vha);
+	INIT_LIST_HEAD(&cmd->cmd_list);
+	memcpy(&cmd->atio, atio, sizeof(*atio));
+
+	cmd->tgt = vha->vha_tgt.qla_tgt;
+	cmd->vha = vha;
+	cmd->reset_count = vha->hw->chip_reset;
+
+	if (qfull) {
+		cmd->q_full = 1;
+		/* NOTE: borrowing the state field to carry the status */
+		cmd->state = status;
+	} else
+		cmd->term_exchg = 1;
+
+	list_add_tail(&cmd->cmd_list, &vha->hw->tgt.q_full_list);
+
+	vha->hw->tgt.num_qfull_cmds_alloc++;
+	if (vha->hw->tgt.num_qfull_cmds_alloc >
+		vha->hw->qla_stats.stat_max_qfull_cmds_alloc)
+		vha->hw->qla_stats.stat_max_qfull_cmds_alloc =
+			vha->hw->tgt.num_qfull_cmds_alloc;
+}
+
+int
+qlt_free_qfull_cmds(struct scsi_qla_host *vha)
+{
+	struct qla_hw_data *ha = vha->hw;
+	unsigned long flags;
+	struct qla_tgt_cmd *cmd, *tcmd;
+	struct list_head free_list;
+	int rc = 0;
+
+	if (list_empty(&ha->tgt.q_full_list))
+		return 0;
+
+	INIT_LIST_HEAD(&free_list);
+
+	spin_lock_irqsave(&vha->hw->hardware_lock, flags);
+
+	if (list_empty(&ha->tgt.q_full_list)) {
+		spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+		return 0;
+	}
+
+	list_for_each_entry_safe(cmd, tcmd, &ha->tgt.q_full_list, cmd_list) {
+		if (cmd->q_full)
+			/* cmd->state is a borrowed field to hold status */
+			rc = __qlt_send_busy(vha, &cmd->atio, cmd->state);
+		else if (cmd->term_exchg)
+			rc = __qlt_send_term_exchange(vha, NULL, &cmd->atio);
+
+		if (rc == -ENOMEM)
+			break;
+
+		if (cmd->q_full)
+			ql_dbg(ql_dbg_io, vha, 0x3006,
+			    "%s: busy sent for ox_id[%04x]\n", __func__,
+			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+		else if (cmd->term_exchg)
+			ql_dbg(ql_dbg_io, vha, 0x3007,
+			    "%s: Term exchg sent for ox_id[%04x]\n", __func__,
+			    be16_to_cpu(cmd->atio.u.isp24.fcp_hdr.ox_id));
+		else
+			ql_dbg(ql_dbg_io, vha, 0x3008,
+			    "%s: Unexpected cmd in QFull list %p\n", __func__,
+			    cmd);
+
+		list_del(&cmd->cmd_list);
+		list_add_tail(&cmd->cmd_list, &free_list);
+
+		/* piggy back on hardware_lock for protection */
+		vha->hw->tgt.num_qfull_cmds_alloc--;
+	}
+	spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
+
+	cmd = NULL;
+
+	list_for_each_entry_safe(cmd, tcmd, &free_list, cmd_list) {
+		list_del(&cmd->cmd_list);
+		/* This cmd was never sent to TCM.  There is no need
+		 * to schedule free or call free_cmd
+		 */
+		qlt_free_cmd(cmd);
+	}
+	return rc;
+}
+
+static void
+qlt_send_busy(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio, uint16_t status)
+{
+	int rc = 0;
+
+	rc = __qlt_send_busy(vha, atio, status);
+	if (rc == -ENOMEM)
+		qlt_alloc_qfull_cmd(vha, atio, status, 1);
+}
+
+static int
+qlt_chk_qfull_thresh_hold(struct scsi_qla_host *vha,
+	struct atio_from_isp *atio)
+{
+	struct qla_hw_data *ha = vha->hw;
+	uint16_t status;
+
+	if (ha->tgt.num_pend_cmds < Q_FULL_THRESH_HOLD(ha))
+		return 0;
+
+	status = temp_sam_status;
+	qlt_send_busy(vha, atio, status);
+	return 1;
 }
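qlt_chk_qfull_thresh_hold() compares the driver's pending-command count
against Q_FULL_THRESH_HOLD(ha) and answers new ATIOs with BUSY beyond it. A
worked sketch, assuming the macro is a 90% cut of the firmware exchange count
(an assumption; its definition is not part of this hunk):

#include <stdio.h>

#define Q_FULL_THRESH_HOLD_PERCENT	90	/* assumed value */

int main(void)
{
	unsigned int fw_xcb_count = 2048;	/* example exchange pool */
	unsigned int num_pend_cmds = 1900;
	unsigned int thresh = (fw_xcb_count / 100) *
	    Q_FULL_THRESH_HOLD_PERCENT;

	if (num_pend_cmds < thresh)
		printf("queue command (%u/%u)\n", num_pend_cmds, thresh);
	else
		printf("reply SAM_STAT_BUSY (%u/%u)\n", num_pend_cmds,
		    thresh);
	return 0;
}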
 
 /* ha->hardware_lock supposed to be held on entry */
@@ -4186,14 +4541,10 @@
 	int rc;
 
 	if (unlikely(tgt == NULL)) {
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
+		ql_dbg(ql_dbg_io, vha, 0x3064,
 		    "ATIO pkt, but no tgt (ha %p)", ha);
 		return;
 	}
-	ql_dbg(ql_dbg_tgt, vha, 0xe02c,
-	    "qla_target(%d): ATIO pkt %p: type %02x count %02x",
-	    vha->vp_idx, atio, atio->u.raw.entry_type,
-	    atio->u.raw.entry_count);
 	/*
 	 * In tgt_stop mode we also should allow all requests to pass.
 	 * Otherwise, some commands can stuck.
@@ -4203,33 +4554,28 @@
 
 	switch (atio->u.raw.entry_type) {
 	case ATIO_TYPE7:
-		ql_dbg(ql_dbg_tgt, vha, 0xe02d,
-		    "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, cdb %x, add_cdb_len %x, data_length %04x, s_id %02x%02x%02x\n",
-		    vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
-		    atio->u.isp24.fcp_cmnd.rddata,
-		    atio->u.isp24.fcp_cmnd.wrdata,
-		    atio->u.isp24.fcp_cmnd.cdb[0],
-		    atio->u.isp24.fcp_cmnd.add_cdb_len,
-		    be32_to_cpu(get_unaligned((uint32_t *)
-			&atio->u.isp24.fcp_cmnd.add_cdb[
-			atio->u.isp24.fcp_cmnd.add_cdb_len])),
-		    atio->u.isp24.fcp_hdr.s_id[0],
-		    atio->u.isp24.fcp_hdr.s_id[1],
-		    atio->u.isp24.fcp_hdr.s_id[2]);
-
 		if (unlikely(atio->u.isp24.exchange_addr ==
 		    ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
-			ql_dbg(ql_dbg_tgt, vha, 0xe058,
+			ql_dbg(ql_dbg_io, vha, 0x3065,
 			    "qla_target(%d): ATIO_TYPE7 "
 			    "received with UNKNOWN exchange address, "
 			    "sending QUEUE_FULL\n", vha->vp_idx);
 			qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
 			break;
 		}
-		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
+
+		if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0)) {
+			rc = qlt_chk_qfull_thresh_hold(vha, atio);
+			if (rc != 0) {
+				tgt->irq_cmd_count--;
+				return;
+			}
 			rc = qlt_handle_cmd_for_atio(vha, atio);
-		else
+		} else {
 			rc = qlt_handle_task_mgmt(vha, atio);
+		}
 		if (unlikely(rc != 0)) {
 			if (rc == -ESRCH) {
 #if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
@@ -4293,11 +4639,6 @@
 		return;
 	}
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe02f,
-	    "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
-	    "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
-	    pkt->entry_count, pkt->entry_status, pkt->handle);
-
 	/*
 	 * In tgt_stop mode we also should allow all requests to pass.
 	 * Otherwise, some commands can stuck.
@@ -4310,9 +4651,6 @@
 	case CTIO_TYPE7:
 	{
 		struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
-		ql_dbg(ql_dbg_tgt, vha, 0xe030,
-			"CTIO[0x%x] 12/CTIO7 7A/CRC2: instance %d\n",
-			entry->entry_type, vha->vp_idx);
 		qlt_do_ctio_completion(vha, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -4323,15 +4661,6 @@
 	{
 		struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
 		int rc;
-		ql_dbg(ql_dbg_tgt, vha, 0xe031,
-		    "ACCEPT_TGT_IO instance %d status %04x "
-		    "lun %04x read/write %d data_length %04x "
-		    "target_id %02x rx_id %04x\n ", vha->vp_idx,
-		    le16_to_cpu(atio->u.isp2x.status),
-		    le16_to_cpu(atio->u.isp2x.lun),
-		    atio->u.isp2x.execution_codes,
-		    le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
-		    atio), atio->u.isp2x.rx_id);
 		if (atio->u.isp2x.status !=
 		    __constant_cpu_to_le16(ATIO_CDB_VALID)) {
 			ql_dbg(ql_dbg_tgt, vha, 0xe05e,
@@ -4340,10 +4669,12 @@
 			    le16_to_cpu(atio->u.isp2x.status));
 			break;
 		}
-		ql_dbg(ql_dbg_tgt, vha, 0xe032,
-		    "FCP CDB: 0x%02x, sizeof(cdb): %lu",
-		    atio->u.isp2x.cdb[0], (unsigned long
-		    int)sizeof(atio->u.isp2x.cdb));
+
+		rc = qlt_chk_qfull_thresh_hold(vha, atio);
+		if (rc != 0) {
+			tgt->irq_cmd_count--;
+			return;
+		}
 
 		rc = qlt_handle_cmd_for_atio(vha, atio);
 		if (unlikely(rc != 0)) {
@@ -4376,8 +4707,6 @@
 	case CONTINUE_TGT_IO_TYPE:
 	{
 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-		ql_dbg(ql_dbg_tgt, vha, 0xe033,
-		    "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
 		qlt_do_ctio_completion(vha, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -4387,8 +4716,6 @@
 	case CTIO_A64_TYPE:
 	{
 		struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
-		ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
-		    vha->vp_idx);
 		qlt_do_ctio_completion(vha, entry->handle,
 		    le16_to_cpu(entry->status)|(pkt->entry_status << 16),
 		    entry);
@@ -4492,11 +4819,6 @@
 	struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
 	int login_code;
 
-	ql_dbg(ql_dbg_tgt, vha, 0xe039,
-	    "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
-	    vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
-	    ha->operating_mode, ha->current_topology);
-
 	if (!ha->tgt.tgt_ops)
 		return;
 
@@ -4573,11 +4895,6 @@
 		break;
 
 	default:
-		ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
-		    "qla_target(%d): Async event %#x occurred: "
-		    "ignore (m[0]=%x, m[1]=%x, m[2]=%x, m[3]=%x)", vha->vp_idx,
-		    code, le16_to_cpu(mailbox[0]), le16_to_cpu(mailbox[1]),
-		    le16_to_cpu(mailbox[2]), le16_to_cpu(mailbox[3]));
 		break;
 	}
 
@@ -4598,8 +4915,6 @@
 		return NULL;
 	}
 
-	ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
-
 	fcport->loop_id = loop_id;
 
 	rc = qla2x00_get_port_database(vha, fcport, 0);
@@ -4898,6 +5213,10 @@
 		qlt_release(vha->vha_tgt.qla_tgt);
 		return 0;
 	}
+
+	/* free left over qfull cmds */
+	qlt_init_term_exchange(vha);
+
 	mutex_lock(&qla_tgt_mutex);
 	list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
 	mutex_unlock(&qla_tgt_mutex);
@@ -5295,8 +5614,13 @@
 		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
 		/* Enable initial LIP */
 		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
-		/* Enable FC tapes support */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+		if (ql2xtgt_tape_enable)
+			/* Enable FC Tape support */
+			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		else
+			/* Disable FC Tape support */
+			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
 		/* Disable Full Login after LIP */
 		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
@@ -5378,8 +5702,13 @@
 		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
 		/* Enable initial LIP */
 		nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
-		/* Enable FC tapes support */
-		nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
+		if (ql2xtgt_tape_enable)
+			/* Enable FC tape support */
+			nv->firmware_options_2 |= cpu_to_le32(BIT_12);
+		else
+			/* Disable FC tape support */
+			nv->firmware_options_2 &= cpu_to_le32(~BIT_12);
+
 		/* Disable Full Login after LIP */
 		nv->host_p &= __constant_cpu_to_le32(~BIT_10);
 		/* Enable target PRLI control */
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index d1d24fb..8ff330f 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -915,6 +915,10 @@
 	unsigned int aborted:1; /* Needed in case of SRR */
 	unsigned int write_data_transferred:1;
 	unsigned int ctx_dsd_alloced:1;
+	unsigned int q_full:1;
+	unsigned int term_exchg:1;
+	unsigned int cmd_sent_to_fw:1;
+	unsigned int cmd_in_wq:1;
 
 	struct scatterlist *sg;	/* cmd data buffer SG vector */
 	int sg_cnt;		/* SG segments count */
@@ -923,10 +927,12 @@
 	uint32_t tag;
 	uint32_t unpacked_lun;
 	enum dma_data_direction dma_data_direction;
+	uint32_t reset_count;
 
 	uint16_t loop_id;	/* to save extra sess dereferences */
 	struct qla_tgt *tgt;	/* to save extra sess dereferences */
 	struct scsi_qla_host *vha;
+	struct list_head cmd_list;
 
 	struct atio_from_isp atio;
 	/* t10dif */
@@ -935,6 +941,29 @@
 	uint32_t blk_sz;
 	struct crc_context *ctx;
 
+	uint64_t jiffies_at_alloc;
+	uint64_t jiffies_at_free;
+	/* BIT_0  - ATIO arrival / scheduled to work
+	 * BIT_1  - qlt_do_work
+	 * BIT_2  - qlt_do_work failed
+	 * BIT_3  - xfer rdy / tcm_qla2xxx_write_pending
+	 * BIT_4  - read response / tcm_qla2xxx_queue_data_in
+	 * BIT_5  - status response / tcm_qla2xxx_queue_status
+	 * BIT_6  - tcm request to abort / terminate exchange:
+	 *	pre_xmit_response->qlt_send_term_exchange
+	 * BIT_7  - SRR received (qlt_handle_srr->qlt_xmit_response)
+	 * BIT_8  - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+	 * BIT_9  - SRR received (qlt_handle_srr->qlt_send_term_exchange)
+	 * BIT_10 - data in / handle_data->tcm_qla2xxx_handle_data
+	 * BIT_11 - data actually going to TCM: tcm_qla2xxx_handle_data_work
+	 * BIT_12 - good completion: qlt_do_ctio_completion->free_cmd
+	 * BIT_13 - bad completion:
+	 *	qlt_do_ctio_completion->qlt_term_ctio_exchange
+	 * BIT_14 - back end data received/sent
+	 * BIT_15 - SRR prepare CTIO
+	 * BIT_16 - complete free
+	 */
+	uint32_t cmd_flags;
 };
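
The cmd_flags bits documented above act as a breadcrumb trail for a
command's lifecycle: each stage ORs in its own bit, so a stuck or
double-completed command can be read off a single word. A minimal
standalone sketch of decoding such a trail (the decoder and stage names
below are illustrative, not part of the qla2xxx driver):

#include <stdio.h>

/* Illustrative decoder for the BIT_0..BIT_16 lifecycle trail above;
 * stage names are paraphrased from the comment block. */
static const char *stage_names[] = {
	"atio arrival", "qlt_do_work", "qlt_do_work failed",
	"write pending", "queue_data_in", "queue_status",
	"term exchange", "srr xmit", "srr rdy", "srr term",
	"handle_data", "data to TCM", "good completion",
	"bad completion", "backend data", "srr prepare ctio",
	"complete free",
};

static void print_cmd_trail(unsigned int cmd_flags)
{
	unsigned int i;

	for (i = 0; i < 17; i++)
		if (cmd_flags & (1u << i))
			printf("BIT_%u: %s\n", i, stage_names[i]);
}

int main(void)
{
	/* a command that arrived, was worked, completed and was freed */
	print_cmd_trail((1u << 0) | (1u << 1) | (1u << 12) | (1u << 16));
	return 0;
}
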
 
 struct qla_tgt_sess_work_param {
@@ -958,6 +987,7 @@
 	struct se_cmd se_cmd;
 	struct work_struct free_work;
 	unsigned int flags;
+	uint32_t reset_count;
 #define QLA24XX_MGMT_SEND_NACK	1
 	union {
 		struct atio_from_isp atio;
@@ -1089,5 +1119,6 @@
 extern void qlt_stop_phase2(struct qla_tgt *);
 extern irqreturn_t qla83xx_msix_atio_q(int, void *);
 extern void qlt_83xx_iospace_config(struct qla_hw_data *);
+extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
 
 #endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index cb9a0c4..a8c0c73 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -128,18 +128,10 @@
 static inline void
 qla27xx_insertbuf(void *mem, ulong size, void *buf, ulong *len)
 {
-	ulong cnt = size;
 
-	if (buf && mem) {
+	if (buf && mem && size) {
 		buf += *len;
-		while (cnt >= sizeof(uint32_t)) {
-			*(__le32 *)buf = cpu_to_le32p(mem);
-			buf += sizeof(uint32_t);
-			mem += sizeof(uint32_t);
-			cnt -= sizeof(uint32_t);
-		}
-		if (cnt)
-			memcpy(buf, mem, cnt);
+		memcpy(buf, mem, size);
 	}
 	*len += size;
 }
@@ -151,8 +143,6 @@
 
 	if (buf) {
 		value = RD_REG_BYTE((__iomem void *)window);
-		ql_dbg(ql_dbg_misc, NULL, 0xd011,
-		    "%s: -> %x\n", __func__, value);
 	}
 	qla27xx_insert32(value, buf, len);
 }
@@ -164,8 +154,6 @@
 
 	if (buf) {
 		value = RD_REG_WORD((__iomem void *)window);
-		ql_dbg(ql_dbg_misc, NULL, 0xd012,
-		    "%s: -> %x\n", __func__, value);
 	}
 	qla27xx_insert32(value, buf, len);
 }
@@ -177,8 +165,6 @@
 
 	if (buf) {
 		value = RD_REG_DWORD((__iomem void *)window);
-		ql_dbg(ql_dbg_misc, NULL, 0xd013,
-		    "%s: -> %x\n", __func__, value);
 	}
 	qla27xx_insert32(value, buf, len);
 }
@@ -197,10 +183,6 @@
 {
 	void *window = (void *)reg + offset;
 
-	if (buf) {
-		ql_dbg(ql_dbg_misc, NULL, 0xd014,
-		    "%s: @%x\n", __func__, offset);
-	}
 	qla27xx_read32(window, buf, len);
 }
 
@@ -211,8 +193,6 @@
 	__iomem void *window = reg + offset;
 
 	if (buf) {
-		ql_dbg(ql_dbg_misc, NULL, 0xd015,
-		    "%s: @%x <- %x\n", __func__, offset, data);
 		WRT_REG_DWORD(window, data);
 	}
 }
@@ -225,11 +205,6 @@
 	void *window = (void *)reg + offset;
 	void (*readn)(void *, void *, ulong *) = qla27xx_read_vector(width);
 
-	if (buf) {
-		ql_dbg(ql_dbg_misc, NULL, 0xd016,
-		    "%s: base=%x offset=%x count=%x width=%x\n",
-		    __func__, addr, offset, count, width);
-	}
 	qla27xx_write_reg(reg, IOBASE_ADDR, addr, buf);
 	while (count--) {
 		qla27xx_insert32(addr, buf, len);
@@ -380,14 +355,9 @@
 			ent->t262.start_addr = start;
 			ent->t262.end_addr = end;
 		}
-	} else if (ent->t262.ram_area == T262_RAM_AREA_DDR_RAM) {
-		ql_dbg(ql_dbg_misc, vha, 0xd021,
-		    "%s: unsupported ddr ram\n", __func__);
-		qla27xx_skip_entry(ent, buf);
-		goto done;
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd022,
-		    "%s: unknown area %u\n", __func__, ent->t262.ram_area);
+		    "%s: unknown area %x\n", __func__, ent->t262.ram_area);
 		qla27xx_skip_entry(ent, buf);
 		goto done;
 	}
@@ -402,8 +372,6 @@
 
 	dwords = end - start + 1;
 	if (buf) {
-		ql_dbg(ql_dbg_misc, vha, 0xd024,
-		    "%s: @%lx -> (%lx dwords)\n", __func__, start, dwords);
 		buf += *len;
 		qla24xx_dump_ram(vha->hw, start, buf, dwords, &buf);
 	}
@@ -448,13 +416,9 @@
 				count++;
 			}
 		}
-	} else if (ent->t263.queue_type == T263_QUEUE_TYPE_ATIO) {
-		ql_dbg(ql_dbg_misc, vha, 0xd025,
-		    "%s: unsupported atio queue\n", __func__);
-		qla27xx_skip_entry(ent, buf);
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd026,
-		    "%s: unknown queue %u\n", __func__, ent->t263.queue_type);
+		    "%s: unknown queue %x\n", __func__, ent->t263.queue_type);
 		qla27xx_skip_entry(ent, buf);
 	}
 
@@ -549,17 +513,9 @@
 			    "%s: missing eft\n", __func__);
 			qla27xx_skip_entry(ent, buf);
 		}
-	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXCH_BUFOFF) {
-		ql_dbg(ql_dbg_misc, vha, 0xd029,
-		    "%s: unsupported exchange offload buffer\n", __func__);
-		qla27xx_skip_entry(ent, buf);
-	} else if (ent->t268.buf_type == T268_BUF_TYPE_EXTD_LOGIN) {
-		ql_dbg(ql_dbg_misc, vha, 0xd02a,
-		    "%s: unsupported extended login buffer\n", __func__);
-		qla27xx_skip_entry(ent, buf);
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd02b,
-		    "%s: unknown buf %x\n", __func__, ent->t268.buf_type);
+		    "%s: unknown buffer %x\n", __func__, ent->t268.buf_type);
 		qla27xx_skip_entry(ent, buf);
 	}
 
@@ -695,13 +651,9 @@
 				count++;
 			}
 		}
-	} else if (ent->t274.queue_type == T274_QUEUE_TYPE_ATIO_SHAD) {
-		ql_dbg(ql_dbg_misc, vha, 0xd02e,
-		    "%s: unsupported atio queue\n", __func__);
-		qla27xx_skip_entry(ent, buf);
 	} else {
 		ql_dbg(ql_dbg_misc, vha, 0xd02f,
-		    "%s: unknown queue %u\n", __func__, ent->t274.queue_type);
+		    "%s: unknown queue %x\n", __func__, ent->t274.queue_type);
 		qla27xx_skip_entry(ent, buf);
 	}
 
@@ -715,6 +667,32 @@
 }
 
 static int
+qla27xx_fwdt_entry_t275(struct scsi_qla_host *vha,
+	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
+{
+	ulong offset = offsetof(typeof(*ent), t275.buffer);
+
+	ql_dbg(ql_dbg_misc, vha, 0xd213,
+	    "%s: buffer(%x) [%lx]\n", __func__, ent->t275.length, *len);
+	if (!ent->t275.length) {
+		ql_dbg(ql_dbg_misc, vha, 0xd020,
+		    "%s: buffer zero length\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+	if (offset + ent->t275.length > ent->hdr.entry_size) {
+		ql_dbg(ql_dbg_misc, vha, 0xd030,
+		    "%s: buffer overflow\n", __func__);
+		qla27xx_skip_entry(ent, buf);
+		goto done;
+	}
+
+	qla27xx_insertbuf(ent->t275.buffer, ent->t275.length, buf, len);
+done:
+	return false;
+}
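
The two guards above reject a zero-length buffer and a declared length
that would run past the entry's own entry_size, so the memcpy done by
qla27xx_insertbuf() cannot read beyond the template entry. A standalone
sketch of the same bounds check (the struct and names are made up):

#include <stddef.h>
#include <stdio.h>

/* Illustrative t275-style check: the payload must be non-empty and fit
 * between its own offset and the declared entry size. */
struct entry {
	unsigned int size;	/* stands in for hdr.entry_size */
	unsigned int len;	/* stands in for t275.length */
	unsigned char buf[];
};

static int payload_fits(const struct entry *e)
{
	size_t off = offsetof(struct entry, buf);

	return e->len && off + e->len <= e->size;
}

int main(void)
{
	struct entry e = { .size = 16, .len = 8 };

	printf("%s\n", payload_fits(&e) ? "ok" : "overflow");
	return 0;
}
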
+
+static int
 qla27xx_fwdt_entry_other(struct scsi_qla_host *vha,
 	struct qla27xx_fwdt_entry *ent, void *buf, ulong *len)
 {
@@ -726,7 +704,7 @@
 }
 
 struct qla27xx_fwdt_entry_call {
-	int type;
+	uint type;
 	int (*call)(
 	    struct scsi_qla_host *,
 	    struct qla27xx_fwdt_entry *,
@@ -756,18 +734,21 @@
 	{ ENTRY_TYPE_RDREMRAM		, qla27xx_fwdt_entry_t272  } ,
 	{ ENTRY_TYPE_PCICFG		, qla27xx_fwdt_entry_t273  } ,
 	{ ENTRY_TYPE_GET_SHADOW		, qla27xx_fwdt_entry_t274  } ,
+	{ ENTRY_TYPE_WRITE_BUF		, qla27xx_fwdt_entry_t275  } ,
 	{ -1				, qla27xx_fwdt_entry_other }
 };
 
-static inline int (*qla27xx_find_entry(int type))
+static inline int (*qla27xx_find_entry(uint type))
 	(struct scsi_qla_host *, struct qla27xx_fwdt_entry *, void *, ulong *)
 {
 	struct qla27xx_fwdt_entry_call *list = ql27xx_fwdt_entry_call_list;
 
-	while (list->type != -1 && list->type != type)
+	while (list->type < type)
 		list++;
 
-	return list->call;
+	if (list->type == type)
+		return list->call;
+	return qla27xx_fwdt_entry_other;
 }
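
With the type field now unsigned, the list terminator's -1 becomes the
largest possible value, so the rewritten lookup can stop at the first
entry whose type is not smaller than the target; this relies on
ql27xx_fwdt_entry_call_list staying sorted by ascending type. A
standalone sketch of the same first-not-smaller scan over a sorted
table (the entries below are made up):

#include <stdio.h>

struct call_ent {
	unsigned int type;
	const char *name;
};

/* Sorted ascending; the sentinel is larger than any real type. */
static const struct call_ent table[] = {
	{ 255, "nop" },
	{ 256, "write" },
	{ 273, "pcicfg" },
	{ (unsigned int)-1, "other" },
};

static const char *find_entry(unsigned int type)
{
	const struct call_ent *e = table;

	while (e->type < type)
		e++;
	return e->type == type ? e->name : "other";
}

int main(void)
{
	printf("%s %s\n", find_entry(256), find_entry(300));
	return 0;
}
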
 
 static inline void *
@@ -792,6 +773,15 @@
 			break;
 		ent = qla27xx_next_entry(ent);
 	}
+
+	if (count)
+		ql_dbg(ql_dbg_misc, vha, 0xd018,
+		    "%s: residual count (%lx)\n", __func__, count);
+
+	if (ent->hdr.entry_type != ENTRY_TYPE_TMP_END)
+		ql_dbg(ql_dbg_misc, vha, 0xd019,
+		    "%s: missing end (%lx)\n", __func__, count);
+
 	ql_dbg(ql_dbg_misc, vha, 0xd01b,
 	    "%s: len=%lx\n", __func__, *len);
 }
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.h b/drivers/scsi/qla2xxx/qla_tmpl.h
index 1967424..141c1c5 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.h
+++ b/drivers/scsi/qla2xxx/qla_tmpl.h
@@ -53,6 +53,7 @@
 #define ENTRY_TYPE_RDREMRAM		272
 #define ENTRY_TYPE_PCICFG		273
 #define ENTRY_TYPE_GET_SHADOW		274
+#define ENTRY_TYPE_WRITE_BUF		275
 
 #define CAPTURE_FLAG_PHYS_ONLY		BIT_0
 #define CAPTURE_FLAG_PHYS_VIRT		BIT_1
@@ -193,6 +194,11 @@
 			uint8_t  queue_type;
 			uint8_t  reserved[3];
 		} t274;
+
+		struct __packed {
+			uint32_t length;
+			uint8_t  buffer[];
+		} t275;
 	};
 };
 
@@ -208,6 +214,8 @@
 #define T268_BUF_TYPE_EXTD_TRACE	1
 #define T268_BUF_TYPE_EXCH_BUFOFF	2
 #define T268_BUF_TYPE_EXTD_LOGIN	3
+#define T268_BUF_TYPE_REQ_MIRROR	4
+#define T268_BUF_TYPE_RSP_MIRROR	5
 
 #define T274_QUEUE_TYPE_REQ_SHAD	1
 #define T274_QUEUE_TYPE_RSP_SHAD	2
diff --git a/drivers/scsi/qla2xxx/qla_version.h b/drivers/scsi/qla2xxx/qla_version.h
index 4d2c98c..d88b862 100644
--- a/drivers/scsi/qla2xxx/qla_version.h
+++ b/drivers/scsi/qla2xxx/qla_version.h
@@ -7,7 +7,7 @@
 /*
  * Driver version
  */
-#define QLA2XXX_VERSION      "8.07.00.08-k"
+#define QLA2XXX_VERSION      "8.07.00.16-k"
 
 #define QLA_DRIVER_MAJOR_VER	8
 #define QLA_DRIVER_MINOR_VER	7
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index e2beab9..031b296 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -50,8 +50,12 @@
 #include "qla_target.h"
 #include "tcm_qla2xxx.h"
 
-struct workqueue_struct *tcm_qla2xxx_free_wq;
-struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+static struct workqueue_struct *tcm_qla2xxx_free_wq;
+static struct workqueue_struct *tcm_qla2xxx_cmd_wq;
+
+/* Local pointer to allocated TCM configfs fabric module */
+static struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
+static struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
 
 /*
  * Parse WWN.
@@ -386,6 +390,11 @@
 {
 	struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
 
+	cmd->cmd_in_wq = 0;
+
+	WARN_ON(cmd->cmd_flags & BIT_16);
+
+	cmd->cmd_flags |= BIT_16;
 	transport_generic_free_cmd(&cmd->se_cmd, 0);
 }
 
@@ -396,6 +405,7 @@
  */
 static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
 {
+	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
 	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
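
cmd_in_wq and BIT_16 together give a cheap double-free detector: BIT_16
is set exactly once on the final free, so a second trip through
tcm_qla2xxx_complete_free() fires the WARN_ON. A standalone sketch of
the pattern (assert() stands in for WARN_ON):

#include <assert.h>
#include <stdio.h>

struct cmd {
	unsigned int flags;
};
#define FLAG_FREED (1u << 16)	/* analogue of BIT_16 */

static void complete_free(struct cmd *c)
{
	assert(!(c->flags & FLAG_FREED));	/* WARN_ON analogue */
	c->flags |= FLAG_FREED;
	printf("freed once\n");
}

int main(void)
{
	struct cmd c = { 0 };

	complete_free(&c);	/* a second call would trip the assert */
	return 0;
}
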
@@ -405,6 +415,13 @@
  */
 static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
 {
+	struct qla_tgt_cmd *cmd;
+
+	if ((se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) == 0) {
+		cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
+		cmd->cmd_flags |= BIT_14;
+	}
+
 	return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
 }
 
@@ -511,8 +528,13 @@
 
 static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
 {
-	struct qla_tgt_cmd *cmd = container_of(se_cmd,
-				struct qla_tgt_cmd, se_cmd);
+	struct qla_tgt_cmd *cmd;
+
+	/* check for task mgmt cmd */
+	if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+		return 0xffffffff;
+
+	cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
 
 	return cmd->tag;
 }
@@ -562,6 +584,8 @@
 	 * Ensure that the complete FCP WRITE payload has been received.
 	 * Otherwise return an exception via CHECK_CONDITION status.
 	 */
+	cmd->cmd_in_wq = 0;
+	cmd->cmd_flags |= BIT_11;
 	if (!cmd->write_data_transferred) {
 		/*
 		 * Check if se_cmd has already been aborted via LUN_RESET, and
@@ -590,6 +614,8 @@
  */
 static void tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
 {
+	cmd->cmd_flags |= BIT_10;
+	cmd->cmd_in_wq = 1;
 	INIT_WORK(&cmd->work, tcm_qla2xxx_handle_data_work);
 	queue_work(tcm_qla2xxx_free_wq, &cmd->work);
 }
@@ -633,6 +659,7 @@
 	struct qla_tgt_cmd *cmd = container_of(se_cmd,
 				struct qla_tgt_cmd, se_cmd);
 
+	cmd->cmd_flags |= BIT_4;
 	cmd->bufflen = se_cmd->data_length;
 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
 	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
@@ -640,6 +667,7 @@
 	cmd->sg_cnt = se_cmd->t_data_nents;
 	cmd->sg = se_cmd->t_data_sg;
 	cmd->offset = 0;
+	cmd->cmd_flags |= BIT_3;
 
 	cmd->prot_sg_cnt = se_cmd->t_prot_nents;
 	cmd->prot_sg = se_cmd->t_prot_sg;
@@ -665,6 +693,11 @@
 	cmd->offset = 0;
 	cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
 	cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
+	if (cmd->cmd_flags & BIT_5) {
+		pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
+		dump_stack();
+	}
+	cmd->cmd_flags |= BIT_5;
 
 	if (se_cmd->data_direction == DMA_FROM_DEVICE) {
 		/*
@@ -734,10 +767,6 @@
 	cmd->sg_mapped = 0;
 }
 
-/* Local pointer to allocated TCM configfs fabric module */
-struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
-struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
-
 static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
 			struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
 /*
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index d81f3cc..79c77b4 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -670,14 +670,10 @@
 		return SCSI_MLQUEUE_DEVICE_BUSY;
 	}
 
-	/*
-	 * If SCSI-2 or lower, store the LUN value in cmnd.
-	 */
-	if (cmd->device->scsi_level <= SCSI_2 &&
-	    cmd->device->scsi_level != SCSI_UNKNOWN) {
+	/* Store the LUN value in cmnd, if needed. */
+	if (cmd->device->lun_in_cdb)
 		cmd->cmnd[1] = (cmd->cmnd[1] & 0x1f) |
 			       (cmd->device->lun << 5 & 0xe0);
-	}
 
 	scsi_log_send(cmd);
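
The lun_in_cdb flag preserves the old SCSI-2 behaviour in which the LUN
rides in the top three bits of CDB byte 1 (so only LUNs 0..7 fit); the
masking keeps the low five command-specific bits intact. A standalone
sketch of the packing:

#include <stdio.h>

/* SCSI-2 CDB[1] packing: low 5 bits are command-specific, top 3 bits
 * carry the LUN, hence the 0..7 limit for this addressing mode. */
static unsigned char pack_cdb1(unsigned char cdb1, unsigned int lun)
{
	return (cdb1 & 0x1f) | ((lun << 5) & 0xe0);
}

int main(void)
{
	printf("0x%02x\n", pack_cdb1(0x02, 3));	/* prints 0x62 */
	return 0;
}
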
 
@@ -1371,7 +1367,11 @@
 module_param(scsi_logging_level, int, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(scsi_logging_level, "a bit mask of logging levels");
 
+#ifdef CONFIG_SCSI_MQ_DEFAULT
+bool scsi_use_blk_mq = true;
+#else
 bool scsi_use_blk_mq = false;
+#endif
 module_param_named(use_blk_mq, scsi_use_blk_mq, bool, S_IWUSR | S_IRUGO);
 
 static int __init init_scsi(void)
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index d19c0e3..2b6d447 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -123,7 +123,7 @@
 #define DEF_PHYSBLK_EXP 0
 #define DEF_PTYPE   0
 #define DEF_REMOVABLE false
-#define DEF_SCSI_LEVEL   5    /* INQUIRY, byte2 [5->SPC-3] */
+#define DEF_SCSI_LEVEL   6    /* INQUIRY, byte2 [6->SPC-4] */
 #define DEF_SECTOR_SIZE 512
 #define DEF_TAGGED_QUEUING 0 /* 0 | MSG_SIMPLE_TAG | MSG_ORDERED_TAG */
 #define DEF_UNMAP_ALIGNMENT 0
@@ -929,7 +929,7 @@
 {
 	unsigned char pq_pdt;
 	unsigned char * arr;
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	int alloc_len, n, ret;
 
 	alloc_len = (cmd[3] << 8) + cmd[4];
@@ -1056,15 +1056,15 @@
 	memcpy(&arr[16], inq_product_id, 16);
 	memcpy(&arr[32], inq_product_rev, 4);
 	/* version descriptors (2 bytes each) follow */
-	arr[58] = 0x0; arr[59] = 0x77; /* SAM-3 ANSI */
-	arr[60] = 0x3; arr[61] = 0x14;  /* SPC-3 ANSI */
+	arr[58] = 0x0; arr[59] = 0xa2;  /* SAM-5 rev 4 */
+	arr[60] = 0x4; arr[61] = 0x68;  /* SPC-4 rev 37 */
 	n = 62;
 	if (scsi_debug_ptype == 0) {
-		arr[n++] = 0x3; arr[n++] = 0x3d; /* SBC-2 ANSI */
+		arr[n++] = 0x4; arr[n++] = 0xc5; /* SBC-4 rev 36 */
 	} else if (scsi_debug_ptype == 1) {
-		arr[n++] = 0x3; arr[n++] = 0x60; /* SSC-2 no version */
+		arr[n++] = 0x5; arr[n++] = 0x25; /* SSC-4 rev 3 */
 	}
-	arr[n++] = 0xc; arr[n++] = 0xf;  /* SAS-1.1 rev 10 */
+	arr[n++] = 0x20; arr[n++] = 0xe6;  /* SPL-3 rev 7 */
 	ret = fill_from_dev_buffer(scp, arr,
 			    min(alloc_len, SDEBUG_LONG_INQ_SZ));
 	kfree(arr);
@@ -1075,7 +1075,7 @@
 			 struct sdebug_dev_info * devip)
 {
 	unsigned char * sbuff;
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	unsigned char arr[SCSI_SENSE_BUFFERSIZE];
 	int want_dsense;
 	int len = 18;
@@ -1115,7 +1115,7 @@
 static int resp_start_stop(struct scsi_cmnd * scp,
 			   struct sdebug_dev_info * devip)
 {
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	int power_cond, errsts, start;
 
 	errsts = check_readiness(scp, UAS_ONLY, devip);
@@ -1177,7 +1177,7 @@
 static int resp_readcap16(struct scsi_cmnd * scp,
 			  struct sdebug_dev_info * devip)
 {
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	unsigned char arr[SDEBUG_READCAP16_ARR_SZ];
 	unsigned long long capac;
 	int errsts, k, alloc_len;
@@ -1222,7 +1222,7 @@
 static int resp_report_tgtpgs(struct scsi_cmnd * scp,
 			      struct sdebug_dev_info * devip)
 {
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	unsigned char * arr;
 	int host_no = devip->sdbg_host->shost->host_no;
 	int n, ret, alen, rlen;
@@ -1468,7 +1468,7 @@
 	int k, alloc_len, msense_6, offset, len, errsts, target_dev_id;
 	unsigned char * ap;
 	unsigned char arr[SDEBUG_MAX_MSENSE_SZ];
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 
 	errsts = check_readiness(scp, UAS_ONLY, devip);
 	if (errsts)
@@ -1630,7 +1630,7 @@
 	int pf, sp, ps, md_len, bd_len, off, spf, pg_len;
 	int param_len, res, errsts, mpage;
 	unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 
 	errsts = check_readiness(scp, UAS_ONLY, devip);
 	if (errsts)
@@ -1739,7 +1739,7 @@
 {
 	int ppc, sp, pcontrol, pcode, subpcode, alloc_len, errsts, len, n;
 	unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 
 	errsts = check_readiness(scp, UAS_ONLY, devip);
 	if (errsts)
@@ -2414,7 +2414,7 @@
 	unsigned int alloc_len;
 	int lun_cnt, i, upper, num, n;
 	u64 wlun, lun;
-	unsigned char *cmd = (unsigned char *)scp->cmnd;
+	unsigned char *cmd = scp->cmnd;
 	int select_report = (int)cmd[2];
 	struct scsi_lun *one_lun;
 	unsigned char arr[SDEBUG_RLUN_ARR_SZ];
@@ -2743,6 +2743,13 @@
 		if (test_bit(k, queued_in_use_bm)) {
 			sqcp = &queued_arr[k];
 			if (cmnd == sqcp->a_cmnd) {
+				devip = (struct sdebug_dev_info *)
+					cmnd->device->hostdata;
+				if (devip)
+					atomic_dec(&devip->num_in_q);
+				sqcp->a_cmnd = NULL;
+				spin_unlock_irqrestore(&queued_arr_lock,
+						       iflags);
 				if (scsi_debug_ndelay > 0) {
 					if (sqcp->sd_hrtp)
 						hrtimer_cancel(
@@ -2755,18 +2762,13 @@
 					if (sqcp->tletp)
 						tasklet_kill(sqcp->tletp);
 				}
-				__clear_bit(k, queued_in_use_bm);
-				devip = (struct sdebug_dev_info *)
-					cmnd->device->hostdata;
-				if (devip)
-					atomic_dec(&devip->num_in_q);
-				sqcp->a_cmnd = NULL;
-				break;
+				clear_bit(k, queued_in_use_bm);
+				return 1;
 			}
 		}
 	}
 	spin_unlock_irqrestore(&queued_arr_lock, iflags);
-	return (k < qmax) ? 1 : 0;
+	return 0;
 }
 
 /* Deletes (stops) timers or tasklets of all queued commands */
@@ -2782,6 +2784,13 @@
 		if (test_bit(k, queued_in_use_bm)) {
 			sqcp = &queued_arr[k];
 			if (sqcp->a_cmnd) {
+				devip = (struct sdebug_dev_info *)
+					sqcp->a_cmnd->device->hostdata;
+				if (devip)
+					atomic_dec(&devip->num_in_q);
+				sqcp->a_cmnd = NULL;
+				spin_unlock_irqrestore(&queued_arr_lock,
+						       iflags);
 				if (scsi_debug_ndelay > 0) {
 					if (sqcp->sd_hrtp)
 						hrtimer_cancel(
@@ -2794,12 +2803,8 @@
 					if (sqcp->tletp)
 						tasklet_kill(sqcp->tletp);
 				}
-				__clear_bit(k, queued_in_use_bm);
-				devip = (struct sdebug_dev_info *)
-					sqcp->a_cmnd->device->hostdata;
-				if (devip)
-					atomic_dec(&devip->num_in_q);
-				sqcp->a_cmnd = NULL;
+				clear_bit(k, queued_in_use_bm);
+				spin_lock_irqsave(&queued_arr_lock, iflags);
 			}
 		}
 	}
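
Both rewritten loops above drop queued_arr_lock before calling
hrtimer_cancel()/tasklet_kill(): those calls can block until the timer
or tasklet handler finishes, and a handler that takes the same lock
would otherwise deadlock. A standalone sketch of the pattern (a pthread
mutex stands in for the spinlock, blocking_cancel() for the cancel
calls):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int slot_busy = 1;

static void blocking_cancel(void)
{
	/* stands in for hrtimer_cancel()/tasklet_kill(): may wait for
	 * a handler that itself takes 'lock' */
	pthread_mutex_lock(&lock);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_mutex_lock(&lock);
	slot_busy = 0;			/* state change under the lock */
	pthread_mutex_unlock(&lock);	/* drop before blocking cancel */
	blocking_cancel();
	printf("cancelled, slot_busy=%d\n", slot_busy);
	return 0;
}
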
@@ -3006,7 +3011,7 @@
 	      int scsi_result, int delta_jiff)
 {
 	unsigned long iflags;
-	int k, num_in_q, tsf, qdepth, inject;
+	int k, num_in_q, qdepth, inject;
 	struct sdebug_queued_cmd *sqcp = NULL;
 	struct scsi_device *sdp = cmnd->device;
 
@@ -3019,55 +3024,48 @@
 	if ((scsi_result) && (SCSI_DEBUG_OPT_NOISE & scsi_debug_opts))
 		sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
 			    __func__, scsi_result);
-	if (delta_jiff == 0) {
-		/* using same thread to call back mid-layer */
-		cmnd->result = scsi_result;
-		cmnd->scsi_done(cmnd);
-		return 0;
-	}
+	if (delta_jiff == 0)
+		goto respond_in_thread;
 
-	/* deferred response cases */
+	/* schedule the response at a later time if resources permit */
 	spin_lock_irqsave(&queued_arr_lock, iflags);
 	num_in_q = atomic_read(&devip->num_in_q);
 	qdepth = cmnd->device->queue_depth;
-	k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
-	tsf = 0;
 	inject = 0;
-	if ((qdepth > 0) && (num_in_q >= qdepth))
-		tsf = 1;
-	else if ((scsi_debug_every_nth != 0) &&
-		 (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts)) {
+	if ((qdepth > 0) && (num_in_q >= qdepth)) {
+		if (scsi_result) {
+			spin_unlock_irqrestore(&queued_arr_lock, iflags);
+			goto respond_in_thread;
+		} else
+			scsi_result = device_qfull_result;
+	} else if ((scsi_debug_every_nth != 0) &&
+		   (SCSI_DEBUG_OPT_RARE_TSF & scsi_debug_opts) &&
+		   (scsi_result == 0)) {
 		if ((num_in_q == (qdepth - 1)) &&
 		    (atomic_inc_return(&sdebug_a_tsf) >=
 		     abs(scsi_debug_every_nth))) {
 			atomic_set(&sdebug_a_tsf, 0);
 			inject = 1;
-			tsf = 1;
+			scsi_result = device_qfull_result;
 		}
 	}
 
-	/* if (tsf) simulate device reporting SCSI status of TASK SET FULL.
-	 * Might override existing CHECK CONDITION. */
-	if (tsf)
-		scsi_result = device_qfull_result;
+	k = find_first_zero_bit(queued_in_use_bm, scsi_debug_max_queue);
 	if (k >= scsi_debug_max_queue) {
-		if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
-			tsf = 1;
 		spin_unlock_irqrestore(&queued_arr_lock, iflags);
+		if (scsi_result)
+			goto respond_in_thread;
+		else if (SCSI_DEBUG_OPT_ALL_TSF & scsi_debug_opts)
+			scsi_result = device_qfull_result;
 		if (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts)
 			sdev_printk(KERN_INFO, sdp,
-				    "%s: num_in_q=%d, bypass q, %s%s\n",
-				    __func__, num_in_q,
-				    (inject ? "<inject> " : ""),
-				    (tsf ?  "status: TASK SET FULL" :
-					    "report: host busy"));
-		if (tsf) {
-			/* queued_arr full so respond in same thread */
-			cmnd->result = scsi_result;
-			cmnd->scsi_done(cmnd);
-			/* As scsi_done() is called "inline" must return 0 */
-			return 0;
-		} else
+				    "%s: max_queue=%d exceeded, %s\n",
+				    __func__, scsi_debug_max_queue,
+				    (scsi_result ?  "status: TASK SET FULL" :
+						    "report: host busy"));
+		if (scsi_result)
+			goto respond_in_thread;
+		else
 			return SCSI_MLQUEUE_HOST_BUSY;
 	}
 	__set_bit(k, queued_in_use_bm);
@@ -3117,12 +3115,18 @@
 		else
 			tasklet_schedule(sqcp->tletp);
 	}
-	if (tsf && (SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts))
+	if ((SCSI_DEBUG_OPT_Q_NOISE & scsi_debug_opts) &&
+	    (scsi_result == device_qfull_result))
 		sdev_printk(KERN_INFO, sdp,
 			    "%s: num_in_q=%d +1, %s%s\n", __func__,
 			    num_in_q, (inject ? "<inject> " : ""),
 			    "status: TASK SET FULL");
 	return 0;
+
+respond_in_thread:	/* call back to mid-layer using invocation thread */
+	cmnd->result = scsi_result;
+	cmnd->scsi_done(cmnd);
+	return 0;
 }
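
The restructured flow funnels every inline completion through the
respond_in_thread label: a command that already carries a result is
never queued, a full device queue turns a clean command into TASK SET
FULL, and a missing queue slot means host-busy unless a status can be
returned inline. A simplified standalone sketch of that decision (the
flag names are stand-ins for the driver's variables):

#include <stdio.h>

enum outcome { RESPOND_INLINE, DEFER, HOST_BUSY };

static enum outcome classify(int queue_full, int slot_free,
			     int has_result, int all_tsf)
{
	if (queue_full) {
		if (has_result)
			return RESPOND_INLINE;
		has_result = 1;		/* becomes TASK SET FULL */
	}
	if (!slot_free)
		return (has_result || all_tsf) ? RESPOND_INLINE : HOST_BUSY;
	return DEFER;
}

int main(void)
{
	printf("%d %d %d\n",
	       classify(1, 1, 1, 0),	/* inline, keep existing result */
	       classify(1, 1, 0, 0),	/* defer with TASK SET FULL */
	       classify(0, 0, 0, 0));	/* host busy */
	return 0;
}
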
 
 /* Note: The following macros create attribute files in the
@@ -3206,7 +3210,7 @@
 MODULE_PARM_DESC(physblk_exp, "physical block exponent (def=0)");
 MODULE_PARM_DESC(ptype, "SCSI peripheral type(def=0[disk])");
 MODULE_PARM_DESC(removable, "claim to have removable media (def=0)");
-MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=5[SPC-3])");
+MODULE_PARM_DESC(scsi_level, "SCSI level to simulate(def=6[SPC-4])");
 MODULE_PARM_DESC(sector_size, "logical block size in bytes (def=512)");
 MODULE_PARM_DESC(unmap_alignment, "lowest aligned thin provisioning lba (def=0)");
 MODULE_PARM_DESC(unmap_granularity, "thin provisioning granularity in blocks (def=1)");
@@ -4085,7 +4089,7 @@
 static int
 scsi_debug_queuecommand(struct scsi_cmnd *SCpnt)
 {
-	unsigned char *cmd = (unsigned char *) SCpnt->cmnd;
+	unsigned char *cmd = SCpnt->cmnd;
 	int len, k;
 	unsigned int num;
 	unsigned long long lba;
@@ -4103,7 +4107,7 @@
 
 	scsi_set_resid(SCpnt, 0);
 	if ((SCSI_DEBUG_OPT_NOISE & scsi_debug_opts) &&
-	    !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts) && cmd) {
+	    !(SCSI_DEBUG_OPT_NO_CDB_NOISE & scsi_debug_opts)) {
 		char b[120];
 		int n;
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 5db8454..6b20ef3 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1238,9 +1238,9 @@
 /**
  * scsi_eh_test_devices - check if devices are responding from error recovery.
  * @cmd_list:	scsi commands in error recovery.
- * @work_q:     queue for commands which still need more error recovery
- * @done_q:     queue for commands which are finished
- * @try_stu:    boolean on if a STU command should be tried in addition to TUR.
+ * @work_q:	queue for commands which still need more error recovery
+ * @done_q:	queue for commands which are finished
+ * @try_stu:	boolean on if a STU command should be tried in addition to TUR.
  *
  * Description:
  *    Tests if devices are in a working state.  Commands to devices now in
@@ -1373,7 +1373,7 @@
  /**
  * scsi_eh_stu - send START_UNIT if needed
  * @shost:	&scsi host being recovered.
- * @work_q:     &list_head for pending commands.
+ * @work_q:	&list_head for pending commands.
  * @done_q:	&list_head for processed commands.
  *
  * Notes:
@@ -1436,7 +1436,7 @@
 /**
  * scsi_eh_bus_device_reset - send bdr if needed
  * @shost:	scsi host being recovered.
- * @work_q:     &list_head for pending commands.
+ * @work_q:	&list_head for pending commands.
  * @done_q:	&list_head for processed commands.
  *
  * Notes:
@@ -1502,7 +1502,7 @@
 /**
  * scsi_eh_target_reset - send target reset if needed
  * @shost:	scsi host being recovered.
- * @work_q:     &list_head for pending commands.
+ * @work_q:	&list_head for pending commands.
  * @done_q:	&list_head for processed commands.
  *
  * Notes:
@@ -1567,7 +1567,7 @@
 /**
  * scsi_eh_bus_reset - send a bus reset
  * @shost:	&scsi host being recovered.
- * @work_q:     &list_head for pending commands.
+ * @work_q:	&list_head for pending commands.
  * @done_q:	&list_head for processed commands.
  */
 static int scsi_eh_bus_reset(struct Scsi_Host *shost,
@@ -1638,8 +1638,9 @@
 
 /**
  * scsi_eh_host_reset - send a host reset
- * @work_q:	list_head for processed commands.
- * @done_q:	list_head for processed commands.
+ * @shost:	host to be reset.
+ * @work_q:	&list_head for pending commands.
+ * @done_q:	&list_head for processed commands.
  */
 static int scsi_eh_host_reset(struct Scsi_Host *shost,
 			      struct list_head *work_q,
@@ -1677,8 +1678,8 @@
 
 /**
  * scsi_eh_offline_sdevs - offline scsi devices that fail to recover
- * @work_q:	list_head for processed commands.
- * @done_q:	list_head for processed commands.
+ * @work_q:	&list_head for pending commands.
+ * @done_q:	&list_head for processed commands.
  */
 static void scsi_eh_offline_sdevs(struct list_head *work_q,
 				  struct list_head *done_q)
@@ -2043,8 +2044,8 @@
 
 /**
  * scsi_eh_ready_devs - check device ready state and recover if not.
- * @shost: 	host to be recovered.
- * @work_q:     &list_head for pending commands.
+ * @shost:	host to be recovered.
+ * @work_q:	&list_head for pending commands.
  * @done_q:	&list_head for processed commands.
  */
 void scsi_eh_ready_devs(struct Scsi_Host *shost,
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index aaea4b9..db8c449 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -645,16 +645,18 @@
 static void scsi_mq_uninit_cmd(struct scsi_cmnd *cmd)
 {
 	struct scsi_device *sdev = cmd->device;
+	struct Scsi_Host *shost = sdev->host;
 	unsigned long flags;
 
-	BUG_ON(list_empty(&cmd->list));
-
 	scsi_mq_free_sgtables(cmd);
 	scsi_uninit_cmd(cmd);
 
-	spin_lock_irqsave(&sdev->list_lock, flags);
-	list_del_init(&cmd->list);
-	spin_unlock_irqrestore(&sdev->list_lock, flags);
+	if (shost->use_cmd_list) {
+		BUG_ON(list_empty(&cmd->list));
+		spin_lock_irqsave(&sdev->list_lock, flags);
+		list_del_init(&cmd->list);
+		spin_unlock_irqrestore(&sdev->list_lock, flags);
+	}
 }
 
 /*
@@ -1816,13 +1818,11 @@
 	INIT_DELAYED_WORK(&cmd->abort_work, scmd_eh_abort_handler);
 	cmd->jiffies_at_alloc = jiffies;
 
-	/*
-	 * XXX: cmd_list lookups are only used by two drivers, try to get
-	 * rid of this list in common code.
-	 */
-	spin_lock_irq(&sdev->list_lock);
-	list_add_tail(&cmd->list, &sdev->cmd_list);
-	spin_unlock_irq(&sdev->list_lock);
+	if (shost->use_cmd_list) {
+		spin_lock_irq(&sdev->list_lock);
+		list_add_tail(&cmd->list, &sdev->cmd_list);
+		spin_unlock_irq(&sdev->list_lock);
+	}
 
 	sg = (void *)cmd + sizeof(struct scsi_cmnd) + shost->hostt->cmd_size;
 	cmd->sdb.table.sgl = sg;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 56675db..ba3f1e8 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -736,6 +736,16 @@
 		sdev->scsi_level++;
 	sdev->sdev_target->scsi_level = sdev->scsi_level;
 
+	/*
+	 * If SCSI-2 or lower, and if the transport requires it,
+	 * store the LUN value in CDB[1].
+	 */
+	sdev->lun_in_cdb = 0;
+	if (sdev->scsi_level <= SCSI_2 &&
+	    sdev->scsi_level != SCSI_UNKNOWN &&
+	    !sdev->host->no_scsi2_lun_in_cdb)
+		sdev->lun_in_cdb = 1;
+
 	return 0;
 }
 
@@ -805,6 +815,19 @@
 	} else {
 		sdev->type = (inq_result[0] & 0x1f);
 		sdev->removable = (inq_result[1] & 0x80) >> 7;
+
+		/*
+		 * Some devices may respond with a wrong type for
+		 * well-known logical units. Force the well-known type
+		 * to enumerate them correctly.
+		 */
+		if (scsi_is_wlun(sdev->lun) && sdev->type != TYPE_WLUN) {
+			sdev_printk(KERN_WARNING, sdev,
+				"%s: correcting incorrect peripheral device type 0x%x for W-LUN 0x%16xhN\n",
+				__func__, sdev->type, (unsigned int)sdev->lun);
+			sdev->type = TYPE_WLUN;
+		}
+
 	}
 
 	if (sdev->type == TYPE_RBC || sdev->type == TYPE_ROM) {
@@ -1733,6 +1756,9 @@
 		/* target removed before the device could be added */
 		if (sdev->sdev_state == SDEV_DEL)
 			continue;
+		/* If device is already visible, skip adding it to sysfs */
+		if (sdev->is_visible)
+			continue;
 		if (!scsi_host_scan_allowed(shost) ||
 		    scsi_sysfs_add_sdev(sdev) != 0)
 			__scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 8b4105a..f4cb7b3 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1044,10 +1044,6 @@
 	pm_runtime_enable(&sdev->sdev_gendev);
 	scsi_autopm_put_target(starget);
 
-	/* The following call will keep sdev active indefinitely, until
-	 * its driver does a corresponding scsi_autopm_pm_device().  Only
-	 * drivers supporting autosuspend will do this.
-	 */
 	scsi_autopm_get_device(sdev);
 
 	error = device_add(&sdev->sdev_gendev);
@@ -1085,6 +1081,7 @@
 		}
 	}
 
+	scsi_autopm_put_device(sdev);
 	return error;
 }
 
@@ -1263,7 +1260,19 @@
 	sdev->sdev_dev.class = &sdev_class;
 	dev_set_name(&sdev->sdev_dev, "%d:%d:%d:%llu",
 		     sdev->host->host_no, sdev->channel, sdev->id, sdev->lun);
+	/*
+	 * Get a default scsi_level from the target (derived from sibling
+	 * devices).  This is the best we can do for guessing how to set
+	 * sdev->lun_in_cdb for the initial INQUIRY command.  For LUN 0 the
+	 * setting doesn't matter, because all the bits are zero anyway.
+	 * But it does matter for higher LUNs.
+	 */
 	sdev->scsi_level = starget->scsi_level;
+	if (sdev->scsi_level <= SCSI_2 &&
+			sdev->scsi_level != SCSI_UNKNOWN &&
+			!shost->no_scsi2_lun_in_cdb)
+		sdev->lun_in_cdb = 1;
+
 	transport_setup_device(&sdev->sdev_gendev);
 	spin_lock_irqsave(shost->host_lock, flags);
 	list_add_tail(&sdev->same_target_siblings, &starget->devices);
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 2c2041c..0cb5c9f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -185,7 +185,7 @@
 	if (ct < 0)
 		return -EINVAL;
 	rcd = ct & 0x01 ? 1 : 0;
-	wce = ct & 0x02 ? 1 : 0;
+	wce = (ct & 0x02) && !sdkp->write_prot ? 1 : 0;
 
 	if (sdkp->cache_override) {
 		sdkp->WCE = wce;
@@ -2490,6 +2490,10 @@
 			sdkp->DPOFUA = 0;
 		}
 
+		/* No cache flush allowed for write protected devices */
+		if (sdkp->WCE && sdkp->write_prot)
+			sdkp->WCE = 0;
+
 		if (sdkp->first_scan || old_wce != sdkp->WCE ||
 		    old_rcd != sdkp->RCD || old_dpofua != sdkp->DPOFUA)
 			sd_printk(KERN_NOTICE, sdkp,
@@ -2961,6 +2965,7 @@
 	int index;
 	int error;
 
+	scsi_autopm_get_device(sdp);
 	error = -ENODEV;
 	if (sdp->type != TYPE_DISK && sdp->type != TYPE_MOD && sdp->type != TYPE_RBC)
 		goto out;
@@ -3037,6 +3042,7 @@
  out_free:
 	kfree(sdkp);
  out:
+	scsi_autopm_put_device(sdp);
 	return error;
 }
 
diff --git a/drivers/scsi/sr.c b/drivers/scsi/sr.c
index 7eeb936..2de44cc 100644
--- a/drivers/scsi/sr.c
+++ b/drivers/scsi/sr.c
@@ -657,6 +657,7 @@
 	struct scsi_cd *cd;
 	int minor, error;
 
+	scsi_autopm_get_device(sdev);
 	error = -ENODEV;
 	if (sdev->type != TYPE_ROM && sdev->type != TYPE_WORM)
 		goto fail;
@@ -744,6 +745,7 @@
 fail_free:
 	kfree(cd);
 fail:
+	scsi_autopm_put_device(sdev);
 	return error;
 }
 
diff --git a/drivers/scsi/st.c b/drivers/scsi/st.c
index aff9689..d3fd6e8 100644
--- a/drivers/scsi/st.c
+++ b/drivers/scsi/st.c
@@ -4105,6 +4105,7 @@
 		return -ENODEV;
 	}
 
+	scsi_autopm_get_device(SDp);
 	i = queue_max_segments(SDp->request_queue);
 	if (st_max_sg_segs < i)
 		i = st_max_sg_segs;
@@ -4244,6 +4245,7 @@
 out_buffer_free:
 	kfree(buffer);
 out:
+	scsi_autopm_put_device(SDp);
 	return -ENODEV;
 };
 
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index fecac5d0..733e5f7 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -1152,24 +1152,12 @@
 	stor_pkt->vm_srb.sense_info_length =
 	vstor_packet->vm_srb.sense_info_length;
 
-	if (vstor_packet->vm_srb.scsi_status != 0 ||
-		vstor_packet->vm_srb.srb_status != SRB_STATUS_SUCCESS){
-		dev_warn(&device->device,
-			 "cmd 0x%x scsi status 0x%x srb status 0x%x\n",
-			 stor_pkt->vm_srb.cdb[0],
-			 vstor_packet->vm_srb.scsi_status,
-			 vstor_packet->vm_srb.srb_status);
-	}
 
 	if ((vstor_packet->vm_srb.scsi_status & 0xFF) == 0x02) {
 		/* CHECK_CONDITION */
 		if (vstor_packet->vm_srb.srb_status &
 			SRB_STATUS_AUTOSENSE_VALID) {
 			/* autosense data available */
-			dev_warn(&device->device,
-				 "stor pkt %p autosense data valid - len %d\n",
-				 request,
-				 vstor_packet->vm_srb.sense_info_length);
 
 			memcpy(request->sense_buffer,
 			       vstor_packet->vm_srb.sense_data,
diff --git a/drivers/scsi/ufs/Kconfig b/drivers/scsi/ufs/Kconfig
index f07f9017..6e07b2a 100644
--- a/drivers/scsi/ufs/Kconfig
+++ b/drivers/scsi/ufs/Kconfig
@@ -35,6 +35,8 @@
 config SCSI_UFSHCD
 	tristate "Universal Flash Storage Controller Driver Core"
 	depends on SCSI && SCSI_DMA
+	select PM_DEVFREQ
+	select DEVFREQ_GOV_SIMPLE_ONDEMAND
 	---help---
 	This selects the support for UFS devices in Linux, say Y and make
 	  sure that you know the name of your UFS host adapter (the card
diff --git a/drivers/scsi/ufs/ufs.h b/drivers/scsi/ufs/ufs.h
index fafcf5e..42c459a 100644
--- a/drivers/scsi/ufs/ufs.h
+++ b/drivers/scsi/ufs/ufs.h
@@ -49,6 +49,27 @@
 #define UPIU_HEADER_DWORD(byte3, byte2, byte1, byte0)\
 			cpu_to_be32((byte3 << 24) | (byte2 << 16) |\
 			 (byte1 << 8) | (byte0))
+/*
+ * A UFS device may have standard LUs, whose LUN ids range from 0x00 to
+ * 0x7F; standard LUs use the "Peripheral Device Addressing Format".
+ * A UFS device may also have Well Known LUs (also referred to as W-LUs),
+ * again with ids from 0x00 to 0x7F. For W-LUs the device only uses the
+ * "Extended Addressing Format", which places the W-LUNs at 0xc100
+ * (SCSI_W_LUN_BASE) and above.
+ * The maximum LUN number a UFS device can report is therefore 0xC17F.
+ */
+#define UFS_UPIU_MAX_UNIT_NUM_ID	0x7F
+#define UFS_MAX_LUNS		(SCSI_W_LUN_BASE + UFS_UPIU_MAX_UNIT_NUM_ID)
+#define UFS_UPIU_WLUN_ID	(1 << 7)
+#define UFS_UPIU_MAX_GENERAL_LUN	8
+
+/* Well known logical unit id in LUN field of UPIU */
+enum {
+	UFS_UPIU_REPORT_LUNS_WLUN	= 0x81,
+	UFS_UPIU_UFS_DEVICE_WLUN	= 0xD0,
+	UFS_UPIU_BOOT_WLUN		= 0xB0,
+	UFS_UPIU_RPMB_WLUN		= 0xC4,
+};
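
The W-LU ids above carry UFS_UPIU_WLUN_ID (bit 7) as a marker; rebasing
them into the SCSI well-known LUN range at SCSI_W_LUN_BASE yields the
0xC100..0xC17F span described in the comment. A minimal sketch of one
plausible mapping (the helper name is made up and the driver's actual
conversion may differ):

#include <stdio.h>

#define SCSI_W_LUN_BASE		0xc100
#define UFS_UPIU_WLUN_ID	(1 << 7)

/* Strip the UPIU W-LU marker bit, then rebase into the SCSI
 * well-known LUN range. */
static unsigned int upiu_wlun_to_scsi_wlun(unsigned int upiu_wlun_id)
{
	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
}

int main(void)
{
	printf("0x%x\n", upiu_wlun_to_scsi_wlun(0xD0));	/* 0xc150 */
	return 0;
}
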
 
 /*
  * UFS Protocol Information Unit related definitions
@@ -108,11 +129,13 @@
 /* Flag idn for Query Requests*/
 enum flag_idn {
 	QUERY_FLAG_IDN_FDEVICEINIT      = 0x01,
+	QUERY_FLAG_IDN_PWR_ON_WPE	= 0x03,
 	QUERY_FLAG_IDN_BKOPS_EN         = 0x04,
 };
 
 /* Attribute idn for Query requests */
 enum attr_idn {
+	QUERY_ATTR_IDN_ACTIVE_ICC_LVL	= 0x03,
 	QUERY_ATTR_IDN_BKOPS_STATUS	= 0x05,
 	QUERY_ATTR_IDN_EE_CONTROL	= 0x0D,
 	QUERY_ATTR_IDN_EE_STATUS	= 0x0E,
@@ -129,10 +152,29 @@
 	QUERY_DESC_IDN_RFU_1		= 0x6,
 	QUERY_DESC_IDN_GEOMETRY		= 0x7,
 	QUERY_DESC_IDN_POWER		= 0x8,
-	QUERY_DESC_IDN_RFU_2		= 0x9,
+	QUERY_DESC_IDN_MAX,
 };
 
-#define UNIT_DESC_MAX_SIZE       0x22
+enum desc_header_offset {
+	QUERY_DESC_LENGTH_OFFSET	= 0x00,
+	QUERY_DESC_DESC_TYPE_OFFSET	= 0x01,
+};
+
+enum ufs_desc_max_size {
+	QUERY_DESC_DEVICE_MAX_SIZE		= 0x1F,
+	QUERY_DESC_CONFIGURAION_MAX_SIZE	= 0x90,
+	QUERY_DESC_UNIT_MAX_SIZE		= 0x23,
+	QUERY_DESC_INTERCONNECT_MAX_SIZE	= 0x06,
+	/*
+	 * Max. 126 UNICODE characters (2 bytes per character) plus 2 bytes
+	 * of descriptor header.
+	 */
+	QUERY_DESC_STRING_MAX_SIZE		= 0xFE,
+	QUERY_DESC_GEOMETRY_MAZ_SIZE		= 0x44,
+	QUERY_DESC_POWER_MAX_SIZE		= 0x62,
+	QUERY_DESC_RFU_MAX_SIZE			= 0x00,
+};
+
 /* Unit descriptor parameters offsets in bytes*/
 enum unit_desc_param {
 	UNIT_DESC_PARAM_LEN			= 0x0,
@@ -153,6 +195,43 @@
 	UNIT_DESC_PARAM_LARGE_UNIT_SIZE_M1	= 0x22,
 };
 
+/*
+ * Logical Unit Write Protect
+ * 00h: LU not write protected
+ * 01h: LU write protected when fPowerOnWPEn =1
+ * 02h: LU permanently write protected when fPermanentWPEn =1
+ */
+enum ufs_lu_wp_type {
+	UFS_LU_NO_WP		= 0x00,
+	UFS_LU_POWER_ON_WP	= 0x01,
+	UFS_LU_PERM_WP		= 0x02,
+};
+
+/* bActiveICCLevel parameter current units */
+enum {
+	UFSHCD_NANO_AMP		= 0,
+	UFSHCD_MICRO_AMP	= 1,
+	UFSHCD_MILI_AMP		= 2,
+	UFSHCD_AMP		= 3,
+};
+
+#define POWER_DESC_MAX_SIZE			0x62
+#define POWER_DESC_MAX_ACTV_ICC_LVLS		16
+
+/* Attribute  bActiveICCLevel parameter bit masks definitions */
+#define ATTR_ICC_LVL_UNIT_OFFSET	14
+#define ATTR_ICC_LVL_UNIT_MASK		(0x3 << ATTR_ICC_LVL_UNIT_OFFSET)
+#define ATTR_ICC_LVL_VALUE_MASK		0x3FF
+
+/* Power descriptor parameters offsets in bytes */
+enum power_desc_param_offset {
+	PWR_DESC_LEN			= 0x0,
+	PWR_DESC_TYPE			= 0x1,
+	PWR_DESC_ACTIVE_LVLS_VCC_0	= 0x2,
+	PWR_DESC_ACTIVE_LVLS_VCCQ_0	= 0x22,
+	PWR_DESC_ACTIVE_LVLS_VCCQ2_0	= 0x42,
+};
+
 /* Exception event mask values */
 enum {
 	MASK_EE_STATUS		= 0xFFFF,
@@ -160,11 +239,12 @@
 };
 
 /* Background operation status */
-enum {
+enum bkops_status {
 	BKOPS_STATUS_NO_OP               = 0x0,
 	BKOPS_STATUS_NON_CRITICAL        = 0x1,
 	BKOPS_STATUS_PERF_IMPACT         = 0x2,
 	BKOPS_STATUS_CRITICAL            = 0x3,
+	BKOPS_STATUS_MAX		 = BKOPS_STATUS_CRITICAL,
 };
 
 /* UTP QUERY Transaction Specific Fields OpCode */
@@ -225,6 +305,14 @@
 	UPIU_TASK_MANAGEMENT_FUNC_FAILED	= 0x05,
 	UPIU_INCORRECT_LOGICAL_UNIT_NO		= 0x09,
 };
+
+/* UFS device power modes */
+enum ufs_dev_pwr_mode {
+	UFS_ACTIVE_PWR_MODE	= 1,
+	UFS_SLEEP_PWR_MODE	= 2,
+	UFS_POWERDOWN_PWR_MODE	= 3,
+};
+
 /**
  * struct utp_upiu_header - UPIU header structure
  * @dword_0: UPIU header DW-0
@@ -362,4 +450,42 @@
 	struct utp_upiu_query upiu_res;
 };
 
+#define UFS_VREG_VCC_MIN_UV	   2700000 /* uV */
+#define UFS_VREG_VCC_MAX_UV	   3600000 /* uV */
+#define UFS_VREG_VCC_1P8_MIN_UV    1700000 /* uV */
+#define UFS_VREG_VCC_1P8_MAX_UV    1950000 /* uV */
+#define UFS_VREG_VCCQ_MIN_UV	   1100000 /* uV */
+#define UFS_VREG_VCCQ_MAX_UV	   1300000 /* uV */
+#define UFS_VREG_VCCQ2_MIN_UV	   1650000 /* uV */
+#define UFS_VREG_VCCQ2_MAX_UV	   1950000 /* uV */
+
+/*
+ * VCCQ & VCCQ2 current requirement when UFS device is in sleep state
+ * and link is in Hibern8 state.
+ */
+#define UFS_VREG_LPM_LOAD_UA	1000 /* uA */
+
+struct ufs_vreg {
+	struct regulator *reg;
+	const char *name;
+	bool enabled;
+	int min_uV;
+	int max_uV;
+	int min_uA;
+	int max_uA;
+};
+
+struct ufs_vreg_info {
+	struct ufs_vreg *vcc;
+	struct ufs_vreg *vccq;
+	struct ufs_vreg *vccq2;
+	struct ufs_vreg *vdd_hba;
+};
+
+struct ufs_dev_info {
+	bool f_power_on_wp_en;
+	/* Keeps information if any of the LU is power on write protected */
+	bool is_lu_power_on_wp;
+};
+
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshcd-pci.c b/drivers/scsi/ufs/ufshcd-pci.c
index afaabe2..955ed55 100644
--- a/drivers/scsi/ufs/ufshcd-pci.c
+++ b/drivers/scsi/ufs/ufshcd-pci.c
@@ -43,34 +43,24 @@
  * @pdev: pointer to PCI device handle
  * @state: power state
  *
- * Returns -ENOSYS
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pci_suspend(struct device *dev)
 {
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_suspend
-	 * 2. Do bus specific power management
-	 */
-
-	return -ENOSYS;
+	return ufshcd_system_suspend(dev_get_drvdata(dev));
 }
 
 /**
  * ufshcd_pci_resume - resume power management function
  * @pdev: pointer to PCI device handle
  *
- * Returns -ENOSYS
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pci_resume(struct device *dev)
 {
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_resume.
-	 * 2. Do bus specific wake up
-	 */
-
-	return -ENOSYS;
+	return ufshcd_system_resume(dev_get_drvdata(dev));
 }
 #else
 #define ufshcd_pci_suspend	NULL
@@ -80,30 +70,15 @@
 #ifdef CONFIG_PM_RUNTIME
 static int ufshcd_pci_runtime_suspend(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_suspend(hba);
+	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 }
 static int ufshcd_pci_runtime_resume(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_resume(hba);
+	return ufshcd_runtime_resume(dev_get_drvdata(dev));
 }
 static int ufshcd_pci_runtime_idle(struct device *dev)
 {
-	struct ufs_hba *hba = dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_idle(hba);
+	return ufshcd_runtime_idle(dev_get_drvdata(dev));
 }
 #else /* !CONFIG_PM_RUNTIME */
 #define ufshcd_pci_runtime_suspend	NULL
@@ -117,7 +92,7 @@
  */
 static void ufshcd_pci_shutdown(struct pci_dev *pdev)
 {
-	ufshcd_hba_stop((struct ufs_hba *)pci_get_drvdata(pdev));
+	ufshcd_shutdown((struct ufs_hba *)pci_get_drvdata(pdev));
 }
 
 /**
@@ -164,7 +139,15 @@
 
 	mmio_base = pcim_iomap_table(pdev)[0];
 
-	err = ufshcd_init(&pdev->dev, &hba, mmio_base, pdev->irq);
+	err = ufshcd_alloc_host(&pdev->dev, &hba);
+	if (err) {
+		dev_err(&pdev->dev, "Allocation failed\n");
+		return err;
+	}
+
+	INIT_LIST_HEAD(&hba->clk_list_head);
+
+	err = ufshcd_init(hba, mmio_base, pdev->irq);
 	if (err) {
 		dev_err(&pdev->dev, "Initialization failed\n");
 		return err;
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 5e46232..8adf067 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -35,53 +35,236 @@
 
 #include <linux/platform_device.h>
 #include <linux/pm_runtime.h>
+#include <linux/of.h>
 
 #include "ufshcd.h"
 
+static const struct of_device_id ufs_of_match[];
+static struct ufs_hba_variant_ops *get_variant_ops(struct device *dev)
+{
+	if (dev->of_node) {
+		const struct of_device_id *match;
+
+		match = of_match_node(ufs_of_match, dev->of_node);
+		if (match)
+			return (struct ufs_hba_variant_ops *)match->data;
+	}
+
+	return NULL;
+}
+
+static int ufshcd_parse_clock_info(struct ufs_hba *hba)
+{
+	int ret = 0;
+	int cnt;
+	int i;
+	struct device *dev = hba->dev;
+	struct device_node *np = dev->of_node;
+	char *name;
+	u32 *clkfreq = NULL;
+	struct ufs_clk_info *clki;
+	int len = 0;
+	size_t sz = 0;
+
+	if (!np)
+		goto out;
+
+	INIT_LIST_HEAD(&hba->clk_list_head);
+
+	cnt = of_property_count_strings(np, "clock-names");
+	if (!cnt || (cnt == -EINVAL)) {
+		dev_info(dev, "%s: Unable to find clocks, assuming enabled\n",
+				__func__);
+	} else if (cnt < 0) {
+		dev_err(dev, "%s: count clock strings failed, err %d\n",
+				__func__, cnt);
+		ret = cnt;
+	}
+
+	if (cnt <= 0)
+		goto out;
+
+	if (!of_get_property(np, "freq-table-hz", &len)) {
+		dev_info(dev, "freq-table-hz property not specified\n");
+		goto out;
+	}
+
+	if (len <= 0)
+		goto out;
+
+	sz = len / sizeof(*clkfreq);
+	if (sz != 2 * cnt) {
+		dev_err(dev, "%s len mismatch\n", "freq-table-hz");
+		ret = -EINVAL;
+		goto out;
+	}
+
+	clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
+			GFP_KERNEL);
+	if (!clkfreq) {
+		dev_err(dev, "%s: no memory\n", "freq-table-hz");
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = of_property_read_u32_array(np, "freq-table-hz",
+			clkfreq, sz);
+	if (ret && (ret != -EINVAL)) {
+		dev_err(dev, "%s: error reading array %d\n",
+				"freq-table-hz", ret);
+		goto free_clkfreq;
+	}
+
+	for (i = 0; i < sz; i += 2) {
+		ret = of_property_read_string_index(np,
+				"clock-names", i/2, (const char **)&name);
+		if (ret)
+			goto free_clkfreq;
+
+		clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
+		if (!clki) {
+			ret = -ENOMEM;
+			goto free_clkfreq;
+		}
+
+		clki->min_freq = clkfreq[i];
+		clki->max_freq = clkfreq[i+1];
+		clki->name = kstrdup(name, GFP_KERNEL);
+		dev_dbg(dev, "%s: min %u max %u name %s\n", "freq-table-hz",
+				clki->min_freq, clki->max_freq, clki->name);
+		list_add_tail(&clki->list, &hba->clk_list_head);
+	}
+free_clkfreq:
+	kfree(clkfreq);
+out:
+	return ret;
+}
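
Each clock-names entry pairs with two consecutive elements of
freq-table-hz (<min max>), so name i/2 goes with elements i and i+1 of
the flat array. A standalone sketch of that pairing (the names and
frequencies below are made up):

#include <stdio.h>

int main(void)
{
	const char *names[] = { "core_clk", "iface_clk" };
	unsigned int freq[] = { 19200000, 300000000, 0, 0 };
	unsigned long i;

	/* walk the flat <min max> table in pairs, as the parser above does */
	for (i = 0; i < sizeof(freq) / sizeof(freq[0]); i += 2)
		printf("%s: min %u max %u\n",
		       names[i / 2], freq[i], freq[i + 1]);
	return 0;
}
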
+
+#define MAX_PROP_SIZE 32
+static int ufshcd_populate_vreg(struct device *dev, const char *name,
+		struct ufs_vreg **out_vreg)
+{
+	int ret = 0;
+	char prop_name[MAX_PROP_SIZE];
+	struct ufs_vreg *vreg = NULL;
+	struct device_node *np = dev->of_node;
+
+	if (!np) {
+		dev_err(dev, "%s: non DT initialization\n", __func__);
+		goto out;
+	}
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+	if (!of_parse_phandle(np, prop_name, 0)) {
+		dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+				__func__, prop_name);
+		goto out;
+	}
+
+	vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+	if (!vreg) {
+		dev_err(dev, "No memory for %s regulator\n", name);
+		goto out;
+	}
+
+	vreg->name = kstrdup(name, GFP_KERNEL);
+
+	/* if fixed regulator no need further initialization */
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-fixed-regulator", name);
+	if (of_property_read_bool(np, prop_name))
+		goto out;
+
+	snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+	ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+	if (ret) {
+		dev_err(dev, "%s: unable to find %s err %d\n",
+				__func__, prop_name, ret);
+		goto out_free;
+	}
+
+	vreg->min_uA = 0;
+	if (!strcmp(name, "vcc")) {
+		if (of_property_read_bool(np, "vcc-supply-1p8")) {
+			vreg->min_uV = UFS_VREG_VCC_1P8_MIN_UV;
+			vreg->max_uV = UFS_VREG_VCC_1P8_MAX_UV;
+		} else {
+			vreg->min_uV = UFS_VREG_VCC_MIN_UV;
+			vreg->max_uV = UFS_VREG_VCC_MAX_UV;
+		}
+	} else if (!strcmp(name, "vccq")) {
+		vreg->min_uV = UFS_VREG_VCCQ_MIN_UV;
+		vreg->max_uV = UFS_VREG_VCCQ_MAX_UV;
+	} else if (!strcmp(name, "vccq2")) {
+		vreg->min_uV = UFS_VREG_VCCQ2_MIN_UV;
+		vreg->max_uV = UFS_VREG_VCCQ2_MAX_UV;
+	}
+
+	goto out;
+
+out_free:
+	devm_kfree(dev, vreg);
+	vreg = NULL;
+out:
+	if (!ret)
+		*out_vreg = vreg;
+	return ret;
+}
+
+/**
+ * ufshcd_parse_regulator_info - get regulator info from device tree
+ * @hba: per adapter instance
+ *
+ * Get regulator info from device tree for vcc, vccq, vccq2 power supplies.
+ * If any of the supplies is not defined it is assumed to be always-on,
+ * and zero is returned. If a property is defined but parsing fails, the
+ * corresponding error is returned.
+ */
+static int ufshcd_parse_regulator_info(struct ufs_hba *hba)
+{
+	int err;
+	struct device *dev = hba->dev;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	err = ufshcd_populate_vreg(dev, "vdd-hba", &info->vdd_hba);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vcc", &info->vcc);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vccq", &info->vccq);
+	if (err)
+		goto out;
+
+	err = ufshcd_populate_vreg(dev, "vccq2", &info->vccq2);
+out:
+	return err;
+}
+
 #ifdef CONFIG_PM
 /**
  * ufshcd_pltfrm_suspend - suspend power management function
  * @dev: pointer to device handle
  *
- *
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pltfrm_suspend(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
-
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_suspend
-	 * 2. Do bus specific power management
-	 */
-
-	disable_irq(hba->irq);
-
-	return 0;
+	return ufshcd_system_suspend(dev_get_drvdata(dev));
 }
 
 /**
  * ufshcd_pltfrm_resume - resume power management function
  * @dev: pointer to device handle
  *
- * Returns 0
+ * Returns 0 if successful
+ * Returns non-zero otherwise
  */
 static int ufshcd_pltfrm_resume(struct device *dev)
 {
-	struct platform_device *pdev = to_platform_device(dev);
-	struct ufs_hba *hba =  platform_get_drvdata(pdev);
-
-	/*
-	 * TODO:
-	 * 1. Call ufshcd_resume.
-	 * 2. Do bus specific wake up
-	 */
-
-	enable_irq(hba->irq);
-
-	return 0;
+	return ufshcd_system_resume(dev_get_drvdata(dev));
 }
 #else
 #define ufshcd_pltfrm_suspend	NULL
@@ -91,30 +274,15 @@
 #ifdef CONFIG_PM_RUNTIME
 static int ufshcd_pltfrm_runtime_suspend(struct device *dev)
 {
-	struct ufs_hba *hba =  dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_suspend(hba);
+	return ufshcd_runtime_suspend(dev_get_drvdata(dev));
 }
 static int ufshcd_pltfrm_runtime_resume(struct device *dev)
 {
-	struct ufs_hba *hba =  dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_resume(hba);
+	return ufshcd_runtime_resume(dev_get_drvdata(dev));
 }
 static int ufshcd_pltfrm_runtime_idle(struct device *dev)
 {
-	struct ufs_hba *hba =  dev_get_drvdata(dev);
-
-	if (!hba)
-		return 0;
-
-	return ufshcd_runtime_idle(hba);
+	return ufshcd_runtime_idle(dev_get_drvdata(dev));
 }
 #else /* !CONFIG_PM_RUNTIME */
 #define ufshcd_pltfrm_runtime_suspend	NULL
@@ -122,6 +290,11 @@
 #define ufshcd_pltfrm_runtime_idle	NULL
 #endif /* CONFIG_PM_RUNTIME */
 
+static void ufshcd_pltfrm_shutdown(struct platform_device *pdev)
+{
+	ufshcd_shutdown((struct ufs_hba *)platform_get_drvdata(pdev));
+}
+
 /**
  * ufshcd_pltfrm_probe - probe routine of the driver
  * @pdev: pointer to Platform device handle
@@ -138,8 +311,8 @@
 
 	mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 	mmio_base = devm_ioremap_resource(dev, mem_res);
-	if (IS_ERR(mmio_base)) {
-		err = PTR_ERR(mmio_base);
+	if (IS_ERR(*(void **)&mmio_base)) {
+		err = PTR_ERR(*(void **)&mmio_base);
 		goto out;
 	}
 
@@ -150,10 +323,31 @@
 		goto out;
 	}
 
+	err = ufshcd_alloc_host(dev, &hba);
+	if (err) {
+		dev_err(&pdev->dev, "Allocation failed\n");
+		goto out;
+	}
+
+	hba->vops = get_variant_ops(&pdev->dev);
+
+	err = ufshcd_parse_clock_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: clock parse failed %d\n",
+				__func__, err);
+		goto out;
+	}
+	err = ufshcd_parse_regulator_info(hba);
+	if (err) {
+		dev_err(&pdev->dev, "%s: regulator init failed %d\n",
+				__func__, err);
+		goto out;
+	}
+
 	pm_runtime_set_active(&pdev->dev);
 	pm_runtime_enable(&pdev->dev);
 
-	err = ufshcd_init(dev, &hba, mmio_base, irq);
+	err = ufshcd_init(hba, mmio_base, irq);
 	if (err) {
 		dev_err(dev, "Intialization failed\n");
 		goto out_disable_rpm;
@@ -201,6 +395,7 @@
 static struct platform_driver ufshcd_pltfrm_driver = {
 	.probe	= ufshcd_pltfrm_probe,
 	.remove	= ufshcd_pltfrm_remove,
+	.shutdown = ufshcd_pltfrm_shutdown,
 	.driver	= {
 		.name	= "ufshcd",
 		.owner	= THIS_MODULE,
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ba27215..497c38a 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -3,6 +3,7 @@
  *
  * This code is based on drivers/scsi/ufs/ufshcd.c
  * Copyright (C) 2011-2013 Samsung India Software Operations
+ * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
  *
  * Authors:
  *	Santosh Yaraganavi <santosh.sy@samsung.com>
@@ -31,16 +32,19 @@
  * circumstances will the contributor of this Program be liable for
  * any damages of any kind arising from your use or distribution of
  * this program.
+ *
+ * The Linux Foundation chooses to take subject only to the GPLv2
+ * license terms, and distributes only under these terms.
  */
 
 #include <linux/async.h>
+#include <linux/devfreq.h>
 
 #include "ufshcd.h"
 #include "unipro.h"
 
 #define UFSHCD_ENABLE_INTRS	(UTP_TRANSFER_REQ_COMPL |\
 				 UTP_TASK_REQ_COMPL |\
-				 UIC_POWER_MODE |\
 				 UFSHCD_ERROR_MASK)
 /* UIC command timeout, unit: ms */
 #define UIC_CMD_TIMEOUT	500
@@ -58,16 +62,44 @@
 /* Task management command timeout */
 #define TM_CMD_TIMEOUT	100 /* msecs */
 
+/* maximum number of link-startup retries */
+#define DME_LINKSTARTUP_RETRIES 3
+
+/* maximum number of reset retries before giving up */
+#define MAX_HOST_RESET_RETRIES 5
+
 /* Expose the flag value from utp_upiu_query.value */
 #define MASK_QUERY_UPIU_FLAG_LOC 0xFF
 
 /* Interrupt aggregation default timeout, unit: 40us */
 #define INT_AGGR_DEF_TO	0x02
 
+#define ufshcd_toggle_vreg(_dev, _vreg, _on)				\
+	({                                                              \
+		int _ret;                                               \
+		if (_on)                                                \
+			_ret = ufshcd_enable_vreg(_dev, _vreg);         \
+		else                                                    \
+			_ret = ufshcd_disable_vreg(_dev, _vreg);        \
+		_ret;                                                   \
+	})
+
+static u32 ufs_query_desc_max_size[] = {
+	QUERY_DESC_DEVICE_MAX_SIZE,
+	QUERY_DESC_CONFIGURAION_MAX_SIZE,
+	QUERY_DESC_UNIT_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+	QUERY_DESC_INTERCONNECT_MAX_SIZE,
+	QUERY_DESC_STRING_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+	QUERY_DESC_GEOMETRY_MAZ_SIZE,
+	QUERY_DESC_POWER_MAX_SIZE,
+	QUERY_DESC_RFU_MAX_SIZE,
+};
+
 enum {
 	UFSHCD_MAX_CHANNEL	= 0,
 	UFSHCD_MAX_ID		= 1,
-	UFSHCD_MAX_LUNS		= 8,
 	UFSHCD_CMD_PER_LUN	= 32,
 	UFSHCD_CAN_QUEUE	= 32,
 };
@@ -106,12 +138,79 @@
 #define ufshcd_clear_eh_in_progress(h) \
 	(h->eh_flags &= ~UFSHCD_EH_IN_PROGRESS)
 
+#define ufshcd_set_ufs_dev_active(h) \
+	((h)->curr_dev_pwr_mode = UFS_ACTIVE_PWR_MODE)
+#define ufshcd_set_ufs_dev_sleep(h) \
+	((h)->curr_dev_pwr_mode = UFS_SLEEP_PWR_MODE)
+#define ufshcd_set_ufs_dev_poweroff(h) \
+	((h)->curr_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE)
+#define ufshcd_is_ufs_dev_active(h) \
+	((h)->curr_dev_pwr_mode == UFS_ACTIVE_PWR_MODE)
+#define ufshcd_is_ufs_dev_sleep(h) \
+	((h)->curr_dev_pwr_mode == UFS_SLEEP_PWR_MODE)
+#define ufshcd_is_ufs_dev_poweroff(h) \
+	((h)->curr_dev_pwr_mode == UFS_POWERDOWN_PWR_MODE)
+
+static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
+	{UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+	{UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE},
+	{UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},
+	{UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},
+};
+
+static inline enum ufs_dev_pwr_mode
+ufs_get_pm_lvl_to_dev_pwr_mode(enum ufs_pm_level lvl)
+{
+	return ufs_pm_lvl_states[lvl].dev_state;
+}
+
+static inline enum uic_link_state
+ufs_get_pm_lvl_to_link_pwr_state(enum ufs_pm_level lvl)
+{
+	return ufs_pm_lvl_states[lvl].link_state;
+}
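+
+/*
+ * For example, assuming the ufs_pm_level enum follows the table order
+ * above, level 3 decomposes as:
+ *
+ *	ufs_get_pm_lvl_to_dev_pwr_mode(3)   == UFS_SLEEP_PWR_MODE
+ *	ufs_get_pm_lvl_to_link_pwr_state(3) == UIC_LINK_HIBERN8_STATE
+ */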
+
 static void ufshcd_tmc_handler(struct ufs_hba *hba);
 static void ufshcd_async_scan(void *data, async_cookie_t cookie);
 static int ufshcd_reset_and_restore(struct ufs_hba *hba);
 static int ufshcd_clear_tm_cmd(struct ufs_hba *hba, int tag);
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-					struct scsi_device *sdev);
+static void ufshcd_hba_exit(struct ufs_hba *hba);
+static int ufshcd_probe_hba(struct ufs_hba *hba);
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+				 bool skip_ref_clk);
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on);
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba);
+static int ufshcd_host_reset_and_restore(struct ufs_hba *hba);
+static irqreturn_t ufshcd_intr(int irq, void *__hba);
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode);
+
+static inline int ufshcd_enable_irq(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (!hba->is_irq_enabled) {
+		ret = request_irq(hba->irq, ufshcd_intr, IRQF_SHARED, UFSHCD,
+				hba);
+		if (ret)
+			dev_err(hba->dev, "%s: request_irq failed, ret=%d\n",
+				__func__, ret);
+		hba->is_irq_enabled = true;
+	}
+
+	return ret;
+}
+
+static inline void ufshcd_disable_irq(struct ufs_hba *hba)
+{
+	if (hba->is_irq_enabled) {
+		free_irq(hba->irq, hba);
+		hba->is_irq_enabled = false;
+	}
+}
 
 /*
  * ufshcd_wait_for_register - wait for register value to change
@@ -175,13 +274,14 @@
 /**
  * ufshcd_is_device_present - Check if any device connected to
  *			      the host controller
- * @reg_hcs - host controller status register value
+ * @hba: pointer to adapter instance
  *
  * Returns 1 if device present, 0 if no device detected
  */
-static inline int ufshcd_is_device_present(u32 reg_hcs)
+static inline int ufshcd_is_device_present(struct ufs_hba *hba)
 {
-	return (DEVICE_PRESENT & reg_hcs) ? 1 : 0;
+	return (ufshcd_readl(hba, REG_CONTROLLER_STATUS) &
+						DEVICE_PRESENT) ? 1 : 0;
 }
 
 /**
@@ -413,6 +513,265 @@
 	return (ufshcd_readl(hba, REG_CONTROLLER_ENABLE) & 0x1) ? 0 : 1;
 }
 
+static void ufshcd_ungate_work(struct work_struct *work)
+{
+	int ret;
+	unsigned long flags;
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.ungate_work);
+
+	cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == CLKS_ON) {
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		goto unblock_reqs;
+	}
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	ufshcd_setup_clocks(hba, true);
+
+	/* Exit from hibern8 */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		/* Prevent gating in this path */
+		hba->clk_gating.is_suspended = true;
+		if (ufshcd_is_link_hibern8(hba)) {
+			ret = ufshcd_uic_hibern8_exit(hba);
+			if (ret)
+				dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
+					__func__, ret);
+			else
+				ufshcd_set_link_active(hba);
+		}
+		hba->clk_gating.is_suspended = false;
+	}
+unblock_reqs:
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+	scsi_unblock_requests(hba->host);
+}
+
+/**
+ * ufshcd_hold - Enable clocks that were gated earlier due to ufshcd_release.
+ * Also, exit from hibern8 mode and set the link as active.
+ * @hba: per adapter instance
+ * @async: This indicates whether caller should ungate clocks asynchronously.
+ */
+int ufshcd_hold(struct ufs_hba *hba, bool async)
+{
+	int rc = 0;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkgating_allowed(hba))
+		goto out;
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.active_reqs++;
+
+start:
+	switch (hba->clk_gating.state) {
+	case CLKS_ON:
+		break;
+	case REQ_CLKS_OFF:
+		if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+			hba->clk_gating.state = CLKS_ON;
+			break;
+		}
+		/*
+		 * If we are here, it means gating work is either done or
+		 * currently running. Hence, fall through to cancel gating
+		 * work and to enable clocks.
+		 */
+	case CLKS_OFF:
+		scsi_block_requests(hba->host);
+		hba->clk_gating.state = REQ_CLKS_ON;
+		schedule_work(&hba->clk_gating.ungate_work);
+		/*
+		 * fall through to check if we should wait for this
+		 * work to be done or not.
+		 */
+	case REQ_CLKS_ON:
+		if (async) {
+			rc = -EAGAIN;
+			hba->clk_gating.active_reqs--;
+			break;
+		}
+
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+		flush_work(&hba->clk_gating.ungate_work);
+		/* Make sure state is CLKS_ON before returning */
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		goto start;
+	default:
+		dev_err(hba->dev, "%s: clk gating is in invalid state %d\n",
+				__func__, hba->clk_gating.state);
+		break;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return rc;
+}
+
+static void ufshcd_gate_work(struct work_struct *work)
+{
+	struct ufs_hba *hba = container_of(work, struct ufs_hba,
+			clk_gating.gate_work.work);
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.is_suspended) {
+		hba->clk_gating.state = CLKS_ON;
+		goto rel_lock;
+	}
+
+	if (hba->clk_gating.active_reqs
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		goto rel_lock;
+
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+
+	/* put the link into hibern8 mode before turning off clocks */
+	if (ufshcd_can_hibern8_during_gating(hba)) {
+		if (ufshcd_uic_hibern8_enter(hba)) {
+			hba->clk_gating.state = CLKS_ON;
+			goto out;
+		}
+		ufshcd_set_link_hibern8(hba);
+	}
+
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
+
+	if (!ufshcd_is_link_active(hba))
+		ufshcd_setup_clocks(hba, false);
+	else
+		/* If link is active, device ref_clk can't be switched off */
+		__ufshcd_setup_clocks(hba, false, true);
+
+	/*
+	 * If a request to turn the clocks back on arrived while this work
+	 * was running, the gating state will already be REQ_CLKS_ON. In
+	 * that case leave it as REQ_CLKS_ON, which still correctly means
+	 * that the clocks are off and a request to turn them on is
+	 * pending. This keeps the state machine intact and prevents the
+	 * cancel work from being run multiple times when new requests
+	 * arrive before the current cancel work is done.
+	 */
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (hba->clk_gating.state == REQ_CLKS_OFF)
+		hba->clk_gating.state = CLKS_OFF;
+
+rel_lock:
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+out:
+	return;
+}
+
+/* host lock must be held before calling this variant */
+static void __ufshcd_release(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.active_reqs--;
+
+	if (hba->clk_gating.active_reqs || hba->clk_gating.is_suspended
+		|| hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL
+		|| hba->lrb_in_use || hba->outstanding_tasks
+		|| hba->active_uic_cmd || hba->uic_async_done)
+		return;
+
+	hba->clk_gating.state = REQ_CLKS_OFF;
+	schedule_delayed_work(&hba->clk_gating.gate_work,
+			msecs_to_jiffies(hba->clk_gating.delay_ms));
+}
+
+void ufshcd_release(struct ufs_hba *hba)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	__ufshcd_release(hba);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+}
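+
+/*
+ * Illustrative usage sketch (an editorial aside, not part of this change):
+ * any path that touches the controller brackets its register accesses with
+ * ufshcd_hold()/ufshcd_release() so the delayed gate work above cannot
+ * switch the clocks off underneath it:
+ *
+ *	err = ufshcd_hold(hba, false);	(may block until CLKS_ON)
+ *	if (err)
+ *		return err;
+ *	... access host controller registers ...
+ *	ufshcd_release(hba);		(re-arms the delayed gate work)
+ *
+ * With @async true, ufshcd_hold() never blocks: it returns -EAGAIN while
+ * the ungate work is still pending, which ufshcd_queuecommand() maps to
+ * SCSI_MLQUEUE_HOST_BUSY.
+ */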
+
+static ssize_t ufshcd_clkgate_delay_show(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	return snprintf(buf, PAGE_SIZE, "%lu\n", hba->clk_gating.delay_ms);
+}
+
+static ssize_t ufshcd_clkgate_delay_store(struct device *dev,
+		struct device_attribute *attr, const char *buf, size_t count)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	unsigned long flags, value;
+
+	if (kstrtoul(buf, 0, &value))
+		return -EINVAL;
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->clk_gating.delay_ms = value;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return count;
+}
+
+static void ufshcd_init_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+
+	hba->clk_gating.delay_ms = 150;
+	INIT_DELAYED_WORK(&hba->clk_gating.gate_work, ufshcd_gate_work);
+	INIT_WORK(&hba->clk_gating.ungate_work, ufshcd_ungate_work);
+
+	hba->clk_gating.delay_attr.show = ufshcd_clkgate_delay_show;
+	hba->clk_gating.delay_attr.store = ufshcd_clkgate_delay_store;
+	sysfs_attr_init(&hba->clk_gating.delay_attr.attr);
+	hba->clk_gating.delay_attr.attr.name = "clkgate_delay_ms";
+	hba->clk_gating.delay_attr.attr.mode = S_IRUGO | S_IWUSR;
+	if (device_create_file(hba->dev, &hba->clk_gating.delay_attr))
+		dev_err(hba->dev, "Failed to create sysfs for clkgate_delay\n");
+}
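+
+/*
+ * The clkgate_delay_ms attribute registered above is writable at runtime,
+ * so the idle window before gating can be tuned from user space, e.g.
+ * (the exact sysfs path is platform dependent):
+ *
+ *	echo 50 > /sys/devices/.../clkgate_delay_ms
+ */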
+
+static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkgating_allowed(hba))
+		return;
+	device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
+}
+
+/* Must be called with host lock acquired */
+static void ufshcd_clk_scaling_start_busy(struct ufs_hba *hba)
+{
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return;
+
+	if (!hba->clk_scaling.is_busy_started) {
+		hba->clk_scaling.busy_start_t = ktime_get();
+		hba->clk_scaling.is_busy_started = true;
+	}
+}
+
+static void ufshcd_clk_scaling_update_busy(struct ufs_hba *hba)
+{
+	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return;
+
+	if (!hba->outstanding_reqs && scaling->is_busy_started) {
+		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+					scaling->busy_start_t));
+		scaling->busy_start_t = ktime_set(0, 0);
+		scaling->is_busy_started = false;
+	}
+}
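+
+/*
+ * The busy window accounted here feeds the load estimate that the
+ * devfreq-based clock scaling support added by this series uses when
+ * deciding whether to scale the UFS clocks up or down.
+ */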
 /**
  * ufshcd_send_command - Send SCSI or device management commands
  * @hba: per adapter instance
@@ -421,6 +780,7 @@
 static inline
 void ufshcd_send_command(struct ufs_hba *hba, unsigned int task_tag)
 {
+	ufshcd_clk_scaling_start_busy(hba);
 	__set_bit(task_tag, &hba->outstanding_reqs);
 	ufshcd_writel(hba, 1 << task_tag, REG_UTP_TRANSFER_REQ_DOOR_BELL);
 }
@@ -576,15 +936,12 @@
  * @uic_cmd: UIC command
  *
  * Identical to ufshcd_send_uic_cmd() except for mutex handling. Must be called
- * with mutex held.
+ * with mutex held and host_lock locked.
  * Returns 0 only if success.
  */
 static int
 __ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
-	int ret;
-	unsigned long flags;
-
 	if (!ufshcd_ready_for_uic_cmd(hba)) {
 		dev_err(hba->dev,
 			"Controller not ready to accept UIC commands\n");
@@ -593,13 +950,9 @@
 
 	init_completion(&uic_cmd->done);
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
 	ufshcd_dispatch_uic_cmd(hba, uic_cmd);
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
-	ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
-
-	return ret;
+	return 0;
 }
 
 /**
@@ -613,11 +966,19 @@
 ufshcd_send_uic_cmd(struct ufs_hba *hba, struct uic_command *uic_cmd)
 {
 	int ret;
+	unsigned long flags;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->uic_cmd_mutex);
+	spin_lock_irqsave(hba->host->host_lock, flags);
 	ret = __ufshcd_send_uic_cmd(hba, uic_cmd);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (!ret)
+		ret = ufshcd_wait_for_uic_cmd(hba, uic_cmd);
+
 	mutex_unlock(&hba->uic_cmd_mutex);
 
+	ufshcd_release(hba);
 	return ret;
 }
 
@@ -867,6 +1228,32 @@
 	return ret;
 }
 
+/**
+ * ufshcd_scsi_to_upiu_lun - maps scsi LUN to UPIU LUN
+ * @scsi_lun: scsi LUN id
+ *
+ * Returns UPIU LUN id
+ */
+static inline u8 ufshcd_scsi_to_upiu_lun(unsigned int scsi_lun)
+{
+	if (scsi_is_wlun(scsi_lun))
+		return (scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID)
+			| UFS_UPIU_WLUN_ID;
+	else
+		return scsi_lun & UFS_UPIU_MAX_UNIT_NUM_ID;
+}
+
+/**
+ * ufshcd_upiu_wlun_to_scsi_wlun - maps UPIU W-LUN id to SCSI W-LUN ID
+ * @upiu_wlun_id: UPIU W-LUN id
+ *
+ * Returns SCSI W-LUN id
+ */
+static inline u16 ufshcd_upiu_wlun_to_scsi_wlun(u8 upiu_wlun_id)
+{
+	return (upiu_wlun_id & ~UFS_UPIU_WLUN_ID) | SCSI_W_LUN_BASE;
+}
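+
+/*
+ * Worked round trip for the two helpers above, assuming the usual
+ * definitions (UFS_UPIU_WLUN_ID = 0x80, UFS_UPIU_MAX_UNIT_NUM_ID = 0x7F,
+ * SCSI_W_LUN_BASE = 0xc100) and the "UFS Device" W-LU id 0xD0:
+ *
+ *	ufshcd_upiu_wlun_to_scsi_wlun(0xD0)
+ *		= (0xD0 & ~0x80) | 0xc100 = 0xc150
+ *	ufshcd_scsi_to_upiu_lun(0xc150)		(scsi_is_wlun() is true)
+ *		= (0xc150 & 0x7F) | 0x80  = 0xD0
+ */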
+
 /**
  * ufshcd_queuecommand - main entry point for SCSI requests
  * @cmd: command from SCSI Midlayer
@@ -918,6 +1305,14 @@
 		goto out;
 	}
 
+	err = ufshcd_hold(hba, true);
+	if (err) {
+		err = SCSI_MLQUEUE_HOST_BUSY;
+		clear_bit_unlock(tag, &hba->lrb_in_use);
+		goto out;
+	}
+	WARN_ON(hba->clk_gating.state != CLKS_ON);
+
 	lrbp = &hba->lrb[tag];
 
 	WARN_ON(lrbp->cmd);
@@ -925,7 +1320,7 @@
 	lrbp->sense_bufflen = SCSI_SENSE_BUFFERSIZE;
 	lrbp->sense_buffer = cmd->sense_buffer;
 	lrbp->task_tag = tag;
-	lrbp->lun = cmd->device->lun;
+	lrbp->lun = ufshcd_scsi_to_upiu_lun(cmd->device->lun);
 	lrbp->intr_cmd = false;
 	lrbp->command_type = UTP_CMD_TYPE_SCSI;
 
@@ -1193,6 +1588,7 @@
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	ufshcd_init_query(hba, &request, &response, opcode, idn, index,
 			selector);
@@ -1236,6 +1632,7 @@
 
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1259,6 +1656,7 @@
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!attr_val) {
 		dev_err(hba->dev, "%s: attribute value required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1298,6 +1696,7 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -1325,6 +1724,7 @@
 
 	BUG_ON(!hba);
 
+	ufshcd_hold(hba, false);
 	if (!desc_buf) {
 		dev_err(hba->dev, "%s: descriptor buffer required for opcode 0x%x\n",
 				__func__, opcode);
@@ -1374,10 +1774,120 @@
 out_unlock:
 	mutex_unlock(&hba->dev_cmd.lock);
 out:
+	ufshcd_release(hba);
 	return err;
 }
 
 /**
+ * ufshcd_read_desc_param - read the specified descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @desc_id: descriptor idn value
+ * @desc_index: descriptor index
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to the buffer into which the parameter is read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static int ufshcd_read_desc_param(struct ufs_hba *hba,
+				  enum desc_idn desc_id,
+				  int desc_index,
+				  u32 param_offset,
+				  u8 *param_read_buf,
+				  u32 param_size)
+{
+	int ret;
+	u8 *desc_buf;
+	u32 buff_len;
+	bool is_kmalloc = true;
+
+	/* safety checks */
+	if (desc_id >= QUERY_DESC_IDN_MAX)
+		return -EINVAL;
+
+	buff_len = ufs_query_desc_max_size[desc_id];
+	if ((param_offset + param_size) > buff_len)
+		return -EINVAL;
+
+	if (!param_offset && (param_size == buff_len)) {
+		/* memory space already available to hold full descriptor */
+		desc_buf = param_read_buf;
+		is_kmalloc = false;
+	} else {
+		/* allocate memory to hold full descriptor */
+		desc_buf = kmalloc(buff_len, GFP_KERNEL);
+		if (!desc_buf)
+			return -ENOMEM;
+	}
+
+	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
+				      desc_id, desc_index, 0, desc_buf,
+				      &buff_len);
+
+	if (ret || (buff_len < ufs_query_desc_max_size[desc_id]) ||
+	    (desc_buf[QUERY_DESC_LENGTH_OFFSET] !=
+	     ufs_query_desc_max_size[desc_id])
+	    || (desc_buf[QUERY_DESC_DESC_TYPE_OFFSET] != desc_id)) {
+		dev_err(hba->dev, "%s: Failed reading descriptor. desc_id %d param_offset %d buff_len %d ret %d",
+			__func__, desc_id, param_offset, buff_len, ret);
+		if (!ret)
+			ret = -EINVAL;
+
+		goto out;
+	}
+
+	if (is_kmalloc)
+		memcpy(param_read_buf, &desc_buf[param_offset], param_size);
+out:
+	if (is_kmalloc)
+		kfree(desc_buf);
+	return ret;
+}
+
+static inline int ufshcd_read_desc(struct ufs_hba *hba,
+				   enum desc_idn desc_id,
+				   int desc_index,
+				   u8 *buf,
+				   u32 size)
+{
+	return ufshcd_read_desc_param(hba, desc_id, desc_index, 0, buf, size);
+}
+
+static inline int ufshcd_read_power_desc(struct ufs_hba *hba,
+					 u8 *buf,
+					 u32 size)
+{
+	return ufshcd_read_desc(hba, QUERY_DESC_IDN_POWER, 0, buf, size);
+}
+
+/**
+ * ufshcd_read_unit_desc_param - read the specified unit descriptor parameter
+ * @hba: Pointer to adapter instance
+ * @lun: lun id
+ * @param_offset: offset of the parameter to read
+ * @param_read_buf: pointer to the buffer into which the parameter is read
+ * @param_size: sizeof(param_read_buf)
+ *
+ * Return 0 in case of success, non-zero otherwise
+ */
+static inline int ufshcd_read_unit_desc_param(struct ufs_hba *hba,
+					      int lun,
+					      enum unit_desc_param param_offset,
+					      u8 *param_read_buf,
+					      u32 param_size)
+{
+	/*
+	 * Unit descriptors are only available for general purpose LUs (LUN id
+	 * from 0 to 7) and RPMB Well known LU.
+	 */
+	if (lun != UFS_UPIU_RPMB_WLUN && (lun >= UFS_UPIU_MAX_GENERAL_LUN))
+		return -EOPNOTSUPP;
+
+	return ufshcd_read_desc_param(hba, QUERY_DESC_IDN_UNIT, lun,
+				      param_offset, param_read_buf, param_size);
+}
+
+/**
  * ufshcd_memory_alloc - allocate memory for host memory space data structures
  * @hba: per adapter instance
  *
@@ -1621,6 +2131,75 @@
 EXPORT_SYMBOL_GPL(ufshcd_dme_get_attr);
 
 /**
+ * ufshcd_uic_pwr_ctrl - executes UIC commands (which affects the link power
+ * state) and waits for it to take effect.
+ *
+ * @hba: per adapter instance
+ * @cmd: UIC command to execute
+ *
+ * DME operations like DME_SET(PA_PWRMODE), DME_HIBERNATE_ENTER &
+ * DME_HIBERNATE_EXIT commands take some time to take effect on both the
+ * host and device UniPro links, so their final completion is indicated by
+ * dedicated status bits in the Interrupt Status register (UPMS, UHES,
+ * UHXS) in addition to the normal UIC command completion status (UCCS).
+ * This function only returns after the relevant status bits indicate
+ * completion.
+ *
+ * Returns 0 on success, non-zero value on failure
+ */
+static int ufshcd_uic_pwr_ctrl(struct ufs_hba *hba, struct uic_command *cmd)
+{
+	struct completion uic_async_done;
+	unsigned long flags;
+	u8 status;
+	int ret;
+
+	mutex_lock(&hba->uic_cmd_mutex);
+	init_completion(&uic_async_done);
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->uic_async_done = &uic_async_done;
+	ret = __ufshcd_send_uic_cmd(hba, cmd);
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+		goto out;
+	}
+	ret = ufshcd_wait_for_uic_cmd(hba, cmd);
+	if (ret) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x uic error %d\n",
+			cmd->command, cmd->argument3, ret);
+		goto out;
+	}
+
+	if (!wait_for_completion_timeout(hba->uic_async_done,
+					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%x with mode 0x%x completion timeout\n",
+			cmd->command, cmd->argument3);
+		ret = -ETIMEDOUT;
+		goto out;
+	}
+
+	status = ufshcd_get_upmcrs(hba);
+	if (status != PWR_LOCAL) {
+		dev_err(hba->dev,
+			"pwr ctrl cmd 0x%0x failed, host umpcrs:0x%x\n",
+			cmd->command, status);
+		ret = (status != PWR_OK) ? status : -1;
+	}
+out:
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	hba->uic_async_done = NULL;
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	mutex_unlock(&hba->uic_cmd_mutex);
+
+	return ret;
+}
+
+/**
  * ufshcd_uic_change_pwr_mode - Perform the UIC power mode change
  *				using DME_SET primitives.
  * @hba: per adapter instance
@@ -1631,88 +2210,120 @@
 static int ufshcd_uic_change_pwr_mode(struct ufs_hba *hba, u8 mode)
 {
 	struct uic_command uic_cmd = {0};
-	struct completion pwr_done;
-	unsigned long flags;
-	u8 status;
 	int ret;
 
 	uic_cmd.command = UIC_CMD_DME_SET;
 	uic_cmd.argument1 = UIC_ARG_MIB(PA_PWRMODE);
 	uic_cmd.argument3 = mode;
-	init_completion(&pwr_done);
+	ufshcd_hold(hba, false);
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+	ufshcd_release(hba);
 
-	mutex_lock(&hba->uic_cmd_mutex);
+	return ret;
+}
 
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = &pwr_done;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	ret = __ufshcd_send_uic_cmd(hba, &uic_cmd);
+static int ufshcd_uic_hibern8_enter(struct ufs_hba *hba)
+{
+	struct uic_command uic_cmd = {0};
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_ENTER;
+
+	return ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
+}
+
+static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
+{
+	struct uic_command uic_cmd = {0};
+	int ret;
+
+	uic_cmd.command = UIC_CMD_DME_HIBER_EXIT;
+	ret = ufshcd_uic_pwr_ctrl(hba, &uic_cmd);
 	if (ret) {
-		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x uic error %d\n",
-			mode, ret);
-		goto out;
+		ufshcd_set_link_off(hba);
+		ret = ufshcd_host_reset_and_restore(hba);
 	}
 
-	if (!wait_for_completion_timeout(hba->pwr_done,
-					 msecs_to_jiffies(UIC_CMD_TIMEOUT))) {
-		dev_err(hba->dev,
-			"pwr mode change with mode 0x%x completion timeout\n",
-			mode);
-		ret = -ETIMEDOUT;
-		goto out;
-	}
-
-	status = ufshcd_get_upmcrs(hba);
-	if (status != PWR_LOCAL) {
-		dev_err(hba->dev,
-			"pwr mode change failed, host umpcrs:0x%x\n",
-			status);
-		ret = (status != PWR_OK) ? status : -1;
-	}
-out:
-	spin_lock_irqsave(hba->host->host_lock, flags);
-	hba->pwr_done = NULL;
-	spin_unlock_irqrestore(hba->host->host_lock, flags);
-	mutex_unlock(&hba->uic_cmd_mutex);
 	return ret;
 }
 
 /**
- * ufshcd_config_max_pwr_mode - Set & Change power mode with
- *	maximum capability attribute information.
- * @hba: per adapter instance
- *
- * Returns 0 on success, non-zero value on failure
+ * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
+ * @hba: per-adapter instance
  */
-static int ufshcd_config_max_pwr_mode(struct ufs_hba *hba)
+static int ufshcd_get_max_pwr_mode(struct ufs_hba *hba)
 {
-	enum {RX = 0, TX = 1};
-	u32 lanes[] = {1, 1};
-	u32 gear[] = {1, 1};
-	u8 pwr[] = {FASTAUTO_MODE, FASTAUTO_MODE};
-	int ret;
+	struct ufs_pa_layer_attr *pwr_info = &hba->max_pwr_info.info;
+
+	if (hba->max_pwr_info.is_valid)
+		return 0;
+
+	pwr_info->pwr_tx = FASTAUTO_MODE;
+	pwr_info->pwr_rx = FASTAUTO_MODE;
+	pwr_info->hs_rate = PA_HS_MODE_B;
 
 	/* Get the connected lane count */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES), &lanes[RX]);
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES), &lanes[TX]);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDRXDATALANES),
+			&pwr_info->lane_rx);
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_CONNECTEDTXDATALANES),
+			&pwr_info->lane_tx);
+
+	if (!pwr_info->lane_rx || !pwr_info->lane_tx) {
+		dev_err(hba->dev, "%s: invalid connected lanes value. rx=%d, tx=%d\n",
+				__func__,
+				pwr_info->lane_rx,
+				pwr_info->lane_tx);
+		return -EINVAL;
+	}
 
 	/*
 	 * First, get the maximum gears of HS speed.
 	 * If a zero value, it means there is no HSGEAR capability.
 	 * Then, get the maximum gears of PWM speed.
 	 */
-	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[RX]);
-	if (!gear[RX]) {
-		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR), &gear[RX]);
-		pwr[RX] = SLOWAUTO_MODE;
+	ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &pwr_info->gear_rx);
+	if (!pwr_info->gear_rx) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
+				&pwr_info->gear_rx);
+		if (!pwr_info->gear_rx) {
+			dev_err(hba->dev, "%s: invalid max pwm rx gear read = %d\n",
+				__func__, pwr_info->gear_rx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_rx = SLOWAUTO_MODE;
 	}
 
-	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR), &gear[TX]);
-	if (!gear[TX]) {
+	ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXHSGEAR),
+			&pwr_info->gear_tx);
+	if (!pwr_info->gear_tx) {
 		ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_MAXRXPWMGEAR),
-				    &gear[TX]);
-		pwr[TX] = SLOWAUTO_MODE;
+				&pwr_info->gear_tx);
+		if (!pwr_info->gear_tx) {
+			dev_err(hba->dev, "%s: invalid max pwm tx gear read = %d\n",
+				__func__, pwr_info->gear_tx);
+			return -EINVAL;
+		}
+		pwr_info->pwr_tx = SLOWAUTO_MODE;
+	}
+
+	hba->max_pwr_info.is_valid = true;
+	return 0;
+}
+
+static int ufshcd_change_power_mode(struct ufs_hba *hba,
+			     struct ufs_pa_layer_attr *pwr_mode)
+{
+	int ret;
+
+	/* if already configured to the requested pwr_mode */
+	if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+	    pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+	    pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
+	    pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
+	    pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
+	    pwr_mode->pwr_tx == hba->pwr_info.pwr_tx &&
+	    pwr_mode->hs_rate == hba->pwr_info.hs_rate) {
+		dev_dbg(hba->dev, "%s: power already configured\n", __func__);
+		return 0;
 	}
 
 	/*
@@ -1721,23 +2332,67 @@
 	 * - PA_TXGEAR, PA_ACTIVETXDATALANES, PA_TXTERMINATION,
 	 * - PA_HSSERIES
 	 */
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), gear[RX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), lanes[RX]);
-	if (pwr[RX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), pwr_mode->gear_rx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES),
+			pwr_mode->lane_rx);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+			pwr_mode->pwr_rx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), FALSE);
 
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), gear[TX]);
-	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), lanes[TX]);
-	if (pwr[TX] == FASTAUTO_MODE)
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), pwr_mode->gear_tx);
+	ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES),
+			pwr_mode->lane_tx);
+	if (pwr_mode->pwr_tx == FASTAUTO_MODE ||
+			pwr_mode->pwr_tx == FAST_MODE)
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), TRUE);
+	else
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), FALSE);
 
-	if (pwr[RX] == FASTAUTO_MODE || pwr[TX] == FASTAUTO_MODE)
-		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), PA_HS_MODE_B);
+	if (pwr_mode->pwr_rx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_tx == FASTAUTO_MODE ||
+	    pwr_mode->pwr_rx == FAST_MODE ||
+	    pwr_mode->pwr_tx == FAST_MODE)
+		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES),
+						pwr_mode->hs_rate);
 
-	ret = ufshcd_uic_change_pwr_mode(hba, pwr[RX] << 4 | pwr[TX]);
-	if (ret)
+	ret = ufshcd_uic_change_pwr_mode(hba, pwr_mode->pwr_rx << 4
+			| pwr_mode->pwr_tx);
+
+	if (ret) {
 		dev_err(hba->dev,
-			"pwr_mode: power mode change failed %d\n", ret);
+			"%s: power mode change failed %d\n", __func__, ret);
+	} else {
+		if (hba->vops && hba->vops->pwr_change_notify)
+			hba->vops->pwr_change_notify(hba,
+				POST_CHANGE, NULL, pwr_mode);
+
+		memcpy(&hba->pwr_info, pwr_mode,
+			sizeof(struct ufs_pa_layer_attr));
+	}
+
+	return ret;
+}
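+
+/*
+ * Hypothetical variant-driver hook (name invented for illustration; the
+ * signature is sketched from the pwr_change_notify call sites in
+ * ufshcd_change_power_mode() above and ufshcd_config_pwr_mode() below):
+ * a PRE_CHANGE callback may clamp the desired parameters before the core
+ * programs the link, e.g. to cap a controller at one lane per direction:
+ *
+ *	static int example_pwr_change_notify(struct ufs_hba *hba, int status,
+ *			struct ufs_pa_layer_attr *desired,
+ *			struct ufs_pa_layer_attr *final)
+ *	{
+ *		if (status == PRE_CHANGE) {
+ *			*final = *desired;
+ *			final->lane_rx = 1;
+ *			final->lane_tx = 1;
+ *		}
+ *		return 0;
+ *	}
+ */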
+
+/**
+ * ufshcd_config_pwr_mode - configure a new power mode
+ * @hba: per-adapter instance
+ * @desired_pwr_mode: desired power configuration
+ */
+static int ufshcd_config_pwr_mode(struct ufs_hba *hba,
+		struct ufs_pa_layer_attr *desired_pwr_mode)
+{
+	struct ufs_pa_layer_attr final_params = { 0 };
+	int ret;
+
+	if (hba->vops && hba->vops->pwr_change_notify)
+		hba->vops->pwr_change_notify(hba,
+		     PRE_CHANGE, desired_pwr_mode, &final_params);
+	else
+		memcpy(&final_params, desired_pwr_mode, sizeof(final_params));
+
+	ret = ufshcd_change_power_mode(hba, &final_params);
 
 	return ret;
 }
@@ -1798,11 +2453,10 @@
  * @hba: per adapter instance
  *
  * To bring UFS host controller to operational state,
- * 1. Check if device is present
- * 2. Enable required interrupts
- * 3. Configure interrupt aggregation
- * 4. Program UTRL and UTMRL base addres
- * 5. Configure run-stop-registers
+ * 1. Enable required interrupts
+ * 2. Configure interrupt aggregation
+ * 3. Program UTRL and UTMRL base address
+ * 4. Configure run-stop-registers
  *
  * Returns 0 on success, non-zero value on failure
  */
@@ -1811,14 +2465,6 @@
 	int err = 0;
 	u32 reg;
 
-	/* check if device present */
-	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
-	if (!ufshcd_is_device_present(reg)) {
-		dev_err(hba->dev, "cc: Device not present\n");
-		err = -ENXIO;
-		goto out;
-	}
-
 	/* Enable required interrupts */
 	ufshcd_enable_intr(hba, UFSHCD_ENABLE_INTRS);
 
@@ -1839,6 +2485,7 @@
 	 * UCRDY, UTMRLDY and UTRLRDY bits must be 1
 	 * DEI, HEI bits must be 0
 	 */
+	reg = ufshcd_readl(hba, REG_CONTROLLER_STATUS);
 	if (!(ufshcd_get_lists_status(reg))) {
 		ufshcd_enable_run_stop_reg(hba);
 	} else {
@@ -1885,6 +2532,12 @@
 		msleep(5);
 	}
 
+	/* UniPro link is disabled at this point */
+	ufshcd_set_link_off(hba);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, PRE_CHANGE);
+
 	/* start controller initialization sequence */
 	ufshcd_hba_start(hba);
 
@@ -1912,6 +2565,13 @@
 		}
 		msleep(5);
 	}
+
+	/* enable UIC related interrupts */
+	ufshcd_enable_intr(hba, UFSHCD_UIC_MASK);
+
+	if (hba->vops && hba->vops->hce_enable_notify)
+		hba->vops->hce_enable_notify(hba, POST_CHANGE);
+
 	return 0;
 }
 
@@ -1924,16 +2584,42 @@
 static int ufshcd_link_startup(struct ufs_hba *hba)
 {
 	int ret;
+	int retries = DME_LINKSTARTUP_RETRIES;
 
-	/* enable UIC related interrupts */
-	ufshcd_enable_intr(hba, UIC_COMMAND_COMPL);
+	do {
+		if (hba->vops && hba->vops->link_startup_notify)
+			hba->vops->link_startup_notify(hba, PRE_CHANGE);
 
-	ret = ufshcd_dme_link_startup(hba);
+		ret = ufshcd_dme_link_startup(hba);
+
+		/* check if device is detected by inter-connect layer */
+		if (!ret && !ufshcd_is_device_present(hba)) {
+			dev_err(hba->dev, "%s: Device not present\n", __func__);
+			ret = -ENXIO;
+			goto out;
+		}
+
+		/*
+		 * DME link lost indication is only received when link is up,
+		 * but we can't be sure if the link is up until link startup
+		 * succeeds. So reset the local UniPro and try again.
+		 */
+		if (ret && ufshcd_hba_enable(hba))
+			goto out;
+	} while (ret && retries--);
+
 	if (ret)
+		/* failed to get the link up... retire */
 		goto out;
 
-	ret = ufshcd_make_hba_operational(hba);
+	/* Include any host controller configuration via UIC commands */
+	if (hba->vops && hba->vops->link_startup_notify) {
+		ret = hba->vops->link_startup_notify(hba, POST_CHANGE);
+		if (ret)
+			goto out;
+	}
 
+	ret = ufshcd_make_hba_operational(hba);
 out:
 	if (ret)
 		dev_err(hba->dev, "link startup failed %d\n", ret);
@@ -1955,6 +2641,7 @@
 	int err = 0;
 	int retries;
 
+	ufshcd_hold(hba, false);
 	mutex_lock(&hba->dev_cmd.lock);
 	for (retries = NOP_OUT_RETRIES; retries > 0; retries--) {
 		err = ufshcd_exec_dev_cmd(hba, DEV_CMD_TYPE_NOP,
@@ -1966,6 +2653,7 @@
 		dev_dbg(hba->dev, "%s: error %d retrying\n", __func__, err);
 	}
 	mutex_unlock(&hba->dev_cmd.lock);
+	ufshcd_release(hba);
 
 	if (err)
 		dev_err(hba->dev, "%s: NOP OUT failed %d\n", __func__, err);
@@ -1973,6 +2661,100 @@
 }
 
 /**
+ * ufshcd_set_queue_depth - set lun queue depth
+ * @sdev: pointer to SCSI device
+ *
+ * Read bLUQueueDepth value and activate scsi tagged command
+ * queueing. For WLUN, queue depth is set to 1. For best-effort
+ * cases (bLUQueueDepth = 0) the queue depth is set to a maximum
+ * value that host can queue.
+ */
+static void ufshcd_set_queue_depth(struct scsi_device *sdev)
+{
+	int ret = 0;
+	u8 lun_qdepth;
+	struct ufs_hba *hba;
+
+	hba = shost_priv(sdev->host);
+
+	lun_qdepth = hba->nutrs;
+	ret = ufshcd_read_unit_desc_param(hba,
+					  ufshcd_scsi_to_upiu_lun(sdev->lun),
+					  UNIT_DESC_PARAM_LU_Q_DEPTH,
+					  &lun_qdepth,
+					  sizeof(lun_qdepth));
+
+	/* Some W-LUNs don't support unit descriptors */
+	if (ret == -EOPNOTSUPP)
+		lun_qdepth = 1;
+	else if (!lun_qdepth)
+		/* eventually, we can figure out the real queue depth */
+		lun_qdepth = hba->nutrs;
+	else
+		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
+
+	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
+			__func__, lun_qdepth);
+	scsi_activate_tcq(sdev, lun_qdepth);
+}
+
+/**
+ * ufshcd_get_lu_wp - returns the "b_lu_write_protect" from UNIT DESCRIPTOR
+ * @hba: per-adapter instance
+ * @lun: UFS device lun id
+ * @b_lu_write_protect: pointer to buffer to hold the LU's write protect info
+ *
+ * Returns 0 in case of success and the write protect status is returned in
+ * the @b_lu_write_protect parameter.
+ * Returns -ENOTSUPP if reading b_lu_write_protect is not supported.
+ * Returns -EINVAL in case of invalid parameters passed to this function.
+ */
+static int ufshcd_get_lu_wp(struct ufs_hba *hba,
+			    u8 lun,
+			    u8 *b_lu_write_protect)
+{
+	int ret;
+
+	if (!b_lu_write_protect)
+		ret = -EINVAL;
+	/*
+	 * According to UFS device spec, RPMB LU can't be write
+	 * protected so skip reading bLUWriteProtect parameter for
+	 * it. For other W-LUs, UNIT DESCRIPTOR is not available.
+	 */
+	else if (lun >= UFS_UPIU_MAX_GENERAL_LUN)
+		ret = -ENOTSUPP;
+	else
+		ret = ufshcd_read_unit_desc_param(hba,
+					  lun,
+					  UNIT_DESC_PARAM_LU_WR_PROTECT,
+					  b_lu_write_protect,
+					  sizeof(*b_lu_write_protect));
+	return ret;
+}
+
+/**
+ * ufshcd_get_lu_power_on_wp_status - get LU's power on write protect
+ * status
+ * @hba: per-adapter instance
+ * @sdev: pointer to SCSI device
+ *
+ */
+static inline void ufshcd_get_lu_power_on_wp_status(struct ufs_hba *hba,
+						    struct scsi_device *sdev)
+{
+	if (hba->dev_info.f_power_on_wp_en &&
+	    !hba->dev_info.is_lu_power_on_wp) {
+		u8 b_lu_write_protect;
+
+		if (!ufshcd_get_lu_wp(hba, ufshcd_scsi_to_upiu_lun(sdev->lun),
+				      &b_lu_write_protect) &&
+		    (b_lu_write_protect == UFS_LU_POWER_ON_WP))
+			hba->dev_info.is_lu_power_on_wp = true;
+	}
+}
+
+/**
  * ufshcd_slave_alloc - handle initial SCSI device configurations
  * @sdev: pointer to SCSI device
  *
@@ -1981,7 +2763,6 @@
 static int ufshcd_slave_alloc(struct scsi_device *sdev)
 {
 	struct ufs_hba *hba;
-	int lun_qdepth;
 
 	hba = shost_priv(sdev->host);
 	sdev->tagged_supported = 1;
@@ -1996,16 +2777,10 @@
 	/* REPORT SUPPORTED OPERATION CODES is not supported */
 	sdev->no_report_opcodes = 1;
 
-	lun_qdepth = ufshcd_read_sdev_qdepth(hba, sdev);
-	if (lun_qdepth <= 0)
-		/* eventually, we can figure out the real queue depth */
-		lun_qdepth = hba->nutrs;
-	else
-		lun_qdepth = min_t(int, lun_qdepth, hba->nutrs);
 
-	dev_dbg(hba->dev, "%s: activate tcq with queue depth %d\n",
-			__func__, lun_qdepth);
-	scsi_activate_tcq(sdev, lun_qdepth);
+	ufshcd_set_queue_depth(sdev);
+
+	ufshcd_get_lu_power_on_wp_status(hba, sdev);
 
 	return 0;
 }
@@ -2068,6 +2843,9 @@
 
 	hba = shost_priv(sdev->host);
 	scsi_deactivate_tcq(sdev, hba->nutrs);
+	/* Drop the reference as it won't be needed anymore */
+	if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN)
+		hba->sdev_ufs_device = NULL;
 }
 
 /**
@@ -2234,8 +3012,8 @@
 		complete(&hba->active_uic_cmd->done);
 	}
 
-	if ((intr_status & UIC_POWER_MODE) && hba->pwr_done)
-		complete(hba->pwr_done);
+	if ((intr_status & UFSHCD_UIC_PWR_MASK) && hba->uic_async_done)
+		complete(hba->uic_async_done);
 }
 
 /**
@@ -2275,6 +3053,7 @@
 			clear_bit_unlock(index, &hba->lrb_in_use);
 			/* Do not touch lrbp after scsi done */
 			cmd->scsi_done(cmd);
+			__ufshcd_release(hba);
 		} else if (lrbp->command_type == UTP_CMD_TYPE_DEV_MANAGE) {
 			if (hba->dev_cmd.complete)
 				complete(hba->dev_cmd.complete);
@@ -2284,6 +3063,8 @@
 	/* clear corresponding bits of completed commands */
 	hba->outstanding_reqs ^= completed_reqs;
 
+	ufshcd_clk_scaling_update_busy(hba);
+
 	/* we might have free'd some tags above */
 	wake_up(&hba->dev_cmd.tag_wq);
 }
@@ -2447,31 +3228,60 @@
 }
 
 /**
+ * ufshcd_bkops_ctrl - control the auto bkops based on current bkops status
+ * @hba: per-adapter instance
+ * @status: bkops_status value
+ *
+ * Read the bkops_status from the UFS device and enable the fBackgroundOpsEn
+ * flag in the device to permit background operations if the device's
+ * bkops_status is greater than or equal to the "status" argument passed to
+ * this function; disable it otherwise.
+ *
+ * Returns 0 for success, non-zero in case of failure.
+ *
+ * NOTE: Caller of this function can check the "hba->auto_bkops_enabled" flag
+ * to know whether auto bkops is enabled or disabled after this function
+ * returns control to it.
+ */
+static int ufshcd_bkops_ctrl(struct ufs_hba *hba,
+			     enum bkops_status status)
+{
+	int err;
+	u32 curr_status = 0;
+
+	err = ufshcd_get_bkops_status(hba, &curr_status);
+	if (err) {
+		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
+				__func__, err);
+		goto out;
+	} else if (curr_status > BKOPS_STATUS_MAX) {
+		dev_err(hba->dev, "%s: invalid BKOPS status %d\n",
+				__func__, curr_status);
+		err = -EINVAL;
+		goto out;
+	}
+
+	if (curr_status >= status)
+		err = ufshcd_enable_auto_bkops(hba);
+	else
+		err = ufshcd_disable_auto_bkops(hba);
+out:
+	return err;
+}
+
+/**
  * ufshcd_urgent_bkops - handle urgent bkops exception event
  * @hba: per-adapter instance
  *
  * Enable fBackgroundOpsEn flag in the device to permit background
  * operations.
+ *
+ * Returns 0 if BKOPS is enabled, 1 if it is not enabled, and a negative
+ * error value on any other failure.
  */
 static int ufshcd_urgent_bkops(struct ufs_hba *hba)
 {
-	int err;
-	u32 status = 0;
-
-	err = ufshcd_get_bkops_status(hba, &status);
-	if (err) {
-		dev_err(hba->dev, "%s: failed to get BKOPS status %d\n",
-				__func__, err);
-		goto out;
-	}
-
-	status = status & 0xF;
-
-	/* handle only if status indicates performance impact or critical */
-	if (status >= BKOPS_STATUS_PERF_IMPACT)
-		err = ufshcd_enable_auto_bkops(hba);
-out:
-	return err;
+	return ufshcd_bkops_ctrl(hba, BKOPS_STATUS_PERF_IMPACT);
 }
 
 static inline int ufshcd_get_ee_status(struct ufs_hba *hba, u32 *status)
@@ -2505,7 +3315,7 @@
 	status &= hba->ee_ctrl_mask;
 	if (status & MASK_EE_URGENT_BKOPS) {
 		err = ufshcd_urgent_bkops(hba);
-		if (err)
+		if (err < 0)
 			dev_err(hba->dev, "%s: failed to handle urgent bkops %d\n",
 					__func__, err);
 	}
@@ -2530,6 +3340,7 @@
 	hba = container_of(work, struct ufs_hba, eh_work);
 
 	pm_runtime_get_sync(hba->dev);
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(hba->host->host_lock, flags);
 	if (hba->ufshcd_state == UFSHCD_STATE_RESET) {
@@ -2583,6 +3394,7 @@
 
 out:
 	scsi_unblock_requests(hba->host);
+	ufshcd_release(hba);
 	pm_runtime_put_sync(hba->dev);
 }
 
@@ -2766,6 +3578,7 @@
 	 * the maximum wait time is bounded by %TM_CMD_TIMEOUT.
 	 */
 	wait_event(hba->tm_tag_wq, ufshcd_get_tm_free_slot(hba, &free_slot));
+	ufshcd_hold(hba, false);
 
 	spin_lock_irqsave(host->host_lock, flags);
 	task_req_descp = hba->utmrdl_base_addr;
@@ -2785,7 +3598,10 @@
 					      lun_id, task_tag);
 	task_req_upiup->header.dword_1 =
 		UPIU_HEADER_DWORD(0, tm_function, 0, 0);
-
+	/*
+	 * The host shall provide the same value for the LUN field in the
+	 * basic header and for Input Parameter 1.
+	 */
 	task_req_upiup->input_param1 = cpu_to_be32(lun_id);
 	task_req_upiup->input_param2 = cpu_to_be32(task_id);
 
@@ -2814,6 +3630,7 @@
 	ufshcd_put_tm_slot(hba, free_slot);
 	wake_up(&hba->tm_tag_wq);
 
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -2896,6 +3713,7 @@
 	hba = shost_priv(host);
 	tag = cmd->request->tag;
 
+	ufshcd_hold(hba, false);
 	/* If command is already aborted/completed, return SUCCESS */
 	if (!(test_bit(tag, &hba->outstanding_reqs)))
 		goto out;
@@ -2960,6 +3778,7 @@
 
 	clear_bit_unlock(tag, &hba->lrb_in_use);
 	wake_up(&hba->dev_cmd.tag_wq);
+
 out:
 	if (!err) {
 		err = SUCCESS;
@@ -2968,6 +3787,11 @@
 		err = FAILED;
 	}
 
+	/*
+	 * This ufshcd_release() corresponds to the original scsi cmd that got
+	 * aborted here (as we won't get any IRQ for it).
+	 */
+	ufshcd_release(hba);
 	return err;
 }
 
@@ -2984,7 +3808,6 @@
 static int ufshcd_host_reset_and_restore(struct ufs_hba *hba)
 {
 	int err;
-	async_cookie_t cookie;
 	unsigned long flags;
 
 	/* Reset the host controller */
@@ -2997,10 +3820,9 @@
 		goto out;
 
 	/* Establish the link again and restore the device */
-	cookie = async_schedule(ufshcd_async_scan, hba);
-	/* wait for async scan to be completed */
-	async_synchronize_cookie(++cookie);
-	if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL)
+	err = ufshcd_probe_hba(hba);
+
+	if (!err && (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL))
 		err = -EIO;
 out:
 	if (err)
@@ -3022,8 +3844,11 @@
 {
 	int err = 0;
 	unsigned long flags;
+	int retries = MAX_HOST_RESET_RETRIES;
 
-	err = ufshcd_host_reset_and_restore(hba);
+	do {
+		err = ufshcd_host_reset_and_restore(hba);
+	} while (err && --retries);
 
 	/*
 	 * After reset the door-bell might be cleared, complete
@@ -3051,6 +3876,7 @@
 
 	hba = shost_priv(cmd->device->host);
 
+	ufshcd_hold(hba, false);
 	/*
 	 * Check if there is any race with fatal error handling.
 	 * If so, wait for it to complete. Even though fatal error
@@ -3084,56 +3910,232 @@
 	ufshcd_clear_eh_in_progress(hba);
 	spin_unlock_irqrestore(hba->host->host_lock, flags);
 
+	ufshcd_release(hba);
 	return err;
 }
 
 /**
- * ufshcd_read_sdev_qdepth - read the lun command queue depth
- * @hba: Pointer to adapter instance
- * @sdev: pointer to SCSI device
+ * ufshcd_get_max_icc_level - calculate the ICC level
+ * @sup_curr_uA: max. current supported by the regulator
+ * @start_scan: row at the desc table to start scan from
+ * @buff: power descriptor buffer
  *
- * Return in case of success the lun's queue depth else error.
+ * Returns calculated max ICC level for specific regulator
  */
-static int ufshcd_read_sdev_qdepth(struct ufs_hba *hba,
-				struct scsi_device *sdev)
+static u32 ufshcd_get_max_icc_level(int sup_curr_uA, u32 start_scan, char *buff)
 {
-	int ret;
-	int buff_len = UNIT_DESC_MAX_SIZE;
-	u8 desc_buf[UNIT_DESC_MAX_SIZE];
+	int i;
+	int curr_uA;
+	u16 data;
+	u16 unit;
 
-	ret = ufshcd_query_descriptor(hba, UPIU_QUERY_OPCODE_READ_DESC,
-			QUERY_DESC_IDN_UNIT, sdev->lun, 0, desc_buf, &buff_len);
+	for (i = start_scan; i >= 0; i--) {
+		data = be16_to_cpu(*((u16 *)(buff + 2*i)));
+		unit = (data & ATTR_ICC_LVL_UNIT_MASK) >>
+						ATTR_ICC_LVL_UNIT_OFFSET;
+		curr_uA = data & ATTR_ICC_LVL_VALUE_MASK;
+		switch (unit) {
+		case UFSHCD_NANO_AMP:
+			curr_uA = curr_uA / 1000;
+			break;
+		case UFSHCD_MILI_AMP:
+			curr_uA = curr_uA * 1000;
+			break;
+		case UFSHCD_AMP:
+			curr_uA = curr_uA * 1000 * 1000;
+			break;
+		case UFSHCD_MICRO_AMP:
+		default:
+			break;
+		}
+		if (sup_curr_uA >= curr_uA)
+			break;
+	}
+	if (i < 0) {
+		i = 0;
+		pr_err("%s: Couldn't find valid icc_level = %d", __func__, i);
+	}
 
-	if (ret || (buff_len < UNIT_DESC_PARAM_LU_Q_DEPTH)) {
+	return (u32)i;
+}
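+
+/*
+ * Illustrative decode for the loop above: an entry whose unit field says
+ * milliamps with a raw value of 100 contributes curr_uA = 100 * 1000 =
+ * 100000. The scan walks from the highest level downwards and stops at
+ * the first level whose current fits within sup_curr_uA, so the returned
+ * index is the largest ICC level the given supply can sustain.
+ */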
+
+/**
+ * ufshcd_find_max_sup_active_icc_level - calculate the max supported active
+ * ICC level
+ * @hba: per-adapter instance
+ * @desc_buf: power descriptor buffer to extract ICC levels from
+ * @len: length of desc_buf
+ *
+ * Returns the calculated ICC level; 0 if the regulator capabilities were
+ * not initialized.
+ */
+static u32 ufshcd_find_max_sup_active_icc_level(struct ufs_hba *hba,
+							u8 *desc_buf, int len)
+{
+	u32 icc_level = 0;
+
+	if (!hba->vreg_info.vcc || !hba->vreg_info.vccq ||
+						!hba->vreg_info.vccq2) {
 		dev_err(hba->dev,
-			"%s:Failed reading unit descriptor. len = %d ret = %d"
-			, __func__, buff_len, ret);
-		if (!ret)
-			ret = -EINVAL;
-
+			"%s: Regulator capability was not set, actvIccLevel=%d",
+							__func__, icc_level);
 		goto out;
 	}
 
-	ret = desc_buf[UNIT_DESC_PARAM_LU_Q_DEPTH] & 0xFF;
+	if (hba->vreg_info.vcc)
+		icc_level = ufshcd_get_max_icc_level(
+				hba->vreg_info.vcc->max_uA,
+				POWER_DESC_MAX_ACTV_ICC_LVLS - 1,
+				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCC_0]);
+
+	if (hba->vreg_info.vccq)
+		icc_level = ufshcd_get_max_icc_level(
+				hba->vreg_info.vccq->max_uA,
+				icc_level,
+				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ_0]);
+
+	if (hba->vreg_info.vccq2)
+		icc_level = ufshcd_get_max_icc_level(
+				hba->vreg_info.vccq2->max_uA,
+				icc_level,
+				&desc_buf[PWR_DESC_ACTIVE_LVLS_VCCQ2_0]);
+out:
+	return icc_level;
+}
+
+static void ufshcd_init_icc_levels(struct ufs_hba *hba)
+{
+	int ret;
+	int buff_len = QUERY_DESC_POWER_MAX_SIZE;
+	u8 desc_buf[QUERY_DESC_POWER_MAX_SIZE];
+
+	ret = ufshcd_read_power_desc(hba, desc_buf, buff_len);
+	if (ret) {
+		dev_err(hba->dev,
+			"%s: Failed reading power descriptor.len = %d ret = %d",
+			__func__, buff_len, ret);
+		return;
+	}
+
+	hba->init_prefetch_data.icc_level =
+			ufshcd_find_max_sup_active_icc_level(hba,
+			desc_buf, buff_len);
+	dev_dbg(hba->dev, "%s: setting icc_level 0x%x",
+			__func__, hba->init_prefetch_data.icc_level);
+
+	ret = ufshcd_query_attr(hba, UPIU_QUERY_OPCODE_WRITE_ATTR,
+			QUERY_ATTR_IDN_ACTIVE_ICC_LVL, 0, 0,
+			&hba->init_prefetch_data.icc_level);
+
+	if (ret)
+		dev_err(hba->dev,
+			"%s: Failed configuring bActiveICCLevel = %d ret = %d",
+			__func__, hba->init_prefetch_data.icc_level, ret);
+}
+
+/**
+ * ufshcd_scsi_add_wlus - Adds required W-LUs
+ * @hba: per-adapter instance
+ *
+ * UFS device specification requires the UFS devices to support 4 well known
+ * logical units:
+ *	"REPORT_LUNS" (address: 01h)
+ *	"UFS Device" (address: 50h)
+ *	"RPMB" (address: 44h)
+ *	"BOOT" (address: 30h)
+ * The UFS device's power management is controlled by the "POWER CONDITION"
+ * field of the SSU (START STOP UNIT) command. But this "power condition"
+ * field takes effect only when it is sent to the "UFS device" well known
+ * logical unit, hence we require a scsi_device instance to represent this
+ * logical unit so that the UFS host driver can send the SSU command for
+ * power management.
+ *
+ * We also require a scsi_device instance for the "RPMB" (Replay Protected
+ * Memory Block) LU so that user space processes can control this LU. User
+ * space may also want access to the BOOT LU.
+ *
+ * This function adds scsi device instances for all of the well known LUs
+ * (except the "REPORT LUNS" LU).
+ *
+ * Returns zero on success (all required W-LUs are added successfully),
+ * non-zero error value on failure (if failed to add any of the required W-LU).
+ */
+static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
+		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
+	if (IS_ERR(hba->sdev_ufs_device)) {
+		ret = PTR_ERR(hba->sdev_ufs_device);
+		hba->sdev_ufs_device = NULL;
+		goto out;
+	}
+
+	hba->sdev_boot = __scsi_add_device(hba->host, 0, 0,
+		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
+	if (IS_ERR(hba->sdev_boot)) {
+		ret = PTR_ERR(hba->sdev_boot);
+		hba->sdev_boot = NULL;
+		goto remove_sdev_ufs_device;
+	}
+
+	hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
+		ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
+	if (IS_ERR(hba->sdev_rpmb)) {
+		ret = PTR_ERR(hba->sdev_rpmb);
+		hba->sdev_rpmb = NULL;
+		goto remove_sdev_boot;
+	}
+	goto out;
+
+remove_sdev_boot:
+	scsi_remove_device(hba->sdev_boot);
+remove_sdev_ufs_device:
+	scsi_remove_device(hba->sdev_ufs_device);
 out:
 	return ret;
 }
 
 /**
- * ufshcd_async_scan - asynchronous execution for link startup
- * @data: data pointer to pass to this function
- * @cookie: cookie data
+ * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
+ *			     ufshcd_scsi_add_wlus()
+ * @hba: per-adapter instance
+ *
  */
-static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
 {
-	struct ufs_hba *hba = (struct ufs_hba *)data;
+	if (hba->sdev_ufs_device) {
+		scsi_remove_device(hba->sdev_ufs_device);
+		hba->sdev_ufs_device = NULL;
+	}
+
+	if (hba->sdev_boot) {
+		scsi_remove_device(hba->sdev_boot);
+		hba->sdev_boot = NULL;
+	}
+
+	if (hba->sdev_rpmb) {
+		scsi_remove_device(hba->sdev_rpmb);
+		hba->sdev_rpmb = NULL;
+	}
+}
+
+/**
+ * ufshcd_probe_hba - probe hba to detect device and initialize
+ * @hba: per-adapter instance
+ *
+ * Execute link-startup and verify device initialization
+ */
+static int ufshcd_probe_hba(struct ufs_hba *hba)
+{
 	int ret;
 
 	ret = ufshcd_link_startup(hba);
 	if (ret)
 		goto out;
 
-	ufshcd_config_max_pwr_mode(hba);
+	/* UniPro link is active now */
+	ufshcd_set_link_active(hba);
 
 	ret = ufshcd_verify_dev_init(hba);
 	if (ret)
@@ -3143,16 +4145,77 @@
 	if (ret)
 		goto out;
 
+	/* UFS device is also active now */
+	ufshcd_set_ufs_dev_active(hba);
 	ufshcd_force_reset_auto_bkops(hba);
 	hba->ufshcd_state = UFSHCD_STATE_OPERATIONAL;
+	hba->wlun_dev_clr_ua = true;
 
-	/* If we are in error handling context no need to scan the host */
-	if (!ufshcd_eh_in_progress(hba)) {
+	if (ufshcd_get_max_pwr_mode(hba)) {
+		dev_err(hba->dev,
+			"%s: Failed getting max supported power mode\n",
+			__func__);
+	} else {
+		ret = ufshcd_config_pwr_mode(hba, &hba->max_pwr_info.info);
+		if (ret)
+			dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+					__func__, ret);
+	}
+
+	/*
+	 * If we are in error handling context or in power management callbacks
+	 * context, no need to scan the host
+	 */
+	if (!ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+		bool flag;
+
+		/* clear any previous UFS device information */
+		memset(&hba->dev_info, 0, sizeof(hba->dev_info));
+		if (!ufshcd_query_flag(hba, UPIU_QUERY_OPCODE_READ_FLAG,
+				       QUERY_FLAG_IDN_PWR_ON_WPE, &flag))
+			hba->dev_info.f_power_on_wp_en = flag;
+
+		if (!hba->is_init_prefetch)
+			ufshcd_init_icc_levels(hba);
+
+		/* Add required well known logical units to scsi mid layer */
+		if (ufshcd_scsi_add_wlus(hba))
+			goto out;
+
 		scsi_scan_host(hba->host);
 		pm_runtime_put_sync(hba->dev);
 	}
+
+	if (!hba->is_init_prefetch)
+		hba->is_init_prefetch = true;
+
+	/* Resume devfreq after UFS device is detected */
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+
 out:
-	return;
+	/*
+	 * If we failed to initialize the device or the device is not
+	 * present, turn off the power/clocks etc.
+	 */
+	if (ret && !ufshcd_eh_in_progress(hba) && !hba->pm_op_in_progress) {
+		pm_runtime_put_sync(hba->dev);
+		ufshcd_hba_exit(hba);
+	}
+
+	return ret;
+}
+
+/**
+ * ufshcd_async_scan - asynchronous execution for probing hba
+ * @data: data pointer to pass to this function
+ * @cookie: cookie data
+ */
+static void ufshcd_async_scan(void *data, async_cookie_t cookie)
+{
+	struct ufs_hba *hba = (struct ufs_hba *)data;
+
+	ufshcd_probe_hba(hba);
 }
 
 static struct scsi_host_template ufshcd_driver_template = {
@@ -3171,70 +4234,956 @@
 	.sg_tablesize		= SG_ALL,
 	.cmd_per_lun		= UFSHCD_CMD_PER_LUN,
 	.can_queue		= UFSHCD_CAN_QUEUE,
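+	/*
+	 * Let the midlayer retry a blocked host after a single command
+	 * completion: with asynchronous clock ungating, queuecommand may
+	 * transiently return SCSI_MLQUEUE_HOST_BUSY.
+	 */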
+	.max_host_blocked	= 1,
 };
 
-/**
- * ufshcd_suspend - suspend power management function
- * @hba: per adapter instance
- * @state: power state
- *
- * Returns -ENOSYS
- */
-int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state)
+static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
+				   int ua)
 {
-	/*
-	 * TODO:
-	 * 1. Block SCSI requests from SCSI midlayer
-	 * 2. Change the internal driver state to non operational
-	 * 3. Set UTRLRSR and UTMRLRSR bits to zero
-	 * 4. Wait until outstanding commands are completed
-	 * 5. Set HCE to zero to send the UFS host controller to reset state
-	 */
+	int ret = 0;
+	struct regulator *reg;
+	const char *name;
 
-	return -ENOSYS;
+	BUG_ON(!vreg);
+
+	reg = vreg->reg;
+	name = vreg->name;
+
+	ret = regulator_set_optimum_mode(reg, ua);
+	if (ret >= 0) {
+		/*
+		 * regulator_set_optimum_mode() returns new regulator
+		 * mode upon success.
+		 */
+		ret = 0;
+	} else {
+		dev_err(dev, "%s: %s set optimum mode(ua=%d) failed, err=%d\n",
+				__func__, name, ua, ret);
+	}
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(ufshcd_suspend);
+
+static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
+					 struct ufs_vreg *vreg)
+{
+	return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
+}
+
+static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
+					 struct ufs_vreg *vreg)
+{
+	return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
+}
+
+static int ufshcd_config_vreg(struct device *dev,
+		struct ufs_vreg *vreg, bool on)
+{
+	int ret = 0;
+	struct regulator *reg;
+	const char *name;
+	int min_uV, uA_load;
+
+	BUG_ON(!vreg);
+
+	reg = vreg->reg;
+	name = vreg->name;
+
+	if (regulator_count_voltages(reg) > 0) {
+		min_uV = on ? vreg->min_uV : 0;
+		ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+		if (ret) {
+			dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+					__func__, name, ret);
+			goto out;
+		}
+
+		uA_load = on ? vreg->max_uA : 0;
+		ret = ufshcd_config_vreg_load(dev, vreg, uA_load);
+		if (ret)
+			goto out;
+	}
+out:
+	return ret;
+}
+
+static int ufshcd_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (!vreg || vreg->enabled)
+		goto out;
+
+	ret = ufshcd_config_vreg(dev, vreg, true);
+	if (!ret)
+		ret = regulator_enable(vreg->reg);
+
+	if (!ret)
+		vreg->enabled = true;
+	else
+		dev_err(dev, "%s: %s enable failed, err=%d\n",
+				__func__, vreg->name, ret);
+out:
+	return ret;
+}
+
+static int ufshcd_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (!vreg || !vreg->enabled)
+		goto out;
+
+	ret = regulator_disable(vreg->reg);
+
+	if (!ret) {
+		/* ignore errors on applying disable config */
+		ufshcd_config_vreg(dev, vreg, false);
+		vreg->enabled = false;
+	} else {
+		dev_err(dev, "%s: %s disable failed, err=%d\n",
+				__func__, vreg->name, ret);
+	}
+out:
+	return ret;
+}
+
+static int ufshcd_setup_vreg(struct ufs_hba *hba, bool on)
+{
+	int ret = 0;
+	struct device *dev = hba->dev;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (!info)
+		goto out;
+
+	ret = ufshcd_toggle_vreg(dev, info->vcc, on);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_toggle_vreg(dev, info->vccq, on);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_toggle_vreg(dev, info->vccq2, on);
+	if (ret)
+		goto out;
+
+out:
+	if (ret) {
+		ufshcd_toggle_vreg(dev, info->vccq2, false);
+		ufshcd_toggle_vreg(dev, info->vccq, false);
+		ufshcd_toggle_vreg(dev, info->vcc, false);
+	}
+	return ret;
+}
+
+static int ufshcd_setup_hba_vreg(struct ufs_hba *hba, bool on)
+{
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (info)
+		return ufshcd_toggle_vreg(hba->dev, info->vdd_hba, on);
+
+	return 0;
+}
+
+static int ufshcd_get_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+	int ret = 0;
+
+	if (!vreg)
+		goto out;
+
+	vreg->reg = devm_regulator_get(dev, vreg->name);
+	if (IS_ERR(vreg->reg)) {
+		ret = PTR_ERR(vreg->reg);
+		dev_err(dev, "%s: %s get failed, err=%d\n",
+				__func__, vreg->name, ret);
+	}
+out:
+	return ret;
+}
+
+static int ufshcd_init_vreg(struct ufs_hba *hba)
+{
+	int ret = 0;
+	struct device *dev = hba->dev;
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (!info)
+		goto out;
+
+	ret = ufshcd_get_vreg(dev, info->vcc);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_get_vreg(dev, info->vccq);
+	if (ret)
+		goto out;
+
+	ret = ufshcd_get_vreg(dev, info->vccq2);
+out:
+	return ret;
+}
+
+static int ufshcd_init_hba_vreg(struct ufs_hba *hba)
+{
+	struct ufs_vreg_info *info = &hba->vreg_info;
+
+	if (info)
+		return ufshcd_get_vreg(hba->dev, info->vdd_hba);
+
+	return 0;
+}
+
+static int __ufshcd_setup_clocks(struct ufs_hba *hba, bool on,
+					bool skip_ref_clk)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+	unsigned long flags;
+
+	if (!head || list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (skip_ref_clk && !strcmp(clki->name, "ref_clk"))
+				continue;
+
+			if (on && !clki->enabled) {
+				ret = clk_prepare_enable(clki->clk);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s prepare enable failed, %d\n",
+						__func__, clki->name, ret);
+					goto out;
+				}
+			} else if (!on && clki->enabled) {
+				clk_disable_unprepare(clki->clk);
+			}
+			clki->enabled = on;
+			dev_dbg(hba->dev, "%s: clk: %s %sabled\n", __func__,
+					clki->name, on ? "en" : "dis");
+		}
+	}
+
+	if (hba->vops && hba->vops->setup_clocks)
+		ret = hba->vops->setup_clocks(hba, on);
+out:
+	if (ret) {
+		list_for_each_entry(clki, head, list) {
+			if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
+				clk_disable_unprepare(clki->clk);
+		}
+	} else if (!ret && on) {
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.state = CLKS_ON;
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+	return ret;
+}
+
+static int ufshcd_setup_clocks(struct ufs_hba *hba, bool on)
+{
+	return  __ufshcd_setup_clocks(hba, on, false);
+}
+
+static int ufshcd_init_clocks(struct ufs_hba *hba)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct device *dev = hba->dev;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!head || list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!clki->name)
+			continue;
+
+		clki->clk = devm_clk_get(dev, clki->name);
+		if (IS_ERR(clki->clk)) {
+			ret = PTR_ERR(clki->clk);
+			dev_err(dev, "%s: %s clk get failed, %d\n",
+					__func__, clki->name, ret);
+			goto out;
+		}
+
+		if (clki->max_freq) {
+			ret = clk_set_rate(clki->clk, clki->max_freq);
+			if (ret) {
+				dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+					__func__, clki->name,
+					clki->max_freq, ret);
+				goto out;
+			}
+			clki->curr_freq = clki->max_freq;
+		}
+		dev_dbg(dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+out:
+	return ret;
+}
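
[Editor's sketch] ufshcd_init_clocks() walks hba->clk_list_head but leaves building that list to the platform layer. A hedged sketch of list construction using the ufs_clk_info fields declared later in this patch; it assumes the list head was initialized with INIT_LIST_HEAD() beforehand:

    /* Sketch only: register one controller clock for later init/scaling. */
    static int example_add_clock(struct ufs_hba *hba, const char *name,
                                 u32 min_freq, u32 max_freq)
    {
            struct ufs_clk_info *clki;

            clki = devm_kzalloc(hba->dev, sizeof(*clki), GFP_KERNEL);
            if (!clki)
                    return -ENOMEM;

            clki->name = name;          /* matched by devm_clk_get() */
            clki->min_freq = min_freq;  /* 0 => rate fixed by parent clock */
            clki->max_freq = max_freq;  /* 0 => clk_set_rate() is skipped */
            list_add_tail(&clki->list, &hba->clk_list_head);
            return 0;
    }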
+
+static int ufshcd_variant_hba_init(struct ufs_hba *hba)
+{
+	int err = 0;
+
+	if (!hba->vops)
+		goto out;
+
+	if (hba->vops->init) {
+		err = hba->vops->init(hba);
+		if (err)
+			goto out;
+	}
+
+	if (hba->vops->setup_regulators) {
+		err = hba->vops->setup_regulators(hba, true);
+		if (err)
+			goto out_exit;
+	}
+
+	goto out;
+
+out_exit:
+	if (hba->vops->exit)
+		hba->vops->exit(hba);
+out:
+	if (err)
+		dev_err(hba->dev, "%s: variant %s init failed err %d\n",
+			__func__, hba->vops ? hba->vops->name : "", err);
+	return err;
+}
+
+static void ufshcd_variant_hba_exit(struct ufs_hba *hba)
+{
+	if (!hba->vops)
+		return;
+
+	if (hba->vops->setup_clocks)
+		hba->vops->setup_clocks(hba, false);
+
+	if (hba->vops->setup_regulators)
+		hba->vops->setup_regulators(hba, false);
+
+	if (hba->vops->exit)
+		hba->vops->exit(hba);
+}
+
+static int ufshcd_hba_init(struct ufs_hba *hba)
+{
+	int err;
+
+	/*
+	 * Handle host controller power separately from the UFS device power
+	 * rails, as it helps control the host controller power collapse
+	 * independently, which is different from UFS device power collapse.
+	 * Also, enable the host controller power before going ahead with the
+	 * rest of the initialization here.
+	 */
+	err = ufshcd_init_hba_vreg(hba);
+	if (err)
+		goto out;
+
+	err = ufshcd_setup_hba_vreg(hba, true);
+	if (err)
+		goto out;
+
+	err = ufshcd_init_clocks(hba);
+	if (err)
+		goto out_disable_hba_vreg;
+
+	err = ufshcd_setup_clocks(hba, true);
+	if (err)
+		goto out_disable_hba_vreg;
+
+	err = ufshcd_init_vreg(hba);
+	if (err)
+		goto out_disable_clks;
+
+	err = ufshcd_setup_vreg(hba, true);
+	if (err)
+		goto out_disable_clks;
+
+	err = ufshcd_variant_hba_init(hba);
+	if (err)
+		goto out_disable_vreg;
+
+	hba->is_powered = true;
+	goto out;
+
+out_disable_vreg:
+	ufshcd_setup_vreg(hba, false);
+out_disable_clks:
+	ufshcd_setup_clocks(hba, false);
+out_disable_hba_vreg:
+	ufshcd_setup_hba_vreg(hba, false);
+out:
+	return err;
+}
+
+static void ufshcd_hba_exit(struct ufs_hba *hba)
+{
+	if (hba->is_powered) {
+		ufshcd_variant_hba_exit(hba);
+		ufshcd_setup_vreg(hba, false);
+		ufshcd_setup_clocks(hba, false);
+		ufshcd_setup_hba_vreg(hba, false);
+		hba->is_powered = false;
+	}
+}
+
+static int
+ufshcd_send_request_sense(struct ufs_hba *hba, struct scsi_device *sdp)
+{
+	unsigned char cmd[6] = {REQUEST_SENSE,
+				0,
+				0,
+				0,
+				SCSI_SENSE_BUFFERSIZE,
+				0};
+	char *buffer;
+	int ret;
+
+	buffer = kzalloc(SCSI_SENSE_BUFFERSIZE, GFP_KERNEL);
+	if (!buffer) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = scsi_execute_req_flags(sdp, cmd, DMA_FROM_DEVICE, buffer,
+				SCSI_SENSE_BUFFERSIZE, NULL,
+				msecs_to_jiffies(1000), 3, NULL, REQ_PM);
+	if (ret)
+		pr_err("%s: failed with err %d\n", __func__, ret);
+
+	kfree(buffer);
+out:
+	return ret;
+}
 
 /**
- * ufshcd_resume - resume power management function
+ * ufshcd_set_dev_pwr_mode - sends START STOP UNIT command to set device
+ *			     power mode
  * @hba: per adapter instance
+ * @pwr_mode: device power mode to set
  *
- * Returns -ENOSYS
+ * Returns 0 if requested power mode is set successfully
+ * Returns non-zero if failed to set the requested power mode
  */
-int ufshcd_resume(struct ufs_hba *hba)
+static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
+				     enum ufs_dev_pwr_mode pwr_mode)
 {
+	unsigned char cmd[6] = { START_STOP };
+	struct scsi_sense_hdr sshdr;
+	struct scsi_device *sdp = hba->sdev_ufs_device;
+	int ret;
+
+	if (!sdp || !scsi_device_online(sdp))
+		return -ENODEV;
+
 	/*
-	 * TODO:
-	 * 1. Set HCE to 1, to start the UFS host controller
-	 * initialization process
-	 * 2. Set UTRLRSR and UTMRLRSR bits to 1
-	 * 3. Change the internal driver state to operational
-	 * 4. Unblock SCSI requests from SCSI midlayer
+	 * If SCSI commands fail, the SCSI mid-layer schedules SCSI error
+	 * handling, which would wait for the host to be resumed. Since we know
+	 * we are functional while we are here, skip host resume in the error
+	 * handling context.
 	 */
+	hba->host->eh_noresume = 1;
+	if (hba->wlun_dev_clr_ua) {
+		ret = ufshcd_send_request_sense(hba, sdp);
+		if (ret)
+			goto out;
+		/* Unit attention condition is cleared now */
+		hba->wlun_dev_clr_ua = false;
+	}
 
-	return -ENOSYS;
+	cmd[4] = pwr_mode << 4;
+
+	/*
+	 * This function is generally called from the power management
+	 * callbacks, hence set the REQ_PM flag so that it doesn't resume the
+	 * already suspended children.
+	 */
+	ret = scsi_execute_req_flags(sdp, cmd, DMA_NONE, NULL, 0, &sshdr,
+				     START_STOP_TIMEOUT, 0, NULL, REQ_PM);
+	if (ret) {
+		sdev_printk(KERN_WARNING, sdp,
+			  "START_STOP failed for power mode: %d\n", pwr_mode);
+		scsi_show_result(ret);
+		if (driver_byte(ret) & DRIVER_SENSE) {
+			scsi_show_sense_hdr(&sshdr);
+			scsi_show_extd_sense(sshdr.asc, sshdr.ascq);
+		}
+	}
+
+	if (!ret)
+		hba->curr_dev_pwr_mode = pwr_mode;
+out:
+	hba->host->eh_noresume = 0;
+	return ret;
 }
-EXPORT_SYMBOL_GPL(ufshcd_resume);
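
[Editor's sketch] For reference, pwr_mode lands in bits 7:4 of CDB byte 4 — the POWER CONDITION field of START STOP UNIT. A worked example, assuming UFS_SLEEP_PWR_MODE has the value 2 (the enum values are not shown in this hunk):

    unsigned char cmd[6] = { START_STOP };  /* opcode 0x1B */
    cmd[4] = UFS_SLEEP_PWR_MODE << 4;       /* 2 << 4 == 0x20: POWER CONDITION */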
 
-int ufshcd_runtime_suspend(struct ufs_hba *hba)
+static int ufshcd_link_state_transition(struct ufs_hba *hba,
+					enum uic_link_state req_link_state,
+					int check_for_bkops)
 {
-	if (!hba)
+	int ret = 0;
+
+	if (req_link_state == hba->uic_link_state)
 		return 0;
 
+	if (req_link_state == UIC_LINK_HIBERN8_STATE) {
+		ret = ufshcd_uic_hibern8_enter(hba);
+		if (!ret)
+			ufshcd_set_link_hibern8(hba);
+		else
+			goto out;
+	}
 	/*
-	 * The device is idle with no requests in the queue,
-	 * allow background operations.
+	 * If autobkops is enabled, link can't be turned off because
+	 * turning off the link would also turn off the device.
 	 */
-	return ufshcd_enable_auto_bkops(hba);
+	else if ((req_link_state == UIC_LINK_OFF_STATE) &&
+		   (!check_for_bkops || (check_for_bkops &&
+		    !hba->auto_bkops_enabled))) {
+		/*
+		 * Change controller state to "reset state" which
+		 * should also put the link in off/reset state
+		 */
+		ufshcd_hba_stop(hba);
+		/*
+		 * TODO: Check if we need any delay to make sure that
+		 * controller is reset
+		 */
+		ufshcd_set_link_off(hba);
+	}
+
+out:
+	return ret;
+}
+
+static void ufshcd_vreg_set_lpm(struct ufs_hba *hba)
+{
+	/*
+	 * If the UFS device is in UFS_Sleep state, turn off the VCC rail to
+	 * save some power.
+	 *
+	 * If UFS device and link is in OFF state, all power supplies (VCC,
+	 * VCCQ, VCCQ2) can be turned off if power on write protect is not
+	 * required. If UFS link is inactive (Hibern8 or OFF state) and device
+	 * is in sleep state, put VCCQ & VCCQ2 rails in LPM mode.
+	 *
+	 * Ignore the error returned by ufshcd_toggle_vreg() as the device is
+	 * anyway in a low power state, which saves some power.
+	 */
+	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+	    !hba->dev_info.is_lu_power_on_wp) {
+		ufshcd_setup_vreg(hba, false);
+	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+		ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+		if (!ufshcd_is_link_active(hba)) {
+			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+			ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq2);
+		}
+	}
+}
+
+static int ufshcd_vreg_set_hpm(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba) &&
+	    !hba->dev_info.is_lu_power_on_wp) {
+		ret = ufshcd_setup_vreg(hba, true);
+	} else if (!ufshcd_is_ufs_dev_active(hba)) {
+		ret = ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, true);
+		if (!ret && !ufshcd_is_link_active(hba)) {
+			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq);
+			if (ret)
+				goto vcc_disable;
+			ret = ufshcd_config_vreg_hpm(hba, hba->vreg_info.vccq2);
+			if (ret)
+				goto vccq_lpm;
+		}
+	}
+	goto out;
+
+vccq_lpm:
+	ufshcd_config_vreg_lpm(hba, hba->vreg_info.vccq);
+vcc_disable:
+	ufshcd_toggle_vreg(hba->dev, hba->vreg_info.vcc, false);
+out:
+	return ret;
+}
+
+static void ufshcd_hba_vreg_set_lpm(struct ufs_hba *hba)
+{
+	if (ufshcd_is_link_off(hba))
+		ufshcd_setup_hba_vreg(hba, false);
+}
+
+static void ufshcd_hba_vreg_set_hpm(struct ufs_hba *hba)
+{
+	if (ufshcd_is_link_off(hba))
+		ufshcd_setup_hba_vreg(hba, true);
+}
+
+/**
+ * ufshcd_suspend - helper function for suspend operations
+ * @hba: per adapter instance
+ * @pm_op: desired low power operation type
+ *
+ * This function will try to put the UFS device and link into low power
+ * mode based on the "rpm_lvl" (Runtime PM level) or "spm_lvl"
+ * (System PM level).
+ *
+ * If this function is called during shutdown, it will make sure that
+ * both the UFS device and the UFS link are powered off.
+ *
+ * NOTE: UFS device & link must be active before entering this function.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+	int ret = 0;
+	enum ufs_pm_level pm_lvl;
+	enum ufs_dev_pwr_mode req_dev_pwr_mode;
+	enum uic_link_state req_link_state;
+
+	hba->pm_op_in_progress = 1;
+	if (!ufshcd_is_shutdown_pm(pm_op)) {
+		pm_lvl = ufshcd_is_runtime_pm(pm_op) ?
+			 hba->rpm_lvl : hba->spm_lvl;
+		req_dev_pwr_mode = ufs_get_pm_lvl_to_dev_pwr_mode(pm_lvl);
+		req_link_state = ufs_get_pm_lvl_to_link_pwr_state(pm_lvl);
+	} else {
+		req_dev_pwr_mode = UFS_POWERDOWN_PWR_MODE;
+		req_link_state = UIC_LINK_OFF_STATE;
+	}
+
+	/*
+	 * If we can't transition into any of the low power modes
+	 * just gate the clocks.
+	 */
+	ufshcd_hold(hba, false);
+	hba->clk_gating.is_suspended = true;
+
+	if (req_dev_pwr_mode == UFS_ACTIVE_PWR_MODE &&
+			req_link_state == UIC_LINK_ACTIVE_STATE) {
+		goto disable_clks;
+	}
+
+	if ((req_dev_pwr_mode == hba->curr_dev_pwr_mode) &&
+	    (req_link_state == hba->uic_link_state))
+		goto out;
+
+	/* UFS device & link must be active before entering this function */
+	if (!ufshcd_is_ufs_dev_active(hba) || !ufshcd_is_link_active(hba)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ufshcd_is_runtime_pm(pm_op)) {
+		if (ufshcd_can_autobkops_during_suspend(hba)) {
+			/*
+			 * The device is idle with no requests in the queue,
+			 * allow background operations if bkops status shows
+			 * that performance might be impacted.
+			 */
+			ret = ufshcd_urgent_bkops(hba);
+			if (ret)
+				goto enable_gating;
+		} else {
+			/* make sure that auto bkops is disabled */
+			ufshcd_disable_auto_bkops(hba);
+		}
+	}
+
+	if ((req_dev_pwr_mode != hba->curr_dev_pwr_mode) &&
+	     ((ufshcd_is_runtime_pm(pm_op) && !hba->auto_bkops_enabled) ||
+	       !ufshcd_is_runtime_pm(pm_op))) {
+		/* ensure that bkops is disabled */
+		ufshcd_disable_auto_bkops(hba);
+		ret = ufshcd_set_dev_pwr_mode(hba, req_dev_pwr_mode);
+		if (ret)
+			goto enable_gating;
+	}
+
+	ret = ufshcd_link_state_transition(hba, req_link_state, 1);
+	if (ret)
+		goto set_dev_active;
+
+	ufshcd_vreg_set_lpm(hba);
+
+disable_clks:
+	/*
+	 * Clock scaling needs access to the controller registers. Hence, wait
+	 * for pending clock scaling work to be done before the clocks are
+	 * turned off.
+	 */
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
+	/*
+	 * Call vendor specific suspend callback. As these callbacks may access
+	 * vendor specific host controller register space, call them before the
+	 * host clocks are turned off.
+	 */
+	if (hba->vops && hba->vops->suspend) {
+		ret = hba->vops->suspend(hba, pm_op);
+		if (ret)
+			goto set_link_active;
+	}
+
+	if (hba->vops && hba->vops->setup_clocks) {
+		ret = hba->vops->setup_clocks(hba, false);
+		if (ret)
+			goto vops_resume;
+	}
+
+	if (!ufshcd_is_link_active(hba))
+		ufshcd_setup_clocks(hba, false);
+	else
+		/* If link is active, device ref_clk can't be switched off */
+		__ufshcd_setup_clocks(hba, false, true);
+
+	hba->clk_gating.state = CLKS_OFF;
+	/*
+	 * Disable the host irq as there won't be any host controller
+	 * transaction expected till resume.
+	 */
+	ufshcd_disable_irq(hba);
+	/* Put the host controller in low power mode if possible */
+	ufshcd_hba_vreg_set_lpm(hba);
+	goto out;
+
+vops_resume:
+	if (hba->vops && hba->vops->resume)
+		hba->vops->resume(hba, pm_op);
+set_link_active:
+	ufshcd_vreg_set_hpm(hba);
+	if (ufshcd_is_link_hibern8(hba) && !ufshcd_uic_hibern8_exit(hba))
+		ufshcd_set_link_active(hba);
+	else if (ufshcd_is_link_off(hba))
+		ufshcd_host_reset_and_restore(hba);
+set_dev_active:
+	if (!ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE))
+		ufshcd_disable_auto_bkops(hba);
+enable_gating:
+	hba->clk_gating.is_suspended = false;
+	ufshcd_release(hba);
+out:
+	hba->pm_op_in_progress = 0;
+	return ret;
+}
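
[Editor's sketch] The depth reached by ufshcd_suspend() is chosen purely by rpm_lvl/spm_lvl (see the ufs_pm_level enum added to the header below). A hypothetical tuning, not taken from this patch, e.g. in a variant ->init() hook:

    /* Hypothetical policy: cheap runtime PM exits, deeper system suspend. */
    hba->rpm_lvl = UFS_PM_LVL_1;    /* device active, link in Hibern8 */
    hba->spm_lvl = UFS_PM_LVL_3;    /* device asleep, link in Hibern8 */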
+
+/**
+ * ufshcd_resume - helper function for resume operations
+ * @hba: per adapter instance
+ * @pm_op: runtime PM or system PM
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+	int ret;
+	enum uic_link_state old_link_state;
+
+	hba->pm_op_in_progress = 1;
+	old_link_state = hba->uic_link_state;
+
+	ufshcd_hba_vreg_set_hpm(hba);
+	/* Make sure clocks are enabled before accessing controller */
+	ret = ufshcd_setup_clocks(hba, true);
+	if (ret)
+		goto out;
+
+	/* enable the host irq as host controller would be active soon */
+	ret = ufshcd_enable_irq(hba);
+	if (ret)
+		goto disable_irq_and_vops_clks;
+
+	ret = ufshcd_vreg_set_hpm(hba);
+	if (ret)
+		goto disable_irq_and_vops_clks;
+
+	/*
+	 * Call vendor specific resume callback. As these callbacks may access
+	 * vendor specific host controller register space, call them when the
+	 * host clocks are ON.
+	 */
+	if (hba->vops && hba->vops->resume) {
+		ret = hba->vops->resume(hba, pm_op);
+		if (ret)
+			goto disable_vreg;
+	}
+
+	if (ufshcd_is_link_hibern8(hba)) {
+		ret = ufshcd_uic_hibern8_exit(hba);
+		if (!ret)
+			ufshcd_set_link_active(hba);
+		else
+			goto vendor_suspend;
+	} else if (ufshcd_is_link_off(hba)) {
+		ret = ufshcd_host_reset_and_restore(hba);
+		/*
+		 * ufshcd_host_reset_and_restore() should have already
+		 * set the link state as active
+		 */
+		if (ret || !ufshcd_is_link_active(hba))
+			goto vendor_suspend;
+	}
+
+	if (!ufshcd_is_ufs_dev_active(hba)) {
+		ret = ufshcd_set_dev_pwr_mode(hba, UFS_ACTIVE_PWR_MODE);
+		if (ret)
+			goto set_old_link_state;
+	}
+
+	/*
+	 * If BKOPs operations are urgently needed at this moment then
+	 * keep auto-bkops enabled or else disable it.
+	 */
+	ufshcd_urgent_bkops(hba);
+	hba->clk_gating.is_suspended = false;
+
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_resume_device(hba->devfreq);
+
+	/* Schedule clock gating in case of no access to UFS device yet */
+	ufshcd_release(hba);
+	goto out;
+
+set_old_link_state:
+	ufshcd_link_state_transition(hba, old_link_state, 0);
+vendor_suspend:
+	if (hba->vops && hba->vops->suspend)
+		hba->vops->suspend(hba, pm_op);
+disable_vreg:
+	ufshcd_vreg_set_lpm(hba);
+disable_irq_and_vops_clks:
+	ufshcd_disable_irq(hba);
+	ufshcd_setup_clocks(hba, false);
+out:
+	hba->pm_op_in_progress = 0;
+	return ret;
+}
+
+/**
+ * ufshcd_system_suspend - system suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_suspend(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (!hba || !hba->is_powered)
+		return 0;
+
+	if (pm_runtime_suspended(hba->dev)) {
+		if (hba->rpm_lvl == hba->spm_lvl)
+			/*
+			 * There is a possibility that the device may still be
+			 * in active state during runtime suspend.
+			 */
+			if ((ufs_get_pm_lvl_to_dev_pwr_mode(hba->spm_lvl) ==
+			    hba->curr_dev_pwr_mode) && !hba->auto_bkops_enabled)
+				goto out;
+
+		/*
+		 * UFS device and/or UFS link low power states during runtime
+		 * suspend seem to be different than what is expected during
+		 * system suspend. Hence runtime resume the device & link and
+		 * let the system suspend low power states take effect.
+		 * TODO: If resume takes too long, we might optimize it in the
+		 * future by not resuming everything if possible.
+		 */
+		ret = ufshcd_runtime_resume(hba);
+		if (ret)
+			goto out;
+	}
+
+	ret = ufshcd_suspend(hba, UFS_SYSTEM_PM);
+out:
+	if (!ret)
+		hba->is_sys_suspended = true;
+	return ret;
+}
+EXPORT_SYMBOL(ufshcd_system_suspend);
+
+/**
+ * ufshcd_system_resume - system resume routine
+ * @hba: per adapter instance
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_system_resume(struct ufs_hba *hba)
+{
+	if (!hba || !hba->is_powered || pm_runtime_suspended(hba->dev))
+		/*
+		 * Let the runtime resume take care of resuming
+		 * if runtime suspended.
+		 */
+		return 0;
+
+	return ufshcd_resume(hba, UFS_SYSTEM_PM);
+}
+EXPORT_SYMBOL(ufshcd_system_resume);
+
+/**
+ * ufshcd_runtime_suspend - runtime suspend routine
+ * @hba: per adapter instance
+ *
+ * Check the description of ufshcd_suspend() function for more details.
+ *
+ * Returns 0 for success and non-zero for failure
+ */
+int ufshcd_runtime_suspend(struct ufs_hba *hba)
+{
+	if (!hba || !hba->is_powered)
+		return 0;
+
+	return ufshcd_suspend(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_suspend);
 
+/**
+ * ufshcd_runtime_resume - runtime resume routine
+ * @hba: per adapter instance
+ *
+ * This function basically brings the UFS device, UniPro link and controller
+ * to active state. The following operations are done in this function:
+ *
+ * 1. Turn on all the controller related clocks
+ * 2. Bring the UniPro link out of Hibernate state
+ * 3. If UFS device is in sleep state, turn ON VCC rail and bring the UFS device
+ *    to active state.
+ * 4. If auto-bkops is enabled on the device, disable it.
+ *
+ * So the following would be the possible power state after this function
+ * returns successfully:
+ *	S1: UFS device in Active state with VCC rail ON
+ *	    UniPro link in Active state
+ *	    All the UFS/UniPro controller clocks are ON
+ *
+ * Returns 0 for success and non-zero for failure
+ */
 int ufshcd_runtime_resume(struct ufs_hba *hba)
 {
-	if (!hba)
+	if (!hba || !hba->is_powered)
 		return 0;
-
-	return ufshcd_disable_auto_bkops(hba);
+	else
+		return ufshcd_resume(hba, UFS_RUNTIME_PM);
 }
 EXPORT_SYMBOL(ufshcd_runtime_resume);
 
@@ -3245,6 +5194,36 @@
 EXPORT_SYMBOL(ufshcd_runtime_idle);
 
 /**
+ * ufshcd_shutdown - shutdown routine
+ * @hba: per adapter instance
+ *
+ * This function would power off both UFS device and UFS link.
+ *
+ * Returns 0 always to allow force shutdown even in case of errors.
+ */
+int ufshcd_shutdown(struct ufs_hba *hba)
+{
+	int ret = 0;
+
+	if (ufshcd_is_ufs_dev_poweroff(hba) && ufshcd_is_link_off(hba))
+		goto out;
+
+	if (pm_runtime_suspended(hba->dev)) {
+		ret = ufshcd_runtime_resume(hba);
+		if (ret)
+			goto out;
+	}
+
+	ret = ufshcd_suspend(hba, UFS_SHUTDOWN_PM);
+out:
+	if (ret)
+		dev_err(hba->dev, "%s failed, err %d\n", __func__, ret);
+	/* allow force shutdown even in case of errors */
+	return 0;
+}
+EXPORT_SYMBOL(ufshcd_shutdown);
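
[Editor's sketch] Bus glue would typically route its .shutdown callback here; a sketch for a platform device, assuming the drvdata layout (not shown in this patch):

    static void example_pltfrm_shutdown(struct platform_device *pdev)
    {
            struct ufs_hba *hba = platform_get_drvdata(pdev);

            ufshcd_shutdown(hba);
    }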
+
+/**
  * ufshcd_remove - de-allocate SCSI host and host memory space
  *		data structure memory
  * @hba - per adapter instance
@@ -3252,11 +5231,17 @@
 void ufshcd_remove(struct ufs_hba *hba)
 {
 	scsi_remove_host(hba->host);
+	ufshcd_scsi_remove_wlus(hba);
 	/* disable interrupts */
 	ufshcd_disable_intr(hba, hba->intr_mask);
 	ufshcd_hba_stop(hba);
 
 	scsi_host_put(hba->host);
+
+	ufshcd_exit_clk_gating(hba);
+	if (ufshcd_is_clkscaling_enabled(hba))
+		devfreq_remove_device(hba->devfreq);
+	ufshcd_hba_exit(hba);
 }
 EXPORT_SYMBOL_GPL(ufshcd_remove);
 
@@ -3277,19 +5262,16 @@
 }
 
 /**
- * ufshcd_init - Driver initialization routine
+ * ufshcd_alloc_host - allocate Host Bus Adapter (HBA)
  * @dev: pointer to device handle
  * @hba_handle: driver private handle
- * @mmio_base: base register address
- * @irq: Interrupt line of device
  * Returns 0 on success, non-zero value on failure
  */
-int ufshcd_init(struct device *dev, struct ufs_hba **hba_handle,
-		 void __iomem *mmio_base, unsigned int irq)
+int ufshcd_alloc_host(struct device *dev, struct ufs_hba **hba_handle)
 {
 	struct Scsi_Host *host;
 	struct ufs_hba *hba;
-	int err;
+	int err = 0;
 
 	if (!dev) {
 		dev_err(dev,
@@ -3298,13 +5280,6 @@
 		goto out_error;
 	}
 
-	if (!mmio_base) {
-		dev_err(dev,
-		"Invalid memory reference for mmio_base is NULL\n");
-		err = -ENODEV;
-		goto out_error;
-	}
-
 	host = scsi_host_alloc(&ufshcd_driver_template,
 				sizeof(struct ufs_hba));
 	if (!host) {
@@ -3315,9 +5290,146 @@
 	hba = shost_priv(host);
 	hba->host = host;
 	hba->dev = dev;
+	*hba_handle = hba;
+
+out_error:
+	return err;
+}
+EXPORT_SYMBOL(ufshcd_alloc_host);
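
[Editor's sketch] Together with the reworked ufshcd_init() below, probing becomes a two-step sequence. A hedged sketch of the new calling convention in a glue driver; mmio_base/irq acquisition is elided, and example_hba_vops is a hypothetical ufs_hba_variant_ops instance (see the sketch after that struct's definition in the header changes below):

    struct ufs_hba *hba;
    int err;

    err = ufshcd_alloc_host(dev, &hba);
    if (err)
            return err;

    hba->vops = &example_hba_vops;  /* optional variant hooks, if any */

    err = ufshcd_init(hba, mmio_base, irq);
    if (err)
            dev_err(dev, "ufshcd_init() failed %d\n", err);
    return err;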
+
+static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
+{
+	int ret = 0;
+	struct ufs_clk_info *clki;
+	struct list_head *head = &hba->clk_list_head;
+
+	if (!head || list_empty(head))
+		goto out;
+
+	list_for_each_entry(clki, head, list) {
+		if (!IS_ERR_OR_NULL(clki->clk)) {
+			if (scale_up && clki->max_freq) {
+				if (clki->curr_freq == clki->max_freq)
+					continue;
+				ret = clk_set_rate(clki->clk, clki->max_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->max_freq, ret);
+					break;
+				}
+				clki->curr_freq = clki->max_freq;
+
+			} else if (!scale_up && clki->min_freq) {
+				if (clki->curr_freq == clki->min_freq)
+					continue;
+				ret = clk_set_rate(clki->clk, clki->min_freq);
+				if (ret) {
+					dev_err(hba->dev, "%s: %s clk set rate(%dHz) failed, %d\n",
+						__func__, clki->name,
+						clki->min_freq, ret);
+					break;
+				}
+				clki->curr_freq = clki->min_freq;
+			}
+		}
+		dev_dbg(hba->dev, "%s: clk: %s, rate: %lu\n", __func__,
+				clki->name, clk_get_rate(clki->clk));
+	}
+	if (hba->vops && hba->vops->clk_scale_notify)
+		hba->vops->clk_scale_notify(hba);
+out:
+	return ret;
+}
+
+static int ufshcd_devfreq_target(struct device *dev,
+				unsigned long *freq, u32 flags)
+{
+	int err = 0;
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return -EINVAL;
+
+	if (*freq == UINT_MAX)
+		err = ufshcd_scale_clks(hba, true);
+	else if (*freq == 0)
+		err = ufshcd_scale_clks(hba, false);
+
+	return err;
+}
+
+static int ufshcd_devfreq_get_dev_status(struct device *dev,
+		struct devfreq_dev_status *stat)
+{
+	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_clk_scaling *scaling = &hba->clk_scaling;
+	unsigned long flags;
+
+	if (!ufshcd_is_clkscaling_enabled(hba))
+		return -EINVAL;
+
+	memset(stat, 0, sizeof(*stat));
+
+	spin_lock_irqsave(hba->host->host_lock, flags);
+	if (!scaling->window_start_t)
+		goto start_window;
+
+	if (scaling->is_busy_started)
+		scaling->tot_busy_t += ktime_to_us(ktime_sub(ktime_get(),
+					scaling->busy_start_t));
+
+	stat->total_time = jiffies_to_usecs((long)jiffies -
+				(long)scaling->window_start_t);
+	stat->busy_time = scaling->tot_busy_t;
+start_window:
+	scaling->window_start_t = jiffies;
+	scaling->tot_busy_t = 0;
+
+	if (hba->outstanding_reqs) {
+		scaling->busy_start_t = ktime_get();
+		scaling->is_busy_started = true;
+	} else {
+		scaling->busy_start_t = ktime_set(0, 0);
+		scaling->is_busy_started = false;
+	}
+	spin_unlock_irqrestore(hba->host->host_lock, flags);
+	return 0;
+}
+
+static struct devfreq_dev_profile ufs_devfreq_profile = {
+	.polling_ms	= 100,
+	.target		= ufshcd_devfreq_target,
+	.get_dev_status	= ufshcd_devfreq_get_dev_status,
+};
+
+/**
+ * ufshcd_init - Driver initialization routine
+ * @hba: per-adapter instance
+ * @mmio_base: base register address
+ * @irq: Interrupt line of device
+ * Returns 0 on success, non-zero value on failure
+ */
+int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
+{
+	int err;
+	struct Scsi_Host *host = hba->host;
+	struct device *dev = hba->dev;
+
+	if (!mmio_base) {
+		dev_err(hba->dev,
+			"Invalid memory reference for mmio_base\n");
+		err = -ENODEV;
+		goto out_error;
+	}
+
 	hba->mmio_base = mmio_base;
 	hba->irq = irq;
 
+	err = ufshcd_hba_init(hba);
+	if (err)
+		goto out_error;
+
 	/* Read capabilities registers */
 	ufshcd_hba_capabilities(hba);
 
@@ -3346,11 +5458,13 @@
 	host->can_queue = hba->nutrs;
 	host->cmd_per_lun = hba->nutrs;
 	host->max_id = UFSHCD_MAX_ID;
-	host->max_lun = UFSHCD_MAX_LUNS;
+	host->max_lun = UFS_MAX_LUNS;
 	host->max_channel = UFSHCD_MAX_CHANNEL;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = MAX_CDB_SIZE;
 
+	hba->max_pwr_info.is_valid = false;
+
 	/* Initialize wait queue for task management */
 	init_waitqueue_head(&hba->tm_wq);
 	init_waitqueue_head(&hba->tm_tag_wq);
@@ -3368,24 +5482,27 @@
 	/* Initialize device management tag acquire wait queue */
 	init_waitqueue_head(&hba->dev_cmd.tag_wq);
 
+	ufshcd_init_clk_gating(hba);
 	/* IRQ registration */
 	err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED, UFSHCD, hba);
 	if (err) {
 		dev_err(hba->dev, "request irq failed\n");
-		goto out_disable;
+		goto exit_gating;
+	} else {
+		hba->is_irq_enabled = true;
 	}
 
 	/* Enable SCSI tag mapping */
 	err = scsi_init_shared_tag_map(host, host->can_queue);
 	if (err) {
 		dev_err(hba->dev, "init shared queue failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	err = scsi_add_host(host, hba->dev);
 	if (err) {
 		dev_err(hba->dev, "scsi_add_host failed\n");
-		goto out_disable;
+		goto exit_gating;
 	}
 
 	/* Host controller enable */
@@ -3395,19 +5512,40 @@
 		goto out_remove_scsi_host;
 	}
 
-	*hba_handle = hba;
+	if (ufshcd_is_clkscaling_enabled(hba)) {
+		hba->devfreq = devfreq_add_device(dev, &ufs_devfreq_profile,
+						   "simple_ondemand", NULL);
+		if (IS_ERR(hba->devfreq)) {
+			dev_err(hba->dev, "Unable to register with devfreq %ld\n",
+					PTR_ERR(hba->devfreq));
+			err = PTR_ERR(hba->devfreq);
+			goto out_remove_scsi_host;
+		}
+		/* Suspend devfreq until the UFS device is detected */
+		devfreq_suspend_device(hba->devfreq);
+		hba->clk_scaling.window_start_t = 0;
+	}
 
 	/* Hold auto suspend until async scan completes */
 	pm_runtime_get_sync(dev);
 
+	/*
+	 * The device initialization sequence hasn't been invoked yet.
+	 * Set the device to power-off state.
+	 */
+	ufshcd_set_ufs_dev_poweroff(hba);
+
 	async_schedule(ufshcd_async_scan, hba);
 
 	return 0;
 
 out_remove_scsi_host:
 	scsi_remove_host(hba->host);
+exit_gating:
+	ufshcd_exit_clk_gating(hba);
 out_disable:
+	hba->is_irq_enabled = false;
 	scsi_host_put(host);
+	ufshcd_hba_exit(hba);
 out_error:
 	return err;
 }
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index acf318e..58ecdff 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -52,6 +52,7 @@
 #include <linux/pm_runtime.h>
 #include <linux/clk.h>
 #include <linux/completion.h>
+#include <linux/regulator/consumer.h>
 
 #include <asm/irq.h>
 #include <asm/byteorder.h>
@@ -68,6 +69,8 @@
 #define UFSHCD "ufshcd"
 #define UFSHCD_DRIVER_VERSION "0.2"
 
+struct ufs_hba;
+
 enum dev_cmd_type {
 	DEV_CMD_TYPE_NOP		= 0x0,
 	DEV_CMD_TYPE_QUERY		= 0x1,
@@ -93,6 +96,54 @@
 	struct completion done;
 };
 
+/* Used to differentiate the power management options */
+enum ufs_pm_op {
+	UFS_RUNTIME_PM,
+	UFS_SYSTEM_PM,
+	UFS_SHUTDOWN_PM,
+};
+
+#define ufshcd_is_runtime_pm(op) ((op) == UFS_RUNTIME_PM)
+#define ufshcd_is_system_pm(op) ((op) == UFS_SYSTEM_PM)
+#define ufshcd_is_shutdown_pm(op) ((op) == UFS_SHUTDOWN_PM)
+
+/* Host <-> Device UniPro Link state */
+enum uic_link_state {
+	UIC_LINK_OFF_STATE	= 0, /* Link powered down or disabled */
+	UIC_LINK_ACTIVE_STATE	= 1, /* Link is in Fast/Slow/Sleep state */
+	UIC_LINK_HIBERN8_STATE	= 2, /* Link is in Hibernate state */
+};
+
+#define ufshcd_is_link_off(hba) ((hba)->uic_link_state == UIC_LINK_OFF_STATE)
+#define ufshcd_is_link_active(hba) ((hba)->uic_link_state == \
+				    UIC_LINK_ACTIVE_STATE)
+#define ufshcd_is_link_hibern8(hba) ((hba)->uic_link_state == \
+				    UIC_LINK_HIBERN8_STATE)
+#define ufshcd_set_link_off(hba) ((hba)->uic_link_state = UIC_LINK_OFF_STATE)
+#define ufshcd_set_link_active(hba) ((hba)->uic_link_state = \
+				    UIC_LINK_ACTIVE_STATE)
+#define ufshcd_set_link_hibern8(hba) ((hba)->uic_link_state = \
+				    UIC_LINK_HIBERN8_STATE)
+
+/*
+ * UFS Power management levels.
+ * Each level is in increasing order of power savings.
+ */
+enum ufs_pm_level {
+	UFS_PM_LVL_0, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+	UFS_PM_LVL_1, /* UFS_ACTIVE_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_2, /* UFS_SLEEP_PWR_MODE, UIC_LINK_ACTIVE_STATE */
+	UFS_PM_LVL_3, /* UFS_SLEEP_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_4, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE */
+	UFS_PM_LVL_5, /* UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE */
+	UFS_PM_LVL_MAX
+};
+
+struct ufs_pm_lvl_states {
+	enum ufs_dev_pwr_mode dev_state;
+	enum uic_link_state link_state;
+};
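
[Editor's sketch] The ufs_get_pm_lvl_to_dev_pwr_mode()/ufs_get_pm_lvl_to_link_pwr_state() helpers used by ufshcd_suspend() presumably index a table of these pairs; a sketch consistent with the ufs_pm_level comments above:

    /* Sketch: lookup table indexed by enum ufs_pm_level. */
    static struct ufs_pm_lvl_states ufs_pm_lvl_states[] = {
            {UFS_ACTIVE_PWR_MODE,    UIC_LINK_ACTIVE_STATE},   /* LVL_0 */
            {UFS_ACTIVE_PWR_MODE,    UIC_LINK_HIBERN8_STATE},  /* LVL_1 */
            {UFS_SLEEP_PWR_MODE,     UIC_LINK_ACTIVE_STATE},   /* LVL_2 */
            {UFS_SLEEP_PWR_MODE,     UIC_LINK_HIBERN8_STATE},  /* LVL_3 */
            {UFS_POWERDOWN_PWR_MODE, UIC_LINK_HIBERN8_STATE},  /* LVL_4 */
            {UFS_POWERDOWN_PWR_MODE, UIC_LINK_OFF_STATE},      /* LVL_5 */
    };

    #define ufs_get_pm_lvl_to_dev_pwr_mode(lvl) \
            (ufs_pm_lvl_states[lvl].dev_state)
    #define ufs_get_pm_lvl_to_link_pwr_state(lvl) \
            (ufs_pm_lvl_states[lvl].link_state)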
+
 /**
  * struct ufshcd_lrb - local reference block
  * @utr_descriptor_ptr: UTRD address of the command
@@ -121,7 +172,7 @@
 
 	int command_type;
 	int task_tag;
-	unsigned int lun;
+	u8 lun; /* UPIU LUN id field is only 8-bit wide */
 	bool intr_cmd;
 };
 
@@ -153,6 +204,126 @@
 };
 
 /**
+ * struct ufs_clk_info - UFS clock related info
+ * @list: list headed by hba->clk_list_head
+ * @clk: clock node
+ * @name: clock name
+ * @max_freq: maximum frequency supported by the clock
+ * @min_freq: min frequency that can be used for clock scaling
+ * @curr_freq: frequency the clock is currently set to
+ * @enabled: tracks the enable state to guard against unbalanced enable/disable
+ */
+struct ufs_clk_info {
+	struct list_head list;
+	struct clk *clk;
+	const char *name;
+	u32 max_freq;
+	u32 min_freq;
+	u32 curr_freq;
+	bool enabled;
+};
+
+#define PRE_CHANGE      0
+#define POST_CHANGE     1
+
+struct ufs_pa_layer_attr {
+	u32 gear_rx;
+	u32 gear_tx;
+	u32 lane_rx;
+	u32 lane_tx;
+	u32 pwr_rx;
+	u32 pwr_tx;
+	u32 hs_rate;
+};
+
+struct ufs_pwr_mode_info {
+	bool is_valid;
+	struct ufs_pa_layer_attr info;
+};
+
+/**
+ * struct ufs_hba_variant_ops - variant specific callbacks
+ * @name: variant name
+ * @init: called when the driver is initialized
+ * @exit: called to cleanup everything done in init
+ * @clk_scale_notify: notifies that clks are scaled up/down
+ * @setup_clocks: called before touching any of the controller registers
+ * @setup_regulators: called before accessing the host controller
+ * @hce_enable_notify: called before and after HCE enable bit is set to allow
+ *                     variant specific UniPro initialization.
+ * @link_startup_notify: called before and after Link startup is carried out
+ *                       to allow variant specific UniPro initialization.
+ * @pwr_change_notify: called before and after a power mode change
+ *			is carried out to allow vendor specific capabilities
+ *			to be set.
+ * @suspend: called during host controller PM callback
+ * @resume: called during host controller PM callback
+ */
+struct ufs_hba_variant_ops {
+	const char *name;
+	int	(*init)(struct ufs_hba *);
+	void    (*exit)(struct ufs_hba *);
+	void    (*clk_scale_notify)(struct ufs_hba *);
+	int     (*setup_clocks)(struct ufs_hba *, bool);
+	int     (*setup_regulators)(struct ufs_hba *, bool);
+	int     (*hce_enable_notify)(struct ufs_hba *, bool);
+	int     (*link_startup_notify)(struct ufs_hba *, bool);
+	int	(*pwr_change_notify)(struct ufs_hba *,
+					bool, struct ufs_pa_layer_attr *,
+					struct ufs_pa_layer_attr *);
+	int     (*suspend)(struct ufs_hba *, enum ufs_pm_op);
+	int     (*resume)(struct ufs_hba *, enum ufs_pm_op);
+};
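
[Editor's sketch] A variant driver fills in only the hooks it needs; a minimal hypothetical implementation:

    /* Hypothetical variant: no-op init/exit plus a clock hook. */
    static int example_hba_init(struct ufs_hba *hba)
    {
            return 0;       /* claim resources, set hba->caps, etc. */
    }

    static void example_hba_exit(struct ufs_hba *hba)
    {
    }

    static int example_setup_clocks(struct ufs_hba *hba, bool on)
    {
            return 0;       /* gate/ungate vendor-side clocks here */
    }

    static struct ufs_hba_variant_ops example_hba_vops = {
            .name           = "example",
            .init           = example_hba_init,
            .exit           = example_hba_exit,
            .setup_clocks   = example_setup_clocks,
    };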
+
+/* clock gating state  */
+enum clk_gating_state {
+	CLKS_OFF,
+	CLKS_ON,
+	REQ_CLKS_OFF,
+	REQ_CLKS_ON,
+};
+
+/**
+ * struct ufs_clk_gating - UFS clock gating related info
+ * @gate_work: worker to turn off clocks after some delay as specified in
+ * delay_ms
+ * @ungate_work: worker to turn on clocks; needed when ungating has to be
+ * requested from interrupt context
+ * @state: the current clocks state
+ * @delay_ms: gating delay in ms
+ * @is_suspended: set while clk gating is suspended, which is used during
+ * suspend/resume
+ * @delay_attr: sysfs attribute to control delay_ms
+ * @active_reqs: number of requests that are pending and should be waited for
+ * completion before gating clocks.
+ */
+struct ufs_clk_gating {
+	struct delayed_work gate_work;
+	struct work_struct ungate_work;
+	enum clk_gating_state state;
+	unsigned long delay_ms;
+	bool is_suspended;
+	struct device_attribute delay_attr;
+	int active_reqs;
+};
+
+struct ufs_clk_scaling {
+	ktime_t  busy_start_t;
+	bool is_busy_started;
+	unsigned long  tot_busy_t;
+	unsigned long window_start_t;
+};
+
+/**
+ * struct ufs_init_prefetch - contains data that is pre-fetched once during
+ * initialization
+ * @icc_level: icc level which was read during initialization
+ */
+struct ufs_init_prefetch {
+	u32 icc_level;
+};
+
+/**
  * struct ufs_hba - per adapter private structure
  * @mmio_base: UFSHCI base register address
  * @ucdl_base_addr: UFS Command Descriptor base address
@@ -171,6 +342,8 @@
  * @nutrs: Transfer Request Queue depth supported by controller
  * @nutmrs: Task Management Queue depth supported by controller
  * @ufs_version: UFS Version to which controller complies
+ * @vops: pointer to variant specific operations
+ * @priv: pointer to variant specific private data
  * @irq: Irq number of the controller
  * @active_uic_cmd: handle of active UIC command
  * @uic_cmd_mutex: mutex for uic command
@@ -183,6 +356,9 @@
  * @eh_flags: Error handling flags
  * @intr_mask: Interrupt Mask Bits
  * @ee_ctrl_mask: Exception event control mask
+ * @is_powered: flag to check if HBA is powered
+ * @is_init_prefetch: flag to check if data was pre-fetched in initialization
+ * @init_prefetch_data: data pre-fetched during initialization
  * @eh_work: Worker to handle UFS errors that require s/w attention
  * @eeh_work: Worker to handle exception events
  * @errors: HBA errors
@@ -191,6 +367,10 @@
  * @saved_uic_err: sticky UIC error mask
  * @dev_cmd: ufs device management command information
  * @auto_bkops_enabled: to track whether bkops is enabled in device
+ * @vreg_info: UFS device voltage regulator information
+ * @clk_list_head: UFS host controller clocks list node head
+ * @pwr_info: holds current power mode
+ * @max_pwr_info: keeps the device's maximum valid power mode
  */
 struct ufs_hba {
 	void __iomem *mmio_base;
@@ -207,6 +387,21 @@
 
 	struct Scsi_Host *host;
 	struct device *dev;
+	/*
+	 * This field is to keep a reference to "scsi_device" corresponding to
+	 * "UFS device" W-LU.
+	 */
+	struct scsi_device *sdev_ufs_device;
+	struct scsi_device *sdev_rpmb;
+	struct scsi_device *sdev_boot;
+
+	enum ufs_dev_pwr_mode curr_dev_pwr_mode;
+	enum uic_link_state uic_link_state;
+	/* Desired UFS power management level during runtime PM */
+	enum ufs_pm_level rpm_lvl;
+	/* Desired UFS power management level during system PM */
+	enum ufs_pm_level spm_lvl;
+	int pm_op_in_progress;
 
 	struct ufshcd_lrb *lrb;
 	unsigned long lrb_in_use;
@@ -218,22 +413,28 @@
 	int nutrs;
 	int nutmrs;
 	u32 ufs_version;
+	struct ufs_hba_variant_ops *vops;
+	void *priv;
 	unsigned int irq;
+	bool is_irq_enabled;
 
-	struct uic_command *active_uic_cmd;
-	struct mutex uic_cmd_mutex;
 
 	wait_queue_head_t tm_wq;
 	wait_queue_head_t tm_tag_wq;
 	unsigned long tm_condition;
 	unsigned long tm_slots_in_use;
 
-	struct completion *pwr_done;
+	struct uic_command *active_uic_cmd;
+	struct mutex uic_cmd_mutex;
+	struct completion *uic_async_done;
 
 	u32 ufshcd_state;
 	u32 eh_flags;
 	u32 intr_mask;
 	u16 ee_ctrl_mask;
+	bool is_powered;
+	bool is_init_prefetch;
+	struct ufs_init_prefetch init_prefetch_data;
 
 	/* Work Queues */
 	struct work_struct eh_work;
@@ -248,16 +449,76 @@
 	/* Device management request data */
 	struct ufs_dev_cmd dev_cmd;
 
+	/* Keeps information of the UFS device connected to this host */
+	struct ufs_dev_info dev_info;
 	bool auto_bkops_enabled;
+	struct ufs_vreg_info vreg_info;
+	struct list_head clk_list_head;
+
+	bool wlun_dev_clr_ua;
+
+	struct ufs_pa_layer_attr pwr_info;
+	struct ufs_pwr_mode_info max_pwr_info;
+
+	struct ufs_clk_gating clk_gating;
+	/* Control to enable/disable host capabilities */
+	u32 caps;
+	/* Allow dynamic clk gating */
+#define UFSHCD_CAP_CLK_GATING	(1 << 0)
+	/* Allow hibern8 with clk gating */
+#define UFSHCD_CAP_HIBERN8_WITH_CLK_GATING (1 << 1)
+	/* Allow dynamic clk scaling */
+#define UFSHCD_CAP_CLK_SCALING	(1 << 2)
+	/* Allow auto bkops to be enabled during runtime suspend */
+#define UFSHCD_CAP_AUTO_BKOPS_SUSPEND (1 << 3)
+
+	struct devfreq *devfreq;
+	struct ufs_clk_scaling clk_scaling;
+	bool is_sys_suspended;
 };
 
+/* Returns true if clocks can be gated. Otherwise false */
+static inline bool ufshcd_is_clkgating_allowed(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_GATING;
+}
+static inline bool ufshcd_can_hibern8_during_gating(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
+}
+static inline int ufshcd_is_clkscaling_enabled(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_CLK_SCALING;
+}
+static inline bool ufshcd_can_autobkops_during_suspend(struct ufs_hba *hba)
+{
+	return hba->caps & UFSHCD_CAP_AUTO_BKOPS_SUSPEND;
+}
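
[Editor's sketch] These caps are opt-in per host; a variant's ->init() hook might enable them like so (hypothetical):

    /* Inside a variant ->init() hook, before requests start flowing. */
    hba->caps |= UFSHCD_CAP_CLK_GATING | UFSHCD_CAP_HIBERN8_WITH_CLK_GATING |
                 UFSHCD_CAP_CLK_SCALING | UFSHCD_CAP_AUTO_BKOPS_SUSPEND;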
+
 #define ufshcd_writel(hba, val, reg)	\
 	writel((val), (hba)->mmio_base + (reg))
 #define ufshcd_readl(hba, reg)	\
 	readl((hba)->mmio_base + (reg))
 
-int ufshcd_init(struct device *, struct ufs_hba ** , void __iomem * ,
-			unsigned int);
+/**
+ * ufshcd_rmwl - read modify write into a register
+ * @hba: per adapter instance
+ * @mask: mask to apply on read value
+ * @val: actual value to write
+ * @reg: register address
+ */
+static inline void ufshcd_rmwl(struct ufs_hba *hba, u32 mask, u32 val, u32 reg)
+{
+	u32 tmp;
+
+	tmp = ufshcd_readl(hba, reg);
+	tmp &= ~mask;
+	tmp |= (val & mask);
+	ufshcd_writel(hba, tmp, reg);
+}
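
[Editor's sketch] For example, updating one field of a register while preserving its neighbours (register name hypothetical):

    /* Set bits 3:0 of EXAMPLE_CONFIG_REG to 0x5, leaving bits 31:4 intact. */
    ufshcd_rmwl(hba, 0xF, 0x5, EXAMPLE_CONFIG_REG);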
+
+int ufshcd_alloc_host(struct device *, struct ufs_hba **);
+int ufshcd_init(struct ufs_hba *, void __iomem *, unsigned int);
 void ufshcd_remove(struct ufs_hba *);
 
 /**
@@ -275,11 +536,12 @@
 		GENERAL_UPIU_REQUEST_SIZE + QUERY_DESC_MAX_SIZE);
 }
 
-extern int ufshcd_suspend(struct ufs_hba *hba, pm_message_t state);
-extern int ufshcd_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_suspend(struct ufs_hba *hba);
 extern int ufshcd_runtime_resume(struct ufs_hba *hba);
 extern int ufshcd_runtime_idle(struct ufs_hba *hba);
+extern int ufshcd_system_suspend(struct ufs_hba *hba);
+extern int ufshcd_system_resume(struct ufs_hba *hba);
+extern int ufshcd_shutdown(struct ufs_hba *hba);
 extern int ufshcd_dme_set_attr(struct ufs_hba *hba, u32 attr_sel,
 			       u8 attr_set, u32 mib_val, u8 peer);
 extern int ufshcd_dme_get_attr(struct ufs_hba *hba, u32 attr_sel,
@@ -331,4 +593,6 @@
 	return ufshcd_dme_get_attr(hba, attr_sel, mib_val, DME_PEER);
 }
 
+int ufshcd_hold(struct ufs_hba *hba, bool async);
+void ufshcd_release(struct ufs_hba *hba);
 #endif /* End of Header */
diff --git a/drivers/scsi/ufs/ufshci.h b/drivers/scsi/ufs/ufshci.h
index e1b844b..d572119 100644
--- a/drivers/scsi/ufs/ufshci.h
+++ b/drivers/scsi/ufs/ufshci.h
@@ -124,8 +124,11 @@
 #define CONTROLLER_FATAL_ERROR			UFS_BIT(16)
 #define SYSTEM_BUS_FATAL_ERROR			UFS_BIT(17)
 
-#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL |\
-				 UIC_POWER_MODE)
+#define UFSHCD_UIC_PWR_MASK	(UIC_HIBERNATE_ENTER |\
+				UIC_HIBERNATE_EXIT |\
+				UIC_POWER_MODE)
+
+#define UFSHCD_UIC_MASK		(UIC_COMMAND_COMPL | UFSHCD_UIC_PWR_MASK)
 
 #define UFSHCD_ERROR_MASK	(UIC_ERROR |\
 				DEVICE_FATAL_ERROR |\
@@ -210,7 +213,7 @@
 #define UIC_GET_ATTR_ID(v)		(((v) >> 16) & 0xFFFF)
 
 /* UIC Commands */
-enum {
+enum uic_cmd_dme {
 	UIC_CMD_DME_GET			= 0x01,
 	UIC_CMD_DME_SET			= 0x02,
 	UIC_CMD_DME_PEER_GET		= 0x03,
diff --git a/drivers/scsi/ufs/unipro.h b/drivers/scsi/ufs/unipro.h
index 0bb8041..3fc3e21 100644
--- a/drivers/scsi/ufs/unipro.h
+++ b/drivers/scsi/ufs/unipro.h
@@ -13,6 +13,44 @@
 #define _UNIPRO_H_
 
 /*
+ * M-TX Configuration Attributes
+ */
+#define TX_MODE					0x0021
+#define TX_HSRATE_SERIES			0x0022
+#define TX_HSGEAR				0x0023
+#define TX_PWMGEAR				0x0024
+#define TX_AMPLITUDE				0x0025
+#define TX_HS_SLEWRATE				0x0026
+#define TX_SYNC_SOURCE				0x0027
+#define TX_HS_SYNC_LENGTH			0x0028
+#define TX_HS_PREPARE_LENGTH			0x0029
+#define TX_LS_PREPARE_LENGTH			0x002A
+#define TX_HIBERN8_CONTROL			0x002B
+#define TX_LCC_ENABLE				0x002C
+#define TX_PWM_BURST_CLOSURE_EXTENSION		0x002D
+#define TX_BYPASS_8B10B_ENABLE			0x002E
+#define TX_DRIVER_POLARITY			0x002F
+#define TX_HS_UNTERMINATED_LINE_DRIVE_ENABLE	0x0030
+#define TX_LS_TERMINATED_LINE_DRIVE_ENABLE	0x0031
+#define TX_LCC_SEQUENCER			0x0032
+#define TX_MIN_ACTIVATETIME			0x0033
+#define TX_PWM_G6_G7_SYNC_LENGTH		0x0034
+
+/*
+ * M-RX Configuration Attributes
+ */
+#define RX_MODE					0x00A1
+#define RX_HSRATE_SERIES			0x00A2
+#define RX_HSGEAR				0x00A3
+#define RX_PWMGEAR				0x00A4
+#define RX_LS_TERMINATED_ENABLE			0x00A5
+#define RX_HS_UNTERMINATED_ENABLE		0x00A6
+#define RX_ENTER_HIBERN8			0x00A7
+#define RX_BYPASS_8B10B_ENABLE			0x00A8
+#define RX_TERMINATION_FORCE_ENABLE		0x0089
+
+#define is_mphy_tx_attr(attr)			(attr < RX_MODE)
+/*
  * PHY Adapter attributes
  */
 #define PA_ACTIVETXDATALANES	0x1560
@@ -87,6 +125,24 @@
 	PA_HS_MODE_B	= 2,
 };
 
+enum ufs_pwm_gear_tag {
+	UFS_PWM_DONT_CHANGE,	/* Don't change Gear */
+	UFS_PWM_G1,		/* PWM Gear 1 (default for reset) */
+	UFS_PWM_G2,		/* PWM Gear 2 */
+	UFS_PWM_G3,		/* PWM Gear 3 */
+	UFS_PWM_G4,		/* PWM Gear 4 */
+	UFS_PWM_G5,		/* PWM Gear 5 */
+	UFS_PWM_G6,		/* PWM Gear 6 */
+	UFS_PWM_G7,		/* PWM Gear 7 */
+};
+
+enum ufs_hs_gear_tag {
+	UFS_HS_DONT_CHANGE,	/* Don't change Gear */
+	UFS_HS_G1,		/* HS Gear 1 (default for reset) */
+	UFS_HS_G2,		/* HS Gear 2 */
+	UFS_HS_G3,		/* HS Gear 3 */
+};
+
 /*
  * Data Link Layer Attributes
  */
diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c
index cedb292..bf3f8e2 100644
--- a/drivers/usb/storage/usb.c
+++ b/drivers/usb/storage/usb.c
@@ -983,6 +983,14 @@
 	if (!(us->fflags & US_FL_SCM_MULT_TARG))
 		us_to_host(us)->max_id = 1;
 
+	/*
+	 * Like Windows, we won't store the LUN bits in CDB[1] for SCSI-2
+	 * devices using the Bulk-Only transport (even though this violates
+	 * the SCSI spec).
+	 */
+	if (us->transport == usb_stor_Bulk_transport)
+		us_to_host(us)->no_scsi2_lun_in_cdb = 1;
+
 	/* Find the endpoints and calculate pipe values */
 	result = get_pipes(us);
 	if (result)
diff --git a/include/scsi/scsi.h b/include/scsi/scsi.h
index 261e708..d17178e 100644
--- a/include/scsi/scsi.h
+++ b/include/scsi/scsi.h
@@ -333,6 +333,7 @@
 #define TYPE_RBC	    0x0e
 #define TYPE_OSD            0x11
 #define TYPE_ZBC            0x14
+#define TYPE_WLUN           0x1e    /* well-known logical unit */
 #define TYPE_NO_LUN         0x7f
 
 /* SCSI protocols; these are taken from SPC-3 section 7.5 */
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 1a0d184..27ecee7 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -174,6 +174,7 @@
 	unsigned wce_default_on:1;	/* Cache is ON by default */
 	unsigned no_dif:1;	/* T10 PI (DIF) should be disabled */
 	unsigned broken_fua:1;		/* Don't set FUA bit */
+	unsigned lun_in_cdb:1;		/* Store LUN bits in CDB[1] */
 
 	atomic_t disk_events_disable_depth; /* disable depth for disk events */
 
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index ba20347..5e36248 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -606,7 +606,7 @@
 	/*
 	 * These three parameters can be used to allow for wide scsi,
 	 * and for host adapters that support multiple busses
-	 * The first two should be set to 1 more than the actual max id
+	 * The last two should be set to 1 more than the actual max id
 	 * or lun (e.g. 8 for SCSI parallel systems).
 	 */
 	unsigned int max_channel;
@@ -680,6 +680,7 @@
 	unsigned no_write_same:1;
 
 	unsigned use_blk_mq:1;
+	unsigned use_cmd_list:1;
 
 	/*
 	 * Optional work queue to be utilized by the transport
@@ -692,6 +693,9 @@
 	 */
 	struct workqueue_struct *tmf_work_q;
 
+	/* The transport requires the LUN bits NOT to be stored in CDB[1] */
+	unsigned no_scsi2_lun_in_cdb:1;
+
 	/*
 	 * Value host_blocked counts down from
 	 */