Merge branch 'rmobile/core' into rmobile-fixes-for-linus
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 8a817f6..a91f308 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -322,7 +322,6 @@
 prototypes:
 	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 	void (*fl_notify)(struct file_lock *);  /* unblock callback */
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *); /* break_lease callback */
 
@@ -330,7 +329,6 @@
 			BKL	may block
 fl_compare_owner:	yes	no
 fl_notify:		yes	no
-fl_copy_lock:		yes	no
 fl_release_private:	yes	yes
 fl_break:		yes	no
 
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index e307914..93fe76e 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -15,10 +15,14 @@
   * Intel 82801I (ICH9)
   * Intel EP80579 (Tolapai)
   * Intel 82801JI (ICH10)
-  * Intel 3400/5 Series (PCH)
+  * Intel 5/3400 Series (PCH)
   * Intel Cougar Point (PCH)
+  * Intel Patsburg (PCH)
    Datasheets: Publicly available at the Intel website
 
+On Intel Patsburg and later chipsets, both the normal host SMBus controller
+and the additional 'Integrated Device Function' controllers are supported.
+
 Authors: 
 	Mark Studebaker <mdsxyz123@yahoo.com>
 	Jean Delvare <khali@linux-fr.org>
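
A minimal sketch of how a client driver might talk to one of these SMBus
controllers (illustrative only: the register offset and client setup are
hypothetical; i2c_smbus_read_byte_data() is the standard kernel helper, which
the i801 driver executes as an I801_BYTE_DATA transaction):

#include <linux/i2c.h>

/* Read one byte-sized register from a device behind an i801 adapter,
 * including a Patsburg 'Integrated Device Function' bus. */
static int example_read_reg(struct i2c_client *client, u8 reg)
{
	s32 val = i2c_smbus_read_byte_data(client, reg);

	if (val < 0)
		return val;		/* negative errno from the adapter */
	return val & 0xff;
}
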
diff --git a/Documentation/scsi/ChangeLog.megaraid_sas b/Documentation/scsi/ChangeLog.megaraid_sas
index 3002356..00301ed 100644
--- a/Documentation/scsi/ChangeLog.megaraid_sas
+++ b/Documentation/scsi/ChangeLog.megaraid_sas
@@ -1,3 +1,50 @@
+1 Release Date    : Thur.  May 03, 2010 09:12:45 PST 2009 -
+			(emaild-id:megaraidlinux@lsi.com)
+			Bo Yang
+
+2 Current Version : 00.00.04.31-rc1
+3 Older Version   : 00.00.04.17.1-rc1
+
+1.	Add Online Controller Reset (OCR) support to the driver.
+	OCR is a new feature of the megaraid_sas driver that
+	allows the FW to do a chip reset without
+	affecting OS behavior.
+
+	To support OCR, the driver needs to:
+		a). Reset the controller chips (Xscale and Gen2), which
+		changes the function calls and adds the reset functions
+		related to these two chips.
+
+		b). During the reset, store the pending cmds that have
+		not been returned by the FW in the driver's pending queue,
+		and re-issue those pending cmds to the FW after the OCR
+		has finished.
+
+		c). In the driver's timeout routine, report the event to
+		the OS as a reset.  The driver's queue routine also blocks
+		new cmds until the OCR has finished.
+
+		d). In the driver's ISR routine, if the FW state reports a
+		state change, the FW is in the Failure state and the FW
+		supports OCR, start the controller reset.
+
+		e). In the driver's IOCTL routine, make application cmds
+		wait for the OCR to finish before issuing them to the FW.
+
+		f). Before killing the adapter, attempt one last OCR to
+		see if the FW can be brought back.
+
+2.	Add a device-update support flag to the driver to tell the LSI
+	megaraid_sas application that the driver handles device updates,
+	so the application no longer needs to do a device update after it
+	adds/deletes a device from the system.
+3.	In the driver's timeout routine, do up to three resets if the FW
+	is in the failed state.  Kill the adapter if the FW cannot be
+	brought back after these three resets.
+4.	Add the input parameter max_sectors to enable 1MB support on the
+	GEN2 controller.  Customers can use the input parameter max_sectors
+	to add 1MB support to the GEN2 controller.
+
 1 Release Date    : Thur.  Oct 29, 2009 09:12:45 PST 2009 -
 			(emaild-id:megaraidlinux@lsi.com)
 			Bo Yang
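
A rough sketch of the pending-queue idea in item 1(b) above.  The structure
and helper names are illustrative only (not the actual megaraid_sas code);
the list primitives are the standard <linux/list.h> API:

#include <linux/list.h>

/* Hypothetical bookkeeping for a cmd that was outstanding when OCR began. */
struct example_cmd {
	struct list_head list;
	/* ... FW frame, associated scsi_cmnd, etc. ... */
};

static LIST_HEAD(example_pending);

/* Called for each cmd the FW has not returned when the reset starts. */
static void example_save_pending(struct example_cmd *cmd)
{
	list_add_tail(&cmd->list, &example_pending);
}

/* Called after the OCR finishes: re-issue the saved cmds in order. */
static void example_reissue_pending(void (*fire_to_fw)(struct example_cmd *))
{
	struct example_cmd *cmd, *tmp;

	list_for_each_entry_safe(cmd, tmp, &example_pending, list) {
		list_del(&cmd->list);
		fire_to_fw(cmd);	/* hypothetical "issue to FW" hook */
	}
}
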
diff --git a/MAINTAINERS b/MAINTAINERS
index cb8b580..0094224 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1757,6 +1757,7 @@
 W:	http://developer.axis.com
 S:	Maintained
 F:	arch/cris/
+F:	drivers/serial/crisv10.*
 
 CRYPTO API
 M:	Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/Makefile b/Makefile
index 519db43..6619720 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
-SUBLEVEL = 36
-EXTRAVERSION =
+SUBLEVEL = 37
+EXTRAVERSION = -rc1
 NAME = Flesh-Eating Bats with Fangs
 
 # *DOCUMENTATION*
diff --git a/arch/arm/mach-u300/spi.c b/arch/arm/mach-u300/spi.c
index edb2c0d..00869de 100644
--- a/arch/arm/mach-u300/spi.c
+++ b/arch/arm/mach-u300/spi.c
@@ -67,7 +67,7 @@
 		.bus_num        = 0, /* Only one bus on this chip */
 		.chip_select    = 0,
 		/* Means SPI_CS_HIGH, change if e.g low CS */
-		.mode           = SPI_MODE_1 | SPI_LSB_FIRST | SPI_LOOP,
+		.mode           = SPI_MODE_1 | SPI_LOOP,
 	},
 #endif
 };
diff --git a/arch/m68k/include/asm/irqflags.h b/arch/m68k/include/asm/irqflags.h
index 4a5b284..7ef4115 100644
--- a/arch/m68k/include/asm/irqflags.h
+++ b/arch/m68k/include/asm/irqflags.h
@@ -2,7 +2,9 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
+#ifdef CONFIG_MMU
 #include <linux/hardirq.h>
+#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
diff --git a/arch/m68k/include/asm/machdep.h b/arch/m68k/include/asm/machdep.h
index 789f3b2..415d548 100644
--- a/arch/m68k/include/asm/machdep.h
+++ b/arch/m68k/include/asm/machdep.h
@@ -40,5 +40,6 @@
 extern irqreturn_t arch_timer_interrupt(int irq, void *dummy);
 
 extern void config_BSP(char *command, int len);
+extern void do_IRQ(int irq, struct pt_regs *fp);
 
 #endif /* _M68K_MACHDEP_H */
diff --git a/arch/powerpc/kernel/kvm.c b/arch/powerpc/kernel/kvm.c
index 428d0e5..b06bdae 100644
--- a/arch/powerpc/kernel/kvm.c
+++ b/arch/powerpc/kernel/kvm.c
@@ -127,7 +127,7 @@
 
 static void kvm_patch_ins_b(u32 *inst, int addr)
 {
-#ifdef CONFIG_RELOCATABLE
+#if defined(CONFIG_RELOCATABLE) && defined(CONFIG_PPC_BOOK3S)
 	/* On relocatable kernels interrupts handlers and our code
 	   can be in different regions, so we don't patch them */
 
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index 0498469..1cc471f 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -416,7 +416,7 @@
 	lwz	r3, VCPU_PC(r4)
 	mtsrr0	r3
 	lwz	r3, VCPU_SHARED(r4)
-	lwz	r3, VCPU_SHARED_MSR(r3)
+	lwz	r3, (VCPU_SHARED_MSR + 4)(r3)
 	oris	r3, r3, KVMPPC_MSR_MASK@h
 	ori	r3, r3, KVMPPC_MSR_MASK@l
 	mtsrr1	r3
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index 71750f2..e3768ee 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -138,8 +138,8 @@
 	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
 
 	free_page((unsigned long)vcpu->arch.shared);
-	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kvm_vcpu_uninit(vcpu);
+	kvmppc_e500_tlb_uninit(vcpu_e500);
 	kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
 }
 
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 2f87a16..38f756f 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -617,6 +617,7 @@
 	switch (ioctl) {
 	case KVM_PPC_GET_PVINFO: {
 		struct kvm_ppc_pvinfo pvinfo;
+		memset(&pvinfo, 0, sizeof(pvinfo));
 		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
 		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
 			r = -EFAULT;
diff --git a/arch/powerpc/kvm/timing.c b/arch/powerpc/kvm/timing.c
index 46fa04f..a021f58 100644
--- a/arch/powerpc/kvm/timing.c
+++ b/arch/powerpc/kvm/timing.c
@@ -35,7 +35,6 @@
 	int i;
 
 	/* pause guest execution to avoid concurrent updates */
-	local_irq_disable();
 	mutex_lock(&vcpu->mutex);
 
 	vcpu->arch.last_exit_type = 0xDEAD;
@@ -51,7 +50,6 @@
 	vcpu->arch.timing_last_enter.tv64 = 0;
 
 	mutex_unlock(&vcpu->mutex);
-	local_irq_enable();
 }
 
 static void add_exit_timing(struct kvm_vcpu *vcpu, u64 duration, int type)
diff --git a/arch/tile/include/asm/highmem.h b/arch/tile/include/asm/highmem.h
index e0f7ee18..b2a6c5d 100644
--- a/arch/tile/include/asm/highmem.h
+++ b/arch/tile/include/asm/highmem.h
@@ -23,7 +23,6 @@
 
 #include <linux/interrupt.h>
 #include <linux/threads.h>
-#include <asm/kmap_types.h>
 #include <asm/tlbflush.h>
 #include <asm/homecache.h>
 
diff --git a/arch/tile/include/asm/kmap_types.h b/arch/tile/include/asm/kmap_types.h
index 1480106..3d0f202 100644
--- a/arch/tile/include/asm/kmap_types.h
+++ b/arch/tile/include/asm/kmap_types.h
@@ -16,28 +16,42 @@
 #define _ASM_TILE_KMAP_TYPES_H
 
 /*
- * In TILE Linux each set of four of these uses another 16MB chunk of
- * address space, given 64 tiles and 64KB pages, so we only enable
- * ones that are required by the kernel configuration.
+ * In 32-bit TILE Linux we have to balance the desire to have a lot of
+ * nested atomic mappings with the fact that large page sizes and many
+ * processors chew up address space quickly.  In a typical
+ * 64-processor, 64KB-page layout build, making KM_TYPE_NR one larger
+ * adds 4MB of required address-space.  For now we leave KM_TYPE_NR
+ * set to depth 8.
  */
 enum km_type {
+	KM_TYPE_NR = 8
+};
+
+/*
+ * We provide dummy definitions of all the stray values that used to be
+ * required for kmap_atomic() and no longer are.
+ */
+enum {
 	KM_BOUNCE_READ,
 	KM_SKB_SUNRPC_DATA,
 	KM_SKB_DATA_SOFTIRQ,
 	KM_USER0,
 	KM_USER1,
 	KM_BIO_SRC_IRQ,
+	KM_BIO_DST_IRQ,
+	KM_PTE0,
+	KM_PTE1,
 	KM_IRQ0,
 	KM_IRQ1,
 	KM_SOFTIRQ0,
 	KM_SOFTIRQ1,
-	KM_MEMCPY0,
-	KM_MEMCPY1,
-#if defined(CONFIG_HIGHPTE)
-	KM_PTE0,
-	KM_PTE1,
-#endif
-	KM_TYPE_NR
+	KM_SYNC_ICACHE,
+	KM_SYNC_DCACHE,
+	KM_UML_USERCOPY,
+	KM_IRQ_PTE,
+	KM_NMI,
+	KM_NMI_PTE,
+	KM_KDB
 };
 
 #endif /* _ASM_TILE_KMAP_TYPES_H */
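
The 4MB figure in the comment above is just the per-CPU fixmap cost: each
kmap_atomic slot needs one page per CPU, so with 64 CPUs and 64KB pages one
extra KM_TYPE_NR slot consumes 64 * 64KB = 4MB of virtual address space.
Spelled out (illustrative arithmetic only, not part of the patch):

#define EXAMPLE_NR_CPUS		64
#define EXAMPLE_PAGE_SIZE	(64 * 1024)	/* 64KB pages */
/* Address space cost of one additional kmap_atomic slot: 4MB. */
#define EXAMPLE_SLOT_COST	(EXAMPLE_NR_CPUS * EXAMPLE_PAGE_SIZE)
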
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index dc4ccdd..a6604e9 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -344,10 +344,8 @@
 #define pgd_offset_k(address) pgd_offset(&init_mm, address)
 
 #if defined(CONFIG_HIGHPTE)
-extern pte_t *_pte_offset_map(pmd_t *, unsigned long address, enum km_type);
-#define pte_offset_map(dir, address) \
-	_pte_offset_map(dir, address, KM_PTE0)
-#define pte_unmap(pte) kunmap_atomic(pte, KM_PTE0)
+extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
+#define pte_unmap(pte) kunmap_atomic(pte)
 #else
 #define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
 #define pte_unmap(pte) do { } while (0)
diff --git a/arch/tile/include/asm/stat.h b/arch/tile/include/asm/stat.h
index 3dc90fa..b16e5db 100644
--- a/arch/tile/include/asm/stat.h
+++ b/arch/tile/include/asm/stat.h
@@ -1 +1,4 @@
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_STAT64	/* Used for compat_sys_stat64() etc. */
+#endif
 #include <asm-generic/stat.h>
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f2e3ff4..b35c2db 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -41,6 +41,7 @@
 #ifdef CONFIG_COMPAT
 #define __ARCH_WANT_SYS_LLSEEK
 #endif
+#define __ARCH_WANT_SYS_NEWFSTATAT
 #endif
 
 #endif /* _ASM_TILE_UNISTD_H */
diff --git a/arch/tile/kernel/compat.c b/arch/tile/kernel/compat.c
index 77739cd..67617a0 100644
--- a/arch/tile/kernel/compat.c
+++ b/arch/tile/kernel/compat.c
@@ -148,11 +148,11 @@
 #define compat_sys_readahead sys32_readahead
 #define compat_sys_sync_file_range compat_sys_sync_file_range2
 
-/* The native 64-bit "struct stat" matches the 32-bit "struct stat64". */
-#define compat_sys_stat64 sys_newstat
-#define compat_sys_lstat64 sys_newlstat
-#define compat_sys_fstat64 sys_newfstat
-#define compat_sys_fstatat64 sys_newfstatat
+/* We leverage the "struct stat64" type for 32-bit time_t/nsec. */
+#define compat_sys_stat64 sys_stat64
+#define compat_sys_lstat64 sys_lstat64
+#define compat_sys_fstat64 sys_fstat64
+#define compat_sys_fstatat64 sys_fstatat64
 
 /* The native sys_ptrace dynamically handles compat binaries. */
 #define compat_sys_ptrace sys_ptrace
diff --git a/arch/tile/kernel/early_printk.c b/arch/tile/kernel/early_printk.c
index 2c54fd4..493a0e6 100644
--- a/arch/tile/kernel/early_printk.c
+++ b/arch/tile/kernel/early_printk.c
@@ -54,7 +54,7 @@
 void early_panic(const char *fmt, ...)
 {
 	va_list ap;
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	va_start(ap, fmt);
 	early_printk("Kernel panic - not syncing: ");
 	early_vprintk(fmt, ap);
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 1e54a78..e910530 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -151,12 +151,12 @@
 
 static void enable_firewall_interrupts(void)
 {
-	raw_local_irq_unmask_now(INT_UDN_FIREWALL);
+	arch_local_irq_unmask_now(INT_UDN_FIREWALL);
 }
 
 static void disable_firewall_interrupts(void)
 {
-	raw_local_irq_mask_now(INT_UDN_FIREWALL);
+	arch_local_irq_mask_now(INT_UDN_FIREWALL);
 }
 
 /* Set up hardwall on this cpu based on the passed hardwall_info. */
@@ -768,13 +768,13 @@
 }
 
 static const struct file_operations dev_hardwall_fops = {
+	.open           = nonseekable_open,
 	.unlocked_ioctl = hardwall_ioctl,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl   = hardwall_compat_ioctl,
 #endif
 	.flush          = hardwall_flush,
 	.release        = hardwall_release,
-	.llseek		= noop_llseek,
 };
 
 static struct cdev hardwall_dev;
diff --git a/arch/tile/kernel/irq.c b/arch/tile/kernel/irq.c
index e639176..128805e 100644
--- a/arch/tile/kernel/irq.c
+++ b/arch/tile/kernel/irq.c
@@ -26,7 +26,7 @@
 #define IS_HW_CLEARED 1
 
 /*
- * The set of interrupts we enable for raw_local_irq_enable().
+ * The set of interrupts we enable for arch_local_irq_enable().
  * This is initialized to have just a single interrupt that the kernel
  * doesn't actually use as a sentinel.  During kernel init,
  * interrupts are added as the kernel gets prepared to support them.
@@ -225,7 +225,7 @@
 	/* Enable interrupt delivery. */
 	unmask_irqs(~0UL);
 #if CHIP_HAS_IPI()
-	raw_local_irq_unmask(INT_IPI_K);
+	arch_local_irq_unmask(INT_IPI_K);
 #endif
 }
 
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index ba7a265..0d8b9e9 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -182,13 +182,13 @@
 
 		if ((entry & IND_SOURCE)) {
 			void *va =
-				kmap_atomic_pfn(entry >> PAGE_SHIFT, KM_USER0);
+				kmap_atomic_pfn(entry >> PAGE_SHIFT);
 			r = kexec_bn2cl(va);
 			if (r) {
 				command_line = r;
 				break;
 			}
-			kunmap_atomic(va, KM_USER0);
+			kunmap_atomic(va);
 		}
 	}
 
@@ -198,7 +198,7 @@
 
 		hverr = hv_set_command_line(
 			(HV_VirtAddr) command_line, strlen(command_line));
-		kunmap_atomic(command_line, KM_USER0);
+		kunmap_atomic(command_line);
 	} else {
 		pr_info("%s: no command line found; making empty\n",
 		       __func__);
diff --git a/arch/tile/kernel/messaging.c b/arch/tile/kernel/messaging.c
index 997e393..0858ee6 100644
--- a/arch/tile/kernel/messaging.c
+++ b/arch/tile/kernel/messaging.c
@@ -34,7 +34,7 @@
 		panic("hv_register_message_state: error %d", rc);
 
 	/* Make sure downcall interrupts will be enabled. */
-	raw_local_irq_unmask(INT_INTCTRL_K);
+	arch_local_irq_unmask(INT_INTCTRL_K);
 }
 
 void hv_message_intr(struct pt_regs *regs, int intnum)
diff --git a/arch/tile/kernel/ptrace.c b/arch/tile/kernel/ptrace.c
index 9cd2988..e92e405 100644
--- a/arch/tile/kernel/ptrace.c
+++ b/arch/tile/kernel/ptrace.c
@@ -50,10 +50,10 @@
 {
 	unsigned long __user *datap = (long __user __force *)data;
 	unsigned long tmp;
-	int i;
 	long ret = -EIO;
-	unsigned long *childregs;
 	char *childreg;
+	struct pt_regs copyregs;
+	int ex1_offset;
 
 	switch (request) {
 
@@ -80,6 +80,16 @@
 		if (addr >= PTREGS_SIZE)
 			break;
 		childreg = (char *)task_pt_regs(child) + addr;
+
+		/* Guard against overwrites of the privilege level. */
+		ex1_offset = PTREGS_OFFSET_EX1;
+#if defined(CONFIG_COMPAT) && defined(__BIG_ENDIAN)
+		if (is_compat_task())   /* point at low word */
+			ex1_offset += sizeof(compat_long_t);
+#endif
+		if (addr == ex1_offset)
+			data = PL_ICS_EX1(USER_PL, EX1_ICS(data));
+
 #ifdef CONFIG_COMPAT
 		if (is_compat_task()) {
 			if (addr & (sizeof(compat_long_t)-1))
@@ -96,26 +106,19 @@
 		break;
 
 	case PTRACE_GETREGS:  /* Get all registers from the child. */
-		if (!access_ok(VERIFY_WRITE, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-				++i) {
-			ret = __put_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_to_user(datap, task_pt_regs(child),
+				 sizeof(struct pt_regs)) == 0) {
+			ret = 0;
 		}
 		break;
 
 	case PTRACE_SETREGS:  /* Set all registers in the child. */
-		if (!access_ok(VERIFY_READ, datap, PTREGS_SIZE))
-			break;
-		childregs = (long *)task_pt_regs(child);
-		for (i = 0; i < sizeof(struct pt_regs)/sizeof(unsigned long);
-				++i) {
-			ret = __get_user(childregs[i], &datap[i]);
-			if (ret != 0)
-				break;
+		if (copy_from_user(&copyregs, datap,
+				   sizeof(struct pt_regs)) == 0) {
+			copyregs.ex1 =
+				PL_ICS_EX1(USER_PL, EX1_ICS(copyregs.ex1));
+			*task_pt_regs(child) = copyregs;
+			ret = 0;
 		}
 		break;
 
diff --git a/arch/tile/kernel/reboot.c b/arch/tile/kernel/reboot.c
index acd86d2..baa3d90 100644
--- a/arch/tile/kernel/reboot.c
+++ b/arch/tile/kernel/reboot.c
@@ -27,7 +27,7 @@
 void machine_halt(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_halt();
 }
@@ -35,14 +35,14 @@
 void machine_power_off(void)
 {
 	warn_early_printk();
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_power_off();
 }
 
 void machine_restart(char *cmd)
 {
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	smp_send_stop();
 	hv_restart((HV_VirtAddr) "vmlinux", (HV_VirtAddr) cmd);
 }
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index ae51cad..fb0b3cb 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -868,14 +868,14 @@
 
 	/* Allow asynchronous TLB interrupts. */
 #if CHIP_HAS_TILE_DMA()
-	raw_local_irq_unmask(INT_DMATLB_MISS);
-	raw_local_irq_unmask(INT_DMATLB_ACCESS);
+	arch_local_irq_unmask(INT_DMATLB_MISS);
+	arch_local_irq_unmask(INT_DMATLB_ACCESS);
 #endif
 #if CHIP_HAS_SN_PROC()
-	raw_local_irq_unmask(INT_SNITLB_MISS);
+	arch_local_irq_unmask(INT_SNITLB_MISS);
 #endif
 #ifdef __tilegx__
-	raw_local_irq_unmask(INT_SINGLE_STEP_K);
+	arch_local_irq_unmask(INT_SINGLE_STEP_K);
 #endif
 
 	/*
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index fb28e85..687719d 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -71,6 +71,9 @@
 	for (i = 0; i < sizeof(struct pt_regs)/sizeof(long); ++i)
 		err |= __get_user(regs->regs[i], &sc->gregs[i]);
 
+	/* Ensure that the PL is always set to USER_PL. */
+	regs->ex1 = PL_ICS_EX1(USER_PL, EX1_ICS(regs->ex1));
+
 	regs->faultnum = INT_SWINT_1_SIGRETURN;
 
 	err |= __get_user(*pr0, &sc->gregs[0]);
@@ -330,7 +333,7 @@
 			current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		}
 
-		return;
+		goto done;
 	}
 
 	/* Did we come from a system call? */
@@ -358,4 +361,8 @@
 		current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
 		sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
 	}
+
+done:
+	/* Avoid double syscall restart if there are nested signals. */
+	regs->faultnum = INT_SWINT_1_SIGRETURN;
 }
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 75255d9..9575b37 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -115,7 +115,7 @@
 static void smp_stop_cpu_interrupt(void)
 {
 	set_cpu_online(smp_processor_id(), 0);
-	raw_local_irq_disable_all();
+	arch_local_irq_disable_all();
 	for (;;)
 		asm("nap");
 }
diff --git a/arch/tile/kernel/time.c b/arch/tile/kernel/time.c
index 6bed820..f2e156e 100644
--- a/arch/tile/kernel/time.c
+++ b/arch/tile/kernel/time.c
@@ -132,7 +132,7 @@
 {
 	BUG_ON(ticks > MAX_TICK);
 	__insn_mtspr(SPR_TILE_TIMER_CONTROL, ticks);
-	raw_local_irq_unmask_now(INT_TILE_TIMER);
+	arch_local_irq_unmask_now(INT_TILE_TIMER);
 	return 0;
 }
 
@@ -143,7 +143,7 @@
 static void tile_timer_set_mode(enum clock_event_mode mode,
 				struct clock_event_device *evt)
 {
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 }
 
 /*
@@ -172,7 +172,7 @@
 	evt->cpumask = cpumask_of(smp_processor_id());
 
 	/* Start out with timer not firing. */
-	raw_local_irq_mask_now(INT_TILE_TIMER);
+	arch_local_irq_mask_now(INT_TILE_TIMER);
 
 	/* Register tile timer. */
 	clockevents_register_device(evt);
@@ -188,7 +188,7 @@
 	 * Mask the timer interrupt here, since we are a oneshot timer
 	 * and there are now by definition no events pending.
 	 */
-	raw_local_irq_mask(INT_TILE_TIMER);
+	arch_local_irq_mask(INT_TILE_TIMER);
 
 	/* Track time spent here in an interrupt context */
 	irq_enter();
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index dfedea7..f7d4a6a 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -54,7 +54,7 @@
  * we must run with interrupts disabled to avoid the risk of some
  * other code seeing the incoherent data in our cache.  (Recall that
  * our cache is indexed by PA, so even if the other code doesn't use
- * our KM_MEMCPY virtual addresses, they'll still hit in cache using
+ * our kmap_atomic virtual addresses, they'll still hit in cache using
  * the normal VAs that aren't supposed to hit in cache.)
  */
 static void memcpy_multicache(void *dest, const void *source,
@@ -64,6 +64,7 @@
 	unsigned long flags, newsrc, newdst;
 	pmd_t *pmdp;
 	pte_t *ptep;
+	int type0, type1;
 	int cpu = get_cpu();
 
 	/*
@@ -77,7 +78,8 @@
 	sim_allow_multiple_caching(1);
 
 	/* Set up the new dest mapping */
-	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + KM_MEMCPY0;
+	type0 = kmap_atomic_idx_push();
+	idx = FIX_KMAP_BEGIN + (KM_TYPE_NR * cpu) + type0;
 	newdst = __fix_to_virt(idx) + ((unsigned long)dest & (PAGE_SIZE-1));
 	pmdp = pmd_offset(pud_offset(pgd_offset_k(newdst), newdst), newdst);
 	ptep = pte_offset_kernel(pmdp, newdst);
@@ -87,7 +89,8 @@
 	}
 
 	/* Set up the new source mapping */
-	idx += (KM_MEMCPY0 - KM_MEMCPY1);
+	type1 = kmap_atomic_idx_push();
+	idx += (type0 - type1);
 	src_pte = hv_pte_set_nc(src_pte);
 	src_pte = hv_pte_clear_writable(src_pte);  /* be paranoid */
 	newsrc = __fix_to_virt(idx) + ((unsigned long)source & (PAGE_SIZE-1));
@@ -119,6 +122,8 @@
 	 * We're done: notify the simulator that all is back to normal,
 	 * and re-enable interrupts and pre-emption.
 	 */
+	kmap_atomic_idx_pop();
+	kmap_atomic_idx_pop();
 	sim_allow_multiple_caching(0);
 	local_irq_restore(flags);
 	put_cpu();
diff --git a/arch/tile/mm/highmem.c b/arch/tile/mm/highmem.c
index abb5733..31dbbd9 100644
--- a/arch/tile/mm/highmem.c
+++ b/arch/tile/mm/highmem.c
@@ -227,7 +227,7 @@
 void *__kmap_atomic(struct page *page)
 {
 	/* PAGE_NONE is a magic value that tells us to check immutability. */
-	return kmap_atomic_prot(page, type, PAGE_NONE);
+	return kmap_atomic_prot(page, PAGE_NONE);
 }
 EXPORT_SYMBOL(__kmap_atomic);
 
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 78e1982..0b9ce69 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -988,8 +988,12 @@
 /* Select whether to free (1) or mark unusable (0) the __init pages. */
 static int __init set_initfree(char *str)
 {
-	strict_strtol(str, 0, &initfree);
-	pr_info("initfree: %s free init pages\n", initfree ? "will" : "won't");
+	long val;
+	if (strict_strtol(str, 0, &val)) {
+		initfree = val;
+		pr_info("initfree: %s free init pages\n",
+			initfree ? "will" : "won't");
+	}
 	return 1;
 }
 __setup("initfree=", set_initfree);
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 335c246..1f5430c 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -134,9 +134,9 @@
 }
 
 #if defined(CONFIG_HIGHPTE)
-pte_t *_pte_offset_map(pmd_t *dir, unsigned long address, enum km_type type)
+pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
 {
-	pte_t *pte = kmap_atomic(pmd_page(*dir), type) +
+	pte_t *pte = kmap_atomic(pmd_page(*dir)) +
 		(pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
 	return &pte[pte_index(address)];
 }
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 908ea54..fb8b376 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -720,7 +720,7 @@
 	}
 }
 
-static void set_spte_track_bits(u64 *sptep, u64 new_spte)
+static int set_spte_track_bits(u64 *sptep, u64 new_spte)
 {
 	pfn_t pfn;
 	u64 old_spte = *sptep;
@@ -731,19 +731,20 @@
 		old_spte = __xchg_spte(sptep, new_spte);
 
 	if (!is_rmap_spte(old_spte))
-		return;
+		return 0;
 
 	pfn = spte_to_pfn(old_spte);
 	if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (!shadow_dirty_mask || (old_spte & shadow_dirty_mask))
 		kvm_set_pfn_dirty(pfn);
+	return 1;
 }
 
 static void drop_spte(struct kvm *kvm, u64 *sptep, u64 new_spte)
 {
-	set_spte_track_bits(sptep, new_spte);
-	rmap_remove(kvm, sptep);
+	if (set_spte_track_bits(sptep, new_spte))
+		rmap_remove(kvm, sptep);
 }
 
 static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 2288ad8..cdac9e5 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2560,6 +2560,7 @@
 		!kvm_exception_is_soft(vcpu->arch.exception.nr);
 	events->exception.nr = vcpu->arch.exception.nr;
 	events->exception.has_error_code = vcpu->arch.exception.has_error_code;
+	events->exception.pad = 0;
 	events->exception.error_code = vcpu->arch.exception.error_code;
 
 	events->interrupt.injected =
@@ -2573,12 +2574,14 @@
 	events->nmi.injected = vcpu->arch.nmi_injected;
 	events->nmi.pending = vcpu->arch.nmi_pending;
 	events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
+	events->nmi.pad = 0;
 
 	events->sipi_vector = vcpu->arch.sipi_vector;
 
 	events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
 			 | KVM_VCPUEVENT_VALID_SIPI_VECTOR
 			 | KVM_VCPUEVENT_VALID_SHADOW);
+	memset(&events->reserved, 0, sizeof(events->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
@@ -2623,6 +2626,7 @@
 	dbgregs->dr6 = vcpu->arch.dr6;
 	dbgregs->dr7 = vcpu->arch.dr7;
 	dbgregs->flags = 0;
+	memset(&dbgregs->reserved, 0, sizeof(dbgregs->reserved));
 }
 
 static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
@@ -3106,6 +3110,7 @@
 		sizeof(ps->channels));
 	ps->flags = kvm->arch.vpit->pit_state.flags;
 	mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+	memset(&ps->reserved, 0, sizeof(ps->reserved));
 	return r;
 }
 
@@ -3169,10 +3174,6 @@
 		struct kvm_memslots *slots, *old_slots;
 		unsigned long *dirty_bitmap;
 
-		spin_lock(&kvm->mmu_lock);
-		kvm_mmu_slot_remove_write_access(kvm, log->slot);
-		spin_unlock(&kvm->mmu_lock);
-
 		r = -ENOMEM;
 		dirty_bitmap = vmalloc(n);
 		if (!dirty_bitmap)
@@ -3194,6 +3195,10 @@
 		dirty_bitmap = old_slots->memslots[log->slot].dirty_bitmap;
 		kfree(old_slots);
 
+		spin_lock(&kvm->mmu_lock);
+		kvm_mmu_slot_remove_write_access(kvm, log->slot);
+		spin_unlock(&kvm->mmu_lock);
+
 		r = -EFAULT;
 		if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) {
 			vfree(dirty_bitmap);
@@ -3486,6 +3491,7 @@
 		user_ns.clock = kvm->arch.kvmclock_offset + now_ns;
 		local_irq_enable();
 		user_ns.flags = 0;
+		memset(&user_ns.pad, 0, sizeof(user_ns.pad));
 
 		r = -EFAULT;
 		if (copy_to_user(argp, &user_ns, sizeof(user_ns)))
@@ -3972,8 +3978,10 @@
 		return X86EMUL_CONTINUE;
 
 	if (kvm_x86_ops->has_wbinvd_exit()) {
+		preempt_disable();
 		smp_call_function_many(vcpu->arch.wbinvd_dirty_mask,
 				wbinvd_ipi, NULL, 1);
+		preempt_enable();
 		cpumask_clear(vcpu->arch.wbinvd_dirty_mask);
 	}
 	wbinvd();
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 767107c..8f19b38 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -4363,9 +4363,9 @@
 out_put_disk:
 	while (dr--) {
 		del_timer(&motor_off_timer[dr]);
-		put_disk(disks[dr]);
 		if (disks[dr]->queue)
 			blk_cleanup_queue(disks[dr]->queue);
+		put_disk(disks[dr]);
 	}
 	return err;
 }
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 9dcb17d..84eb607 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -577,17 +577,11 @@
 	return ret;
 }
 
-static int ar_context_add_page(struct ar_context *ctx)
+static void ar_context_link_page(struct ar_context *ctx,
+				 struct ar_buffer *ab, dma_addr_t ab_bus)
 {
-	struct device *dev = ctx->ohci->card.device;
-	struct ar_buffer *ab;
-	dma_addr_t uninitialized_var(ab_bus);
 	size_t offset;
 
-	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
-	if (ab == NULL)
-		return -ENOMEM;
-
 	ab->next = NULL;
 	memset(&ab->descriptor, 0, sizeof(ab->descriptor));
 	ab->descriptor.control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
@@ -606,6 +600,19 @@
 
 	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
 	flush_writes(ctx->ohci);
+}
+
+static int ar_context_add_page(struct ar_context *ctx)
+{
+	struct device *dev = ctx->ohci->card.device;
+	struct ar_buffer *ab;
+	dma_addr_t uninitialized_var(ab_bus);
+
+	ab = dma_alloc_coherent(dev, PAGE_SIZE, &ab_bus, GFP_ATOMIC);
+	if (ab == NULL)
+		return -ENOMEM;
+
+	ar_context_link_page(ctx, ab, ab_bus);
 
 	return 0;
 }
@@ -730,16 +737,17 @@
 static void ar_context_tasklet(unsigned long data)
 {
 	struct ar_context *ctx = (struct ar_context *)data;
-	struct fw_ohci *ohci = ctx->ohci;
 	struct ar_buffer *ab;
 	struct descriptor *d;
 	void *buffer, *end;
+	__le16 res_count;
 
 	ab = ctx->current_buffer;
 	d = &ab->descriptor;
 
-	if (d->res_count == 0) {
-		size_t size, rest, offset;
+	res_count = ACCESS_ONCE(d->res_count);
+	if (res_count == 0) {
+		size_t size, size2, rest, pktsize, size3, offset;
 		dma_addr_t start_bus;
 		void *start;
 
@@ -750,29 +758,63 @@
 		 */
 
 		offset = offsetof(struct ar_buffer, data);
-		start = buffer = ab;
+		start = ab;
 		start_bus = le32_to_cpu(ab->descriptor.data_address) - offset;
+		buffer = ab->data;
 
 		ab = ab->next;
 		d = &ab->descriptor;
-		size = buffer + PAGE_SIZE - ctx->pointer;
+		size = start + PAGE_SIZE - ctx->pointer;
+		/* valid buffer data in the next page */
 		rest = le16_to_cpu(d->req_count) - le16_to_cpu(d->res_count);
+		/* what actually fits in this page */
+		size2 = min(rest, (size_t)PAGE_SIZE - offset - size);
 		memmove(buffer, ctx->pointer, size);
-		memcpy(buffer + size, ab->data, rest);
-		ctx->current_buffer = ab;
-		ctx->pointer = (void *) ab->data + rest;
-		end = buffer + size + rest;
+		memcpy(buffer + size, ab->data, size2);
 
-		while (buffer < end)
-			buffer = handle_ar_packet(ctx, buffer);
+		while (size > 0) {
+			void *next = handle_ar_packet(ctx, buffer);
+			pktsize = next - buffer;
+			if (pktsize >= size) {
+				/*
+				 * We have handled all the data that was
+				 * originally in this page, so we can now
+				 * continue in the next page.
+				 */
+				buffer = next;
+				break;
+			}
+			/* move the next packet to the start of the buffer */
+			memmove(buffer, next, size + size2 - pktsize);
+			size -= pktsize;
+			/* fill up this page again */
+			size3 = min(rest - size2,
+				    (size_t)PAGE_SIZE - offset - size - size2);
+			memcpy(buffer + size + size2,
+			       (void *) ab->data + size2, size3);
+			size2 += size3;
+		}
 
-		dma_free_coherent(ohci->card.device, PAGE_SIZE,
-				  start, start_bus);
-		ar_context_add_page(ctx);
+		if (rest > 0) {
+			/* handle the packets that are fully in the next page */
+			buffer = (void *) ab->data +
+					(buffer - (start + offset + size));
+			end = (void *) ab->data + rest;
+
+			while (buffer < end)
+				buffer = handle_ar_packet(ctx, buffer);
+
+			ctx->current_buffer = ab;
+			ctx->pointer = end;
+
+			ar_context_link_page(ctx, start, start_bus);
+		} else {
+			ctx->pointer = start + PAGE_SIZE;
+		}
 	} else {
 		buffer = ctx->pointer;
 		ctx->pointer = end =
-			(void *) ab + PAGE_SIZE - le16_to_cpu(d->res_count);
+			(void *) ab + PAGE_SIZE - le16_to_cpu(res_count);
 
 		while (buffer < end)
 			buffer = handle_ar_packet(ctx, buffer);
diff --git a/drivers/hwmon/ltc4261.c b/drivers/hwmon/ltc4261.c
index 2676261..4b50601 100644
--- a/drivers/hwmon/ltc4261.c
+++ b/drivers/hwmon/ltc4261.c
@@ -82,7 +82,7 @@
 			val = i2c_smbus_read_byte_data(client, i);
 			if (unlikely(val < 0)) {
 				dev_dbg(dev,
-					"Failed to read ADC value: error %d",
+					"Failed to read ADC value: error %d\n",
 					val);
 				ret = ERR_PTR(val);
 				goto abort;
@@ -230,8 +230,7 @@
 		return -ENODEV;
 
 	if (i2c_smbus_read_byte_data(client, LTC4261_STATUS) < 0) {
-		dev_err(&client->dev, "Failed to read register %d:%02x:%02x\n",
-			adapter->id, client->addr, LTC4261_STATUS);
+		dev_err(&client->dev, "Failed to read status register\n");
 		return -ENODEV;
 	}
 
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index c950be3..3a6321c 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -99,6 +99,7 @@
 	    ICH10
 	    5/3400 Series (PCH)
 	    Cougar Point (PCH)
+	    Patsburg (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 59d6598..02835ce 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -3,6 +3,8 @@
     Philip Edelbrock <phil@netroedge.com>, and Mark D. Studebaker
     <mdsxyz123@yahoo.com>
     Copyright (C) 2007, 2008   Jean Delvare <khali@linux-fr.org>
+    Copyright (C) 2010         Intel Corporation,
+                               David Woodhouse <dwmw2@infradead.org>
 
     This program is free software; you can redistribute it and/or modify
     it under the terms of the GNU General Public License as published by
@@ -43,6 +45,10 @@
   ICH10                 0x3a60     32     hard     yes     yes     yes
   5/3400 Series (PCH)   0x3b30     32     hard     yes     yes     yes
   Cougar Point (PCH)    0x1c22     32     hard     yes     yes     yes
+  Patsburg (PCH)        0x1d22     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d70     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d71     32     hard     yes     yes     yes
+  Patsburg (PCH) IDF    0x1d72     32     hard     yes     yes     yes
 
   Features supported by this driver:
   Software PEC                     no
@@ -50,12 +56,11 @@
   Block buffer                     yes
   Block process call transaction   no
   I2C block read transaction       yes  (doesn't use the block buffer)
+  Slave mode                       no
 
   See the file Documentation/i2c/busses/i2c-i801 for details.
 */
 
-/* Note: we assume there can only be one I801, with one SMBus interface */
-
 #include <linux/module.h>
 #include <linux/pci.h>
 #include <linux/kernel.h>
@@ -69,16 +74,16 @@
 #include <linux/dmi.h>
 
 /* I801 SMBus address offsets */
-#define SMBHSTSTS	(0 + i801_smba)
-#define SMBHSTCNT	(2 + i801_smba)
-#define SMBHSTCMD	(3 + i801_smba)
-#define SMBHSTADD	(4 + i801_smba)
-#define SMBHSTDAT0	(5 + i801_smba)
-#define SMBHSTDAT1	(6 + i801_smba)
-#define SMBBLKDAT	(7 + i801_smba)
-#define SMBPEC		(8 + i801_smba)		/* ICH3 and later */
-#define SMBAUXSTS	(12 + i801_smba)	/* ICH4 and later */
-#define SMBAUXCTL	(13 + i801_smba)	/* ICH4 and later */
+#define SMBHSTSTS(p)	(0 + (p)->smba)
+#define SMBHSTCNT(p)	(2 + (p)->smba)
+#define SMBHSTCMD(p)	(3 + (p)->smba)
+#define SMBHSTADD(p)	(4 + (p)->smba)
+#define SMBHSTDAT0(p)	(5 + (p)->smba)
+#define SMBHSTDAT1(p)	(6 + (p)->smba)
+#define SMBBLKDAT(p)	(7 + (p)->smba)
+#define SMBPEC(p)	(8 + (p)->smba)		/* ICH3 and later */
+#define SMBAUXSTS(p)	(12 + (p)->smba)	/* ICH4 and later */
+#define SMBAUXCTL(p)	(13 + (p)->smba)	/* ICH4 and later */
 
 /* PCI Address Constants */
 #define SMBBAR		4
@@ -127,16 +132,25 @@
 				 SMBHSTSTS_BUS_ERR | SMBHSTSTS_DEV_ERR | \
 				 SMBHSTSTS_INTR)
 
-static unsigned long i801_smba;
-static unsigned char i801_original_hstcfg;
+/* Patsburg also has three 'Integrated Device Function' SMBus controllers */
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0	0x1d70
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1	0x1d71
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2	0x1d72
+
+struct i801_priv {
+	struct i2c_adapter adapter;
+	unsigned long smba;
+	unsigned char original_hstcfg;
+	struct pci_dev *pci_dev;
+	unsigned int features;
+};
+
 static struct pci_driver i801_driver;
-static struct pci_dev *I801_dev;
 
 #define FEATURE_SMBUS_PEC	(1 << 0)
 #define FEATURE_BLOCK_BUFFER	(1 << 1)
 #define FEATURE_BLOCK_PROC	(1 << 2)
 #define FEATURE_I2C_BLOCK_READ	(1 << 3)
-static unsigned int i801_features;
 
 static const char *i801_feature_names[] = {
 	"SMBus PEC",
@@ -151,24 +165,24 @@
 
 /* Make sure the SMBus host is ready to start transmitting.
    Return 0 if it is, -EBUSY if it is not. */
-static int i801_check_pre(void)
+static int i801_check_pre(struct i801_priv *priv)
 {
 	int status;
 
-	status = inb_p(SMBHSTSTS);
+	status = inb_p(SMBHSTSTS(priv));
 	if (status & SMBHSTSTS_HOST_BUSY) {
-		dev_err(&I801_dev->dev, "SMBus is busy, can't use it!\n");
+		dev_err(&priv->pci_dev->dev, "SMBus is busy, can't use it!\n");
 		return -EBUSY;
 	}
 
 	status &= STATUS_FLAGS;
 	if (status) {
-		dev_dbg(&I801_dev->dev, "Clearing status flags (%02x)\n",
+		dev_dbg(&priv->pci_dev->dev, "Clearing status flags (%02x)\n",
 			status);
-		outb_p(status, SMBHSTSTS);
-		status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+		outb_p(status, SMBHSTSTS(priv));
+		status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
 		if (status) {
-			dev_err(&I801_dev->dev,
+			dev_err(&priv->pci_dev->dev,
 				"Failed clearing status flags (%02x)\n",
 				status);
 			return -EBUSY;
@@ -179,48 +193,50 @@
 }
 
 /* Convert the status register to an error code, and clear it. */
-static int i801_check_post(int status, int timeout)
+static int i801_check_post(struct i801_priv *priv, int status, int timeout)
 {
 	int result = 0;
 
 	/* If the SMBus is still busy, we give up */
 	if (timeout) {
-		dev_err(&I801_dev->dev, "Transaction timeout\n");
+		dev_err(&priv->pci_dev->dev, "Transaction timeout\n");
 		/* try to stop the current command */
-		dev_dbg(&I801_dev->dev, "Terminating the current operation\n");
-		outb_p(inb_p(SMBHSTCNT) | SMBHSTCNT_KILL, SMBHSTCNT);
+		dev_dbg(&priv->pci_dev->dev, "Terminating the current operation\n");
+		outb_p(inb_p(SMBHSTCNT(priv)) | SMBHSTCNT_KILL,
+		       SMBHSTCNT(priv));
 		msleep(1);
-		outb_p(inb_p(SMBHSTCNT) & (~SMBHSTCNT_KILL), SMBHSTCNT);
+		outb_p(inb_p(SMBHSTCNT(priv)) & (~SMBHSTCNT_KILL),
+		       SMBHSTCNT(priv));
 
 		/* Check if it worked */
-		status = inb_p(SMBHSTSTS);
+		status = inb_p(SMBHSTSTS(priv));
 		if ((status & SMBHSTSTS_HOST_BUSY) ||
 		    !(status & SMBHSTSTS_FAILED))
-			dev_err(&I801_dev->dev,
+			dev_err(&priv->pci_dev->dev,
 				"Failed terminating the transaction\n");
-		outb_p(STATUS_FLAGS, SMBHSTSTS);
+		outb_p(STATUS_FLAGS, SMBHSTSTS(priv));
 		return -ETIMEDOUT;
 	}
 
 	if (status & SMBHSTSTS_FAILED) {
 		result = -EIO;
-		dev_err(&I801_dev->dev, "Transaction failed\n");
+		dev_err(&priv->pci_dev->dev, "Transaction failed\n");
 	}
 	if (status & SMBHSTSTS_DEV_ERR) {
 		result = -ENXIO;
-		dev_dbg(&I801_dev->dev, "No response\n");
+		dev_dbg(&priv->pci_dev->dev, "No response\n");
 	}
 	if (status & SMBHSTSTS_BUS_ERR) {
 		result = -EAGAIN;
-		dev_dbg(&I801_dev->dev, "Lost arbitration\n");
+		dev_dbg(&priv->pci_dev->dev, "Lost arbitration\n");
 	}
 
 	if (result) {
 		/* Clear error flags */
-		outb_p(status & STATUS_FLAGS, SMBHSTSTS);
-		status = inb_p(SMBHSTSTS) & STATUS_FLAGS;
+		outb_p(status & STATUS_FLAGS, SMBHSTSTS(priv));
+		status = inb_p(SMBHSTSTS(priv)) & STATUS_FLAGS;
 		if (status) {
-			dev_warn(&I801_dev->dev, "Failed clearing status "
+			dev_warn(&priv->pci_dev->dev, "Failed clearing status "
 				 "flags at end of transaction (%02x)\n",
 				 status);
 		}
@@ -229,86 +245,88 @@
 	return result;
 }
 
-static int i801_transaction(int xact)
+static int i801_transaction(struct i801_priv *priv, int xact)
 {
 	int status;
 	int result;
 	int timeout = 0;
 
-	result = i801_check_pre();
+	result = i801_check_pre(priv);
 	if (result < 0)
 		return result;
 
 	/* the current contents of SMBHSTCNT can be overwritten, since PEC,
 	 * INTREN, SMBSCMD are passed in xact */
-	outb_p(xact | I801_START, SMBHSTCNT);
+	outb_p(xact | I801_START, SMBHSTCNT(priv));
 
 	/* We will always wait for a fraction of a second! */
 	do {
 		msleep(1);
-		status = inb_p(SMBHSTSTS);
+		status = inb_p(SMBHSTSTS(priv));
 	} while ((status & SMBHSTSTS_HOST_BUSY) && (timeout++ < MAX_TIMEOUT));
 
-	result = i801_check_post(status, timeout > MAX_TIMEOUT);
+	result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
 	if (result < 0)
 		return result;
 
-	outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+	outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
 	return 0;
 }
 
 /* wait for INTR bit as advised by Intel */
-static void i801_wait_hwpec(void)
+static void i801_wait_hwpec(struct i801_priv *priv)
 {
 	int timeout = 0;
 	int status;
 
 	do {
 		msleep(1);
-		status = inb_p(SMBHSTSTS);
+		status = inb_p(SMBHSTSTS(priv));
 	} while ((!(status & SMBHSTSTS_INTR))
 		 && (timeout++ < MAX_TIMEOUT));
 
 	if (timeout > MAX_TIMEOUT)
-		dev_dbg(&I801_dev->dev, "PEC Timeout!\n");
+		dev_dbg(&priv->pci_dev->dev, "PEC Timeout!\n");
 
-	outb_p(status, SMBHSTSTS);
+	outb_p(status, SMBHSTSTS(priv));
 }
 
-static int i801_block_transaction_by_block(union i2c_smbus_data *data,
+static int i801_block_transaction_by_block(struct i801_priv *priv,
+					   union i2c_smbus_data *data,
 					   char read_write, int hwpec)
 {
 	int i, len;
 	int status;
 
-	inb_p(SMBHSTCNT); /* reset the data buffer index */
+	inb_p(SMBHSTCNT(priv)); /* reset the data buffer index */
 
 	/* Use 32-byte buffer to process this transaction */
 	if (read_write == I2C_SMBUS_WRITE) {
 		len = data->block[0];
-		outb_p(len, SMBHSTDAT0);
+		outb_p(len, SMBHSTDAT0(priv));
 		for (i = 0; i < len; i++)
-			outb_p(data->block[i+1], SMBBLKDAT);
+			outb_p(data->block[i+1], SMBBLKDAT(priv));
 	}
 
-	status = i801_transaction(I801_BLOCK_DATA | ENABLE_INT9 |
+	status = i801_transaction(priv, I801_BLOCK_DATA | ENABLE_INT9 |
 				  I801_PEC_EN * hwpec);
 	if (status)
 		return status;
 
 	if (read_write == I2C_SMBUS_READ) {
-		len = inb_p(SMBHSTDAT0);
+		len = inb_p(SMBHSTDAT0(priv));
 		if (len < 1 || len > I2C_SMBUS_BLOCK_MAX)
 			return -EPROTO;
 
 		data->block[0] = len;
 		for (i = 0; i < len; i++)
-			data->block[i + 1] = inb_p(SMBBLKDAT);
+			data->block[i + 1] = inb_p(SMBBLKDAT(priv));
 	}
 	return 0;
 }
 
-static int i801_block_transaction_byte_by_byte(union i2c_smbus_data *data,
+static int i801_block_transaction_byte_by_byte(struct i801_priv *priv,
+					       union i2c_smbus_data *data,
 					       char read_write, int command,
 					       int hwpec)
 {
@@ -318,15 +336,15 @@
 	int result;
 	int timeout;
 
-	result = i801_check_pre();
+	result = i801_check_pre(priv);
 	if (result < 0)
 		return result;
 
 	len = data->block[0];
 
 	if (read_write == I2C_SMBUS_WRITE) {
-		outb_p(len, SMBHSTDAT0);
-		outb_p(data->block[1], SMBBLKDAT);
+		outb_p(len, SMBHSTDAT0(priv));
+		outb_p(data->block[1], SMBBLKDAT(priv));
 	}
 
 	for (i = 1; i <= len; i++) {
@@ -342,34 +360,37 @@
 			else
 				smbcmd = I801_BLOCK_DATA;
 		}
-		outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT);
+		outb_p(smbcmd | ENABLE_INT9, SMBHSTCNT(priv));
 
 		if (i == 1)
-			outb_p(inb(SMBHSTCNT) | I801_START, SMBHSTCNT);
+			outb_p(inb(SMBHSTCNT(priv)) | I801_START,
+			       SMBHSTCNT(priv));
 
 		/* We will always wait for a fraction of a second! */
 		timeout = 0;
 		do {
 			msleep(1);
-			status = inb_p(SMBHSTSTS);
+			status = inb_p(SMBHSTSTS(priv));
 		} while ((!(status & SMBHSTSTS_BYTE_DONE))
 			 && (timeout++ < MAX_TIMEOUT));
 
-		result = i801_check_post(status, timeout > MAX_TIMEOUT);
+		result = i801_check_post(priv, status, timeout > MAX_TIMEOUT);
 		if (result < 0)
 			return result;
 
 		if (i == 1 && read_write == I2C_SMBUS_READ
 		 && command != I2C_SMBUS_I2C_BLOCK_DATA) {
-			len = inb_p(SMBHSTDAT0);
+			len = inb_p(SMBHSTDAT0(priv));
 			if (len < 1 || len > I2C_SMBUS_BLOCK_MAX) {
-				dev_err(&I801_dev->dev,
+				dev_err(&priv->pci_dev->dev,
 					"Illegal SMBus block read size %d\n",
 					len);
 				/* Recover */
-				while (inb_p(SMBHSTSTS) & SMBHSTSTS_HOST_BUSY)
-					outb_p(SMBHSTSTS_BYTE_DONE, SMBHSTSTS);
-				outb_p(SMBHSTSTS_INTR, SMBHSTSTS);
+				while (inb_p(SMBHSTSTS(priv)) &
+				       SMBHSTSTS_HOST_BUSY)
+					outb_p(SMBHSTSTS_BYTE_DONE,
+					       SMBHSTSTS(priv));
+				outb_p(SMBHSTSTS_INTR, SMBHSTSTS(priv));
 				return -EPROTO;
 			}
 			data->block[0] = len;
@@ -377,27 +398,28 @@
 
 		/* Retrieve/store value in SMBBLKDAT */
 		if (read_write == I2C_SMBUS_READ)
-			data->block[i] = inb_p(SMBBLKDAT);
+			data->block[i] = inb_p(SMBBLKDAT(priv));
 		if (read_write == I2C_SMBUS_WRITE && i+1 <= len)
-			outb_p(data->block[i+1], SMBBLKDAT);
+			outb_p(data->block[i+1], SMBBLKDAT(priv));
 
 		/* signals SMBBLKDAT ready */
-		outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS);
+		outb_p(SMBHSTSTS_BYTE_DONE | SMBHSTSTS_INTR, SMBHSTSTS(priv));
 	}
 
 	return 0;
 }
 
-static int i801_set_block_buffer_mode(void)
+static int i801_set_block_buffer_mode(struct i801_priv *priv)
 {
-	outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_E32B, SMBAUXCTL);
-	if ((inb_p(SMBAUXCTL) & SMBAUXCTL_E32B) == 0)
+	outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_E32B, SMBAUXCTL(priv));
+	if ((inb_p(SMBAUXCTL(priv)) & SMBAUXCTL_E32B) == 0)
 		return -EIO;
 	return 0;
 }
 
 /* Block transaction function */
-static int i801_block_transaction(union i2c_smbus_data *data, char read_write,
+static int i801_block_transaction(struct i801_priv *priv,
+				  union i2c_smbus_data *data, char read_write,
 				  int command, int hwpec)
 {
 	int result = 0;
@@ -406,11 +428,11 @@
 	if (command == I2C_SMBUS_I2C_BLOCK_DATA) {
 		if (read_write == I2C_SMBUS_WRITE) {
 			/* set I2C_EN bit in configuration register */
-			pci_read_config_byte(I801_dev, SMBHSTCFG, &hostc);
-			pci_write_config_byte(I801_dev, SMBHSTCFG,
+			pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &hostc);
+			pci_write_config_byte(priv->pci_dev, SMBHSTCFG,
 					      hostc | SMBHSTCFG_I2C_EN);
-		} else if (!(i801_features & FEATURE_I2C_BLOCK_READ)) {
-			dev_err(&I801_dev->dev,
+		} else if (!(priv->features & FEATURE_I2C_BLOCK_READ)) {
+			dev_err(&priv->pci_dev->dev,
 				"I2C block read is unsupported!\n");
 			return -EOPNOTSUPP;
 		}
@@ -429,22 +451,23 @@
 	/* Experience has shown that the block buffer can only be used for
 	   SMBus (not I2C) block transactions, even though the datasheet
 	   doesn't mention this limitation. */
-	if ((i801_features & FEATURE_BLOCK_BUFFER)
+	if ((priv->features & FEATURE_BLOCK_BUFFER)
 	 && command != I2C_SMBUS_I2C_BLOCK_DATA
-	 && i801_set_block_buffer_mode() == 0)
-		result = i801_block_transaction_by_block(data, read_write,
-							 hwpec);
+	 && i801_set_block_buffer_mode(priv) == 0)
+		result = i801_block_transaction_by_block(priv, data,
+							 read_write, hwpec);
 	else
-		result = i801_block_transaction_byte_by_byte(data, read_write,
+		result = i801_block_transaction_byte_by_byte(priv, data,
+							     read_write,
 							     command, hwpec);
 
 	if (result == 0 && hwpec)
-		i801_wait_hwpec();
+		i801_wait_hwpec(priv);
 
 	if (command == I2C_SMBUS_I2C_BLOCK_DATA
 	 && read_write == I2C_SMBUS_WRITE) {
 		/* restore saved configuration register value */
-		pci_write_config_byte(I801_dev, SMBHSTCFG, hostc);
+		pci_write_config_byte(priv->pci_dev, SMBHSTCFG, hostc);
 	}
 	return result;
 }
@@ -457,81 +480,85 @@
 	int hwpec;
 	int block = 0;
 	int ret, xact = 0;
+	struct i801_priv *priv = i2c_get_adapdata(adap);
 
-	hwpec = (i801_features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
+	hwpec = (priv->features & FEATURE_SMBUS_PEC) && (flags & I2C_CLIENT_PEC)
 		&& size != I2C_SMBUS_QUICK
 		&& size != I2C_SMBUS_I2C_BLOCK_DATA;
 
 	switch (size) {
 	case I2C_SMBUS_QUICK:
 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-		       SMBHSTADD);
+		       SMBHSTADD(priv));
 		xact = I801_QUICK;
 		break;
 	case I2C_SMBUS_BYTE:
 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-		       SMBHSTADD);
+		       SMBHSTADD(priv));
 		if (read_write == I2C_SMBUS_WRITE)
-			outb_p(command, SMBHSTCMD);
+			outb_p(command, SMBHSTCMD(priv));
 		xact = I801_BYTE;
 		break;
 	case I2C_SMBUS_BYTE_DATA:
 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-		       SMBHSTADD);
-		outb_p(command, SMBHSTCMD);
+		       SMBHSTADD(priv));
+		outb_p(command, SMBHSTCMD(priv));
 		if (read_write == I2C_SMBUS_WRITE)
-			outb_p(data->byte, SMBHSTDAT0);
+			outb_p(data->byte, SMBHSTDAT0(priv));
 		xact = I801_BYTE_DATA;
 		break;
 	case I2C_SMBUS_WORD_DATA:
 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-		       SMBHSTADD);
-		outb_p(command, SMBHSTCMD);
+		       SMBHSTADD(priv));
+		outb_p(command, SMBHSTCMD(priv));
 		if (read_write == I2C_SMBUS_WRITE) {
-			outb_p(data->word & 0xff, SMBHSTDAT0);
-			outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1);
+			outb_p(data->word & 0xff, SMBHSTDAT0(priv));
+			outb_p((data->word & 0xff00) >> 8, SMBHSTDAT1(priv));
 		}
 		xact = I801_WORD_DATA;
 		break;
 	case I2C_SMBUS_BLOCK_DATA:
 		outb_p(((addr & 0x7f) << 1) | (read_write & 0x01),
-		       SMBHSTADD);
-		outb_p(command, SMBHSTCMD);
+		       SMBHSTADD(priv));
+		outb_p(command, SMBHSTCMD(priv));
 		block = 1;
 		break;
 	case I2C_SMBUS_I2C_BLOCK_DATA:
 		/* NB: page 240 of ICH5 datasheet shows that the R/#W
 		 * bit should be cleared here, even when reading */
-		outb_p((addr & 0x7f) << 1, SMBHSTADD);
+		outb_p((addr & 0x7f) << 1, SMBHSTADD(priv));
 		if (read_write == I2C_SMBUS_READ) {
 			/* NB: page 240 of ICH5 datasheet also shows
 			 * that DATA1 is the cmd field when reading */
-			outb_p(command, SMBHSTDAT1);
+			outb_p(command, SMBHSTDAT1(priv));
 		} else
-			outb_p(command, SMBHSTCMD);
+			outb_p(command, SMBHSTCMD(priv));
 		block = 1;
 		break;
 	default:
-		dev_err(&I801_dev->dev, "Unsupported transaction %d\n", size);
+		dev_err(&priv->pci_dev->dev, "Unsupported transaction %d\n",
+			size);
 		return -EOPNOTSUPP;
 	}
 
 	if (hwpec)	/* enable/disable hardware PEC */
-		outb_p(inb_p(SMBAUXCTL) | SMBAUXCTL_CRC, SMBAUXCTL);
+		outb_p(inb_p(SMBAUXCTL(priv)) | SMBAUXCTL_CRC, SMBAUXCTL(priv));
 	else
-		outb_p(inb_p(SMBAUXCTL) & (~SMBAUXCTL_CRC), SMBAUXCTL);
+		outb_p(inb_p(SMBAUXCTL(priv)) & (~SMBAUXCTL_CRC),
+		       SMBAUXCTL(priv));
 
 	if (block)
-		ret = i801_block_transaction(data, read_write, size, hwpec);
+		ret = i801_block_transaction(priv, data, read_write, size,
+					     hwpec);
 	else
-		ret = i801_transaction(xact | ENABLE_INT9);
+		ret = i801_transaction(priv, xact | ENABLE_INT9);
 
 	/* Some BIOSes don't like it when PEC is enabled at reboot or resume
 	   time, so we forcibly disable it after every transaction. Turn off
 	   E32B for the same reason. */
 	if (hwpec || block)
-		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
-		       SMBAUXCTL);
+		outb_p(inb_p(SMBAUXCTL(priv)) &
+		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
 	if (block)
 		return ret;
@@ -543,10 +570,11 @@
 	switch (xact & 0x7f) {
 	case I801_BYTE:	/* Result put in SMBHSTDAT0 */
 	case I801_BYTE_DATA:
-		data->byte = inb_p(SMBHSTDAT0);
+		data->byte = inb_p(SMBHSTDAT0(priv));
 		break;
 	case I801_WORD_DATA:
-		data->word = inb_p(SMBHSTDAT0) + (inb_p(SMBHSTDAT1) << 8);
+		data->word = inb_p(SMBHSTDAT0(priv)) +
+			     (inb_p(SMBHSTDAT1(priv)) << 8);
 		break;
 	}
 	return 0;
@@ -555,11 +583,13 @@
 
 static u32 i801_func(struct i2c_adapter *adapter)
 {
+	struct i801_priv *priv = i2c_get_adapdata(adapter);
+
 	return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE |
 	       I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA |
 	       I2C_FUNC_SMBUS_BLOCK_DATA | I2C_FUNC_SMBUS_WRITE_I2C_BLOCK |
-	       ((i801_features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
-	       ((i801_features & FEATURE_I2C_BLOCK_READ) ?
+	       ((priv->features & FEATURE_SMBUS_PEC) ? I2C_FUNC_SMBUS_PEC : 0) |
+	       ((priv->features & FEATURE_I2C_BLOCK_READ) ?
 		I2C_FUNC_SMBUS_READ_I2C_BLOCK : 0);
 }
 
@@ -568,12 +598,6 @@
 	.functionality	= i801_func,
 };
 
-static struct i2c_adapter i801_adapter = {
-	.owner		= THIS_MODULE,
-	.class		= I2C_CLASS_HWMON | I2C_CLASS_SPD,
-	.algo		= &smbus_algorithm,
-};
-
 static const struct pci_device_id i801_ids[] = {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_3) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_3) },
@@ -592,6 +616,10 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_5_3400_SERIES_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF0) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF1) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS_IDF2) },
 	{ 0, }
 };
 
@@ -704,16 +732,25 @@
 {
 	unsigned char temp;
 	int err, i;
+	struct i801_priv *priv;
 
-	I801_dev = dev;
-	i801_features = 0;
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	i2c_set_adapdata(&priv->adapter, priv);
+	priv->adapter.owner = THIS_MODULE;
+	priv->adapter.class = I2C_CLASS_HWMON | I2C_CLASS_SPD;
+	priv->adapter.algo = &smbus_algorithm;
+
+	priv->pci_dev = dev;
 	switch (dev->device) {
 	default:
-		i801_features |= FEATURE_I2C_BLOCK_READ;
+		priv->features |= FEATURE_I2C_BLOCK_READ;
 		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801DB_3:
-		i801_features |= FEATURE_SMBUS_PEC;
-		i801_features |= FEATURE_BLOCK_BUFFER;
+		priv->features |= FEATURE_SMBUS_PEC;
+		priv->features |= FEATURE_BLOCK_BUFFER;
 		/* fall through */
 	case PCI_DEVICE_ID_INTEL_82801CA_3:
 	case PCI_DEVICE_ID_INTEL_82801BA_2:
@@ -724,11 +761,11 @@
 
 	/* Disable features on user request */
 	for (i = 0; i < ARRAY_SIZE(i801_feature_names); i++) {
-		if (i801_features & disable_features & (1 << i))
+		if (priv->features & disable_features & (1 << i))
 			dev_notice(&dev->dev, "%s disabled by user\n",
 				   i801_feature_names[i]);
 	}
-	i801_features &= ~disable_features;
+	priv->features &= ~disable_features;
 
 	err = pci_enable_device(dev);
 	if (err) {
@@ -738,8 +775,8 @@
 	}
 
 	/* Determine the address of the SMBus area */
-	i801_smba = pci_resource_start(dev, SMBBAR);
-	if (!i801_smba) {
+	priv->smba = pci_resource_start(dev, SMBBAR);
+	if (!priv->smba) {
 		dev_err(&dev->dev, "SMBus base address uninitialized, "
 			"upgrade BIOS\n");
 		err = -ENODEV;
@@ -755,19 +792,19 @@
 	err = pci_request_region(dev, SMBBAR, i801_driver.name);
 	if (err) {
 		dev_err(&dev->dev, "Failed to request SMBus region "
-			"0x%lx-0x%Lx\n", i801_smba,
+			"0x%lx-0x%Lx\n", priv->smba,
 			(unsigned long long)pci_resource_end(dev, SMBBAR));
 		goto exit;
 	}
 
-	pci_read_config_byte(I801_dev, SMBHSTCFG, &temp);
-	i801_original_hstcfg = temp;
+	pci_read_config_byte(priv->pci_dev, SMBHSTCFG, &temp);
+	priv->original_hstcfg = temp;
 	temp &= ~SMBHSTCFG_I2C_EN;	/* SMBus timing */
 	if (!(temp & SMBHSTCFG_HST_EN)) {
 		dev_info(&dev->dev, "Enabling SMBus device\n");
 		temp |= SMBHSTCFG_HST_EN;
 	}
-	pci_write_config_byte(I801_dev, SMBHSTCFG, temp);
+	pci_write_config_byte(priv->pci_dev, SMBHSTCFG, temp);
 
 	if (temp & SMBHSTCFG_SMB_SMI_EN)
 		dev_dbg(&dev->dev, "SMBus using interrupt SMI#\n");
@@ -775,19 +812,19 @@
 		dev_dbg(&dev->dev, "SMBus using PCI Interrupt\n");
 
 	/* Clear special mode bits */
-	if (i801_features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
-		outb_p(inb_p(SMBAUXCTL) & ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B),
-		       SMBAUXCTL);
+	if (priv->features & (FEATURE_SMBUS_PEC | FEATURE_BLOCK_BUFFER))
+		outb_p(inb_p(SMBAUXCTL(priv)) &
+		       ~(SMBAUXCTL_CRC | SMBAUXCTL_E32B), SMBAUXCTL(priv));
 
 	/* set up the sysfs linkage to our parent device */
-	i801_adapter.dev.parent = &dev->dev;
+	priv->adapter.dev.parent = &dev->dev;
 
 	/* Retry up to 3 times on lost arbitration */
-	i801_adapter.retries = 3;
+	priv->adapter.retries = 3;
 
-	snprintf(i801_adapter.name, sizeof(i801_adapter.name),
-		"SMBus I801 adapter at %04lx", i801_smba);
-	err = i2c_add_adapter(&i801_adapter);
+	snprintf(priv->adapter.name, sizeof(priv->adapter.name),
+		"SMBus I801 adapter at %04lx", priv->smba);
+	err = i2c_add_adapter(&priv->adapter);
 	if (err) {
 		dev_err(&dev->dev, "Failed to add SMBus adapter\n");
 		goto exit_release;
@@ -801,27 +838,33 @@
 		memset(&info, 0, sizeof(struct i2c_board_info));
 		info.addr = apanel_addr;
 		strlcpy(info.type, "fujitsu_apanel", I2C_NAME_SIZE);
-		i2c_new_device(&i801_adapter, &info);
+		i2c_new_device(&priv->adapter, &info);
 	}
 #endif
 #if defined CONFIG_SENSORS_FSCHMD || defined CONFIG_SENSORS_FSCHMD_MODULE
 	if (dmi_name_in_vendors("FUJITSU"))
-		dmi_walk(dmi_check_onboard_devices, &i801_adapter);
+		dmi_walk(dmi_check_onboard_devices, &priv->adapter);
 #endif
 
+	pci_set_drvdata(dev, priv);
 	return 0;
 
 exit_release:
 	pci_release_region(dev, SMBBAR);
 exit:
+	kfree(priv);
 	return err;
 }
 
 static void __devexit i801_remove(struct pci_dev *dev)
 {
-	i2c_del_adapter(&i801_adapter);
-	pci_write_config_byte(I801_dev, SMBHSTCFG, i801_original_hstcfg);
+	struct i801_priv *priv = pci_get_drvdata(dev);
+
+	i2c_del_adapter(&priv->adapter);
+	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 	pci_release_region(dev, SMBBAR);
+	pci_set_drvdata(dev, NULL);
+	kfree(priv);
 	/*
 	 * do not call pci_disable_device(dev) since it can cause hard hangs on
 	 * some systems during power-off (eg. Fujitsu-Siemens Lifebook E8010)
@@ -831,8 +874,10 @@
 #ifdef CONFIG_PM
 static int i801_suspend(struct pci_dev *dev, pm_message_t mesg)
 {
+	struct i801_priv *priv = pci_get_drvdata(dev);
+
 	pci_save_state(dev);
-	pci_write_config_byte(dev, SMBHSTCFG, i801_original_hstcfg);
+	pci_write_config_byte(dev, SMBHSTCFG, priv->original_hstcfg);
 	pci_set_power_state(dev, pci_choose_state(dev, mesg));
 	return 0;
 }
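The i2c-i801 hunks above finish converting the driver from file-scope globals (i801_smba, i801_features, I801_dev, i801_adapter) to a per-device struct i801_priv that is allocated in probe, published with pci_set_drvdata(), and looked up again in remove and suspend. A minimal sketch of that PCI drvdata pattern, with hypothetical foo_* names rather than the driver's own:

```c
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/i2c.h>

/* Hypothetical private state; only the drvdata handling is the point. */
struct foo_priv {
	struct i2c_adapter adapter;
	unsigned long smba;		/* I/O base discovered in probe */
};

static int foo_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct foo_priv *priv = kzalloc(sizeof(*priv), GFP_KERNEL);

	if (!priv)
		return -ENOMEM;

	priv->smba = pci_resource_start(dev, 0);
	pci_set_drvdata(dev, priv);	/* later callbacks find priv here */
	return 0;
}

static void foo_remove(struct pci_dev *dev)
{
	struct foo_priv *priv = pci_get_drvdata(dev);

	pci_set_drvdata(dev, NULL);
	kfree(priv);
}
```

The same priv pointer is also stored with i2c_set_adapdata() in the hunk above, so each controller instance carries its own register base and feature flags instead of sharing globals.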
diff --git a/drivers/input/mouse/appletouch.c b/drivers/input/mouse/appletouch.c
index a9cf768..b77f999 100644
--- a/drivers/input/mouse/appletouch.c
+++ b/drivers/input/mouse/appletouch.c
@@ -630,7 +630,7 @@
 	/* Just update the base values (i.e. touchpad in untouched state) */
 	if (dev->data[dev->info->datalen - 1] & ATP_STATUS_BASE_UPDATE) {
 
-		dprintk(KERN_DEBUG "appletouch: updated base values\n");
+		dprintk("appletouch: updated base values\n");
 
 		memcpy(dev->xy_old, dev->xy_cur, sizeof(dev->xy_old));
 		goto exit;
diff --git a/drivers/input/touchscreen/ad7879.c b/drivers/input/touchscreen/ad7879.c
index ba6f0bd..bc3b518 100644
--- a/drivers/input/touchscreen/ad7879.c
+++ b/drivers/input/touchscreen/ad7879.c
@@ -129,6 +129,9 @@
 	u16			cmd_crtl1;
 	u16			cmd_crtl2;
 	u16			cmd_crtl3;
+	int			x;
+	int			y;
+	int			Rt;
 };
 
 static int ad7879_read(struct ad7879 *ts, u8 reg)
@@ -175,13 +178,32 @@
 		Rt /= z1;
 		Rt = (Rt + 2047) >> 12;
 
-		if (!timer_pending(&ts->timer))
-			input_report_key(input_dev, BTN_TOUCH, 1);
+		/*
+		 * Sample found to be inconsistent: the pressure is beyond
+		 * the maximum. Don't report it to user space.
+		 */
+		if (Rt > ts->pressure_max)
+			return -EINVAL;
 
-		input_report_abs(input_dev, ABS_X, x);
-		input_report_abs(input_dev, ABS_Y, y);
-		input_report_abs(input_dev, ABS_PRESSURE, Rt);
-		input_sync(input_dev);
+		/*
+		 * Note that we delay reporting events by one sample.
+		 * This is done to avoid reporting the last sample of the
+		 * touch sequence, which may be incomplete if the finger
+		 * leaves the surface before the last reading is taken.
+		 */
+		if (timer_pending(&ts->timer)) {
+			/* Touch continues */
+			input_report_key(input_dev, BTN_TOUCH, 1);
+			input_report_abs(input_dev, ABS_X, ts->x);
+			input_report_abs(input_dev, ABS_Y, ts->y);
+			input_report_abs(input_dev, ABS_PRESSURE, ts->Rt);
+			input_sync(input_dev);
+		}
+
+		ts->x = x;
+		ts->y = y;
+		ts->Rt = Rt;
+
 		return 0;
 	}
 
diff --git a/drivers/input/touchscreen/bu21013_ts.c b/drivers/input/touchscreen/bu21013_ts.c
index ccde586..2ca9e5d 100644
--- a/drivers/input/touchscreen/bu21013_ts.c
+++ b/drivers/input/touchscreen/bu21013_ts.c
@@ -514,7 +514,7 @@
 err_cs_disable:
 	pdata->cs_dis(pdata->cs_pin);
 err_free_mem:
-	input_free_device(bu21013_data->in_dev);
+	input_free_device(in_dev);
 	kfree(bu21013_data);
 
 	return error;
diff --git a/drivers/isdn/hisax/isar.c b/drivers/isdn/hisax/isar.c
index 40b914b..2e72227 100644
--- a/drivers/isdn/hisax/isar.c
+++ b/drivers/isdn/hisax/isar.c
@@ -1427,8 +1427,8 @@
 					&bcs->hw.isar.reg->Flags))
 					bcs->hw.isar.dpath = 1;
 				else {
-					printk(KERN_WARNING"isar modeisar analog funktions only with DP1\n");
-					debugl1(cs, "isar modeisar analog funktions only with DP1");
+					printk(KERN_WARNING"isar modeisar analog functions only with DP1\n");
+					debugl1(cs, "isar modeisar analog functions only with DP1");
 					return(1);
 				}
 				break;
diff --git a/drivers/leds/leds-net5501.c b/drivers/leds/leds-net5501.c
index 3063f59..1739557 100644
--- a/drivers/leds/leds-net5501.c
+++ b/drivers/leds/leds-net5501.c
@@ -92,3 +92,5 @@
 }
 
 arch_initcall(soekris_init);
+
+MODULE_LICENSE("GPL");
diff --git a/drivers/media/IR/ir-keytable.c b/drivers/media/IR/ir-keytable.c
index 647d52b..f60107c 100644
--- a/drivers/media/IR/ir-keytable.c
+++ b/drivers/media/IR/ir-keytable.c
@@ -389,6 +389,8 @@
 	ke->len = sizeof(entry->scancode);
 	memcpy(ke->scancode, &entry->scancode, sizeof(entry->scancode));
 
+	retval = 0;
+
 out:
 	spin_unlock_irqrestore(&rc_tab->lock, flags);
 	return retval;
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 43579b3..5336310 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -3043,7 +3043,6 @@
 	atl1_pcie_patch(adapter);
 	/* assume we have no link for now */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	setup_timer(&adapter->phy_config_timer, atl1_phy_config,
 		    (unsigned long)adapter);
diff --git a/drivers/net/bnx2x/bnx2x.h b/drivers/net/bnx2x/bnx2x.h
index 9eea225..863e73a 100644
--- a/drivers/net/bnx2x/bnx2x.h
+++ b/drivers/net/bnx2x/bnx2x.h
@@ -20,8 +20,8 @@
  * (you will need to reboot afterwards) */
 /* #define BNX2X_STOP_ON_ERROR */
 
-#define DRV_MODULE_VERSION      "1.60.00-3"
-#define DRV_MODULE_RELDATE      "2010/10/19"
+#define DRV_MODULE_VERSION      "1.60.00-4"
+#define DRV_MODULE_RELDATE      "2010/11/01"
 #define BNX2X_BC_VER            0x040200
 
 #define BNX2X_MULTI_QUEUE
diff --git a/drivers/net/bnx2x/bnx2x_hsi.h b/drivers/net/bnx2x/bnx2x_hsi.h
index 18c8e23..4cfd4e9 100644
--- a/drivers/net/bnx2x/bnx2x_hsi.h
+++ b/drivers/net/bnx2x/bnx2x_hsi.h
@@ -244,7 +244,14 @@
 
 	u16 xgxs_config_tx[4];				    /* 0x1A0 */
 
-	u32 Reserved1[57];				    /* 0x1A8 */
+	u32 Reserved1[56];				    /* 0x1A8 */
+	u32 default_cfg;				    /* 0x288 */
+	/*  Enable BAM on KR */
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_MASK		      0x00100000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_SHIFT		      20
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_DISABLED		      0x00000000
+#define PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED		      0x00100000
+
 	u32 speed_capability_mask2;			    /* 0x28C */
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_MASK		      0x0000FFFF
 #define PORT_HW_CFG_SPEED_CAPABILITY2_D3_SHIFT		      0
diff --git a/drivers/net/bnx2x/bnx2x_link.c b/drivers/net/bnx2x/bnx2x_link.c
index 2326774..5809196 100644
--- a/drivers/net/bnx2x/bnx2x_link.c
+++ b/drivers/net/bnx2x/bnx2x_link.c
@@ -610,7 +610,7 @@
 	/* reset and unreset the BigMac */
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
 		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
-	udelay(10);
+	msleep(1);
 
 	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
 		     (MISC_REGISTERS_RESET_REG_2_RST_BMAC0 << port));
@@ -3525,13 +3525,19 @@
 	DP(NETIF_MSG_LINK, "Before rom RX_ALARM(port1): 0x%x\n", tmp1);
 
 	/* Enable CL37 BAM */
-	bnx2x_cl45_read(bp, phy,
-			MDIO_AN_DEVAD,
-			MDIO_AN_REG_8073_BAM, &val);
-	bnx2x_cl45_write(bp, phy,
-			 MDIO_AN_DEVAD,
-			 MDIO_AN_REG_8073_BAM, val | 1);
+	if (REG_RD(bp, params->shmem_base +
+			 offsetof(struct shmem_region, dev_info.
+				  port_hw_config[params->port].default_cfg)) &
+	    PORT_HW_CFG_ENABLE_BAM_ON_KR_ENABLED) {
 
+		bnx2x_cl45_read(bp, phy,
+				MDIO_AN_DEVAD,
+				MDIO_AN_REG_8073_BAM, &val);
+		bnx2x_cl45_write(bp, phy,
+				 MDIO_AN_DEVAD,
+				 MDIO_AN_REG_8073_BAM, val | 1);
+		DP(NETIF_MSG_LINK, "Enable CL37 BAM on KR\n");
+	}
 	if (params->loopback_mode == LOOPBACK_EXT) {
 		bnx2x_807x_force_10G(bp, phy);
 		DP(NETIF_MSG_LINK, "Forced speed 10G on 807X\n");
@@ -5302,7 +5308,7 @@
 {
 	struct bnx2x *bp = params->bp;
 	u16 autoneg_val, an_1000_val, an_10_100_val;
-	bnx2x_wait_reset_complete(bp, phy);
+
 	bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
 		      1 << NIG_LATCH_BC_ENABLE_MI_INT);
 
@@ -5431,6 +5437,7 @@
 
 	/* HW reset */
 	bnx2x_ext_phy_hw_reset(bp, params->port);
+	bnx2x_wait_reset_complete(bp, phy);
 
 	bnx2x_cl45_write(bp, phy, MDIO_PMA_DEVAD, MDIO_PMA_REG_CTRL, 1<<15);
 	return bnx2x_848xx_cmn_config_init(phy, params, vars);
@@ -5441,7 +5448,7 @@
 				  struct link_vars *vars)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port, initialize = 1;
+	u8 port, initialize = 1;
 	u16 val;
 	u16 temp;
 	u32 actual_phy_selection;
@@ -5450,11 +5457,16 @@
 	/* This is just for MDIO_CTL_REG_84823_MEDIA register. */
 
 	msleep(1);
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 		       MISC_REGISTERS_GPIO_OUTPUT_HIGH,
 		       port);
-	msleep(200); /* 100 is not enough */
-
+	bnx2x_wait_reset_complete(bp, phy);
+	/* Wait for GPHY to come out of reset */
+	msleep(50);
 	/* BCM84823 requires that XGXS links up first @ 10G for normal
 	behavior */
 	temp = vars->line_speed;
@@ -5625,7 +5637,11 @@
 				   struct link_params *params)
 {
 	struct bnx2x *bp = params->bp;
-	u8 port = params->port;
+	u8 port;
+	if (CHIP_IS_E2(bp))
+		port = BP_PATH(bp);
+	else
+		port = params->port;
 	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
 			    MISC_REGISTERS_GPIO_OUTPUT_LOW,
 			    port);
@@ -6928,7 +6944,7 @@
 		  u8 reset_ext_phy)
 {
 	struct bnx2x *bp = params->bp;
-	u8 phy_index, port = params->port;
+	u8 phy_index, port = params->port, clear_latch_ind = 0;
 	DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
 	/* disable attentions */
 	vars->link_status = 0;
@@ -6966,9 +6982,18 @@
 				params->phy[phy_index].link_reset(
 					&params->phy[phy_index],
 					params);
+			if (params->phy[phy_index].flags &
+			    FLAGS_REARM_LATCH_SIGNAL)
+				clear_latch_ind = 1;
 		}
 	}
 
+	if (clear_latch_ind) {
+		/* Clear latching indication */
+		bnx2x_rearm_latch_signal(bp, port, 0);
+		bnx2x_bits_dis(bp, NIG_REG_LATCH_BC_0 + port*4,
+			       1 << NIG_LATCH_BC_ENABLE_MI_INT);
+	}
 	if (params->phy[INT_PHY].link_reset)
 		params->phy[INT_PHY].link_reset(
 			&params->phy[INT_PHY], params);
@@ -6999,6 +7024,7 @@
 	s8 port;
 	s8 port_of_path = 0;
 
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	/* PART1 - Reset both phys */
 	for (port = PORT_MAX - 1; port >= PORT_0; port--) {
 		u32 shmem_base, shmem2_base;
@@ -7021,7 +7047,8 @@
 			return -EINVAL;
 		}
 		/* disable attentions */
-		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4,
+		bnx2x_bits_dis(bp, NIG_REG_MASK_INTERRUPT_PORT0 +
+			       port_of_path*4,
 			     (NIG_MASK_XGXS0_LINK_STATUS |
 			      NIG_MASK_XGXS0_LINK10G |
 			      NIG_MASK_SERDES0_LINK_STATUS |
@@ -7132,7 +7159,7 @@
 		(1<<(MISC_REGISTERS_GPIO_3 + MISC_REGISTERS_GPIO_PORT_SHIFT)));
 	REG_WR(bp, MISC_REG_GPIO_EVENT_EN, val);
 
-	bnx2x_ext_phy_hw_reset(bp, 1);
+	bnx2x_ext_phy_hw_reset(bp, 0);
 	msleep(5);
 	for (port = 0; port < PORT_MAX; port++) {
 		u32 shmem_base, shmem2_base;
diff --git a/drivers/net/caif/caif_spi.c b/drivers/net/caif/caif_spi.c
index 8427533..8b4cea5 100644
--- a/drivers/net/caif/caif_spi.c
+++ b/drivers/net/caif/caif_spi.c
@@ -33,6 +33,9 @@
 MODULE_AUTHOR("Daniel Martensson<daniel.martensson@stericsson.com>");
 MODULE_DESCRIPTION("CAIF SPI driver");
 
+/* Returns the number of padding bytes for alignment. */
+#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))
+
 static int spi_loop;
 module_param(spi_loop, bool, S_IRUGO);
 MODULE_PARM_DESC(spi_loop, "SPI running in loopback mode.");
@@ -41,7 +44,10 @@
 module_param(spi_frm_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_frm_align, "SPI frame alignment.");
 
-/* SPI padding options. */
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+ */
 module_param(spi_up_head_align, int, S_IRUGO);
 MODULE_PARM_DESC(spi_up_head_align, "SPI uplink head alignment.");
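PAD_POW2(), introduced above, returns how many padding bytes are needed to round x up to the next multiple of pow, which is why the alignment parameters must be non-zero powers of two: the (pow - 1) bit mask only isolates the remainder for such values. A quick illustration of the arithmetic, written as plain user-space C purely to show the numbers (not driver code):

```c
#include <stdio.h>

/* Same macro as in the driver: padding needed to round x up to a
 * multiple of pow (pow must be a non-zero power of two). */
#define PAD_POW2(x, pow) ((((x)&((pow)-1))==0) ? 0 : (((pow)-((x)&((pow)-1)))))

int main(void)
{
	/* 13 bytes at 4-byte alignment -> 3 bytes of padding; 16 -> 0; 5 at 2 -> 1. */
	printf("%d %d %d\n", PAD_POW2(13, 4), PAD_POW2(16, 4), PAD_POW2(5, 2));
	return 0;
}
```

This is also why the hunks below replace the bare "& spi_up_tail_align" expressions, which computed a remainder rather than a pad, with PAD_POW2().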
 
@@ -240,15 +246,13 @@
 static const struct file_operations dbgfs_state_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_state,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static const struct file_operations dbgfs_frame_fops = {
 	.open = dbgfs_open,
 	.read = dbgfs_frame,
-	.owner = THIS_MODULE,
-	.llseek = default_llseek,
+	.owner = THIS_MODULE
 };
 
 static inline void dev_debugfs_add(struct cfspi *cfspi)
@@ -337,6 +341,9 @@
 	u8 *dst = buf;
 	caif_assert(buf);
 
+	if (cfspi->slave && !cfspi->slave_talked)
+		cfspi->slave_talked = true;
+
 	do {
 		struct sk_buff *skb;
 		struct caif_payload_info *info;
@@ -357,8 +364,8 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align) {
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1) {
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 			*dst = (u8)(spad - 1);
 			dst += spad;
 		}
@@ -373,7 +380,7 @@
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 		dst += epad;
 
 		dev_kfree_skb(skb);
@@ -417,14 +424,14 @@
 		 * Compute head offset i.e. number of bytes to add to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_up_head_align)
-			spad = 1 + ((info->hdr_len + 1) & spi_up_head_align);
+		if (spi_up_head_align > 1)
+			spad = 1 + PAD_POW2((info->hdr_len + 1), spi_up_head_align);
 
 		/*
 		 * Compute tail offset i.e. number of bytes to add to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (skb->len + spad) & spi_up_tail_align;
+		epad = PAD_POW2((skb->len + spad), spi_up_tail_align);
 
 		if ((skb->len + spad + epad + frm_len) <= CAIF_MAX_SPI_FRAME) {
 			skb_queue_tail(&cfspi->chead, skb);
@@ -433,6 +440,7 @@
 		} else {
 			/* Put back packet. */
 			skb_queue_head(&cfspi->qhead, skb);
+			break;
 		}
 	} while (pkts <= CAIF_MAX_SPI_PKTS);
 
@@ -453,6 +461,15 @@
 {
 	struct cfspi *cfspi = (struct cfspi *)ifc->priv;
 
+	/*
+	 * The slave device is the master on the link. Interrupts before the
+	 * slave has transmitted are considered spurious.
+	 */
+	if (cfspi->slave && !cfspi->slave_talked) {
+		printk(KERN_WARNING "CFSPI: Spurious SS interrupt.\n");
+		return;
+	}
+
 	if (!in_interrupt())
 		spin_lock(&cfspi->lock);
 	if (assert) {
@@ -465,7 +482,8 @@
 		spin_unlock(&cfspi->lock);
 
 	/* Wake up the xfer thread. */
-	wake_up_interruptible(&cfspi->wait);
+	if (assert)
+		wake_up_interruptible(&cfspi->wait);
 }
 
 static void cfspi_xfer_done_cb(struct cfspi_ifc *ifc)
@@ -523,7 +541,7 @@
 		 * Compute head offset i.e. number of bytes added to
 		 * get the start of the payload aligned.
 		 */
-		if (spi_down_head_align) {
+		if (spi_down_head_align > 1) {
 			spad = 1 + *src;
 			src += spad;
 		}
@@ -564,7 +582,7 @@
 		 * Compute tail offset i.e. number of bytes added to
 		 * get the complete CAIF frame aligned.
 		 */
-		epad = (pkt_len + spad) & spi_down_tail_align;
+		epad = PAD_POW2((pkt_len + spad), spi_down_tail_align);
 		src += epad;
 	} while ((src - buf) < len);
 
@@ -625,11 +643,20 @@
 	cfspi->ndev = ndev;
 	cfspi->pdev = pdev;
 
-	/* Set flow info */
+	/* Set flow info. */
 	cfspi->flow_off_sent = 0;
 	cfspi->qd_low_mark = LOW_WATER_MARK;
 	cfspi->qd_high_mark = HIGH_WATER_MARK;
 
+	/* Set slave info. */
+	if (!strncmp(cfspi_spi_driver.driver.name, "cfspi_sspi", 10)) {
+		cfspi->slave = true;
+		cfspi->slave_talked = false;
+	} else {
+		cfspi->slave = false;
+		cfspi->slave_talked = false;
+	}
+
 	/* Assign the SPI device. */
 	cfspi->dev = dev;
 	/* Assign the device ifc to this SPI interface. */
diff --git a/drivers/net/caif/caif_spi_slave.c b/drivers/net/caif/caif_spi_slave.c
index 2111dbf..1b9943a 100644
--- a/drivers/net/caif/caif_spi_slave.c
+++ b/drivers/net/caif/caif_spi_slave.c
@@ -36,10 +36,15 @@
 #endif
 
 int spi_frm_align = 2;
-int spi_up_head_align = 1;
-int spi_up_tail_align;
-int spi_down_head_align = 3;
-int spi_down_tail_align = 1;
+
+/*
+ * SPI padding options.
+ * Warning: must be a power of 2 (an & operation is used) and cannot be zero!
+ */
+int spi_up_head_align   = 1 << 1;
+int spi_up_tail_align   = 1 << 0;
+int spi_down_head_align = 1 << 2;
+int spi_down_tail_align = 1 << 1;
 
 #ifdef CONFIG_DEBUG_FS
 static inline void debugfs_store_prev(struct cfspi *cfspi)
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 407d4e2..046d846 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -3341,7 +3341,6 @@
 				adapter->name = adapter->port[i]->name;
 
 			__set_bit(i, &adapter->registered_device_map);
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
index f17703f..f50bc98 100644
--- a/drivers/net/cxgb4/cxgb4_main.c
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -3736,7 +3736,6 @@
 
 			__set_bit(i, &adapter->registered_device_map);
 			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
-			netif_tx_stop_all_queues(adapter->port[i]);
 		}
 	}
 	if (!adapter->registered_device_map) {
diff --git a/drivers/net/cxgb4vf/cxgb4vf_main.c b/drivers/net/cxgb4vf/cxgb4vf_main.c
index 555ecc5..6de5e2e 100644
--- a/drivers/net/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/cxgb4vf/cxgb4vf_main.c
@@ -2600,7 +2600,6 @@
 		pi->xact_addr_filt = -1;
 		pi->rx_offload = RX_CSO;
 		netif_carrier_off(netdev);
-		netif_tx_stop_all_queues(netdev);
 		netdev->irq = pdev->irq;
 
 		netdev->features = (NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 385dc32..06bb9b7 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2871,7 +2871,6 @@
 	SET_ETHTOOL_OPS(ndev, &emac_ethtool_ops);
 
 	netif_carrier_off(ndev);
-	netif_stop_queue(ndev);
 
 	err = register_netdev(ndev);
 	if (err) {
diff --git a/drivers/net/jme.c b/drivers/net/jme.c
index d85edf3..c57d9a4 100644
--- a/drivers/net/jme.c
+++ b/drivers/net/jme.c
@@ -2955,11 +2955,7 @@
 	 * Tell stack that we are not ready to work until open()
 	 */
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
-	/*
-	 * Register netdev
-	 */
 	rc = register_netdev(netdev);
 	if (rc) {
 		pr_err("Cannot register net device\n");
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index a75ba95..e1d30d7 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -41,9 +41,6 @@
 MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
-MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
-MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
 MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
 
 char netxen_nic_driver_name[] = "netxen_nic";
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 7a298cd..a3dcd04 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -1450,7 +1450,6 @@
 	netdev->irq = adapter->msix_entries[0].vector;
 
 	netif_carrier_off(netdev);
-	netif_stop_queue(netdev);
 
 	err = register_netdev(netdev);
 	if (err) {
diff --git a/drivers/net/smsc911x.h b/drivers/net/smsc911x.h
index 52f38e1..50f712e 100644
--- a/drivers/net/smsc911x.h
+++ b/drivers/net/smsc911x.h
@@ -22,7 +22,7 @@
 #define __SMSC911X_H__
 
 #define TX_FIFO_LOW_THRESHOLD	((u32)1600)
-#define SMSC911X_EEPROM_SIZE	((u32)7)
+#define SMSC911X_EEPROM_SIZE	((u32)128)
 #define USE_DEBUG		0
 
 /* This is the maximum number of packets to be received every
diff --git a/drivers/net/tulip/de2104x.c b/drivers/net/tulip/de2104x.c
index 28e1ffb..c78a505 100644
--- a/drivers/net/tulip/de2104x.c
+++ b/drivers/net/tulip/de2104x.c
@@ -2021,7 +2021,6 @@
 	de->media_timer.data = (unsigned long) de;
 
 	netif_carrier_off(dev);
-	netif_stop_queue(dev);
 
 	/* wake up device, assign resources */
 	rc = pci_enable_device(pdev);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca7fc9d..c04d49e 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -45,6 +45,7 @@
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
 #include <linux/kernel.h>
+#include <linux/pm_runtime.h>
 
 #define DRIVER_VERSION		"22-Aug-2005"
 
@@ -1273,6 +1274,16 @@
 	struct usb_device		*xdev;
 	int				status;
 	const char			*name;
+	struct usb_driver 	*driver = to_usb_driver(udev->dev.driver);
+
+	/* usbnet already manages USB runtime PM itself, so the feature has to
+	 * be enabled for the USB interface driver too; otherwise
+	 * usb_autopm_get_interface() may fail if USB_SUSPEND (runtime PM) is enabled.
+	 */
+	if (!driver->supports_autosuspend) {
+		driver->supports_autosuspend = 1;
+		pm_runtime_enable(&udev->dev);
+	}
 
 	name = udev->dev.driver->name;
 	info = (struct driver_info *) prod->driver_info;
diff --git a/drivers/net/wireless/ipw2x00/libipw_module.c b/drivers/net/wireless/ipw2x00/libipw_module.c
index 32dee2c..d5ef696 100644
--- a/drivers/net/wireless/ipw2x00/libipw_module.c
+++ b/drivers/net/wireless/ipw2x00/libipw_module.c
@@ -54,6 +54,7 @@
 
 #define DRV_DESCRIPTION "802.11 data/management/control stack"
 #define DRV_NAME        "libipw"
+#define DRV_PROCNAME	"ieee80211"
 #define DRV_VERSION	LIBIPW_VERSION
 #define DRV_COPYRIGHT   "Copyright (C) 2004-2005 Intel Corporation <jketreno@linux.intel.com>"
 
@@ -293,16 +294,16 @@
 	struct proc_dir_entry *e;
 
 	libipw_debug_level = debug;
-	libipw_proc = proc_mkdir("ieee80211", init_net.proc_net);
+	libipw_proc = proc_mkdir(DRV_PROCNAME, init_net.proc_net);
 	if (libipw_proc == NULL) {
-		LIBIPW_ERROR("Unable to create " DRV_NAME
+		LIBIPW_ERROR("Unable to create " DRV_PROCNAME
 				" proc directory\n");
 		return -EIO;
 	}
 	e = proc_create("debug_level", S_IRUGO | S_IWUSR, libipw_proc,
 			&debug_level_proc_fops);
 	if (!e) {
-		remove_proc_entry(DRV_NAME, init_net.proc_net);
+		remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
 		libipw_proc = NULL;
 		return -EIO;
 	}
@@ -319,7 +320,7 @@
 #ifdef CONFIG_LIBIPW_DEBUG
 	if (libipw_proc) {
 		remove_proc_entry("debug_level", libipw_proc);
-		remove_proc_entry(DRV_NAME, init_net.proc_net);
+		remove_proc_entry(DRV_PROCNAME, init_net.proc_net);
 		libipw_proc = NULL;
 	}
 #endif				/* CONFIG_LIBIPW_DEBUG */
diff --git a/drivers/s390/scsi/zfcp_fc.h b/drivers/s390/scsi/zfcp_fc.h
index 938d503..b464ae0 100644
--- a/drivers/s390/scsi/zfcp_fc.h
+++ b/drivers/s390/scsi/zfcp_fc.h
@@ -270,7 +270,7 @@
 	if (unlikely(rsp_flags & FCP_SNS_LEN_VAL)) {
 		sense = (char *) &fcp_rsp[1];
 		if (rsp_flags & FCP_RSP_LEN_VAL)
-			sense += fcp_rsp->ext.fr_sns_len;
+			sense += fcp_rsp->ext.fr_rsp_len;
 		sense_len = min(fcp_rsp->ext.fr_sns_len,
 				(u32) SCSI_SENSE_BUFFERSIZE);
 		memcpy(scsi->sense_buffer, sense, sense_len);
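The zfcp fix above corrects the offset of the sense data: when FCP_RSP_LEN_VAL is set, the variable-length response information (fr_rsp_len bytes) sits between the fixed response header and the sense data, so the sense pointer must be advanced by the response-info length, not by the sense length. A simplified sketch of the layout this relies on (comment only; the two statements restate the fixed code):

```c
/*
 * Simplified FCP_RSP IU layout assumed by the fix above:
 *
 *   [ fixed response header + extension (fr_rsp_len, fr_sns_len) ]
 *   [ response information, fr_rsp_len bytes, if FCP_RSP_LEN_VAL ]
 *   [ sense data, fr_sns_len bytes, if FCP_SNS_LEN_VAL           ]
 *
 * Hence the sense pointer starts right after the fixed part and is
 * advanced by fr_rsp_len when response information is present:
 */
sense = (char *) &fcp_rsp[1];
if (rsp_flags & FCP_RSP_LEN_VAL)
	sense += fcp_rsp->ext.fr_rsp_len;
```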
diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c
index beaf091..be03174 100644
--- a/drivers/s390/scsi/zfcp_fsf.c
+++ b/drivers/s390/scsi/zfcp_fsf.c
@@ -532,9 +532,6 @@
 		fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN;
 		adapter->hydra_version = 0;
 
-		atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK,
-				&adapter->status);
-
 		zfcp_fsf_link_down_info_eval(req,
 			&qtcb->header.fsf_status_qual.link_down_info);
 		break;
diff --git a/drivers/s390/scsi/zfcp_unit.c b/drivers/s390/scsi/zfcp_unit.c
index 1119c53..20796eb 100644
--- a/drivers/s390/scsi/zfcp_unit.c
+++ b/drivers/s390/scsi/zfcp_unit.c
@@ -142,6 +142,8 @@
 		return -ENOMEM;
 	}
 
+	get_device(&port->dev);
+
 	if (device_register(&unit->dev)) {
 		put_device(&unit->dev);
 		return -ENOMEM;
@@ -152,8 +154,6 @@
 		return -EINVAL;
 	}
 
-	get_device(&port->dev);
-
 	write_lock_irq(&port->unit_list_lock);
 	list_add_tail(&unit->list, &port->unit_list);
 	write_unlock_irq(&port->unit_list_lock);
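The zfcp_unit hunk moves get_device(&port->dev) ahead of device_register(), presumably because the unit's release callback is expected to drop that port reference: once device_register() has been called, every failure path goes through put_device(&unit->dev) and may invoke the release function, so the parent reference must already be held at that point. A short sketch of the ordering (a hedged reading of the change, not additional driver code):

```c
/* Take the parent reference before anything that can reach the
 * child's release callback, which is assumed to drop it. */
get_device(&port->dev);

if (device_register(&unit->dev)) {
	/* release callback now owns (and will drop) the port reference */
	put_device(&unit->dev);
	return -ENOMEM;
}
```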
diff --git a/drivers/scsi/bfa/bfa.h b/drivers/scsi/bfa/bfa.h
index ceaac65..ff2bd07 100644
--- a/drivers/scsi/bfa/bfa.h
+++ b/drivers/scsi/bfa/bfa.h
@@ -29,13 +29,13 @@
 typedef void (*bfa_isr_func_t) (struct bfa_s *bfa, struct bfi_msg_s *m);
 typedef void    (*bfa_cb_cbfn_t) (void *cbarg, bfa_boolean_t complete);
 
-/**
+/*
  * Interrupt message handlers
  */
 void bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m);
 void bfa_isr_bind(enum bfi_mclass mc, bfa_isr_func_t isr_func);
 
-/**
+/*
  * Request and response queue related defines
  */
 #define BFA_REQQ_NELEMS_MIN	(4)
@@ -58,9 +58,9 @@
 #define bfa_reqq_produce(__bfa, __reqq)	do {				\
 		(__bfa)->iocfc.req_cq_pi[__reqq]++;			\
 		(__bfa)->iocfc.req_cq_pi[__reqq] &=			\
-			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1);      \
-		bfa_reg_write((__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq],	\
-			      (__bfa)->iocfc.req_cq_pi[__reqq]);      \
+			((__bfa)->iocfc.cfg.drvcfg.num_reqq_elems - 1); \
+		writel((__bfa)->iocfc.req_cq_pi[__reqq],		\
+			(__bfa)->iocfc.bfa_regs.cpe_q_pi[__reqq]);	\
 		mmiowb();      \
 	} while (0)
 
@@ -76,7 +76,7 @@
 	(__index) &= ((__size) - 1);			\
 } while (0)
 
-/**
+/*
  * Queue element to wait for room in request queue. FIFO order is
  * maintained when fulfilling requests.
  */
@@ -86,7 +86,7 @@
 	void		*cbarg;
 };
 
-/**
+/*
  * Circular queue usage assignments
  */
 enum {
@@ -113,7 +113,7 @@
 
 #define bfa_reqq(__bfa, __reqq)	(&(__bfa)->reqq_waitq[__reqq])
 
-/**
+/*
  * static inline void
  * bfa_reqq_wait(struct bfa_s *bfa, int reqq, struct bfa_reqq_wait_s *wqe)
  */
@@ -130,7 +130,7 @@
 #define bfa_reqq_wcancel(__wqe)	list_del(&(__wqe)->qe)
 
 
-/**
+/*
  * Generic BFA callback element.
  */
 struct bfa_cb_qe_s {
@@ -163,7 +163,7 @@
 	} while (0)
 
 
-/**
+/*
  * PCI devices supported by the current BFA
  */
 struct bfa_pciid_s {
@@ -173,7 +173,7 @@
 
 extern char     bfa_version[];
 
-/**
+/*
  * BFA memory resources
  */
 enum bfa_mem_type {
@@ -202,19 +202,19 @@
 	((_m)->meminfo[BFA_MEM_TYPE_DMA - 1].dma_curp)
 
 struct bfa_iocfc_regs_s {
-	bfa_os_addr_t   intr_status;
-	bfa_os_addr_t   intr_mask;
-	bfa_os_addr_t   cpe_q_pi[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_ci[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_depth[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   cpe_q_ctrl[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_ci[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_pi[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_depth[BFI_IOC_MAX_CQS];
-	bfa_os_addr_t   rme_q_ctrl[BFI_IOC_MAX_CQS];
+	void __iomem	*intr_status;
+	void __iomem	*intr_mask;
+	void __iomem	*cpe_q_pi[BFI_IOC_MAX_CQS];
+	void __iomem	*cpe_q_ci[BFI_IOC_MAX_CQS];
+	void __iomem	*cpe_q_depth[BFI_IOC_MAX_CQS];
+	void __iomem	*cpe_q_ctrl[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_ci[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_pi[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_depth[BFI_IOC_MAX_CQS];
+	void __iomem	*rme_q_ctrl[BFI_IOC_MAX_CQS];
 };
 
-/**
+/*
  * MSIX vector handlers
  */
 #define BFA_MSIX_MAX_VECTORS	22
@@ -224,7 +224,7 @@
 	bfa_msix_handler_t handler[BFA_MSIX_MAX_VECTORS];
 };
 
-/**
+/*
  * Chip specific interfaces
  */
 struct bfa_hwif_s {
@@ -343,7 +343,7 @@
 				struct bfi_pbc_vport_s *pbc_vport);
 
 
-/**
+/*
  *----------------------------------------------------------------------
  *		BFA public interfaces
  *----------------------------------------------------------------------
diff --git a/drivers/scsi/bfa/bfa_cb_ioim.h b/drivers/scsi/bfa/bfa_cb_ioim.h
index a989a94..6f02101 100644
--- a/drivers/scsi/bfa/bfa_cb_ioim.h
+++ b/drivers/scsi/bfa/bfa_cb_ioim.h
@@ -37,18 +37,18 @@
 	} lun;
 
 	lun.bfa_lun     = 0;
-	lun.scsi_lun[0] = bfa_os_htons(luno);
+	lun.scsi_lun[0] = cpu_to_be16(luno);
 
 	return lun.bfa_lun;
 }
 
-/**
+/*
  * Get LUN for the I/O request
  */
 #define bfa_cb_ioim_get_lun(__dio)	\
 	bfad_int_to_lun(((struct scsi_cmnd *)__dio)->device->lun)
 
-/**
+/*
  * Get CDB for the I/O request
  */
 static inline u8 *
@@ -59,7 +59,7 @@
 	return (u8 *) cmnd->cmnd;
 }
 
-/**
+/*
  * Get I/O direction (read/write) for the I/O request
  */
 static inline enum fcp_iodir
@@ -77,7 +77,7 @@
 		return FCP_IODIR_NONE;
 }
 
-/**
+/*
  * Get IO size in bytes for the I/O request
  */
 static inline u32
@@ -88,7 +88,7 @@
 	return scsi_bufflen(cmnd);
 }
 
-/**
+/*
  * Get timeout for the I/O request
  */
 static inline u8
@@ -104,7 +104,7 @@
 	return 0;
 }
 
-/**
+/*
  * Get Command Reference Number for the I/O request. 0 if none.
  */
 static inline u8
@@ -113,7 +113,7 @@
 	return 0;
 }
 
-/**
+/*
  * Get SAM-3 priority for the I/O request. 0 is default.
  */
 static inline u8
@@ -122,7 +122,7 @@
 	return 0;
 }
 
-/**
+/*
  * Get task attributes for the I/O request. Default is FCP_TASK_ATTR_SIMPLE(0).
  */
 static inline u8
@@ -148,7 +148,7 @@
 	return task_attr;
 }
 
-/**
+/*
  * Get CDB length in bytes for the I/O request. Default is FCP_CMND_CDB_LEN(16).
  */
 static inline u8
@@ -159,7 +159,7 @@
 	return cmnd->cmd_len;
 }
 
-/**
+/*
  * Assign queue to be used for the I/O request. This value depends on whether
  * the driver wants to use the queues via any specific algorithm. Currently,
  * this is not supported.
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index c2fa07f..2345f48 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -21,11 +21,11 @@
 
 BFA_TRC_FILE(HAL, CORE);
 
-/**
+/*
  * BFA IOC FC related definitions
  */
 
-/**
+/*
  * IOC local definitions
  */
 #define BFA_IOCFC_TOV		5000	/* msecs */
@@ -54,7 +54,7 @@
 #define DEF_CFG_NUM_SBOOT_TGTS		16
 #define DEF_CFG_NUM_SBOOT_LUNS		16
 
-/**
+/*
  * forward declaration for IOC FC functions
  */
 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
@@ -63,7 +63,7 @@
 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
 
-/**
+/*
  * BFA Interrupt handling functions
  */
 static void
@@ -86,7 +86,7 @@
 
 	waitq = bfa_reqq(bfa, qid);
 	list_for_each_safe(qe, qen, waitq) {
-		/**
+		/*
 		 * Callback only as long as there is room in request queue
 		 */
 		if (bfa_reqq_full(bfa, qid))
@@ -104,7 +104,7 @@
 	bfa_intx(bfa);
 }
 
-/**
+/*
  *  hal_intr_api
  */
 bfa_boolean_t
@@ -113,15 +113,15 @@
 	u32 intr, qintr;
 	int queue;
 
-	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 	if (!intr)
 		return BFA_FALSE;
 
-	/**
+	/*
 	 * RME completion queue interrupt
 	 */
 	qintr = intr & __HFN_INT_RME_MASK;
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+	writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
 		if (intr & (__HFN_INT_RME_Q0 << queue))
@@ -131,11 +131,11 @@
 	if (!intr)
 		return BFA_TRUE;
 
-	/**
+	/*
 	 * CPE completion queue interrupt
 	 */
 	qintr = intr & __HFN_INT_CPE_MASK;
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, qintr);
+	writel(qintr, bfa->iocfc.bfa_regs.intr_status);
 
 	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
 		if (intr & (__HFN_INT_CPE_Q0 << queue))
@@ -153,13 +153,13 @@
 void
 bfa_intx_enable(struct bfa_s *bfa)
 {
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, bfa->iocfc.intr_mask);
+	writel(bfa->iocfc.intr_mask, bfa->iocfc.bfa_regs.intr_mask);
 }
 
 void
 bfa_intx_disable(struct bfa_s *bfa)
 {
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
 }
 
 void
@@ -188,8 +188,8 @@
 				__HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
 				__HFN_INT_MBOX_LPU1);
 
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr_unmask);
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, ~intr_unmask);
+	writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
+	writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
 	bfa->iocfc.intr_mask = ~intr_unmask;
 	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
 }
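The bfa conversion above replaces the driver-private accessors bfa_reg_read()/bfa_reg_write(addr, val) with the standard readl()/writel(val, addr) MMIO helpers; note that writel() takes the value first and the __iomem address second, which is why every call site swaps its arguments and why the bfa_os_addr_t register fields become void __iomem * pointers. A hedged sketch of the equivalent, using a made-up register block rather than the bfa structures:

```c
#include <linux/io.h>

/* Hypothetical register block: both pointers are ioremap()ed MMIO. */
struct demo_regs {
	void __iomem *intr_status;
	void __iomem *intr_mask;
};

static void demo_ack_and_mask(struct demo_regs *regs)
{
	u32 status = readl(regs->intr_status);	/* old: bfa_reg_read(addr) */

	writel(status, regs->intr_status);	/* old: bfa_reg_write(addr, val) */
	writel(~0U, regs->intr_mask);		/* mask all interrupt sources */
}
```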
@@ -198,7 +198,7 @@
 bfa_isr_disable(struct bfa_s *bfa)
 {
 	bfa_isr_mode_set(bfa, BFA_FALSE);
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_mask, -1L);
+	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
 	bfa_msix_uninstall(bfa);
 }
 
@@ -211,7 +211,7 @@
 
 	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
 
-	/**
+	/*
 	 * Resume any pending requests in the corresponding reqq.
 	 */
 	waitq = bfa_reqq(bfa, qid);
@@ -259,14 +259,14 @@
 		}
 	}
 
-	/**
+	/*
 	 * update CI
 	 */
 	bfa_rspq_ci(bfa, qid) = pi;
-	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
+	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
 	mmiowb();
 
-	/**
+	/*
 	 * Resume any pending requests in the corresponding reqq.
 	 */
 	waitq = bfa_reqq(bfa, qid);
@@ -279,7 +279,7 @@
 {
 	u32 intr, curr_value;
 
-	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
+	intr = readl(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
 		bfa_msix_lpu(bfa);
@@ -289,30 +289,30 @@
 
 	if (intr) {
 		if (intr & __HFN_INT_LL_HALT) {
-			/**
+			/*
 			 * If LL_HALT bit is set then FW Init Halt LL Port
 			 * Register needs to be cleared as well so Interrupt
 			 * Status Register will be cleared.
 			 */
-			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
 			curr_value &= ~__FW_INIT_HALT_P;
-			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
 		}
 
 		if (intr & __HFN_INT_ERR_PSS) {
-			/**
+			/*
 			 * ERR_PSS bit needs to be cleared as well in case
 			 * interrupts are shared, so the driver's interrupt handler is
 			 * still called even though it is already masked out.
 			 */
-			curr_value = bfa_reg_read(
+			curr_value = readl(
 					bfa->ioc.ioc_regs.pss_err_status_reg);
 			curr_value &= __PSS_ERR_STATUS_SET;
-			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
-					curr_value);
+			writel(curr_value,
+				bfa->ioc.ioc_regs.pss_err_status_reg);
 		}
 
-		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
+		writel(intr, bfa->iocfc.bfa_regs.intr_status);
 		bfa_msix_errint(bfa, intr);
 	}
 }
@@ -323,11 +323,11 @@
 	bfa_isrs[mc] = isr_func;
 }
 
-/**
+/*
  * BFA IOC FC related functions
  */
 
-/**
+/*
  *  hal_ioc_pvt BFA IOC private functions
  */
 
@@ -366,7 +366,7 @@
 			    BFA_CACHELINE_SZ);
 }
 
-/**
+/*
  * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
  */
 static void
@@ -384,14 +384,14 @@
 
 	bfa_iocfc_reset_queues(bfa);
 
-	/**
+	/*
 	 * initialize IOC configuration info
 	 */
 	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
 	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
 
 	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
-	/**
+	/*
 	 * dma map REQ and RSP circular queues and shadow pointers
 	 */
 	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
@@ -400,17 +400,17 @@
 		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
 				    iocfc->req_cq_shadow_ci[i].pa);
 		cfg_info->req_cq_elems[i] =
-			bfa_os_htons(cfg->drvcfg.num_reqq_elems);
+			cpu_to_be16(cfg->drvcfg.num_reqq_elems);
 
 		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
 				    iocfc->rsp_cq_ba[i].pa);
 		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
 				    iocfc->rsp_cq_shadow_pi[i].pa);
 		cfg_info->rsp_cq_elems[i] =
-			bfa_os_htons(cfg->drvcfg.num_rspq_elems);
+			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
 	}
 
-	/**
+	/*
 	 * Enable interrupt coalescing if it is driver init path
 	 * and not ioc disable/enable path.
 	 */
@@ -419,7 +419,7 @@
 
 	iocfc->cfgdone = BFA_FALSE;
 
-	/**
+	/*
 	 * dma map IOC configuration itself
 	 */
 	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
@@ -440,9 +440,9 @@
 	iocfc->bfa = bfa;
 	iocfc->action = BFA_IOCFC_ACT_NONE;
 
-	bfa_os_assign(iocfc->cfg, *cfg);
+	iocfc->cfg = *cfg;
 
-	/**
+	/*
 	 * Initialize chip specific handlers.
 	 */
 	if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
@@ -503,13 +503,13 @@
 	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
 		iocfc->req_cq_ba[i].kva = dm_kva;
 		iocfc->req_cq_ba[i].pa = dm_pa;
-		bfa_os_memset(dm_kva, 0, per_reqq_sz);
+		memset(dm_kva, 0, per_reqq_sz);
 		dm_kva += per_reqq_sz;
 		dm_pa += per_reqq_sz;
 
 		iocfc->rsp_cq_ba[i].kva = dm_kva;
 		iocfc->rsp_cq_ba[i].pa = dm_pa;
-		bfa_os_memset(dm_kva, 0, per_rspq_sz);
+		memset(dm_kva, 0, per_rspq_sz);
 		dm_kva += per_rspq_sz;
 		dm_pa += per_rspq_sz;
 	}
@@ -559,7 +559,7 @@
 	}
 }
 
-/**
+/*
  * Start BFA submodules.
  */
 static void
@@ -573,7 +573,7 @@
 		hal_mods[i]->start(bfa);
 }
 
-/**
+/*
  * Disable BFA submodules.
  */
 static void
@@ -623,7 +623,7 @@
 		complete(&bfad->disable_comp);
 }
 
-/**
+/*
  * Update BFA configuration from firmware configuration.
  */
 static void
@@ -634,15 +634,15 @@
 	struct bfa_iocfc_fwcfg_s	*fwcfg	 = &cfgrsp->fwcfg;
 
 	fwcfg->num_cqs	      = fwcfg->num_cqs;
-	fwcfg->num_ioim_reqs  = bfa_os_ntohs(fwcfg->num_ioim_reqs);
-	fwcfg->num_tskim_reqs = bfa_os_ntohs(fwcfg->num_tskim_reqs);
-	fwcfg->num_fcxp_reqs  = bfa_os_ntohs(fwcfg->num_fcxp_reqs);
-	fwcfg->num_uf_bufs    = bfa_os_ntohs(fwcfg->num_uf_bufs);
-	fwcfg->num_rports     = bfa_os_ntohs(fwcfg->num_rports);
+	fwcfg->num_ioim_reqs  = be16_to_cpu(fwcfg->num_ioim_reqs);
+	fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
+	fwcfg->num_fcxp_reqs  = be16_to_cpu(fwcfg->num_fcxp_reqs);
+	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
+	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
 
 	iocfc->cfgdone = BFA_TRUE;
 
-	/**
+	/*
 	 * Configuration is complete - initialize/start submodules
 	 */
 	bfa_fcport_init(bfa);
@@ -665,7 +665,7 @@
 	}
 }
 
-/**
+/*
  * IOC enable request is complete
  */
 static void
@@ -684,7 +684,7 @@
 	bfa_iocfc_send_cfg(bfa);
 }
 
-/**
+/*
  * IOC disable request is complete
  */
 static void
@@ -705,7 +705,7 @@
 	}
 }
 
-/**
+/*
  * Notify sub-modules of hardware failure.
  */
 static void
@@ -723,7 +723,7 @@
 			     bfa);
 }
 
-/**
+/*
  * Actions on chip-reset completion.
  */
 static void
@@ -735,11 +735,11 @@
 	bfa_isr_enable(bfa);
 }
 
-/**
+/*
  *  hal_ioc_public
  */
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -754,7 +754,7 @@
 	*km_len += bfa_ioc_debug_trcsz(bfa_auto_recover);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -772,7 +772,7 @@
 	ioc->trcmod = bfa->trcmod;
 	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);
 
-	/**
+	/*
 	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
 	 */
 	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
@@ -790,7 +790,7 @@
 		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -799,7 +799,7 @@
 	bfa_ioc_detach(&bfa->ioc);
 }
 
-/**
+/*
  * Query IOC memory requirement information.
  */
 void
@@ -809,7 +809,7 @@
 	bfa_ioc_enable(&bfa->ioc);
 }
 
-/**
+/*
  * IOC start called from bfa_start(). Called to start IOC operations
  * at driver instantiation for this instance.
  */
@@ -820,7 +820,7 @@
 		bfa_iocfc_start_submod(bfa);
 }
 
-/**
+/*
  * IOC stop called from bfa_stop(). Called only when driver is unloaded
  * for this instance.
  */
@@ -876,12 +876,12 @@
 	attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
 
 	attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
-				bfa_os_ntohs(iocfc->cfginfo->intr_attr.delay) :
-				bfa_os_ntohs(iocfc->cfgrsp->intr_attr.delay);
+				be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
+				be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
 
 	attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
-			bfa_os_ntohs(iocfc->cfginfo->intr_attr.latency) :
-			bfa_os_ntohs(iocfc->cfgrsp->intr_attr.latency);
+			be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
+			be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
 
 	attr->config	= iocfc->cfg;
 }
@@ -893,8 +893,8 @@
 	struct bfi_iocfc_set_intr_req_s *m;
 
 	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
-	iocfc->cfginfo->intr_attr.delay = bfa_os_htons(attr->delay);
-	iocfc->cfginfo->intr_attr.latency = bfa_os_htons(attr->latency);
+	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
+	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
 
 	if (!bfa_iocfc_is_operational(bfa))
 		return BFA_STATUS_OK;
@@ -924,7 +924,7 @@
 	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
 	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
 }
-/**
+/*
  * Enable IOC after it is disabled.
  */
 void
@@ -953,7 +953,7 @@
 	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
 }
 
-/**
+/*
  * Return boot target port wwns -- read from boot information in flash.
  */
 void
@@ -998,11 +998,11 @@
 	return cfgrsp->pbc_cfg.nvports;
 }
 
-/**
+/*
  *  hal_api
  */
 
-/**
+/*
  * Use this function query the memory requirement of the BFA library.
  * This function needs to be called before bfa_attach() to get the
  * memory required of the BFA layer for a given driver configuration.
@@ -1038,7 +1038,7 @@
 
 	bfa_assert((cfg != NULL) && (meminfo != NULL));
 
-	bfa_os_memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
+	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
 	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
 		BFA_MEM_TYPE_KVA;
 	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
@@ -1055,7 +1055,7 @@
 	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
 }
 
-/**
+/*
  * Use this function to do attach the driver instance with the BFA
  * library. This function will not trigger any HW initialization
  * process (which will be done in bfa_init() call)
@@ -1092,7 +1092,7 @@
 
 	bfa_assert((cfg != NULL) && (meminfo != NULL));
 
-	/**
+	/*
 	 * initialize all memory pointers for iterative allocation
 	 */
 	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
@@ -1109,7 +1109,7 @@
 	bfa_com_port_attach(bfa, meminfo);
 }
 
-/**
+/*
  * Use this function to delete a BFA IOC. IOC should be stopped (by
  * calling bfa_stop()) before this function call.
  *
@@ -1146,7 +1146,7 @@
 	bfa->plog = plog;
 }
 
-/**
+/*
  * Initialize IOC.
  *
  * This function will return immediately, when the IOC initialization is
@@ -1169,7 +1169,7 @@
 	bfa_iocfc_init(bfa);
 }
 
-/**
+/*
  * Use this function initiate the IOC configuration setup. This function
  * will return immediately.
  *
@@ -1183,7 +1183,7 @@
 	bfa_iocfc_start(bfa);
 }
 
-/**
+/*
  * Use this function quiese the IOC. This function will return immediately,
  * when the IOC is actually stopped, the bfad->comp will be set.
  *
@@ -1243,7 +1243,7 @@
 	bfa->fcs = BFA_TRUE;
 }
 
-/**
+/*
  * Periodic timer heart beat from driver
  */
 void
@@ -1252,7 +1252,7 @@
 	bfa_timer_beat(&bfa->timer_mod);
 }
 
-/**
+/*
  * Return the list of PCI vendor/device id lists supported by this
  * BFA instance.
  */
@@ -1270,7 +1270,7 @@
 	*pciids = __pciids;
 }
 
-/**
+/*
  * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
  * into BFA layer). The OS driver can then turn back and overwrite entries that
  * have been configured by the user.
@@ -1328,7 +1328,7 @@
 	bfa_ioc_get_attr(&bfa->ioc, ioc_attr);
 }
 
-/**
+/*
  * Retrieve firmware trace information on IOC failure.
  */
 bfa_status_t
@@ -1337,7 +1337,7 @@
 	return bfa_ioc_debug_fwsave(&bfa->ioc, trcdata, trclen);
 }
 
-/**
+/*
  * Clear the saved firmware trace information of an IOC.
  */
 void
@@ -1346,7 +1346,7 @@
 	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
 }
 
-/**
+/*
  * Fetch firmware trace data.
  *
  * @param[in]		bfa			BFA instance
@@ -1362,7 +1362,7 @@
 	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
 }
 
-/**
+/*
  * Dump firmware memory.
  *
  * @param[in]		bfa		BFA instance
@@ -1378,7 +1378,7 @@
 {
 	return bfa_ioc_debug_fwcore(&bfa->ioc, buf, offset, buflen);
 }
-/**
+/*
  * Reset hw semaphore & usage cnt regs and initialize.
  */
 void
@@ -1388,7 +1388,7 @@
 	bfa_ioc_pll_init(&bfa->ioc);
 }
 
-/**
+/*
  * Fetch firmware statistics data.
  *
  * @param[in]		bfa		BFA instance
diff --git a/drivers/scsi/bfa/bfa_cs.h b/drivers/scsi/bfa/bfa_cs.h
index 7260c74..99f242b 100644
--- a/drivers/scsi/bfa/bfa_cs.h
+++ b/drivers/scsi/bfa/bfa_cs.h
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_cs.h BFA common services
  */
 
@@ -24,7 +24,7 @@
 
 #include "bfa_os_inc.h"
 
-/**
+/*
  * BFA TRC
  */
 
@@ -73,7 +73,7 @@
 #define BFA_TRC_MOD_SH	10
 #define BFA_TRC_MOD(__mod)	((BFA_TRC_ ## __mod) << BFA_TRC_MOD_SH)
 
-/**
+/*
  * Define a new tracing file (module). Module should match one defined above.
  */
 #define BFA_TRC_FILE(__mod, __submod)					\
@@ -155,7 +155,7 @@
 #define bfa_trc_fp(_trcp, _data)
 #endif
 
-/**
+/*
  * @ BFA LOG interfaces
  */
 #define bfa_assert(__cond)	do {					\
@@ -249,13 +249,13 @@
 #define bfa_q_is_on_q(_q, _qe)      \
 	bfa_q_is_on_q_func(_q, (struct list_head *)(_qe))
 
-/**
+/*
  * @ BFA state machine interfaces
  */
 
 typedef void (*bfa_sm_t)(void *sm, int event);
 
-/**
+/*
  * oc - object class eg. bfa_ioc
  * st - state, eg. reset
  * otype - object type, eg. struct bfa_ioc_s
@@ -269,7 +269,7 @@
 #define bfa_sm_get_state(_sm)		((_sm)->sm)
 #define bfa_sm_cmp_state(_sm, _state)	((_sm)->sm == (bfa_sm_t)(_state))
 
-/**
+/*
  * For converting from state machine function to state encoding.
  */
 struct bfa_sm_table_s {
@@ -279,12 +279,12 @@
 };
 #define BFA_SM(_sm)	((bfa_sm_t)(_sm))
 
-/**
+/*
  * State machine with entry actions.
  */
 typedef void (*bfa_fsm_t)(void *fsm, int event);
 
-/**
+/*
  * oc - object class eg. bfa_ioc
  * st - state, eg. reset
  * otype - object type, eg. struct bfa_ioc_s
@@ -314,7 +314,7 @@
 	return smt[i].state;
 }
 
-/**
+/*
  * @ Generic wait counter.
  */
 
@@ -340,7 +340,7 @@
 		wc->wc_resume(wc->wc_cbarg);
 }
 
-/**
+/*
  * Initialize a waiting counter.
  */
 static inline void
@@ -352,7 +352,7 @@
 	bfa_wc_up(wc);
 }
 
-/**
+/*
  * Wait for counter to reach zero
  */
 static inline void
diff --git a/drivers/scsi/bfa/bfa_defs.h b/drivers/scsi/bfa/bfa_defs.h
index d49877f..4b5b9e3 100644
--- a/drivers/scsi/bfa/bfa_defs.h
+++ b/drivers/scsi/bfa/bfa_defs.h
@@ -24,7 +24,7 @@
 #define BFA_MFG_SERIALNUM_SIZE                  11
 #define STRSZ(_n)                               (((_n) + 4) & ~3)
 
-/**
+/*
  * Manufacturing card type
  */
 enum {
@@ -45,7 +45,7 @@
 
 #pragma pack(1)
 
-/**
+/*
  * Check if Mezz card
  */
 #define bfa_mfg_is_mezz(type) (( \
@@ -55,7 +55,7 @@
 	(type) == BFA_MFG_TYPE_LIGHTNING_P0 || \
 	(type) == BFA_MFG_TYPE_LIGHTNING))
 
-/**
+/*
  * Check if the card having old wwn/mac handling
  */
 #define bfa_mfg_is_old_wwn_mac_model(type) (( \
@@ -78,12 +78,12 @@
 	(m)[2] = t & 0xFF;                                      \
 } while (0)
 
-/**
+/*
  * VPD data length
  */
 #define BFA_MFG_VPD_LEN                 512
 
-/**
+/*
  * VPD vendor tag
  */
 enum {
@@ -97,7 +97,7 @@
 	BFA_MFG_VPD_PCI_BRCD    = 0xf8,  /*  PCI VPD Brocade            */
 };
 
-/**
+/*
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_vpd_s {
@@ -112,7 +112,7 @@
 
 #pragma pack()
 
-/**
+/*
  * Status return values
  */
 enum bfa_status {
@@ -167,11 +167,11 @@
 #define BFA_STRING_32	32
 #define BFA_VERSION_LEN 64
 
-/**
+/*
  * ---------------------- adapter definitions ------------
  */
 
-/**
+/*
  * BFA adapter level attributes.
  */
 enum {
@@ -215,7 +215,7 @@
 	u8		trunk_capable;
 };
 
-/**
+/*
  * ---------------------- IOC definitions ------------
  */
 
@@ -224,7 +224,7 @@
 	BFA_IOC_CHIP_REV_LEN	= 8,
 };
 
-/**
+/*
  * Driver and firmware versions.
  */
 struct bfa_ioc_driver_attr_s {
@@ -236,7 +236,7 @@
 	char		ob_ver[BFA_VERSION_LEN];	/*  openboot version */
 };
 
-/**
+/*
  * IOC PCI device attributes
  */
 struct bfa_ioc_pci_attr_s {
@@ -249,7 +249,7 @@
 	char		chip_rev[BFA_IOC_CHIP_REV_LEN];	 /*  chip revision */
 };
 
-/**
+/*
  * IOC states
  */
 enum bfa_ioc_state {
@@ -267,7 +267,7 @@
 	BFA_IOC_ENABLING	= 12,	/*  IOC is being enabled */
 };
 
-/**
+/*
  * IOC firmware stats
  */
 struct bfa_fw_ioc_stats_s {
@@ -279,7 +279,7 @@
 	u32	unknown_reqs;
 };
 
-/**
+/*
  * IOC driver stats
  */
 struct bfa_ioc_drv_stats_s {
@@ -296,7 +296,7 @@
 	u32	enable_replies;
 };
 
-/**
+/*
  * IOC statistics
  */
 struct bfa_ioc_stats_s {
@@ -310,7 +310,7 @@
 	BFA_IOC_TYPE_LL		= 3,
 };
 
-/**
+/*
  * IOC attributes returned in queries
  */
 struct bfa_ioc_attr_s {
@@ -323,11 +323,11 @@
 	u8				rsvd[7];	/*  64bit align    */
 };
 
-/**
+/*
  * ---------------------- mfg definitions ------------
  */
 
-/**
+/*
  * Checksum size
  */
 #define BFA_MFG_CHKSUM_SIZE			16
@@ -340,7 +340,7 @@
 
 #pragma pack(1)
 
-/**
+/*
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_block_s {
@@ -373,11 +373,11 @@
 
 #pragma pack()
 
-/**
+/*
  * ---------------------- pci definitions ------------
  */
 
-/**
+/*
  * PCI device and vendor ID information
  */
 enum {
@@ -392,14 +392,14 @@
 	((devid) == BFA_PCI_DEVICE_ID_CT ||	\
 	 (devid) == BFA_PCI_DEVICE_ID_CT_FC)
 
-/**
+/*
  * PCI sub-system device and vendor ID information
  */
 enum {
 	BFA_PCI_FCOE_SSDEVICE_ID	= 0x14,
 };
 
-/**
+/*
  * Maximum number of device address ranges mapped through different BAR(s)
  */
 #define BFA_PCI_ACCESS_RANGES 1
@@ -430,7 +430,7 @@
 #define BOOT_CFG_REV1   1
 #define BOOT_CFG_VLAN   1
 
-/**
+/*
  *      Boot options setting. Boot options setting determines from where
  *      to get the boot lun information
  */
@@ -442,7 +442,7 @@
 };
 
 #pragma pack(1)
-/**
+/*
  * Boot lun information.
  */
 struct bfa_boot_bootlun_s {
@@ -451,7 +451,7 @@
 };
 #pragma pack()
 
-/**
+/*
  * BOOT boot configuraton
  */
 struct bfa_boot_pbc_s {
diff --git a/drivers/scsi/bfa/bfa_defs_fcs.h b/drivers/scsi/bfa/bfa_defs_fcs.h
index 96905d3..191d34a 100644
--- a/drivers/scsi/bfa/bfa_defs_fcs.h
+++ b/drivers/scsi/bfa/bfa_defs_fcs.h
@@ -21,7 +21,7 @@
 #include "bfa_fc.h"
 #include "bfa_defs_svc.h"
 
-/**
+/*
  * VF states
  */
 enum bfa_vf_state {
@@ -35,7 +35,7 @@
 	BFA_VF_ISOLATED  = 7,	/*  port isolated due to vf_id mismatch */
 };
 
-/**
+/*
  * VF statistics
  */
 struct bfa_vf_stats_s {
@@ -55,7 +55,7 @@
 	u32	resvd; /*  padding for 64 bit alignment */
 };
 
-/**
+/*
  * VF attributes returned in queries
  */
 struct bfa_vf_attr_s {
@@ -67,7 +67,7 @@
 #define BFA_FCS_MAX_LPORTS 256
 #define BFA_FCS_FABRIC_IPADDR_SZ  16
 
-/**
+/*
  * symbolic names for base port/virtual port
  */
 #define BFA_SYMNAME_MAXLEN	128	/* 128 bytes */
@@ -75,7 +75,7 @@
 	char	    symname[BFA_SYMNAME_MAXLEN];
 };
 
-/**
+/*
 * Roles of FCS port:
  *     - FCP IM and FCP TM roles cannot be enabled together for a FCS port
  *     - Create multiple ports if both IM and TM functions required.
@@ -86,19 +86,19 @@
 	BFA_LPORT_ROLE_FCP_MAX	= BFA_LPORT_ROLE_FCP_IM,
 };
 
-/**
+/*
  * FCS port configuration.
  */
 struct bfa_lport_cfg_s {
     wwn_t	       pwwn;       /*  port wwn */
     wwn_t	       nwwn;       /*  node wwn */
     struct bfa_lport_symname_s  sym_name;   /*  vm port symbolic name */
-	bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
+    bfa_boolean_t       preboot_vp;  /*  vport created from PBC */
     enum bfa_lport_role     roles;      /*  FCS port roles */
     u8	     tag[16];	/*  opaque tag from application */
 };
 
-/**
+/*
  * FCS port states
  */
 enum bfa_lport_state {
@@ -108,7 +108,7 @@
 	BFA_LPORT_OFFLINE = 3,	/*  No login to fabric */
 };
 
-/**
+/*
  * FCS port type.
  */
 enum bfa_lport_type {
@@ -116,7 +116,7 @@
 	BFA_LPORT_TYPE_VIRTUAL,
 };
 
-/**
+/*
  * FCS port offline reason.
  */
 enum bfa_lport_offline_reason {
@@ -128,7 +128,7 @@
 	BFA_LPORT_OFFLINE_FAB_LOGOUT,
 };
 
-/**
+/*
  * FCS lport info.
  */
 struct bfa_lport_info_s {
@@ -150,7 +150,7 @@
 
 };
 
-/**
+/*
  * FCS port statistics
  */
 struct bfa_lport_stats_s {
@@ -222,7 +222,7 @@
 					    * (max retry of plogi) */
 };
 
-/**
+/*
  * BFA port attribute returned in queries
  */
 struct bfa_lport_attr_s {
@@ -239,7 +239,7 @@
 };
 
 
-/**
+/*
  * VPORT states
  */
 enum bfa_vport_state {
@@ -258,7 +258,7 @@
 	BFA_FCS_VPORT_MAX_STATE,
 };
 
-/**
+/*
  * vport statistics
  */
 struct bfa_vport_stats_s {
@@ -296,7 +296,7 @@
 	u32        rsvd;
 };
 
-/**
+/*
  * BFA vport attribute returned in queries
  */
 struct bfa_vport_attr_s {
@@ -305,7 +305,7 @@
 	u32          rsvd;
 };
 
-/**
+/*
  * FCS remote port states
  */
 enum bfa_rport_state {
@@ -321,7 +321,7 @@
 	BFA_RPORT_NSDISC	= 9,	/*  re-discover rport */
 };
 
-/**
+/*
  *  Rport Scsi Function : Initiator/Target.
  */
 enum bfa_rport_function {
@@ -329,7 +329,7 @@
 	BFA_RPORT_TARGET	= 0x02,	/*  SCSI Target	*/
 };
 
-/**
+/*
  * port/node symbolic names for rport
  */
 #define BFA_RPORT_SYMNAME_MAXLEN	255
@@ -337,7 +337,7 @@
 	char            symname[BFA_RPORT_SYMNAME_MAXLEN];
 };
 
-/**
+/*
  * FCS remote port statistics
  */
 struct bfa_rport_stats_s {
@@ -374,7 +374,7 @@
 	struct bfa_rport_hal_stats_s	hal_stats;  /*  BFA rport stats    */
 };
 
-/**
+/*
  * FCS remote port attributes returned in queries
  */
 struct bfa_rport_attr_s {
@@ -411,7 +411,7 @@
 #define BFA_MAX_IO_INDEX 7
 #define BFA_NO_IO_INDEX 9
 
-/**
+/*
  * FCS itnim states
  */
 enum bfa_itnim_state {
@@ -425,7 +425,7 @@
 	BFA_ITNIM_INITIATIOR	= 7,	/*  initiator */
 };
 
-/**
+/*
  * FCS remote port statistics
  */
 struct bfa_itnim_stats_s {
@@ -443,7 +443,7 @@
 	u32	rsvd;		/* padding for 64 bit alignment */
 };
 
-/**
+/*
  * FCS itnim attributes returned in queries
  */
 struct bfa_itnim_attr_s {
diff --git a/drivers/scsi/bfa/bfa_defs_svc.h b/drivers/scsi/bfa/bfa_defs_svc.h
index 56226fc..e24e9f7 100644
--- a/drivers/scsi/bfa/bfa_defs_svc.h
+++ b/drivers/scsi/bfa/bfa_defs_svc.h
@@ -27,7 +27,7 @@
 #define BFA_IOCFCOE_INTR_DELAY	25
 #define BFA_IOCFCOE_INTR_LATENCY 5
 
-/**
+/*
  * Interrupt coalescing configuration.
  */
 #pragma pack(1)
@@ -38,7 +38,7 @@
 	u16	delay;		/*  delay in microseconds     */
 };
 
-/**
+/*
  * IOC firmware configuraton
  */
 struct bfa_iocfc_fwcfg_s {
@@ -71,7 +71,7 @@
 	u32		rsvd;
 };
 
-/**
+/*
  * IOC configuration
  */
 struct bfa_iocfc_cfg_s {
@@ -79,7 +79,7 @@
 	struct bfa_iocfc_drvcfg_s	drvcfg;	/*  driver side config	  */
 };
 
-/**
+/*
  * IOC firmware IO stats
  */
 struct bfa_fw_io_stats_s {
@@ -152,7 +152,7 @@
 						 */
 };
 
-/**
+/*
  * IOC port firmware stats
  */
 
@@ -262,7 +262,7 @@
     u32    mac_invalids;       /*  Invalid mac assigned                */
 };
 
-/**
+/*
  * IOC firmware FCoE port stats
  */
 struct bfa_fw_fcoe_port_stats_s {
@@ -270,7 +270,7 @@
     struct bfa_fw_fip_stats_s   fip_stats;
 };
 
-/**
+/*
  * IOC firmware FC uport stats
  */
 struct bfa_fw_fc_uport_stats_s {
@@ -278,7 +278,7 @@
 	struct bfa_fw_port_lksm_stats_s		lksm_stats;
 };
 
-/**
+/*
  * IOC firmware FC port stats
  */
 union bfa_fw_fc_port_stats_s {
@@ -286,7 +286,7 @@
 	struct bfa_fw_fcoe_port_stats_s	fcoe_stats;
 };
 
-/**
+/*
  * IOC firmware port stats
  */
 struct bfa_fw_port_stats_s {
@@ -295,7 +295,7 @@
 	union  bfa_fw_fc_port_stats_s		fc_port;
 };
 
-/**
+/*
  * fcxchg module statistics
  */
 struct bfa_fw_fcxchg_stats_s {
@@ -308,7 +308,7 @@
 	u32	cls_tx;
 };
 
-/**
+/*
  *  Trunk statistics
  */
 struct bfa_fw_trunk_stats_s {
@@ -334,7 +334,7 @@
 	u32 elp_dropped;		/*  ELP dropped		*/
 };
 
-/**
+/*
  * IOCFC firmware stats
  */
 struct bfa_fw_iocfc_stats_s {
@@ -345,7 +345,7 @@
 	u32	set_intr_reqs;	/*  set interrupt reqs */
 };
 
-/**
+/*
  * IOC attributes returned in queries
  */
 struct bfa_iocfc_attr_s {
@@ -353,7 +353,7 @@
 	struct bfa_iocfc_intr_attr_s	intr_attr;	/*  interrupt attr */
 };
 
-/**
+/*
  * Eth_sndrcv mod stats
  */
 struct bfa_fw_eth_sndrcv_stats_s {
@@ -361,7 +361,7 @@
 	u32	rsvd;		/*  64bit align    */
 };
 
-/**
+/*
  * CT MAC mod stats
  */
 struct bfa_fw_mac_mod_stats_s {
@@ -379,7 +379,7 @@
 	u32	rsvd;		/*  64bit align    */
 };
 
-/**
+/*
  * CT MOD stats
  */
 struct bfa_fw_ct_mod_stats_s {
@@ -391,7 +391,7 @@
 	u32	rsvd;		/*  64bit align    */
 };
 
-/**
+/*
  * IOC firmware stats
  */
 struct bfa_fw_stats_s {
@@ -412,7 +412,7 @@
 #define BFA_IOCFC_PATHTOV_MAX	60
 #define BFA_IOCFC_QDEPTH_MAX	2000
 
-/**
+/*
  * QoS states
  */
 enum bfa_qos_state {
@@ -420,7 +420,7 @@
 	BFA_QOS_OFFLINE = 2,		/*  QoS is offline */
 };
 
-/**
+/*
  * QoS  Priority levels.
  */
 enum bfa_qos_priority {
@@ -430,7 +430,7 @@
 	BFA_QOS_LOW  =  3,	/*  QoS Priority Level Low */
 };
 
-/**
+/*
  * QoS  bandwidth allocation for each priority level
  */
 enum bfa_qos_bw_alloc {
@@ -439,7 +439,7 @@
 	BFA_QOS_BW_LOW  =  10,	/*  bandwidth allocation for Low */
 };
 #pragma pack(1)
-/**
+/*
  * QoS attribute returned in QoS Query
  */
 struct bfa_qos_attr_s {
@@ -448,7 +448,7 @@
 	u32  total_bb_cr;		/*  Total BB Credits */
 };
 
-/**
+/*
  * These fields should be displayed only from the CLI.
  * There will be a separate BFAL API (get_qos_vc_attr ?)
  * to retrieve this.
@@ -471,7 +471,7 @@
 							    * total_vc_count */
 };
 
-/**
+/*
  * QoS statistics
  */
 struct bfa_qos_stats_s {
@@ -489,7 +489,7 @@
 	u32	rsvd;		    /* padding for 64 bit alignment */
 };
 
-/**
+/*
  * FCoE statistics
  */
 struct bfa_fcoe_stats_s {
@@ -540,7 +540,7 @@
 	u64	rxf_bcast_vlan;	/*  Rx FCoE broadcast vlan frames   */
 };
 
-/**
+/*
  * QoS or FCoE stats (fcport stats excluding physical FC port stats)
  */
 union bfa_fcport_stats_u {
@@ -639,7 +639,7 @@
 	BFA_PORT_ST_MAX_STATE,
 };
 
-/**
+/*
  *	Port operational type (in sync with SNIA port type).
  */
 enum bfa_port_type {
@@ -651,7 +651,7 @@
 	BFA_PORT_TYPE_VPORT	= 22,	/*  NPIV - virtual port */
 };
 
-/**
+/*
  *	Port topology setting. A port's topology and fabric login status
  *	determine its operational type.
  */
@@ -662,7 +662,7 @@
 	BFA_PORT_TOPOLOGY_AUTO = 3,	/*  auto topology selection */
 };
 
-/**
+/*
  *	Physical port loopback types.
  */
 enum bfa_port_opmode {
@@ -679,7 +679,7 @@
 	(_mode == BFA_PORT_OPMODE_LB_SLW) ||		\
 	(_mode == BFA_PORT_OPMODE_LB_EXT))
 
-/**
+/*
  *	Port link state
  */
 enum bfa_port_linkstate {
@@ -687,7 +687,7 @@
 	BFA_PORT_LINKDOWN	= 2,	/*  Physical port/Trunk link down */
 };
 
-/**
+/*
  *	Port link state reason code
  */
 enum bfa_port_linkstate_rsn {
@@ -733,7 +733,7 @@
 	CEE_ISCSI_PRI_OVERLAP_FCOE_PRI		= 43
 };
 #pragma pack(1)
-/**
+/*
  *      Physical port configuration
  */
 struct bfa_port_cfg_s {
@@ -753,7 +753,7 @@
 };
 #pragma pack()
 
-/**
+/*
  *	Port attribute values.
  */
 struct bfa_port_attr_s {
@@ -800,7 +800,7 @@
 	u8			rsvd1[6];
 };
 
-/**
+/*
  *	      Port FCP mappings.
  */
 struct bfa_port_fcpmap_s {
@@ -815,7 +815,7 @@
 	char		luid[256];
 };
 
-/**
+/*
  *	      Port RNID info.
  */
 struct bfa_port_rnid_s {
@@ -848,7 +848,7 @@
 	mac_t	   mac;	    /*  FCF mac		  */
 };
 
-/**
+/*
  *	Trunk states for BCU/BFAL
  */
 enum bfa_trunk_state {
@@ -857,7 +857,7 @@
 	BFA_TRUNK_OFFLINE	= 2,	/*  Trunk is offline		*/
 };
 
-/**
+/*
  *	VC attributes for trunked link
  */
 struct bfa_trunk_vc_attr_s {
@@ -867,7 +867,7 @@
 	u16 vc_credits[8];
 };
 
-/**
+/*
  *	Link state information
  */
 struct bfa_port_link_s {
@@ -959,7 +959,7 @@
 	u32        rsvd;
 };
 #pragma pack(1)
-/**
+/*
  *  Rport's QoS attributes
  */
 struct bfa_rport_qos_attr_s {
@@ -987,7 +987,7 @@
 	struct bfa_itnim_latency_s io_latency;
 };
 
-/**
+/*
  * FC physical port statistics.
  */
 struct bfa_port_fc_stats_s {
@@ -1022,7 +1022,7 @@
 	u64     err_enc;        /*  Encoding err frame_8b10b    */
 };
 
-/**
+/*
  * Eth Physical Port statistics.
  */
 struct bfa_port_eth_stats_s {
@@ -1070,7 +1070,7 @@
 	u64     tx_iscsi_zero_pause; /*  Tx iSCSI zero pause    */
 };
 
-/**
+/*
  *              Port statistics.
  */
 union bfa_port_stats_u {
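The comment changes in this header are purely mechanical: the doxygen-style "/**" opener is demoted to a plain "/*". In the kernel tree "/**" is reserved for kernel-doc blocks, which scripts/kernel-doc parses and expects to follow a fixed layout; these bfa comments are free-form descriptions, so keeping the "/**" marker only invites spurious kernel-doc warnings. A minimal sketch of the distinction (illustrative only, not taken from the driver):

	/**
	 * struct foo_stats - example of a kernel-doc comment
	 * @good: count of good events
	 * @bad:  count of bad events
	 *
	 * Parsed by scripts/kernel-doc; the "name - description" line and
	 * per-member @lines above are required by that format.
	 */
	struct foo_stats {
		u32	good;
		u32	bad;
	};

	/*
	 * Example of a plain comment: free-form text, ignored by kernel-doc.
	 */
	struct bar_stats {
		u32	count;
	};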
diff --git a/drivers/scsi/bfa/bfa_drv.c b/drivers/scsi/bfa/bfa_drv.c
index 1412764..0222d7c 100644
--- a/drivers/scsi/bfa/bfa_drv.c
+++ b/drivers/scsi/bfa/bfa_drv.c
@@ -17,7 +17,7 @@
 
 #include "bfa_modules.h"
 
-/**
+/*
  * BFA module list terminated by NULL
  */
 struct bfa_module_s *hal_mods[] = {
@@ -31,7 +31,7 @@
 	NULL
 };
 
-/**
+/*
  * Message handlers for various modules.
  */
 bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
@@ -70,7 +70,7 @@
 };
 
 
-/**
+/*
  * Message handlers for mailbox command classes
  */
 bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
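bfa_drv.c receives the same comment demotion; there is no functional change. For reference, hal_mods[] above is a NULL-terminated table, while bfa_isrs[] and bfa_mbox_isrs[] are indexed by message class up to BFI_MC_MAX. A sketch of how such a NULL-terminated module table is typically walked (illustrative only; the driver's own iteration loops are outside these hunks):

	struct bfa_module_s **mod;

	for (mod = hal_mods; *mod != NULL; mod++) {
		/* invoke the per-module hook for (*mod) here */
	}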
diff --git a/drivers/scsi/bfa/bfa_fc.h b/drivers/scsi/bfa/bfa_fc.h
index 6eff705..e929d25 100644
--- a/drivers/scsi/bfa/bfa_fc.h
+++ b/drivers/scsi/bfa/bfa_fc.h
@@ -1029,7 +1029,7 @@
 	struct link_e2e_beacon_param_s beacon_parm;
 };
 
-/**
+/*
  * If RPSC request is sent to the Domain Controller, the request is for
  * all the ports within that domain (TODO - I don't think FOS implements
  * this...).
@@ -1049,7 +1049,7 @@
 	struct fc_rpsc_speed_info_s speed_info[1];
 };
 
-/**
+/*
  * If RPSC2 request is sent to the Domain Controller,
  */
 #define FC_BRCD_TOKEN    0x42524344
@@ -1094,7 +1094,7 @@
     struct fc_rpsc2_port_info_s port_info[1];    /* port information */
 };
 
-/**
+/*
  * bit fields so that multiple classes can be specified
  */
 enum fc_cos {
@@ -1131,7 +1131,7 @@
 #define FC_VF_ID_MAX     0xEFF
 #define FC_VF_ID_CTL     0xFEF	/*  control VF_ID */
 
-/**
+/*
  * Virtual Fabric Tagging header format
  * @caution This is defined only in BIG ENDIAN format.
  */
@@ -1463,7 +1463,7 @@
 	u32	dap:24;	/* port identifier */
 };
 
-/**
+/*
  * RFT_ID
  */
 struct fcgs_rftid_req_s {
@@ -1472,7 +1472,7 @@
 	u32	fc4_type[8];	/* fc4 types */
 };
 
-/**
+/*
  * RFF_ID : Register FC4 features.
  */
 
@@ -1487,7 +1487,7 @@
     u32    fc4_type:8;		/* corresponding FC4 Type */
 };
 
-/**
+/*
  * GID_FT Request
  */
 struct fcgs_gidft_req_s {
@@ -1497,7 +1497,7 @@
 	u8	fc4_type;	/* FC_TYPE_FCP for SCSI devices */
 };		/* GID_FT Request */
 
-/**
+/*
  * GID_FT Response
  */
 struct fcgs_gidft_resp_s {
@@ -1506,7 +1506,7 @@
 	u32	pid:24;	/* port identifier */
 };		/* GID_FT Response */
 
-/**
+/*
  * RSPN_ID
  */
 struct fcgs_rspnid_req_s {
@@ -1516,7 +1516,7 @@
 	u8		spn[256];	/* symbolic port name */
 };
 
-/**
+/*
  * RPN_ID
  */
 struct fcgs_rpnid_req_s {
@@ -1525,7 +1525,7 @@
 	wwn_t		port_name;
 };
 
-/**
+/*
  * RNN_ID
  */
 struct fcgs_rnnid_req_s {
@@ -1534,7 +1534,7 @@
 	wwn_t		node_name;
 };
 
-/**
+/*
  * RCS_ID
  */
 struct fcgs_rcsid_req_s {
@@ -1543,7 +1543,7 @@
 	u32	cos;
 };
 
-/**
+/*
  * RPT_ID
  */
 struct fcgs_rptid_req_s {
@@ -1553,7 +1553,7 @@
 	u32	rsvd1:24;
 };
 
-/**
+/*
  * GA_NXT Request
  */
 struct fcgs_ganxt_req_s {
@@ -1561,7 +1561,7 @@
 	u32	port_id:24;
 };
 
-/**
+/*
  * GA_NXT Response
  */
 struct fcgs_ganxt_rsp_s {
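The structures in bfa_fc.h describe FC ELS and CT payloads as they appear on the wire, which is why comments such as the VFT header note above stress big-endian layout: multi-byte fields must be converted from CPU byte order before being placed in a frame. A small userspace illustration of the same conversion semantics, using htons/ntohs as stand-ins for the kernel's cpu_to_be16/be16_to_cpu (the value and field name are made up):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>

	int main(void)
	{
		uint16_t ox_id = 0x1234;	/* CPU byte order */
		uint16_t wire  = htons(ox_id);	/* big-endian wire order */

		printf("cpu 0x%04x -> wire 0x%04x -> cpu 0x%04x\n",
		       ox_id, wire, ntohs(wire));
		return 0;
	}

On a little-endian host the middle value prints as 0x3412; on a big-endian host the conversion is a no-op.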
diff --git a/drivers/scsi/bfa/bfa_fcbuild.c b/drivers/scsi/bfa/bfa_fcbuild.c
index b7d2657..9c72531 100644
--- a/drivers/scsi/bfa/bfa_fcbuild.c
+++ b/drivers/scsi/bfa/bfa_fcbuild.c
@@ -94,13 +94,13 @@
 	 */
 	plogi_tmpl.csp.verhi = FC_PH_VER_PH_3;
 	plogi_tmpl.csp.verlo = FC_PH_VER_4_3;
-	plogi_tmpl.csp.bbcred = bfa_os_htons(0x0004);
+	plogi_tmpl.csp.bbcred = cpu_to_be16(0x0004);
 	plogi_tmpl.csp.ciro = 0x1;
 	plogi_tmpl.csp.cisc = 0x0;
 	plogi_tmpl.csp.altbbcred = 0x0;
-	plogi_tmpl.csp.conseq = bfa_os_htons(0x00FF);
-	plogi_tmpl.csp.ro_bitmap = bfa_os_htons(0x0002);
-	plogi_tmpl.csp.e_d_tov = bfa_os_htonl(2000);
+	plogi_tmpl.csp.conseq = cpu_to_be16(0x00FF);
+	plogi_tmpl.csp.ro_bitmap = cpu_to_be16(0x0002);
+	plogi_tmpl.csp.e_d_tov = cpu_to_be32(2000);
 
 	plogi_tmpl.class3.class_valid = 1;
 	plogi_tmpl.class3.sequential = 1;
@@ -112,7 +112,7 @@
 	 */
 	prli_tmpl.command = FC_ELS_PRLI;
 	prli_tmpl.pglen = 0x10;
-	prli_tmpl.pagebytes = bfa_os_htons(0x0014);
+	prli_tmpl.pagebytes = cpu_to_be16(0x0014);
 	prli_tmpl.parampage.type = FC_TYPE_FCP;
 	prli_tmpl.parampage.imagepair = 1;
 	prli_tmpl.parampage.servparams.rxrdisab = 1;
@@ -137,7 +137,7 @@
 static void
 fc_gs_fchdr_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u32 ox_id)
 {
-	bfa_os_memset(fchs, 0, sizeof(struct fchs_s));
+	memset(fchs, 0, sizeof(struct fchs_s));
 
 	fchs->routing = FC_RTG_FC4_DEV_DATA;
 	fchs->cat_info = FC_CAT_UNSOLICIT_CTRL;
@@ -148,9 +148,9 @@
 	fchs->rx_id = FC_RXID_ANY;
 	fchs->d_id = (d_id);
 	fchs->s_id = (s_id);
-	fchs->ox_id = bfa_os_htons(ox_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
 
-	/**
+	/*
 	 * @todo no need to set ox_id for request
 	 *       no need to set rx_id for response
 	 */
@@ -159,16 +159,16 @@
 void
 fc_els_req_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-	bfa_os_memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
+	memcpy(fchs, &fc_els_req_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = (d_id);
 	fchs->s_id = (s_id);
-	fchs->ox_id = bfa_os_htons(ox_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
 }
 
 static void
 fc_els_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-	bfa_os_memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
+	memcpy(fchs, &fc_els_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
 	fchs->s_id = s_id;
 	fchs->ox_id = ox_id;
@@ -198,7 +198,7 @@
 static void
 fc_bls_rsp_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-	bfa_os_memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
+	memcpy(fchs, &fc_bls_rsp_tmpl, sizeof(struct fchs_s));
 	fchs->d_id = d_id;
 	fchs->s_id = s_id;
 	fchs->ox_id = ox_id;
@@ -211,7 +211,7 @@
 {
 	struct fc_logi_s *plogi = (struct fc_logi_s *) (pld);
 
-	bfa_os_memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+	memcpy(plogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 	plogi->els_cmd.els_code = els_code;
 	if (els_code == FC_ELS_PLOGI)
@@ -219,10 +219,10 @@
 	else
 		fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-	plogi->csp.rxsz = plogi->class3.rxsz = bfa_os_htons(pdu_size);
+	plogi->csp.rxsz = plogi->class3.rxsz = cpu_to_be16(pdu_size);
 
-	bfa_os_memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
-	bfa_os_memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
+	memcpy(&plogi->port_name, &port_name, sizeof(wwn_t));
+	memcpy(&plogi->node_name, &node_name, sizeof(wwn_t));
 
 	return sizeof(struct fc_logi_s);
 }
@@ -235,12 +235,12 @@
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 	u32	*vvl_info;
 
-	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 	flogi->els_cmd.els_code = FC_ELS_FLOGI;
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+	flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
 	flogi->port_name = port_name;
 	flogi->node_name = node_name;
 
@@ -253,14 +253,14 @@
 	/* set AUTH capability */
 	flogi->csp.security = set_auth;
 
-	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+	flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
 
 	/* Set brcd token in VVL */
 	vvl_info = (u32 *)&flogi->vvl[0];
 
 	/* set the flag to indicate the presence of VVL */
 	flogi->csp.npiv_supp    = 1; /* @todo. field name is not correct */
-	vvl_info[0]	= bfa_os_htonl(FLOGI_VVL_BRCD);
+	vvl_info[0]	= cpu_to_be32(FLOGI_VVL_BRCD);
 
 	return sizeof(struct fc_logi_s);
 }
@@ -272,15 +272,15 @@
 {
 	u32        d_id = 0;
 
-	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 	flogi->els_cmd.els_code = FC_ELS_ACC;
-	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+	flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
 	flogi->port_name = port_name;
 	flogi->node_name = node_name;
 
-	flogi->csp.bbcred = bfa_os_htons(local_bb_credits);
+	flogi->csp.bbcred = cpu_to_be16(local_bb_credits);
 
 	return sizeof(struct fc_logi_s);
 }
@@ -291,12 +291,12 @@
 {
 	u32        d_id = bfa_os_hton3b(FC_FABRIC_PORT);
 
-	bfa_os_memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
+	memcpy(flogi, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 	flogi->els_cmd.els_code = FC_ELS_FDISC;
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	flogi->csp.rxsz = flogi->class3.rxsz = bfa_os_htons(pdu_size);
+	flogi->csp.rxsz = flogi->class3.rxsz = cpu_to_be16(pdu_size);
 	flogi->port_name = port_name;
 	flogi->node_name = node_name;
 
@@ -346,7 +346,7 @@
 		if (!plogi->class3.class_valid)
 			return FC_PARSE_FAILURE;
 
-		if (bfa_os_ntohs(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
+		if (be16_to_cpu(plogi->class3.rxsz) < (FC_MIN_PDUSZ))
 			return FC_PARSE_FAILURE;
 
 		return FC_PARSE_OK;
@@ -363,8 +363,8 @@
 	if (plogi->class3.class_valid != 1)
 		return FC_PARSE_FAILURE;
 
-	if ((bfa_os_ntohs(plogi->class3.rxsz) < FC_MIN_PDUSZ)
-	    || (bfa_os_ntohs(plogi->class3.rxsz) > FC_MAX_PDUSZ)
+	if ((be16_to_cpu(plogi->class3.rxsz) < FC_MIN_PDUSZ)
+	    || (be16_to_cpu(plogi->class3.rxsz) > FC_MAX_PDUSZ)
 	    || (plogi->class3.rxsz == 0))
 		return FC_PARSE_FAILURE;
 
@@ -378,7 +378,7 @@
 	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+	memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 
 	prli->command = FC_ELS_PRLI;
 	prli->parampage.servparams.initiator     = 1;
@@ -397,7 +397,7 @@
 	struct fc_prli_s *prli = (struct fc_prli_s *) (pld);
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
+	memcpy(prli, &prli_tmpl, sizeof(struct fc_prli_s));
 
 	prli->command = FC_ELS_ACC;
 
@@ -448,7 +448,7 @@
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(logo, '\0', sizeof(struct fc_logo_s));
+	memset(logo, '\0', sizeof(struct fc_logo_s));
 	logo->els_cmd.els_code = FC_ELS_LOGO;
 	logo->nport_id = (s_id);
 	logo->orig_port_name = port_name;
@@ -461,7 +461,7 @@
 		 u32 s_id, u16 ox_id, wwn_t port_name,
 		 wwn_t node_name, u8 els_code)
 {
-	bfa_os_memset(adisc, '\0', sizeof(struct fc_adisc_s));
+	memset(adisc, '\0', sizeof(struct fc_adisc_s));
 
 	adisc->els_cmd.els_code = els_code;
 
@@ -537,7 +537,7 @@
 	if (pdisc->class3.class_valid != 1)
 		return FC_PARSE_FAILURE;
 
-	if ((bfa_os_ntohs(pdisc->class3.rxsz) <
+	if ((be16_to_cpu(pdisc->class3.rxsz) <
 		(FC_MIN_PDUSZ - sizeof(struct fchs_s)))
 	    || (pdisc->class3.rxsz == 0))
 		return FC_PARSE_FAILURE;
@@ -554,11 +554,11 @@
 u16
 fc_abts_build(struct fchs_s *fchs, u32 d_id, u32 s_id, u16 ox_id)
 {
-	bfa_os_memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
+	memcpy(fchs, &fc_bls_req_tmpl, sizeof(struct fchs_s));
 	fchs->cat_info = FC_CAT_ABTS;
 	fchs->d_id = (d_id);
 	fchs->s_id = (s_id);
-	fchs->ox_id = bfa_os_htons(ox_id);
+	fchs->ox_id = cpu_to_be16(ox_id);
 
 	return sizeof(struct fchs_s);
 }
@@ -582,9 +582,9 @@
 	/*
 	 * build rrq payload
 	 */
-	bfa_os_memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
+	memcpy(rrq, &rrq_tmpl, sizeof(struct fc_rrq_s));
 	rrq->s_id = (s_id);
-	rrq->ox_id = bfa_os_htons(rrq_oxid);
+	rrq->ox_id = cpu_to_be16(rrq_oxid);
 	rrq->rx_id = FC_RXID_ANY;
 
 	return sizeof(struct fc_rrq_s);
@@ -598,7 +598,7 @@
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(acc, 0, sizeof(struct fc_els_cmd_s));
+	memset(acc, 0, sizeof(struct fc_els_cmd_s));
 	acc->els_code = FC_ELS_ACC;
 
 	return sizeof(struct fc_els_cmd_s);
@@ -610,7 +610,7 @@
 		u8 reason_code_expl)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
+	memset(ls_rjt, 0, sizeof(struct fc_ls_rjt_s));
 
 	ls_rjt->els_cmd.els_code = FC_ELS_LS_RJT;
 	ls_rjt->reason_code = reason_code;
@@ -626,7 +626,7 @@
 {
 	fc_bls_rsp_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
+	memcpy(ba_acc, &ba_acc_tmpl, sizeof(struct fc_ba_acc_s));
 
 	fchs->rx_id = rx_id;
 
@@ -641,7 +641,7 @@
 		u32 s_id, u16 ox_id)
 {
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
+	memset(els_cmd, 0, sizeof(struct fc_els_cmd_s));
 	els_cmd->els_code = FC_ELS_ACC;
 
 	return sizeof(struct fc_els_cmd_s);
@@ -656,10 +656,10 @@
 
 	if (els_code == FC_ELS_PRLO) {
 		prlo = (struct fc_prlo_s *) (fc_frame + 1);
-		num_pages = (bfa_os_ntohs(prlo->payload_len) - 4) / 16;
+		num_pages = (be16_to_cpu(prlo->payload_len) - 4) / 16;
 	} else {
 		tprlo = (struct fc_tprlo_s *) (fc_frame + 1);
-		num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+		num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
 	}
 	return num_pages;
 }
@@ -672,11 +672,11 @@
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(tprlo_acc, 0, (num_pages * 16) + 4);
+	memset(tprlo_acc, 0, (num_pages * 16) + 4);
 	tprlo_acc->command = FC_ELS_ACC;
 
 	tprlo_acc->page_len = 0x10;
-	tprlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+	tprlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
 	for (page = 0; page < num_pages; page++) {
 		tprlo_acc->tprlo_acc_params[page].opa_valid = 0;
@@ -685,7 +685,7 @@
 		tprlo_acc->tprlo_acc_params[page].orig_process_assc = 0;
 		tprlo_acc->tprlo_acc_params[page].resp_process_assc = 0;
 	}
-	return bfa_os_ntohs(tprlo_acc->payload_len);
+	return be16_to_cpu(tprlo_acc->payload_len);
 }
 
 u16
@@ -696,10 +696,10 @@
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(prlo_acc, 0, (num_pages * 16) + 4);
+	memset(prlo_acc, 0, (num_pages * 16) + 4);
 	prlo_acc->command = FC_ELS_ACC;
 	prlo_acc->page_len = 0x10;
-	prlo_acc->payload_len = bfa_os_htons((num_pages * 16) + 4);
+	prlo_acc->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
 	for (page = 0; page < num_pages; page++) {
 		prlo_acc->prlo_acc_params[page].opa_valid = 0;
@@ -709,7 +709,7 @@
 		prlo_acc->prlo_acc_params[page].resp_process_assc = 0;
 	}
 
-	return bfa_os_ntohs(prlo_acc->payload_len);
+	return be16_to_cpu(prlo_acc->payload_len);
 }
 
 u16
@@ -718,7 +718,7 @@
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
+	memset(rnid, 0, sizeof(struct fc_rnid_cmd_s));
 
 	rnid->els_cmd.els_code = FC_ELS_RNID;
 	rnid->node_id_data_format = data_format;
@@ -732,7 +732,7 @@
 		  struct fc_rnid_common_id_data_s *common_id_data,
 		  struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
-	bfa_os_memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
+	memset(rnid_acc, 0, sizeof(struct fc_rnid_acc_s));
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
@@ -745,7 +745,7 @@
 	if (data_format == RNID_NODEID_DATA_FORMAT_DISCOVERY) {
 		rnid_acc->specific_id_data_length =
 			sizeof(struct fc_rnid_general_topology_data_s);
-		bfa_os_assign(rnid_acc->gen_topology_data, *gen_topo_data);
+		rnid_acc->gen_topology_data = *gen_topo_data;
 		return sizeof(struct fc_rnid_acc_s);
 	} else {
 		return sizeof(struct fc_rnid_acc_s) -
@@ -760,7 +760,7 @@
 {
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
+	memset(rpsc, 0, sizeof(struct fc_rpsc_cmd_s));
 
 	rpsc->els_cmd.els_code = FC_ELS_RPSC;
 	return sizeof(struct fc_rpsc_cmd_s);
@@ -775,11 +775,11 @@
 
 	fc_els_req_build(fchs, bfa_os_hton3b(dctlr_id), s_id, 0);
 
-	bfa_os_memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
+	memset(rpsc2, 0, sizeof(struct fc_rpsc2_cmd_s));
 
 	rpsc2->els_cmd.els_code = FC_ELS_RPSC;
-	rpsc2->token = bfa_os_htonl(FC_BRCD_TOKEN);
-	rpsc2->num_pids  = bfa_os_htons(npids);
+	rpsc2->token = cpu_to_be32(FC_BRCD_TOKEN);
+	rpsc2->num_pids  = cpu_to_be16(npids);
 	for (i = 0; i < npids; i++)
 		rpsc2->pid_list[i].pid = pid_list[i];
 
@@ -791,18 +791,18 @@
 		u32 d_id, u32 s_id, u16 ox_id,
 		  struct fc_rpsc_speed_info_s *oper_speed)
 {
-	bfa_os_memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
+	memset(rpsc_acc, 0, sizeof(struct fc_rpsc_acc_s));
 
 	fc_els_rsp_build(fchs, d_id, s_id, ox_id);
 
 	rpsc_acc->command = FC_ELS_ACC;
-	rpsc_acc->num_entries = bfa_os_htons(1);
+	rpsc_acc->num_entries = cpu_to_be16(1);
 
 	rpsc_acc->speed_info[0].port_speed_cap =
-		bfa_os_htons(oper_speed->port_speed_cap);
+		cpu_to_be16(oper_speed->port_speed_cap);
 
 	rpsc_acc->speed_info[0].port_op_speed =
-		bfa_os_htons(oper_speed->port_op_speed);
+		cpu_to_be16(oper_speed->port_op_speed);
 
 	return sizeof(struct fc_rpsc_acc_s);
 }
@@ -830,12 +830,12 @@
 {
 	struct fc_logi_s *pdisc = (struct fc_logi_s *) (fchs + 1);
 
-	bfa_os_memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
+	memcpy(pdisc, &plogi_tmpl, sizeof(struct fc_logi_s));
 
 	pdisc->els_cmd.els_code = FC_ELS_PDISC;
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	pdisc->csp.rxsz = pdisc->class3.rxsz = bfa_os_htons(pdu_size);
+	pdisc->csp.rxsz = pdisc->class3.rxsz = cpu_to_be16(pdu_size);
 	pdisc->port_name = port_name;
 	pdisc->node_name = node_name;
 
@@ -859,7 +859,7 @@
 	if (!pdisc->class3.class_valid)
 		return FC_PARSE_NWWN_NOT_EQUAL;
 
-	if (bfa_os_ntohs(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
+	if (be16_to_cpu(pdisc->class3.rxsz) < (FC_MIN_PDUSZ))
 		return FC_PARSE_RXSZ_INVAL;
 
 	return FC_PARSE_OK;
@@ -873,10 +873,10 @@
 	int             page;
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memset(prlo, 0, (num_pages * 16) + 4);
+	memset(prlo, 0, (num_pages * 16) + 4);
 	prlo->command = FC_ELS_PRLO;
 	prlo->page_len = 0x10;
-	prlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+	prlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
 	for (page = 0; page < num_pages; page++) {
 		prlo->prlo_params[page].type = FC_TYPE_FCP;
@@ -886,7 +886,7 @@
 		prlo->prlo_params[page].resp_process_assc = 0;
 	}
 
-	return bfa_os_ntohs(prlo->payload_len);
+	return be16_to_cpu(prlo->payload_len);
 }
 
 u16
@@ -901,7 +901,7 @@
 	if (prlo->command != FC_ELS_ACC)
 		return FC_PARSE_FAILURE;
 
-	num_pages = ((bfa_os_ntohs(prlo->payload_len)) - 4) / 16;
+	num_pages = ((be16_to_cpu(prlo->payload_len)) - 4) / 16;
 
 	for (page = 0; page < num_pages; page++) {
 		if (prlo->prlo_acc_params[page].type != FC_TYPE_FCP)
@@ -931,10 +931,10 @@
 	int             page;
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
-	bfa_os_memset(tprlo, 0, (num_pages * 16) + 4);
+	memset(tprlo, 0, (num_pages * 16) + 4);
 	tprlo->command = FC_ELS_TPRLO;
 	tprlo->page_len = 0x10;
-	tprlo->payload_len = bfa_os_htons((num_pages * 16) + 4);
+	tprlo->payload_len = cpu_to_be16((num_pages * 16) + 4);
 
 	for (page = 0; page < num_pages; page++) {
 		tprlo->tprlo_params[page].type = FC_TYPE_FCP;
@@ -950,7 +950,7 @@
 		}
 	}
 
-	return bfa_os_ntohs(tprlo->payload_len);
+	return be16_to_cpu(tprlo->payload_len);
 }
 
 u16
@@ -965,7 +965,7 @@
 	if (tprlo->command != FC_ELS_ACC)
 		return FC_PARSE_ACC_INVAL;
 
-	num_pages = (bfa_os_ntohs(tprlo->payload_len) - 4) / 16;
+	num_pages = (be16_to_cpu(tprlo->payload_len) - 4) / 16;
 
 	for (page = 0; page < num_pages; page++) {
 		if (tprlo->tprlo_acc_params[page].type != FC_TYPE_FCP)
@@ -1011,32 +1011,32 @@
 static void
 fc_gs_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
 {
-	bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
 	cthdr->rev_id = CT_GS3_REVISION;
 	cthdr->gs_type = CT_GSTYPE_DIRSERVICE;
 	cthdr->gs_sub_type = CT_GSSUBTYPE_NAMESERVER;
-	cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 static void
 fc_gs_fdmi_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code)
 {
-	bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
 	cthdr->rev_id = CT_GS3_REVISION;
 	cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
 	cthdr->gs_sub_type = CT_GSSUBTYPE_HBA_MGMTSERVER;
-	cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 static void
 fc_gs_ms_cthdr_build(struct ct_hdr_s *cthdr, u32 s_id, u16 cmd_code,
 					 u8 sub_type)
 {
-	bfa_os_memset(cthdr, 0, sizeof(struct ct_hdr_s));
+	memset(cthdr, 0, sizeof(struct ct_hdr_s));
 	cthdr->rev_id = CT_GS3_REVISION;
 	cthdr->gs_type = CT_GSTYPE_MGMTSERVICE;
 	cthdr->gs_sub_type = sub_type;
-	cthdr->cmd_rsp_code = bfa_os_htons(cmd_code);
+	cthdr->cmd_rsp_code = cpu_to_be16(cmd_code);
 }
 
 u16
@@ -1050,7 +1050,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GID_PN);
 
-	bfa_os_memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
+	memset(gidpn, 0, sizeof(struct fcgs_gidpn_req_s));
 	gidpn->port_name = port_name;
 	return sizeof(struct fcgs_gidpn_req_s) + sizeof(struct ct_hdr_s);
 }
@@ -1066,7 +1066,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GPN_ID);
 
-	bfa_os_memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
+	memset(gpnid, 0, sizeof(fcgs_gpnid_req_t));
 	gpnid->dap = port_id;
 	return sizeof(fcgs_gpnid_req_t) + sizeof(struct ct_hdr_s);
 }
@@ -1082,7 +1082,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GNN_ID);
 
-	bfa_os_memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
+	memset(gnnid, 0, sizeof(fcgs_gnnid_req_t));
 	gnnid->dap = port_id;
 	return sizeof(fcgs_gnnid_req_t) + sizeof(struct ct_hdr_s);
 }
@@ -1090,7 +1090,7 @@
 u16
 fc_ct_rsp_parse(struct ct_hdr_s *cthdr)
 {
-	if (bfa_os_ntohs(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
+	if (be16_to_cpu(cthdr->cmd_rsp_code) != CT_RSP_ACCEPT) {
 		if (cthdr->reason_code == CT_RSN_LOGICAL_BUSY)
 			return FC_PARSE_BUSY;
 		else
@@ -1108,7 +1108,7 @@
 
 	fc_els_req_build(fchs, d_id, s_id, ox_id);
 
-	bfa_os_memset(scr, 0, sizeof(struct fc_scr_s));
+	memset(scr, 0, sizeof(struct fc_scr_s));
 	scr->command = FC_ELS_SCR;
 	scr->reg_func = FC_SCR_REG_FUNC_FULL;
 	if (set_br_reg)
@@ -1129,7 +1129,7 @@
 	rscn->pagelen = sizeof(rscn->event[0]);
 
 	payldlen = sizeof(u32) + rscn->pagelen;
-	rscn->payldlen = bfa_os_htons(payldlen);
+	rscn->payldlen = cpu_to_be16(payldlen);
 
 	rscn->event[0].format = FC_RSCN_FORMAT_PORTID;
 	rscn->event[0].portid = s_id;
@@ -1149,14 +1149,14 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
 
-	bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+	memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
 
 	rftid->dap = s_id;
 
 	/* By default, FCP FC4 Type is registered */
 	index = FC_TYPE_FCP >> 5;
 	type_value = 1 << (FC_TYPE_FCP % 32);
-	rftid->fc4_type[index] = bfa_os_htonl(type_value);
+	rftid->fc4_type[index] = cpu_to_be32(type_value);
 
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
 }
@@ -1172,10 +1172,10 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFT_ID);
 
-	bfa_os_memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
+	memset(rftid, 0, sizeof(struct fcgs_rftid_req_s));
 
 	rftid->dap = s_id;
-	bfa_os_memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
+	memcpy((void *)rftid->fc4_type, (void *)fc4_bitmap,
 		(bitmap_size < 32 ? bitmap_size : 32));
 
 	return sizeof(struct fcgs_rftid_req_s) + sizeof(struct ct_hdr_s);
@@ -1192,7 +1192,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RFF_ID);
 
-	bfa_os_memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
+	memset(rffid, 0, sizeof(struct fcgs_rffid_req_s));
 
 	rffid->dap	    = s_id;
 	rffid->fc4ftr_bits  = fc4_ftrs;
@@ -1214,7 +1214,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, ox_id);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RSPN_ID);
 
-	bfa_os_memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
+	memset(rspnid, 0, sizeof(struct fcgs_rspnid_req_s));
 
 	rspnid->dap = s_id;
 	rspnid->spn_len = (u8) strlen((char *)name);
@@ -1235,7 +1235,7 @@
 
 	fc_gs_cthdr_build(cthdr, s_id, GS_GID_FT);
 
-	bfa_os_memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
+	memset(gidft, 0, sizeof(struct fcgs_gidft_req_s));
 	gidft->fc4_type = fc4_type;
 	gidft->domain_id = 0;
 	gidft->area_id = 0;
@@ -1254,7 +1254,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPN_ID);
 
-	bfa_os_memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
+	memset(rpnid, 0, sizeof(struct fcgs_rpnid_req_s));
 	rpnid->port_id = port_id;
 	rpnid->port_name = port_name;
 
@@ -1272,7 +1272,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RNN_ID);
 
-	bfa_os_memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
+	memset(rnnid, 0, sizeof(struct fcgs_rnnid_req_s));
 	rnnid->port_id = port_id;
 	rnnid->node_name = node_name;
 
@@ -1291,7 +1291,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RCS_ID);
 
-	bfa_os_memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
+	memset(rcsid, 0, sizeof(struct fcgs_rcsid_req_s));
 	rcsid->port_id = port_id;
 	rcsid->cos = cos;
 
@@ -1309,7 +1309,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_RPT_ID);
 
-	bfa_os_memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
+	memset(rptid, 0, sizeof(struct fcgs_rptid_req_s));
 	rptid->port_id = port_id;
 	rptid->port_type = port_type;
 
@@ -1326,7 +1326,7 @@
 	fc_gs_fchdr_build(fchs, d_id, s_id, 0);
 	fc_gs_cthdr_build(cthdr, s_id, GS_GA_NXT);
 
-	bfa_os_memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
+	memset(ganxt, 0, sizeof(struct fcgs_ganxt_req_s));
 	ganxt->port_id = port_id;
 
 	return sizeof(struct ct_hdr_s) + sizeof(struct fcgs_ganxt_req_s);
@@ -1365,7 +1365,7 @@
 
 	index = fc4_type >> 5;
 	type_value = 1 << (fc4_type % 32);
-	ptr[index] = bfa_os_htonl(type_value);
+	ptr[index] = cpu_to_be32(type_value);
 
 }
 
@@ -1383,7 +1383,7 @@
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GMAL_CMD,
 			CT_GSSUBTYPE_CFGSERVER);
 
-	bfa_os_memset(gmal, 0, sizeof(fcgs_gmal_req_t));
+	memset(gmal, 0, sizeof(fcgs_gmal_req_t));
 	gmal->wwn = wwn;
 
 	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gmal_req_t);
@@ -1403,7 +1403,7 @@
 	fc_gs_ms_cthdr_build(cthdr, s_id, GS_FC_GFN_CMD,
 			CT_GSSUBTYPE_CFGSERVER);
 
-	bfa_os_memset(gfn, 0, sizeof(fcgs_gfn_req_t));
+	memset(gfn, 0, sizeof(fcgs_gfn_req_t));
 	gfn->wwn = wwn;
 
 	return sizeof(struct ct_hdr_s) + sizeof(fcgs_gfn_req_t);
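The bfa_fcbuild.c changes are a mechanical move away from the driver's private OS-abstraction wrappers: bfa_os_htons/bfa_os_htonl and bfa_os_ntohs/bfa_os_ntohl become the standard cpu_to_be16/cpu_to_be32 and be16_to_cpu/be32_to_cpu helpers, bfa_os_memset/bfa_os_memcpy become plain memset/memcpy, and bfa_os_assign becomes a direct structure assignment. Only the 24-bit helper bfa_os_hton3b, which has no generic kernel equivalent, is left in place. The one-for-one substitution suggests the retired wrappers were thin aliases along these lines (a presumption for illustration; the real definitions live in the driver's OS-abstraction header, which is not part of these hunks):

	/* presumed shape of the retired bfa_os_* wrappers -- illustrative only */
	#define bfa_os_htons(x)		cpu_to_be16(x)
	#define bfa_os_htonl(x)		cpu_to_be32(x)
	#define bfa_os_ntohs(x)		be16_to_cpu(x)
	#define bfa_os_ntohl(x)		be32_to_cpu(x)
	#define bfa_os_memset(s, c, n)	memset((s), (c), (n))
	#define bfa_os_memcpy(d, s, n)	memcpy((d), (s), (n))
	#define bfa_os_assign(l, r)	((l) = (r))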
diff --git a/drivers/scsi/bfa/bfa_fcpim.c b/drivers/scsi/bfa/bfa_fcpim.c
index 33c8dd5..135c442 100644
--- a/drivers/scsi/bfa/bfa_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcpim.c
@@ -26,7 +26,7 @@
 	(__l->__stats += __r->__stats)
 
 
-/**
+/*
  *  BFA ITNIM Related definitions
  */
 static void bfa_itnim_update_del_itn_stats(struct bfa_itnim_s *itnim);
@@ -72,7 +72,7 @@
 	}								\
 } while (0)
 
-/**
+/*
  *  bfa_itnim_sm BFA itnim state machine
  */
 
@@ -89,7 +89,7 @@
 	BFA_ITNIM_SM_QRESUME = 9,	/*  queue space available */
 };
 
-/**
+/*
  *  BFA IOIM related definitions
  */
 #define bfa_ioim_move_to_comp_q(__ioim) do {				\
@@ -107,11 +107,11 @@
 	if ((__fcpim)->profile_start)					\
 		(__fcpim)->profile_start(__ioim);			\
 } while (0)
-/**
+/*
  *  hal_ioim_sm
  */
 
-/**
+/*
  * IO state machine events
  */
 enum bfa_ioim_event {
@@ -136,11 +136,11 @@
 };
 
 
-/**
+/*
  *  BFA TSKIM related definitions
  */
 
-/**
+/*
  * task management completion handling
  */
 #define bfa_tskim_qcomp(__tskim, __cbfn) do {				\
@@ -165,7 +165,7 @@
 	BFA_TSKIM_SM_CLEANUP_DONE = 9,	/*  TM abort completion	*/
 };
 
-/**
+/*
  * forward declaration for BFA ITNIM functions
  */
 static void     bfa_itnim_iocdisable_cleanup(struct bfa_itnim_s *itnim);
@@ -183,7 +183,7 @@
 static void     bfa_itnim_iotov_stop(struct bfa_itnim_s *itnim);
 static void     bfa_itnim_iotov_delete(struct bfa_itnim_s *itnim);
 
-/**
+/*
  * forward declaration of ITNIM state machine
  */
 static void     bfa_itnim_sm_uninit(struct bfa_itnim_s *itnim,
@@ -217,7 +217,7 @@
 static void     bfa_itnim_sm_deleting_qfull(struct bfa_itnim_s *itnim,
 					enum bfa_itnim_event event);
 
-/**
+/*
  * forward declaration for BFA IOIM functions
  */
 static bfa_boolean_t	bfa_ioim_send_ioreq(struct bfa_ioim_s *ioim);
@@ -233,7 +233,7 @@
 static bfa_boolean_t    bfa_ioim_is_abortable(struct bfa_ioim_s *ioim);
 
 
-/**
+/*
  * forward declaration of BFA IO state machine
  */
 static void     bfa_ioim_sm_uninit(struct bfa_ioim_s *ioim,
@@ -261,7 +261,7 @@
 static void	bfa_ioim_sm_cmnd_retry(struct bfa_ioim_s *ioim,
 					enum bfa_ioim_event event);
 
-/**
+/*
  * forward declaration for BFA TSKIM functions
  */
 static void     __bfa_cb_tskim_done(void *cbarg, bfa_boolean_t complete);
@@ -276,7 +276,7 @@
 static void     bfa_tskim_iocdisable_ios(struct bfa_tskim_s *tskim);
 
 
-/**
+/*
  * forward declaration of BFA TSKIM state machine
  */
 static void     bfa_tskim_sm_uninit(struct bfa_tskim_s *tskim,
@@ -294,11 +294,11 @@
 static void     bfa_tskim_sm_hcb(struct bfa_tskim_s *tskim,
 					enum bfa_tskim_event event);
 
-/**
+/*
  *  hal_fcpim_mod BFA FCP Initiator Mode module
  */
 
-/**
+/*
  *	Compute and return memory needed by FCP(im) module.
  */
 static void
@@ -307,7 +307,7 @@
 {
 	bfa_itnim_meminfo(cfg, km_len, dm_len);
 
-	/**
+	/*
 	 * IO memory
 	 */
 	if (cfg->fwcfg.num_ioim_reqs < BFA_IOIM_MIN)
@@ -320,7 +320,7 @@
 
 	*dm_len += cfg->fwcfg.num_ioim_reqs * BFI_IOIM_SNSLEN;
 
-	/**
+	/*
 	 * task management command memory
 	 */
 	if (cfg->fwcfg.num_tskim_reqs < BFA_TSKIM_MIN)
@@ -463,7 +463,7 @@
 	struct bfa_itnim_s *itnim;
 
 	/* accumulate IO stats from itnim */
-	bfa_os_memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
+	memset(stats, 0, sizeof(struct bfa_itnim_iostats_s));
 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 		itnim = (struct bfa_itnim_s *) qe;
 		if (itnim->rport->rport_info.lp_tag != lp_tag)
@@ -480,7 +480,7 @@
 	struct bfa_itnim_s *itnim;
 
 	/* accumulate IO stats from itnim */
-	bfa_os_memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
+	memset(modstats, 0, sizeof(struct bfa_itnim_iostats_s));
 	list_for_each_safe(qe, qen, &fcpim->itnim_q) {
 		itnim = (struct bfa_itnim_s *) qe;
 		bfa_fcpim_add_stats(modstats, &(itnim->stats));
@@ -560,7 +560,7 @@
 		itnim = (struct bfa_itnim_s *) qe;
 		bfa_itnim_clear_stats(itnim);
 	}
-	bfa_os_memset(&fcpim->del_itn_stats, 0,
+	memset(&fcpim->del_itn_stats, 0,
 		sizeof(struct bfa_fcpim_del_itn_stats_s));
 
 	return BFA_STATUS_OK;
@@ -604,11 +604,11 @@
 
 
 
-/**
+/*
  *  BFA ITNIM module state machine functions
  */
 
-/**
+/*
  *	Beginning/unallocated state - no events expected.
  */
 static void
@@ -629,7 +629,7 @@
 	}
 }
 
-/**
+/*
  *	Beginning state, only online event expected.
  */
 static void
@@ -660,7 +660,7 @@
 	}
 }
 
-/**
+/*
  *	Waiting for itnim create response from firmware.
  */
 static void
@@ -732,7 +732,7 @@
 	}
 }
 
-/**
+/*
  *	Waiting for itnim create response from firmware, a delete is pending.
  */
 static void
@@ -760,7 +760,7 @@
 	}
 }
 
-/**
+/*
  *	Online state - normal parking state.
  */
 static void
@@ -802,7 +802,7 @@
 	}
 }
 
-/**
+/*
  *	Second level error recovery need.
  */
 static void
@@ -833,7 +833,7 @@
 	}
 }
 
-/**
+/*
  *	Going offline. Waiting for active IO cleanup.
  */
 static void
@@ -870,7 +870,7 @@
 	}
 }
 
-/**
+/*
  *	Deleting itnim. Waiting for active IO cleanup.
  */
 static void
@@ -898,7 +898,7 @@
 	}
 }
 
-/**
+/*
  * Rport offline. Firmware itnim is being deleted - awaiting f/w response.
  */
 static void
@@ -955,7 +955,7 @@
 	}
 }
 
-/**
+/*
  *	Offline state.
  */
 static void
@@ -987,7 +987,7 @@
 	}
 }
 
-/**
+/*
  *	IOC h/w failed state.
  */
 static void
@@ -1023,7 +1023,7 @@
 	}
 }
 
-/**
+/*
  *	Itnim is deleted, waiting for firmware response to delete.
  */
 static void
@@ -1068,7 +1068,7 @@
 	}
 }
 
-/**
+/*
  *	Initiate cleanup of all IOs on an IOC failure.
  */
 static void
@@ -1088,7 +1088,7 @@
 		bfa_ioim_iocdisable(ioim);
 	}
 
-	/**
+	/*
 	 * For IO request in pending queue, we pretend an early timeout.
 	 */
 	list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -1102,7 +1102,7 @@
 	}
 }
 
-/**
+/*
  *	IO cleanup completion
  */
 static void
@@ -1114,7 +1114,7 @@
 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_CLEANUP);
 }
 
-/**
+/*
  *	Initiate cleanup of all IOs.
  */
 static void
@@ -1129,7 +1129,7 @@
 	list_for_each_safe(qe, qen, &itnim->io_q) {
 		ioim = (struct bfa_ioim_s *) qe;
 
-		/**
+		/*
 		 * Move IO to a cleanup queue from active queue so that a later
 		 * TM will not pick up this IO.
 		 */
@@ -1176,7 +1176,7 @@
 		bfa_cb_itnim_sler(itnim->ditn);
 }
 
-/**
+/*
  * Call to resume any I/O requests waiting for room in request queue.
  */
 static void
@@ -1190,7 +1190,7 @@
 
 
 
-/**
+/*
  *  bfa_itnim_public
  */
 
@@ -1210,7 +1210,7 @@
 bfa_itnim_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
 		u32 *dm_len)
 {
-	/**
+	/*
 	 * ITN memory
 	 */
 	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_itnim_s);
@@ -1229,7 +1229,7 @@
 	fcpim->itnim_arr = itnim;
 
 	for (i = 0; i < fcpim->num_itnims; i++, itnim++) {
-		bfa_os_memset(itnim, 0, sizeof(struct bfa_itnim_s));
+		memset(itnim, 0, sizeof(struct bfa_itnim_s));
 		itnim->bfa = bfa;
 		itnim->fcpim = fcpim;
 		itnim->reqq = BFA_REQQ_QOS_LO;
@@ -1264,7 +1264,7 @@
 
 	itnim->msg_no++;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1281,7 +1281,7 @@
 	m->msg_no = itnim->msg_no;
 	bfa_stats(itnim, fw_create);
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(itnim->bfa, itnim->reqq);
@@ -1293,7 +1293,7 @@
 {
 	struct bfi_itnim_delete_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(itnim->bfa, itnim->reqq);
@@ -1307,14 +1307,14 @@
 	m->fw_handle = itnim->rport->fw_handle;
 	bfa_stats(itnim, fw_delete);
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(itnim->bfa, itnim->reqq);
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Cleanup all pending failed inflight requests.
  */
 static void
@@ -1329,7 +1329,7 @@
 	}
 }
 
-/**
+/*
  * Start all pending IO requests.
  */
 static void
@@ -1339,12 +1339,12 @@
 
 	bfa_itnim_iotov_stop(itnim);
 
-	/**
+	/*
 	 * Abort all inflight IO requests in the queue
 	 */
 	bfa_itnim_delayed_comp(itnim, BFA_FALSE);
 
-	/**
+	/*
 	 * Start all pending IO requests.
 	 */
 	while (!list_empty(&itnim->pending_q)) {
@@ -1354,7 +1354,7 @@
 	}
 }
 
-/**
+/*
  * Fail all pending IO requests
  */
 static void
@@ -1362,12 +1362,12 @@
 {
 	struct bfa_ioim_s *ioim;
 
-	/**
+	/*
 	 * Fail all inflight IO requests in the queue
 	 */
 	bfa_itnim_delayed_comp(itnim, BFA_TRUE);
 
-	/**
+	/*
 	 * Fail any pending IO requests.
 	 */
 	while (!list_empty(&itnim->pending_q)) {
@@ -1377,7 +1377,7 @@
 	}
 }
 
-/**
+/*
  * IO TOV timer callback. Fail any pending IO requests.
  */
 static void
@@ -1392,7 +1392,7 @@
 	bfa_cb_itnim_tov(itnim->ditn);
 }
 
-/**
+/*
  * Start IO TOV timer for failing back pending IO requests in offline state.
  */
 static void
@@ -1407,7 +1407,7 @@
 	}
 }
 
-/**
+/*
  * Stop IO TOV timer.
  */
 static void
@@ -1419,7 +1419,7 @@
 	}
 }
 
-/**
+/*
  * Stop IO TOV timer.
  */
 static void
@@ -1459,11 +1459,11 @@
 
 
 
-/**
+/*
  *  bfa_itnim_public
  */
 
-/**
+/*
  *	Itnim interrupt processing.
  */
 void
@@ -1509,7 +1509,7 @@
 
 
 
-/**
+/*
  *  bfa_itnim_api
  */
 
@@ -1552,7 +1552,7 @@
 	bfa_sm_send_event(itnim, BFA_ITNIM_SM_OFFLINE);
 }
 
-/**
+/*
  * Return true if itnim is considered offline for holding off IO request.
  * IO is not held if itnim is being deleted.
  */
@@ -1597,17 +1597,17 @@
 bfa_itnim_clear_stats(struct bfa_itnim_s *itnim)
 {
 	int j;
-	bfa_os_memset(&itnim->stats, 0, sizeof(itnim->stats));
-	bfa_os_memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
+	memset(&itnim->stats, 0, sizeof(itnim->stats));
+	memset(&itnim->ioprofile, 0, sizeof(itnim->ioprofile));
 	for (j = 0; j < BFA_IOBUCKET_MAX; j++)
 		itnim->ioprofile.io_latency.min[j] = ~0;
 }
 
-/**
+/*
  *  BFA IO module state machine functions
  */
 
-/**
+/*
  *	IO is not started (unallocated).
  */
 static void
@@ -1657,7 +1657,7 @@
 		break;
 
 	case BFA_IOIM_SM_ABORT:
-		/**
+		/*
 		 * IO in pending queue can get abort requests. Complete abort
 		 * requests immediately.
 		 */
@@ -1672,7 +1672,7 @@
 	}
 }
 
-/**
+/*
  *	IO is waiting for SG pages.
  */
 static void
@@ -1719,7 +1719,7 @@
 	}
 }
 
-/**
+/*
  *	IO is active.
  */
 static void
@@ -1803,7 +1803,7 @@
 	}
 }
 
-/**
+/*
 *	IO is retried with new tag.
 */
 static void
@@ -1844,7 +1844,7 @@
 		break;
 
 	case BFA_IOIM_SM_ABORT:
-		/** in this state IO abort is done.
+		/* in this state IO abort is done.
 		 * Waiting for IO tag resource free.
 		 */
 		bfa_sm_set_state(ioim, bfa_ioim_sm_hcb_free);
@@ -1857,7 +1857,7 @@
 	}
 }
 
-/**
+/*
  *	IO is being aborted, waiting for completion from firmware.
  */
 static void
@@ -1919,7 +1919,7 @@
 	}
 }
 
-/**
+/*
  * IO is being cleaned up (implicit abort), waiting for completion from
  * firmware.
  */
@@ -1937,7 +1937,7 @@
 		break;
 
 	case BFA_IOIM_SM_ABORT:
-		/**
+		/*
 		 * IO is already being aborted implicitly
 		 */
 		ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -1969,7 +1969,7 @@
 		break;
 
 	case BFA_IOIM_SM_CLEANUP:
-		/**
+		/*
 		 * IO can be in cleanup state already due to TM command.
 		 * 2nd cleanup request comes from ITN offline event.
 		 */
@@ -1980,7 +1980,7 @@
 	}
 }
 
-/**
+/*
  *	IO is waiting for room in request CQ
  */
 static void
@@ -2024,7 +2024,7 @@
 	}
 }
 
-/**
+/*
  *	Active IO is being aborted, waiting for room in request CQ.
  */
 static void
@@ -2075,7 +2075,7 @@
 	}
 }
 
-/**
+/*
  *	Active IO is being cleaned up, waiting for room in request CQ.
  */
 static void
@@ -2091,7 +2091,7 @@
 		break;
 
 	case BFA_IOIM_SM_ABORT:
-		/**
+		/*
 		 * IO is already being cleaned up implicitly
 		 */
 		ioim->io_cbfn = __bfa_cb_ioim_abort;
@@ -2125,7 +2125,7 @@
 	}
 }
 
-/**
+/*
  * IO bfa callback is pending.
  */
 static void
@@ -2152,7 +2152,7 @@
 	}
 }
 
-/**
+/*
  * IO bfa callback is pending. IO resource cannot be freed.
  */
 static void
@@ -2185,7 +2185,7 @@
 	}
 }
 
-/**
+/*
  * IO is completed, waiting resource free from firmware.
  */
 static void
@@ -2214,7 +2214,7 @@
 
 
 
-/**
+/*
  *  hal_ioim_private
  */
 
@@ -2247,7 +2247,7 @@
 
 	m = (struct bfi_ioim_rsp_s *) &ioim->iosp->comp_rspmsg;
 	if (m->io_status == BFI_IOIM_STS_OK) {
-		/**
+		/*
 		 * setup sense information, if present
 		 */
 		if ((m->scsi_status == SCSI_STATUS_CHECK_CONDITION) &&
@@ -2256,15 +2256,15 @@
 			snsinfo = ioim->iosp->snsinfo;
 		}
 
-		/**
+		/*
 		 * setup residue value correctly for normal completions
 		 */
 		if (m->resid_flags == FCP_RESID_UNDER) {
-			residue = bfa_os_ntohl(m->residue);
+			residue = be32_to_cpu(m->residue);
 			bfa_stats(ioim->itnim, iocomp_underrun);
 		}
 		if (m->resid_flags == FCP_RESID_OVER) {
-			residue = bfa_os_ntohl(m->residue);
+			residue = be32_to_cpu(m->residue);
 			residue = -residue;
 			bfa_stats(ioim->itnim, iocomp_overrun);
 		}
@@ -2327,7 +2327,7 @@
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_SGALLOCED);
 }
 
-/**
+/*
  * Send I/O request to firmware.
  */
 static	bfa_boolean_t
@@ -2343,7 +2343,7 @@
 	struct scatterlist *sg;
 	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
@@ -2354,14 +2354,14 @@
 		return BFA_FALSE;
 	}
 
-	/**
+	/*
 	 * build i/o request message next
 	 */
-	m->io_tag = bfa_os_htons(ioim->iotag);
+	m->io_tag = cpu_to_be16(ioim->iotag);
 	m->rport_hdl = ioim->itnim->rport->fw_handle;
 	m->io_timeout = bfa_cb_ioim_get_timeout(ioim->dio);
 
-	/**
+	/*
 	 * build inline IO SG element here
 	 */
 	sge = &m->sges[0];
@@ -2387,18 +2387,17 @@
 	sge->flags = BFI_SGE_PGDLEN;
 	bfa_sge_to_be(sge);
 
-	/**
+	/*
 	 * set up I/O command parameters
 	 */
-	bfa_os_assign(m->cmnd, cmnd_z0);
+	m->cmnd = cmnd_z0;
 	m->cmnd.lun = bfa_cb_ioim_get_lun(ioim->dio);
 	m->cmnd.iodir = bfa_cb_ioim_get_iodir(ioim->dio);
-	bfa_os_assign(m->cmnd.cdb,
-			*(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio));
+	m->cmnd.cdb = *(scsi_cdb_t *)bfa_cb_ioim_get_cdb(ioim->dio);
 	fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
-	m->cmnd.fcp_dl = bfa_os_htonl(fcp_dl);
+	m->cmnd.fcp_dl = cpu_to_be32(fcp_dl);
 
-	/**
+	/*
 	 * set up I/O message header
 	 */
 	switch (m->cmnd.iodir) {
@@ -2427,28 +2426,28 @@
 	m->cmnd.priority = bfa_cb_ioim_get_priority(ioim->dio);
 	m->cmnd.taskattr = bfa_cb_ioim_get_taskattr(ioim->dio);
 
-	/**
+	/*
 	 * Handle large CDB (>16 bytes).
 	 */
 	m->cmnd.addl_cdb_len = (bfa_cb_ioim_get_cdblen(ioim->dio) -
 					FCP_CMND_CDB_LEN) / sizeof(u32);
 	if (m->cmnd.addl_cdb_len) {
-		bfa_os_memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
+		memcpy(&m->cmnd.cdb + 1, (scsi_cdb_t *)
 				bfa_cb_ioim_get_cdb(ioim->dio) + 1,
 				m->cmnd.addl_cdb_len * sizeof(u32));
 		fcp_cmnd_fcpdl(&m->cmnd) =
-				bfa_os_htonl(bfa_cb_ioim_get_size(ioim->dio));
+				cpu_to_be32(bfa_cb_ioim_get_size(ioim->dio));
 	}
 #endif
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(ioim->bfa, ioim->reqq);
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Setup any additional SG pages needed. Inline SG element is set up
  * at queuing time.
  */
@@ -2459,7 +2458,7 @@
 
 	bfa_assert(ioim->nsges > BFI_SGE_INLINE);
 
-	/**
+	/*
 	 * allocate SG pages needed
 	 */
 	nsgpgs = BFA_SGPG_NPAGE(ioim->nsges);
@@ -2508,7 +2507,7 @@
 			sge->sg_len = sg_dma_len(sg);
 			pgcumsz += sge->sg_len;
 
-			/**
+			/*
 			 * set flags
 			 */
 			if (i < (nsges - 1))
@@ -2523,7 +2522,7 @@
 
 		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);
 
-		/**
+		/*
 		 * set the link element of each page
 		 */
 		if (sgeid == ioim->nsges) {
@@ -2540,7 +2539,7 @@
 	} while (sgeid < ioim->nsges);
 }
 
-/**
+/*
  * Send I/O abort request to firmware.
  */
 static	bfa_boolean_t
@@ -2549,14 +2548,14 @@
 	struct bfi_ioim_abort_req_s *m;
 	enum bfi_ioim_h2i	msgop;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(ioim->bfa, ioim->reqq);
 	if (!m)
 		return BFA_FALSE;
 
-	/**
+	/*
 	 * build i/o request message next
 	 */
 	if (ioim->iosp->abort_explicit)
@@ -2565,17 +2564,17 @@
 		msgop = BFI_IOIM_H2I_IOCLEANUP_REQ;
 
 	bfi_h2i_set(m->mh, BFI_MC_IOIM, msgop, bfa_lpuid(ioim->bfa));
-	m->io_tag    = bfa_os_htons(ioim->iotag);
+	m->io_tag    = cpu_to_be16(ioim->iotag);
 	m->abort_tag = ++ioim->abort_tag;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(ioim->bfa, ioim->reqq);
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Call to resume any I/O requests waiting for room in request queue.
  */
 static void
@@ -2591,7 +2590,7 @@
 static void
 bfa_ioim_notify_cleanup(struct bfa_ioim_s *ioim)
 {
-	/**
+	/*
 	 * Move IO from itnim queue to fcpim global queue since itnim will be
 	 * freed.
 	 */
@@ -2624,13 +2623,13 @@
 	return BFA_TRUE;
 }
 
-/**
+/*
  *	or after the link comes back.
  */
 void
 bfa_ioim_delayed_comp(struct bfa_ioim_s *ioim, bfa_boolean_t iotov)
 {
-	/**
+	/*
 	 * If path tov timer expired, failback with PATHTOV status - these
 	 * IO requests are not normally retried by IO stack.
 	 *
@@ -2645,7 +2644,7 @@
 	}
 	bfa_cb_queue(ioim->bfa, &ioim->hcb_qe, ioim->io_cbfn, ioim);
 
-	/**
+	/*
 	 * Move IO to fcpim global queue since itnim will be
 	 * freed.
 	 */
@@ -2655,11 +2654,11 @@
 
 
 
-/**
+/*
  *  hal_ioim_friend
  */
 
-/**
+/*
  * Memory allocation and initialization.
  */
 void
@@ -2671,7 +2670,7 @@
 	u8			*snsinfo;
 	u32		snsbufsz;
 
-	/**
+	/*
 	 * claim memory first
 	 */
 	ioim = (struct bfa_ioim_s *) bfa_meminfo_kva(minfo);
@@ -2682,7 +2681,7 @@
 	fcpim->ioim_sp_arr = iosp;
 	bfa_meminfo_kva(minfo) = (u8 *) (iosp + fcpim->num_ioim_reqs);
 
-	/**
+	/*
 	 * Claim DMA memory for per IO sense data.
 	 */
 	snsbufsz = fcpim->num_ioim_reqs * BFI_IOIM_SNSLEN;
@@ -2694,7 +2693,7 @@
 	snsinfo = fcpim->snsbase.kva;
 	bfa_iocfc_set_snsbase(fcpim->bfa, fcpim->snsbase.pa);
 
-	/**
+	/*
 	 * Initialize ioim free queues
 	 */
 	INIT_LIST_HEAD(&fcpim->ioim_free_q);
@@ -2706,7 +2705,7 @@
 		/*
 		 * initialize IOIM
 		 */
-		bfa_os_memset(ioim, 0, sizeof(struct bfa_ioim_s));
+		memset(ioim, 0, sizeof(struct bfa_ioim_s));
 		ioim->iotag   = i;
 		ioim->bfa     = fcpim->bfa;
 		ioim->fcpim   = fcpim;
@@ -2723,7 +2722,7 @@
 	}
 }
 
-/**
+/*
  * Driver detach time call.
  */
 void
@@ -2740,7 +2739,7 @@
 	u16	iotag;
 	enum bfa_ioim_event evt = BFA_IOIM_SM_COMP;
 
-	iotag = bfa_os_ntohs(rsp->io_tag);
+	iotag = be16_to_cpu(rsp->io_tag);
 
 	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
 	bfa_assert(ioim->iotag == iotag);
@@ -2750,7 +2749,7 @@
 	bfa_trc(ioim->bfa, rsp->reuse_io_tag);
 
 	if (bfa_sm_cmp_state(ioim, bfa_ioim_sm_active))
-		bfa_os_assign(ioim->iosp->comp_rspmsg, *m);
+		ioim->iosp->comp_rspmsg = *m;
 
 	switch (rsp->io_status) {
 	case BFI_IOIM_STS_OK:
@@ -2823,7 +2822,7 @@
 	struct bfa_ioim_s *ioim;
 	u16	iotag;
 
-	iotag = bfa_os_ntohs(rsp->io_tag);
+	iotag = be16_to_cpu(rsp->io_tag);
 
 	ioim = BFA_IOIM_FROM_TAG(fcpim, iotag);
 	bfa_assert(ioim->iotag == iotag);
@@ -2837,7 +2836,7 @@
 void
 bfa_ioim_profile_start(struct bfa_ioim_s *ioim)
 {
-	ioim->start_time = bfa_os_get_clock();
+	ioim->start_time = jiffies;
 }
 
 void
@@ -2845,7 +2844,7 @@
 {
 	u32 fcp_dl = bfa_cb_ioim_get_size(ioim->dio);
 	u32 index = bfa_ioim_get_index(fcp_dl);
-	u64 end_time = bfa_os_get_clock();
+	u64 end_time = jiffies;
 	struct bfa_itnim_latency_s *io_lat =
 			&(ioim->itnim->ioprofile.io_latency);
 	u32 val = (u32)(end_time - ioim->start_time);
@@ -2859,7 +2858,7 @@
 		io_lat->max[index] : val;
 	io_lat->avg[index] += val;
 }
-/**
+/*
  * Called by itnim to clean up IO while going offline.
  */
 void
@@ -2882,7 +2881,7 @@
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_CLEANUP);
 }
 
-/**
+/*
  * IOC failure handling.
  */
 void
@@ -2893,7 +2892,7 @@
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_HWFAIL);
 }
 
-/**
+/*
  * IO offline TOV popped. Fail the pending IO.
  */
 void
@@ -2905,11 +2904,11 @@
 
 
 
-/**
+/*
  *  hal_ioim_api
  */
 
-/**
+/*
  * Allocate IOIM resource for initiator mode I/O request.
  */
 struct bfa_ioim_s *
@@ -2919,7 +2918,7 @@
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 	struct bfa_ioim_s *ioim;
 
-	/**
+	/*
 	 * allocate IOIM resource
 	 */
 	bfa_q_deq(&fcpim->ioim_free_q, &ioim);
@@ -2970,7 +2969,7 @@
 
 	bfa_ioim_cb_profile_start(ioim->fcpim, ioim);
 
-	/**
+	/*
 	 * Obtain the queue over which this request has to be issued
 	 */
 	ioim->reqq = bfa_fcpim_ioredirect_enabled(ioim->bfa) ?
@@ -2980,7 +2979,7 @@
 	bfa_sm_send_event(ioim, BFA_IOIM_SM_START);
 }
 
-/**
+/*
  * Driver I/O abort request.
  */
 bfa_status_t
@@ -2999,11 +2998,11 @@
 }
 
 
-/**
+/*
  *  BFA TSKIM state machine functions
  */
 
-/**
+/*
  *	Task management command beginning state.
  */
 static void
@@ -3016,7 +3015,7 @@
 		bfa_sm_set_state(tskim, bfa_tskim_sm_active);
 		bfa_tskim_gather_ios(tskim);
 
-		/**
+		/*
 		 * If device is offline, do not send TM on wire. Just cleanup
 		 * any pending IO requests and complete TM request.
 		 */
@@ -3040,7 +3039,7 @@
 	}
 }
 
-/**
+/*
  * brief
  *	TM command is active, awaiting completion from firmware to
  *	cleanup IO requests in TM scope.
@@ -3077,7 +3076,7 @@
 	}
 }
 
-/**
+/*
  *	An active TM is being cleaned up since ITN is offline. Awaiting cleanup
  *	completion event from firmware.
  */
@@ -3088,7 +3087,7 @@
 
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
-		/**
+		/*
 		 * Ignore and wait for ABORT completion from firmware.
 		 */
 		break;
@@ -3121,7 +3120,7 @@
 		break;
 
 	case BFA_TSKIM_SM_CLEANUP:
-		/**
+		/*
 		 * Ignore, TM command completed on wire.
 		 * Notify TM completion on IO cleanup completion.
 		 */
@@ -3138,7 +3137,7 @@
 	}
 }
 
-/**
+/*
  *	Task management command is waiting for room in request CQ
  */
 static void
@@ -3153,7 +3152,7 @@
 		break;
 
 	case BFA_TSKIM_SM_CLEANUP:
-		/**
+		/*
 		 * No need to send TM on wire since ITN is offline.
 		 */
 		bfa_sm_set_state(tskim, bfa_tskim_sm_iocleanup);
@@ -3173,7 +3172,7 @@
 	}
 }
 
-/**
+/*
  *	Task management command is active, awaiting for room in request CQ
  *	to send clean up request.
  */
@@ -3186,7 +3185,7 @@
 	switch (event) {
 	case BFA_TSKIM_SM_DONE:
 		bfa_reqq_wcancel(&tskim->reqq_wait);
-		/**
+		/*
 		 *
 		 * Fall through !!!
 		 */
@@ -3208,7 +3207,7 @@
 	}
 }
 
-/**
+/*
  *	BFA callback is pending
  */
 static void
@@ -3236,7 +3235,7 @@
 
 
 
-/**
+/*
  *  hal_tskim_private
  */
 
@@ -3289,7 +3288,7 @@
 	return BFA_FALSE;
 }
 
-/**
+/*
  *	Gather affected IO requests and task management commands.
  */
 static void
@@ -3301,7 +3300,7 @@
 
 	INIT_LIST_HEAD(&tskim->io_q);
 
-	/**
+	/*
 	 * Gather any active IO requests first.
 	 */
 	list_for_each_safe(qe, qen, &itnim->io_q) {
@@ -3313,7 +3312,7 @@
 		}
 	}
 
-	/**
+	/*
 	 * Failback any pending IO requests immediately.
 	 */
 	list_for_each_safe(qe, qen, &itnim->pending_q) {
@@ -3327,7 +3326,7 @@
 	}
 }
 
-/**
+/*
  *	IO cleanup completion
  */
 static void
@@ -3339,7 +3338,7 @@
 	bfa_sm_send_event(tskim, BFA_TSKIM_SM_IOS_DONE);
 }
 
-/**
+/*
  *	Gather affected IO requests and task management commands.
  */
 static void
@@ -3359,7 +3358,7 @@
 	bfa_wc_wait(&tskim->wc);
 }
 
-/**
+/*
  *	Send task management request to firmware.
  */
 static bfa_boolean_t
@@ -3368,33 +3367,33 @@
 	struct bfa_itnim_s *itnim = tskim->itnim;
 	struct bfi_tskim_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
 	if (!m)
 		return BFA_FALSE;
 
-	/**
+	/*
 	 * build i/o request message next
 	 */
 	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_TM_REQ,
 			bfa_lpuid(tskim->bfa));
 
-	m->tsk_tag = bfa_os_htons(tskim->tsk_tag);
+	m->tsk_tag = cpu_to_be16(tskim->tsk_tag);
 	m->itn_fhdl = tskim->itnim->rport->fw_handle;
 	m->t_secs = tskim->tsecs;
 	m->lun = tskim->lun;
 	m->tm_flags = tskim->tm_cmnd;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(tskim->bfa, itnim->reqq);
 	return BFA_TRUE;
 }
 
-/**
+/*
  *	Send abort request to cleanup an active TM to firmware.
  */
 static bfa_boolean_t
@@ -3403,29 +3402,29 @@
 	struct bfa_itnim_s	*itnim = tskim->itnim;
 	struct bfi_tskim_abortreq_s	*m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(tskim->bfa, itnim->reqq);
 	if (!m)
 		return BFA_FALSE;
 
-	/**
+	/*
 	 * build i/o request message next
 	 */
 	bfi_h2i_set(m->mh, BFI_MC_TSKIM, BFI_TSKIM_H2I_ABORT_REQ,
 			bfa_lpuid(tskim->bfa));
 
-	m->tsk_tag  = bfa_os_htons(tskim->tsk_tag);
+	m->tsk_tag  = cpu_to_be16(tskim->tsk_tag);
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(tskim->bfa, itnim->reqq);
 	return BFA_TRUE;
 }
 
-/**
+/*
  *	Call to resume task management cmnd waiting for room in request queue.
  */
 static void
@@ -3437,7 +3436,7 @@
 	bfa_sm_send_event(tskim, BFA_TSKIM_SM_QRESUME);
 }
 
-/**
+/*
  * Cleanup IOs associated with a task management command on IOC failures.
  */
 static void
@@ -3454,11 +3453,11 @@
 
 
 
-/**
+/*
  *  hal_tskim_friend
  */
 
-/**
+/*
  * Notification on completions from related ioim.
  */
 void
@@ -3467,7 +3466,7 @@
 	bfa_wc_down(&tskim->wc);
 }
 
-/**
+/*
  * Handle IOC h/w failure notification from itnim.
  */
 void
@@ -3478,7 +3477,7 @@
 	bfa_sm_send_event(tskim, BFA_TSKIM_SM_HWFAIL);
 }
 
-/**
+/*
  * Cleanup TM command and associated IOs as part of ITNIM offline.
  */
 void
@@ -3489,7 +3488,7 @@
 	bfa_sm_send_event(tskim, BFA_TSKIM_SM_CLEANUP);
 }
 
-/**
+/*
  *	Memory allocation and initialization.
  */
 void
@@ -3507,7 +3506,7 @@
 		/*
 		 * initialize TSKIM
 		 */
-		bfa_os_memset(tskim, 0, sizeof(struct bfa_tskim_s));
+		memset(tskim, 0, sizeof(struct bfa_tskim_s));
 		tskim->tsk_tag = i;
 		tskim->bfa	= fcpim->bfa;
 		tskim->fcpim	= fcpim;
@@ -3525,7 +3524,7 @@
 void
 bfa_tskim_detach(struct bfa_fcpim_mod_s *fcpim)
 {
-	/**
+	/*
 	* @todo
 	*/
 }
@@ -3536,14 +3535,14 @@
 	struct bfa_fcpim_mod_s *fcpim = BFA_FCPIM_MOD(bfa);
 	struct bfi_tskim_rsp_s *rsp = (struct bfi_tskim_rsp_s *) m;
 	struct bfa_tskim_s *tskim;
-	u16	tsk_tag = bfa_os_ntohs(rsp->tsk_tag);
+	u16	tsk_tag = be16_to_cpu(rsp->tsk_tag);
 
 	tskim = BFA_TSKIM_FROM_TAG(fcpim, tsk_tag);
 	bfa_assert(tskim->tsk_tag == tsk_tag);
 
 	tskim->tsk_status = rsp->tsk_status;
 
-	/**
+	/*
 	 * Firmware sends BFI_TSKIM_STS_ABORTED status for abort
 	 * requests. All other statuses are for normal completions.
 	 */
@@ -3558,7 +3557,7 @@
 
 
 
-/**
+/*
  *  hal_tskim_api
  */
 
@@ -3585,7 +3584,7 @@
 	list_add_tail(&tskim->qe, &tskim->fcpim->tskim_free_q);
 }
 
-/**
+/*
  *	Start a task management command.
  *
  * @param[in]	tskim	BFA task management command instance
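Beyond the same comment and byte-order conversions, bfa_fcpim.c drops two more wrappers: bfa_os_assign() becomes a plain assignment and bfa_os_get_clock() becomes the kernel tick counter jiffies, which bfa_ioim_profile_start()/bfa_ioim_profile_comp() use to compute per-I/O latency. A sketch of that timing pattern (the demo_* names are made up; only jiffies and the subtraction mirror the driver code above):

	#include <linux/jiffies.h>
	#include <linux/types.h>

	struct demo_io {
		u64 start_time;			/* tick count at dispatch */
	};

	static void demo_io_start(struct demo_io *io)
	{
		io->start_time = jiffies;	/* same pattern as bfa_ioim_profile_start() */
	}

	static u32 demo_io_latency(struct demo_io *io)
	{
		return (u32)(jiffies - io->start_time);	/* elapsed ticks at completion */
	}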
diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
index 3bf3431..db53717 100644
--- a/drivers/scsi/bfa/bfa_fcpim.h
+++ b/drivers/scsi/bfa/bfa_fcpim.h
@@ -104,7 +104,7 @@
 	bfa_fcpim_profile_t     profile_start;
 };
 
-/**
+/*
  * BFA IO (initiator mode)
  */
 struct bfa_ioim_s {
@@ -137,7 +137,7 @@
 	struct bfa_tskim_s	*tskim;		/*  Relevant TM cmd	*/
 };
 
-/**
+/*
  * BFA Task management command (initiator mode)
  */
 struct bfa_tskim_s {
@@ -160,7 +160,7 @@
 };
 
 
-/**
+/*
  * BFA i-t-n (initiator mode)
  */
 struct bfa_itnim_s {
@@ -303,7 +303,7 @@
 		struct bfa_itnim_ioprofile_s *ioprofile);
 #define bfa_itnim_get_reqq(__ioim) (((struct bfa_ioim_s *)__ioim)->itnim->reqq)
 
-/**
+/*
  *	BFA completion callback for bfa_itnim_online().
  *
  * @param[in]		itnim		FCS or driver itnim instance
@@ -312,7 +312,7 @@
  */
 void	bfa_cb_itnim_online(void *itnim);
 
-/**
+/*
  *	BFA completion callback for bfa_itnim_offline().
  *
  * @param[in]		itnim		FCS or driver itnim instance
@@ -323,7 +323,7 @@
 void	bfa_cb_itnim_tov_begin(void *itnim);
 void	bfa_cb_itnim_tov(void *itnim);
 
-/**
+/*
  *	BFA notification to FCS/driver for second level error recovery.
  *
  * Atleast one I/O request has timedout and target is unresponsive to
@@ -351,7 +351,7 @@
 				      bfa_boolean_t iotov);
 
 
-/**
+/*
  *	I/O completion notification.
  *
  * @param[in]		dio			driver IO structure
@@ -368,7 +368,7 @@
 				  u8 scsi_status, int sns_len,
 				  u8 *sns_info, s32 residue);
 
-/**
+/*
  *	I/O good completion notification.
  *
  * @param[in]		dio			driver IO structure
@@ -377,7 +377,7 @@
  */
 void	bfa_cb_ioim_good_comp(void *bfad, struct bfad_ioim_s *dio);
 
-/**
+/*
  *	I/O abort completion notification
  *
  * @param[in]		dio			driver IO that was aborted
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 9cebbe3..c94502d 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_fcs.c BFA FCS main
  */
 
@@ -25,7 +25,7 @@
 
 BFA_TRC_FILE(FCS, FCS);
 
-/**
+/*
  * FCS sub-modules
  */
 struct bfa_fcs_mod_s {
@@ -43,7 +43,7 @@
 	  bfa_fcs_fabric_modexit },
 };
 
-/**
+/*
  *  fcs_api BFA FCS API
  */
 
@@ -58,11 +58,11 @@
 
 
 
-/**
+/*
  *  fcs_api BFA FCS API
  */
 
-/**
+/*
  * fcs attach -- called once to initialize data structures at driver attach time
  */
 void
@@ -86,7 +86,7 @@
 	}
 }
 
-/**
+/*
  * fcs initialization, called once after bfa initialization is complete
  */
 void
@@ -110,7 +110,7 @@
 	}
 }
 
-/**
+/*
  * Start FCS operations.
  */
 void
@@ -119,7 +119,7 @@
 	bfa_fcs_fabric_modstart(fcs);
 }
 
-/**
+/*
  *	brief
  *		FCS driver details initialization.
  *
@@ -138,7 +138,7 @@
 	bfa_fcs_fabric_psymb_init(&fcs->fabric);
 }
 
-/**
+/*
  *	brief
  *		FCS FDMI Driver Parameter Initialization
  *
@@ -154,7 +154,7 @@
 	fcs->fdmi_enabled = fdmi_enable;
 
 }
-/**
+/*
  *	brief
  *		FCS instance cleanup and exit.
  *
@@ -196,7 +196,7 @@
 	bfa_wc_down(&fcs->wc);
 }
 
-/**
+/*
  * Fabric module implementation.
  */
 
@@ -232,11 +232,11 @@
 					 u32 rsp_len,
 					 u32 resid_len,
 					 struct fchs_s *rspfchs);
-/**
+/*
  *  fcs_fabric_sm fabric state machine functions
  */
 
-/**
+/*
  * Fabric state machine events
  */
 enum bfa_fcs_fabric_event {
@@ -286,7 +286,7 @@
 					   enum bfa_fcs_fabric_event event);
 static void	bfa_fcs_fabric_sm_deleting(struct bfa_fcs_fabric_s *fabric,
 					   enum bfa_fcs_fabric_event event);
-/**
+/*
  *   Beginning state before fabric creation.
  */
 static void
@@ -312,7 +312,7 @@
 	}
 }
 
-/**
+/*
  *   Beginning state before fabric creation.
  */
 static void
@@ -345,7 +345,7 @@
 	}
 }
 
-/**
+/*
  *   Link is down, awaiting LINK UP event from port. This is also the
  *   first state at fabric creation.
  */
@@ -375,7 +375,7 @@
 	}
 }
 
-/**
+/*
  *   FLOGI is in progress, awaiting FLOGI reply.
  */
 static void
@@ -468,7 +468,7 @@
 	}
 }
 
-/**
+/*
  *   Authentication is in progress, awaiting authentication results.
  */
 static void
@@ -508,7 +508,7 @@
 	}
 }
 
-/**
+/*
  *   Authentication failed
  */
 static void
@@ -534,7 +534,7 @@
 	}
 }
 
-/**
+/*
  *   Port is in loopback mode.
  */
 static void
@@ -560,7 +560,7 @@
 	}
 }
 
-/**
+/*
  *   There is no attached fabric - private loop or NPort-to-NPort topology.
  */
 static void
@@ -593,7 +593,7 @@
 	}
 }
 
-/**
+/*
  *   Fabric is online - normal operating state.
  */
 static void
@@ -628,7 +628,7 @@
 	}
 }
 
-/**
+/*
  *   Exchanging virtual fabric parameters.
  */
 static void
@@ -652,7 +652,7 @@
 	}
 }
 
-/**
+/*
  *   EVFP exchange complete and VFT tagging is enabled.
  */
 static void
@@ -663,7 +663,7 @@
 	bfa_trc(fabric->fcs, event);
 }
 
-/**
+/*
  *   Port is isolated after EVFP exchange due to VF_ID mismatch (N and F).
  */
 static void
@@ -684,7 +684,7 @@
 		fabric->event_arg.swp_vfid);
 }
 
-/**
+/*
  *   Fabric is being deleted, awaiting vport delete completions.
  */
 static void
@@ -714,7 +714,7 @@
 
 
 
-/**
+/*
  *  fcs_fabric_private fabric private functions
  */
 
@@ -728,7 +728,7 @@
 	port_cfg->pwwn = bfa_ioc_get_pwwn(&fabric->fcs->bfa->ioc);
 }
 
-/**
+/*
  * Port Symbolic Name Creation for base port.
  */
 void
@@ -789,7 +789,7 @@
 	port_cfg->sym_name.symname[BFA_SYMNAME_MAXLEN - 1] = 0;
 }
 
-/**
+/*
  * bfa lps login completion callback
  */
 void
@@ -867,7 +867,7 @@
 	bfa_trc(fabric->fcs, fabric->is_npiv);
 	bfa_trc(fabric->fcs, fabric->is_auth);
 }
-/**
+/*
  *		Allocate and send FLOGI.
  */
 static void
@@ -897,7 +897,7 @@
 	bfa_fcs_fabric_set_opertype(fabric);
 	fabric->stats.fabric_onlines++;
 
-	/**
+	/*
 	 * notify online event to base and then virtual ports
 	 */
 	bfa_fcs_lport_online(&fabric->bport);
@@ -917,7 +917,7 @@
 	bfa_trc(fabric->fcs, fabric->fabric_name);
 	fabric->stats.fabric_offlines++;
 
-	/**
+	/*
 	 * notify offline event first to vports and then base port.
 	 */
 	list_for_each_safe(qe, qen, &fabric->vport_q) {
@@ -939,7 +939,7 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELAYED);
 }
 
-/**
+/*
  * Delete all vports and wait for vport delete completions.
  */
 static void
@@ -965,11 +965,11 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELCOMP);
 }
 
-/**
+/*
  *  fcs_fabric_public fabric public functions
  */
 
-/**
+/*
  * Attach time initialization.
  */
 void
@@ -978,9 +978,9 @@
 	struct bfa_fcs_fabric_s *fabric;
 
 	fabric = &fcs->fabric;
-	bfa_os_memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
+	memset(fabric, 0, sizeof(struct bfa_fcs_fabric_s));
 
-	/**
+	/*
 	 * Initialize base fabric.
 	 */
 	fabric->fcs = fcs;
@@ -989,7 +989,7 @@
 	fabric->lps = bfa_lps_alloc(fcs->bfa);
 	bfa_assert(fabric->lps);
 
-	/**
+	/*
 	 * Initialize fabric delete completion handler. Fabric deletion is
 	 * complete when the last vport delete is complete.
 	 */
@@ -1007,7 +1007,7 @@
 	bfa_trc(fcs, 0);
 }
 
-/**
+/*
  *   Module cleanup
  */
 void
@@ -1017,7 +1017,7 @@
 
 	bfa_trc(fcs, 0);
 
-	/**
+	/*
 	 * Cleanup base fabric.
 	 */
 	fabric = &fcs->fabric;
@@ -1025,7 +1025,7 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_DELETE);
 }
 
-/**
+/*
  * Fabric module start -- kick starts FCS actions
  */
 void
@@ -1038,7 +1038,7 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_START);
 }
 
-/**
+/*
  *   Suspend fabric activity as part of driver suspend.
  */
 void
@@ -1064,7 +1064,7 @@
 	return fabric->oper_type;
 }
 
-/**
+/*
  *   Link up notification from BFA physical port module.
  */
 void
@@ -1074,7 +1074,7 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_UP);
 }
 
-/**
+/*
  *   Link down notification from BFA physical port module.
  */
 void
@@ -1084,7 +1084,7 @@
 	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_LINK_DOWN);
 }
 
-/**
+/*
  *   A child vport is being created in the fabric.
  *
  *   Call from vport module at vport creation. A list of base port and vports
@@ -1099,7 +1099,7 @@
 bfa_fcs_fabric_addvport(struct bfa_fcs_fabric_s *fabric,
 			struct bfa_fcs_vport_s *vport)
 {
-	/**
+	/*
 	 * - add vport to fabric's vport_q
 	 */
 	bfa_trc(fabric->fcs, fabric->vf_id);
@@ -1109,7 +1109,7 @@
 	bfa_wc_up(&fabric->wc);
 }
 
-/**
+/*
  *   A child vport is being deleted from fabric.
  *
  *   Vport is being deleted.
@@ -1123,7 +1123,7 @@
 	bfa_wc_down(&fabric->wc);
 }
 
-/**
+/*
  *   Base port is deleted.
  */
 void
@@ -1133,7 +1133,7 @@
 }
 
 
-/**
+/*
  *    Check if fabric is online.
  *
  *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1146,7 +1146,7 @@
 	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_online);
 }
 
-/**
+/*
  *	brief
  *
  */
@@ -1158,7 +1158,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Lookup for a vport withing a fabric given its pwwn
  */
 struct bfa_fcs_vport_s *
@@ -1176,7 +1176,7 @@
 	return NULL;
 }
 
-/**
+/*
  *    In a given fabric, return the number of lports.
  *
  *   param[in] fabric - Fabric instance. This can be a base fabric or vf.
@@ -1214,7 +1214,7 @@
 
 	return oui;
 }
-/**
+/*
  *		Unsolicited frame receive handling.
  */
 void
@@ -1230,7 +1230,7 @@
 	bfa_trc(fabric->fcs, len);
 	bfa_trc(fabric->fcs, pid);
 
-	/**
+	/*
 	 * Look for our own FLOGI frames being looped back. This means an
 	 * external loopback cable is in place. Our own FLOGI frames are
 	 * sometimes looped back when switch port gets temporarily bypassed.
@@ -1242,7 +1242,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * FLOGI/EVFP exchanges should be consumed by base fabric.
 	 */
 	if (fchs->d_id == bfa_os_hton3b(FC_FABRIC_PORT)) {
@@ -1252,7 +1252,7 @@
 	}
 
 	if (fabric->bport.pid == pid) {
-		/**
+		/*
 		 * All authentication frames should be routed to auth
 		 */
 		bfa_trc(fabric->fcs, els_cmd->els_code);
@@ -1266,7 +1266,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * look for a matching local port ID
 	 */
 	list_for_each(qe, &fabric->vport_q) {
@@ -1280,7 +1280,7 @@
 	bfa_fcs_lport_uf_recv(&fabric->bport, fchs, len);
 }
 
-/**
+/*
  *		Unsolicited frames to be processed by fabric.
  */
 static void
@@ -1304,7 +1304,7 @@
 	}
 }
 
-/**
+/*
  *	Process	incoming FLOGI
  */
 static void
@@ -1329,7 +1329,7 @@
 		return;
 	}
 
-	fabric->bb_credit = bfa_os_ntohs(flogi->csp.bbcred);
+	fabric->bb_credit = be16_to_cpu(flogi->csp.bbcred);
 	bport->port_topo.pn2n.rem_port_wwn = flogi->port_name;
 	bport->port_topo.pn2n.reply_oxid = fchs->ox_id;
 
@@ -1351,7 +1351,7 @@
 	struct fchs_s	fchs;
 
 	fcxp = bfa_fcs_fcxp_alloc(fabric->fcs);
-	/**
+	/*
 	 * Do not expect this failure -- expect remote node to retry
 	 */
 	if (!fcxp)
@@ -1370,7 +1370,7 @@
 		      FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *   Flogi Acc completion callback.
  */
 static void
@@ -1417,130 +1417,7 @@
 	}
 }
 
-/**
- *  fcs_vf_api virtual fabrics API
- */
-
-/**
- * Enable VF mode.
- *
- * @param[in]		fcs		fcs module instance
- * @param[in]		vf_id		default vf_id of port, FC_VF_ID_NULL
- *					to use standard default vf_id of 1.
- *
- * @retval	BFA_STATUS_OK		vf mode is enabled
- * @retval	BFA_STATUS_BUSY		Port is active. Port must be disabled
- *					before VF mode can be enabled.
- */
-bfa_status_t
-bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id)
-{
-	return BFA_STATUS_OK;
-}
-
-/**
- * Disable VF mode.
- *
- * @param[in]		fcs		fcs module instance
- *
- * @retval	BFA_STATUS_OK		vf mode is disabled
- * @retval	BFA_STATUS_BUSY		VFs are present and being used. All
- *					VFs must be deleted before disabling
- *					VF mode.
- */
-bfa_status_t
-bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs)
-{
-	return BFA_STATUS_OK;
-}
-
-/**
- *  Create a new VF instance.
- *
- *  A new VF is created using the given VF configuration. A VF is identified
- *  by VF id. No duplicate VF creation is allowed with the same VF id. Once
- *  a VF is created, VF is automatically started after link initialization
- *  and EVFP exchange is completed.
- *
- *	param[in] vf	 -	FCS vf data structure. Memory is
- *				allocated by caller (driver)
- *	param[in] fcs	 -	FCS module
- *	param[in] vf_cfg -	VF configuration
- *	param[in] vf_drv -	Opaque handle back to the driver's
- *				virtual vf structure
- *
- *	retval BFA_STATUS_OK VF creation is successful
- *	retval BFA_STATUS_FAILED VF creation failed
- *	retval BFA_STATUS_EEXIST A VF exists with the given vf_id
- */
-bfa_status_t
-bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs, u16 vf_id,
-		  struct bfa_lport_cfg_s *port_cfg, struct bfad_vf_s *vf_drv)
-{
-	bfa_trc(fcs, vf_id);
-	return BFA_STATUS_OK;
-}
-
-/**
- *	Use this function to delete a BFA VF object. VF object should
- *	be stopped before this function call.
- *
- *	param[in] vf - pointer to bfa_vf_t.
- *
- *	retval BFA_STATUS_OK	On vf deletion success
- *	retval BFA_STATUS_BUSY VF is not in a stopped state
- *	retval BFA_STATUS_INPROGRESS VF deletion in in progress
- */
-bfa_status_t
-bfa_fcs_vf_delete(bfa_fcs_vf_t *vf)
-{
-	bfa_trc(vf->fcs, vf->vf_id);
-	return BFA_STATUS_OK;
-}
-
-
-/**
- *	Returns attributes of the given VF.
- *
- *	param[in]	vf	pointer to bfa_vf_t.
- *	param[out] vf_attr	vf attributes returned
- *
- *	return None
- */
-void
-bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr)
-{
-	bfa_trc(vf->fcs, vf->vf_id);
-}
-
-/**
- *	Return statistics associated with the given vf.
- *
- *	param[in] vf		pointer to bfa_vf_t.
- *	param[out] vf_stats	vf statistics returned
- *
- *	@return None
- */
-void
-bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf, struct bfa_vf_stats_s *vf_stats)
-{
-	bfa_os_memcpy(vf_stats, &vf->stats, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
- *	clear statistics associated with the given vf.
- *
- *	param[in]	vf	pointer to bfa_vf_t.
- *
- *	@return None
- */
-void
-bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf)
-{
-	bfa_os_memset(&vf->stats, 0, sizeof(struct bfa_vf_stats_s));
-}
-
-/**
+/*
  *	Returns FCS vf structure for a given vf_id.
  *
  *	param[in]	vf_id - VF_ID
@@ -1558,81 +1435,7 @@
 	return NULL;
 }
 
-/**
- *	Return the list of VFs configured.
- *
- *	param[in]	fcs	fcs module instance
- *	param[out]	vf_ids	returned list of vf_ids
- *	param[in,out]	nvfs	in:size of vf_ids array,
- *				out:total elements present,
- *				actual elements returned is limited by the size
- *
- *	return Driver VF structure
- */
-void
-bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
-	bfa_trc(fcs, *nvfs);
-}
-
-/**
- *	Return the list of all VFs visible from fabric.
- *
- *	param[in]	fcs	fcs module instance
- *	param[out]	vf_ids	returned list of vf_ids
- *	param[in,out]	nvfs	in:size of vf_ids array,
- *				out:total elements present,
- *				actual elements returned is limited by the size
- *
- *	return Driver VF structure
- */
-void
-bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs)
-{
-	bfa_trc(fcs, *nvfs);
-}
-
-/**
- *	Return the list of local logical ports present in the given VF.
- *
- *	param[in]	vf	vf for which logical ports are returned
- *	param[out]	lpwwn	returned logical port wwn list
- *	param[in,out]	nlports	in:size of lpwwn list;
- *				out:total elements present,
- *				actual elements returned is limited by the size
- */
-void
-bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t lpwwn[], int *nlports)
-{
-	struct list_head	*qe;
-	struct bfa_fcs_vport_s *vport;
-	int	i;
-	struct bfa_fcs_s      *fcs;
-
-	if (vf == NULL || lpwwn == NULL || *nlports == 0)
-		return;
-
-	fcs = vf->fcs;
-
-	bfa_trc(fcs, vf->vf_id);
-	bfa_trc(fcs, (u32) *nlports);
-
-	i = 0;
-	lpwwn[i++] = vf->bport.port_cfg.pwwn;
-
-	list_for_each(qe, &vf->vport_q) {
-		if (i >= *nlports)
-			break;
-
-		vport = (struct bfa_fcs_vport_s *) qe;
-		lpwwn[i++] = vport->lport.port_cfg.pwwn;
-	}
-
-	bfa_trc(fcs, i);
-	*nlports = i;
-}
-
-/**
+/*
  * BFA FCS PPORT ( physical port)
  */
 static void
@@ -1662,11 +1465,11 @@
 	bfa_fcport_event_register(fcs->bfa, bfa_fcs_port_event_handler, fcs);
 }
 
-/**
+/*
  * BFA FCS UF ( Unsolicited Frames)
  */
 
-/**
+/*
  *		BFA callback for unsolicited frame receive handler.
  *
  * @param[in]		cbarg		callback arg for receive handler
@@ -1683,7 +1486,7 @@
 	struct fc_vft_s *vft;
 	struct bfa_fcs_fabric_s *fabric;
 
-	/**
+	/*
 	 * check for VFT header
 	 */
 	if (fchs->routing == FC_RTG_EXT_HDR &&
@@ -1695,7 +1498,7 @@
 		else
 			fabric = bfa_fcs_vf_lookup(fcs, (u16) vft->vf_id);
 
-		/**
+		/*
 		 * drop frame if vfid is unknown
 		 */
 		if (!fabric) {
@@ -1705,7 +1508,7 @@
 			return;
 		}
 
-		/**
+		/*
 		 * skip vft header
 		 */
 		fchs = (struct fchs_s *) (vft + 1);
diff --git a/drivers/scsi/bfa/bfa_fcs.h b/drivers/scsi/bfa/bfa_fcs.h
index d75045d..9cb6a55 100644
--- a/drivers/scsi/bfa/bfa_fcs.h
+++ b/drivers/scsi/bfa/bfa_fcs.h
@@ -196,7 +196,7 @@
 #define bfa_fcs_fabric_is_switched(__f)			\
 	((__f)->fab_type == BFA_FCS_FABRIC_SWITCHED)
 
-/**
+/*
  *   The design calls for a single implementation of base fabric and vf.
  */
 #define bfa_fcs_vf_t struct bfa_fcs_fabric_s
@@ -216,7 +216,7 @@
 
 #define bfa_fcs_lport_t struct bfa_fcs_lport_s
 
-/**
+/*
  * Symbolic Name related defines
  *  Total bytes 255.
  *  Physical Port's symbolic name 128 bytes.
@@ -239,7 +239,7 @@
 #define BFA_FCS_PORT_SYMBNAME_OSINFO_SZ			48
 #define BFA_FCS_PORT_SYMBNAME_OSPATCH_SZ		16
 
-/**
+/*
  * Get FC port ID for a logical port.
  */
 #define bfa_fcs_lport_get_fcid(_lport)	((_lport)->pid)
@@ -262,7 +262,7 @@
 #define bfa_fcs_lport_get_fabric_ipaddr(_lport)		\
 		((_lport)->fabric->fabric_ip_addr)
 
-/**
+/*
  * bfa fcs port public functions
  */
 
@@ -342,7 +342,7 @@
 #define bfa_fcs_vport_get_port(vport)			\
 	((struct bfa_fcs_lport_s  *)(&vport->port))
 
-/**
+/*
  * bfa fcs vport public functions
  */
 bfa_status_t bfa_fcs_vport_create(struct bfa_fcs_vport_s *vport,
@@ -393,7 +393,7 @@
 	enum bfa_port_speed	rpsc_speed;
 	/*  Current Speed from RPSC. O if RPSC fails */
 	enum bfa_port_speed	assigned_speed;
-	/**
+	/*
 	 * Speed assigned by the user.  will be used if RPSC is
 	 * not supported by the rport.
 	 */
@@ -434,7 +434,7 @@
 	return rport->bfa_rport;
 }
 
-/**
+/*
  * bfa fcs rport API functions
  */
 bfa_status_t bfa_fcs_rport_add(struct bfa_fcs_lport_s *port, wwn_t *pwwn,
@@ -573,7 +573,7 @@
 	return itnim->bfa_itnim;
 }
 
-/**
+/*
  * bfa fcs FCP Initiator mode API functions
  */
 void bfa_fcs_itnim_get_attr(struct bfa_fcs_itnim_s *itnim,
@@ -677,22 +677,9 @@
 void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
 void		bfa_fcs_start(struct bfa_fcs_s *fcs);
 
-/**
+/*
  * bfa fcs vf public functions
  */
-bfa_status_t bfa_fcs_vf_mode_enable(struct bfa_fcs_s *fcs, u16 vf_id);
-bfa_status_t bfa_fcs_vf_mode_disable(struct bfa_fcs_s *fcs);
-bfa_status_t bfa_fcs_vf_create(bfa_fcs_vf_t *vf, struct bfa_fcs_s *fcs,
-			       u16 vf_id, struct bfa_lport_cfg_s *port_cfg,
-			       struct bfad_vf_s *vf_drv);
-bfa_status_t bfa_fcs_vf_delete(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_list(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_list_all(struct bfa_fcs_s *fcs, u16 *vf_ids, int *nvfs);
-void bfa_fcs_vf_get_attr(bfa_fcs_vf_t *vf, struct bfa_vf_attr_s *vf_attr);
-void bfa_fcs_vf_get_stats(bfa_fcs_vf_t *vf,
-			  struct bfa_vf_stats_s *vf_stats);
-void bfa_fcs_vf_clear_stats(bfa_fcs_vf_t *vf);
-void bfa_fcs_vf_get_ports(bfa_fcs_vf_t *vf, wwn_t vpwwn[], int *nports);
 bfa_fcs_vf_t *bfa_fcs_vf_lookup(struct bfa_fcs_s *fcs, u16 vf_id);
 u16 bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 
@@ -729,11 +716,11 @@
 void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 void bfa_fcs_port_attach(struct bfa_fcs_s *fcs);
 
-/**
+/*
  * BFA FCS callback interfaces
  */
 
-/**
+/*
  * fcb Main fcs callbacks
  */
 
@@ -742,7 +729,7 @@
 struct bfad_vport_s;
 struct bfad_rport_s;
 
-/**
+/*
  * lport callbacks
  */
 struct bfad_port_s *bfa_fcb_lport_new(struct bfad_s *bfad,
@@ -754,19 +741,19 @@
 			  struct bfad_vf_s *vf_drv,
 			  struct bfad_vport_s *vp_drv);
 
-/**
+/*
  * vport callbacks
  */
 void bfa_fcb_pbc_vport_create(struct bfad_s *bfad, struct bfi_pbc_vport_s);
 
-/**
+/*
  * rport callbacks
  */
 bfa_status_t bfa_fcb_rport_alloc(struct bfad_s *bfad,
 				 struct bfa_fcs_rport_s **rport,
 				 struct bfad_rport_s **rport_drv);
 
-/**
+/*
  * itnim callbacks
  */
 void bfa_fcb_itnim_alloc(struct bfad_s *bfad, struct bfa_fcs_itnim_s **itnim,
diff --git a/drivers/scsi/bfa/bfa_fcs_fcpim.c b/drivers/scsi/bfa/bfa_fcs_fcpim.c
index 569dfef..9662bcd 100644
--- a/drivers/scsi/bfa/bfa_fcs_fcpim.c
+++ b/drivers/scsi/bfa/bfa_fcs_fcpim.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  fcpim.c - FCP initiator mode i-t nexus state machine
  */
 
@@ -38,7 +38,7 @@
 			    bfa_status_t req_status, u32 rsp_len,
 			    u32 resid_len, struct fchs_s *rsp_fchs);
 
-/**
+/*
  *  fcs_itnim_sm FCS itnim state machine events
  */
 
@@ -84,7 +84,7 @@
 	{BFA_SM(bfa_fcs_itnim_sm_initiator), BFA_ITNIM_INITIATIOR},
 };
 
-/**
+/*
  *  fcs_itnim_sm FCS itnim state machine
  */
 
@@ -494,11 +494,11 @@
 
 
 
-/**
+/*
  *  itnim_public FCS ITNIM public interfaces
  */
 
-/**
+/*
  *	Called by rport when a new rport is created.
  *
  * @param[in] rport	-  remote port.
@@ -554,7 +554,7 @@
 	return itnim;
 }
 
-/**
+/*
  *	Called by rport to delete  the instance of FCPIM.
  *
  * @param[in] rport	-  remote port.
@@ -566,7 +566,7 @@
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_DELETE);
 }
 
-/**
+/*
  * Notification from rport that PLOGI is complete to initiate FC-4 session.
  */
 void
@@ -586,7 +586,7 @@
 	}
 }
 
-/**
+/*
  * Called by rport to handle a remote device offline.
  */
 void
@@ -596,7 +596,7 @@
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_OFFLINE);
 }
 
-/**
+/*
  * Called by rport when remote port is known to be an initiator from
  * PRLI received.
  */
@@ -608,7 +608,7 @@
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_INITIATOR);
 }
 
-/**
+/*
  * Called by rport to check if the itnim is online.
  */
 bfa_status_t
@@ -625,7 +625,7 @@
 	}
 }
 
-/**
+/*
  * BFA completion callback for bfa_itnim_online().
  */
 void
@@ -637,7 +637,7 @@
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_ONLINE);
 }
 
-/**
+/*
  * BFA completion callback for bfa_itnim_offline().
  */
 void
@@ -649,7 +649,7 @@
 	bfa_sm_send_event(itnim, BFA_FCS_ITNIM_SM_HCB_OFFLINE);
 }
 
-/**
+/*
  * Mark the beginning of PATH TOV handling. IO completion callbacks
  * are still pending.
  */
@@ -661,7 +661,7 @@
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
 }
 
-/**
+/*
  * Mark the end of PATH TOV handling. All pending IOs are already cleaned up.
  */
 void
@@ -674,7 +674,7 @@
 	itnim_drv->state = ITNIM_STATE_TIMEOUT;
 }
 
-/**
+/*
  *		BFA notification to FCS/driver for second level error recovery.
  *
  * Atleast one I/O request has timedout and target is unresponsive to
@@ -736,7 +736,7 @@
 	if (itnim == NULL)
 		return BFA_STATUS_NO_FCPIM_NEXUS;
 
-	bfa_os_memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
+	memcpy(stats, &itnim->stats, sizeof(struct bfa_itnim_stats_s));
 
 	return BFA_STATUS_OK;
 }
@@ -753,7 +753,7 @@
 	if (itnim == NULL)
 		return BFA_STATUS_NO_FCPIM_NEXUS;
 
-	bfa_os_memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
+	memset(&itnim->stats, 0, sizeof(struct bfa_itnim_stats_s));
 	return BFA_STATUS_OK;
 }
 
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index b522bf3..377cbff 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -15,10 +15,6 @@
  * General Public License for more details.
  */
 
-/**
- *  bfa_fcs_lport.c BFA FCS port
- */
-
 #include "bfa_fcs.h"
 #include "bfa_fcbuild.h"
 #include "bfa_fc.h"
@@ -26,10 +22,6 @@
 
 BFA_TRC_FILE(FCS, PORT);
 
-/**
- * Forward declarations
- */
-
 static void     bfa_fcs_lport_send_ls_rjt(struct bfa_fcs_lport_s *port,
 					 struct fchs_s *rx_fchs, u8 reason_code,
 					 u8 reason_code_expl);
@@ -72,7 +64,7 @@
 			bfa_fcs_lport_n2n_offline},
 	};
 
-/**
+/*
  *  fcs_port_sm FCS logical port state machine
  */
 
@@ -240,7 +232,7 @@
 	}
 }
 
-/**
+/*
  *  fcs_port_pvt
  */
 
@@ -272,7 +264,7 @@
 			  FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  * Process incoming plogi from a remote port.
  */
 static void
@@ -303,7 +295,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Direct Attach P2P mode : verify address assigned by the r-port.
 	 */
 	if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -319,12 +311,12 @@
 		port->pid  = rx_fchs->d_id;
 	}
 
-	/**
+	/*
 	 * First, check if we know the device by pwwn.
 	 */
 	rport = bfa_fcs_lport_get_rport_by_pwwn(port, plogi->port_name);
 	if (rport) {
-		/**
+		/*
 		 * Direct Attach P2P mode : handle address assigned by r-port.
 		 */
 		if ((!bfa_fcs_fabric_is_switched(port->fabric)) &&
@@ -337,37 +329,37 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Next, lookup rport by PID.
 	 */
 	rport = bfa_fcs_lport_get_rport_by_pid(port, rx_fchs->s_id);
 	if (!rport) {
-		/**
+		/*
 		 * Inbound PLOGI from a new device.
 		 */
 		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
 		return;
 	}
 
-	/**
+	/*
 	 * Rport is known only by PID.
 	 */
 	if (rport->pwwn) {
-		/**
+		/*
 		 * This is a different device with the same pid. Old device
 		 * disappeared. Send implicit LOGO to old device.
 		 */
 		bfa_assert(rport->pwwn != plogi->port_name);
 		bfa_fcs_rport_logo_imp(rport);
 
-		/**
+		/*
 		 * Inbound PLOGI from a new device (with old PID).
 		 */
 		bfa_fcs_rport_plogi_create(port, rx_fchs, plogi);
 		return;
 	}
 
-	/**
+	/*
 	 * PLOGI crossing each other.
 	 */
 	bfa_assert(rport->pwwn == WWN_NULL);
@@ -479,12 +471,12 @@
 bfa_fs_port_get_gen_topo_data(struct bfa_fcs_lport_s *port,
 			struct fc_rnid_general_topology_data_s *gen_topo_data)
 {
-	bfa_os_memset(gen_topo_data, 0,
+	memset(gen_topo_data, 0,
 		      sizeof(struct fc_rnid_general_topology_data_s));
 
-	gen_topo_data->asso_type = bfa_os_htonl(RNID_ASSOCIATED_TYPE_HOST);
+	gen_topo_data->asso_type = cpu_to_be32(RNID_ASSOCIATED_TYPE_HOST);
 	gen_topo_data->phy_port_num = 0;	/* @todo */
-	gen_topo_data->num_attached_nodes = bfa_os_htonl(1);
+	gen_topo_data->num_attached_nodes = cpu_to_be32(1);
 }
 
 static void
@@ -598,10 +590,10 @@
 
 
 
-/**
+/*
  *  fcs_lport_api BFA FCS port API
  */
-/**
+/*
  *   Module initialization
  */
 void
@@ -610,7 +602,7 @@
 
 }
 
-/**
+/*
  *   Module cleanup
  */
 void
@@ -619,7 +611,7 @@
 	bfa_fcs_modexit_comp(fcs);
 }
 
-/**
+/*
  * Unsolicited frame receive handling.
  */
 void
@@ -637,7 +629,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * First, handle ELSs that donot require a login.
 	 */
 	/*
@@ -673,7 +665,7 @@
 			bfa_fcs_lport_abts_acc(lport, fchs);
 		return;
 	}
-	/**
+	/*
 	 * look for a matching remote port ID
 	 */
 	rport = bfa_fcs_lport_get_rport_by_pid(lport, pid);
@@ -686,7 +678,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Only handles ELS frames for now.
 	 */
 	if (fchs->type != FC_TYPE_ELS) {
@@ -702,20 +694,20 @@
 	}
 
 	if (els_cmd->els_code == FC_ELS_LOGO) {
-		/**
+		/*
 		 * @todo Handle LOGO frames received.
 		 */
 		return;
 	}
 
 	if (els_cmd->els_code == FC_ELS_PRLI) {
-		/**
+		/*
 		 * @todo Handle PRLI frames received.
 		 */
 		return;
 	}
 
-	/**
+	/*
 	 * Unhandled ELS frames. Send a LS_RJT.
 	 */
 	bfa_fcs_lport_send_ls_rjt(lport, fchs, FC_LS_RJT_RSN_CMD_NOT_SUPP,
@@ -723,7 +715,7 @@
 
 }
 
-/**
+/*
  *   PID based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -742,7 +734,7 @@
 	return NULL;
 }
 
-/**
+/*
  *   PWWN based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -761,7 +753,7 @@
 	return NULL;
 }
 
-/**
+/*
  *   NWWN based Lookup for a R-Port in the Port R-Port Queue
  */
 struct bfa_fcs_rport_s *
@@ -780,7 +772,7 @@
 	return NULL;
 }
 
-/**
+/*
  * Called by rport module when new rports are discovered.
  */
 void
@@ -792,7 +784,7 @@
 	port->num_rports++;
 }
 
-/**
+/*
  * Called by rport module to when rports are deleted.
  */
 void
@@ -807,7 +799,7 @@
 	bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELRPORT);
 }
 
-/**
+/*
  * Called by fabric for base port when fabric login is complete.
  * Called by vport for virtual ports when FDISC is complete.
  */
@@ -817,7 +809,7 @@
 	bfa_sm_send_event(port, BFA_FCS_PORT_SM_ONLINE);
 }
 
-/**
+/*
  * Called by fabric for base port when fabric goes offline.
  * Called by vport for virtual ports when virtual port becomes offline.
  */
@@ -827,7 +819,7 @@
 	bfa_sm_send_event(port, BFA_FCS_PORT_SM_OFFLINE);
 }
 
-/**
+/*
  * Called by fabric to delete base lport and associated resources.
  *
  * Called by vport to delete lport and associated resources. Should call
@@ -839,7 +831,7 @@
 	bfa_sm_send_event(port, BFA_FCS_PORT_SM_DELETE);
 }
 
-/**
+/*
  * Return TRUE if port is online, else return FALSE
  */
 bfa_boolean_t
@@ -848,7 +840,7 @@
 	return bfa_sm_cmp_state(port, bfa_fcs_lport_sm_online);
 }
 
-/**
+/*
   * Attach time initialization of logical ports.
  */
 void
@@ -865,7 +857,7 @@
 	lport->num_rports = 0;
 }
 
-/**
+/*
  * Logical port initialization of base or virtual port.
  * Called by fabric for base port or by vport for virtual ports.
  */
@@ -878,7 +870,7 @@
 	struct bfad_s *bfad = (struct bfad_s *)lport->fcs->bfad;
 	char    lpwwn_buf[BFA_STRING_32];
 
-	bfa_os_assign(lport->port_cfg, *port_cfg);
+	lport->port_cfg = *port_cfg;
 
 	lport->bfad_port = bfa_fcb_lport_new(lport->fcs->bfad, lport,
 					lport->port_cfg.roles,
@@ -894,7 +886,7 @@
 	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
 }
 
-/**
+/*
  *  fcs_lport_api
  */
 
@@ -934,11 +926,11 @@
 	}
 }
 
-/**
+/*
  *  bfa_fcs_lport_fab port fab functions
  */
 
-/**
+/*
  *   Called by port to initialize fabric services of the base port.
  */
 static void
@@ -949,7 +941,7 @@
 	bfa_fcs_lport_ms_init(port);
 }
 
-/**
+/*
  *   Called by port to notify transition to online state.
  */
 static void
@@ -959,7 +951,7 @@
 	bfa_fcs_lport_scn_online(port);
 }
 
-/**
+/*
  *   Called by port to notify transition to offline state.
  */
 static void
@@ -970,11 +962,11 @@
 	bfa_fcs_lport_ms_offline(port);
 }
 
-/**
+/*
  *  bfa_fcs_lport_n2n  functions
  */
 
-/**
+/*
  *   Called by fcs/port to initialize N2N topology.
  */
 static void
@@ -982,7 +974,7 @@
 {
 }
 
-/**
+/*
  *   Called by fcs/port to notify transition to online state.
  */
 static void
@@ -1006,7 +998,7 @@
 	    ((void *)&pcfg->pwwn, (void *)&n2n_port->rem_port_wwn,
 	     sizeof(wwn_t)) > 0) {
 		port->pid = N2N_LOCAL_PID;
-		/**
+		/*
 		 * First, check if we know the device by pwwn.
 		 */
 		rport = bfa_fcs_lport_get_rport_by_pwwn(port,
@@ -1035,7 +1027,7 @@
 	}
 }
 
-/**
+/*
  *   Called by fcs/port to notify transition to offline state.
  */
 static void
@@ -1094,11 +1086,11 @@
 				 struct bfa_fcs_fdmi_hba_attr_s *hba_attr);
 static void	bfa_fcs_fdmi_get_portattr(struct bfa_fcs_lport_fdmi_s *fdmi,
 				  struct bfa_fcs_fdmi_port_attr_s *port_attr);
-/**
+/*
  *  fcs_fdmi_sm FCS FDMI state machine
  */
 
-/**
+/*
  *  FDMI State Machine events
  */
 enum port_fdmi_event {
@@ -1143,7 +1135,7 @@
 static void     bfa_fcs_lport_fdmi_sm_disabled(
 				struct bfa_fcs_lport_fdmi_s *fdmi,
 				enum port_fdmi_event event);
-/**
+/*
  *	Start in offline state - awaiting MS to send start.
  */
 static void
@@ -1510,7 +1502,7 @@
 		bfa_sm_fault(port->fcs, event);
 	}
 }
-/**
+/*
  *  FDMI is disabled state.
  */
 static void
@@ -1525,7 +1517,7 @@
 	/* No op State. It can only be enabled at Driver Init. */
 }
 
-/**
+/*
 *  RHBA : Register HBA Attributes.
  */
 static void
@@ -1549,7 +1541,7 @@
 	fdmi->fcxp = fcxp;
 
 	pyld = bfa_fcxp_get_reqbuf(fcxp);
-	bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+	memset(pyld, 0, FC_MAX_PDUSZ);
 
 	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
 				   FDMI_RHBA);
@@ -1584,7 +1576,7 @@
 	bfa_fcs_fdmi_get_hbaattr(fdmi, fcs_hba_attr);
 
 	rhba->hba_id = bfa_fcs_lport_get_pwwn(port);
-	rhba->port_list.num_ports = bfa_os_htonl(1);
+	rhba->port_list.num_ports = cpu_to_be32(1);
 	rhba->port_list.port_entry = bfa_fcs_lport_get_pwwn(port);
 
 	len = sizeof(rhba->hba_id) + sizeof(rhba->port_list);
@@ -1601,86 +1593,69 @@
 	 * Node Name
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_NODENAME);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_NODENAME);
 	attr->len = sizeof(wwn_t);
 	memcpy(attr->value, &bfa_fcs_lport_get_nwwn(port), attr->len);
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Manufacturer
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MANUFACTURER);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MANUFACTURER);
 	attr->len = (u16) strlen(fcs_hba_attr->manufacturer);
 	memcpy(attr->value, fcs_hba_attr->manufacturer, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-								 *fields need
-								 *to be 4 byte
-								 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Serial Number
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_SERIALNUM);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_SERIALNUM);
 	attr->len = (u16) strlen(fcs_hba_attr->serial_num);
 	memcpy(attr->value, fcs_hba_attr->serial_num, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-								 *fields need
-								 *to be 4 byte
-								 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Model
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL);
 	attr->len = (u16) strlen(fcs_hba_attr->model);
 	memcpy(attr->value, fcs_hba_attr->model, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-								 *fields need
-								 *to be 4 byte
-								 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Model Desc
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MODEL_DESC);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MODEL_DESC);
 	attr->len = (u16) strlen(fcs_hba_attr->model_desc);
 	memcpy(attr->value, fcs_hba_attr->model_desc, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							 *fields need
-							 *to be 4 byte
-							 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
@@ -1688,18 +1663,14 @@
 	 */
 	if (fcs_hba_attr->hw_version[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
-		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_HW_VERSION);
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_HW_VERSION);
 		attr->len = (u16) strlen(fcs_hba_attr->hw_version);
 		memcpy(attr->value, fcs_hba_attr->hw_version, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-								 *fields need
-								 *to be 4 byte
-								 *aligned */
+		attr->len = fc_roundup(attr->len, sizeof(u32));
 		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 		len += attr->len;
 		count++;
-		attr->len =
-			bfa_os_htons(attr->len + sizeof(attr->type) +
+		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 					 sizeof(attr->len));
 	}
 
@@ -1707,18 +1678,14 @@
 	 * Driver Version
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_DRIVER_VERSION);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_DRIVER_VERSION);
 	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
 	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							 *fields need
-							 *to be 4 byte
-							 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
@@ -1726,18 +1693,14 @@
 	 */
 	if (fcs_hba_attr->option_rom_ver[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
-		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_ROM_VERSION);
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_ROM_VERSION);
 		attr->len = (u16) strlen(fcs_hba_attr->option_rom_ver);
 		memcpy(attr->value, fcs_hba_attr->option_rom_ver, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-								 *fields need
-								 *to be 4 byte
-								 *aligned */
+		attr->len = fc_roundup(attr->len, sizeof(u32));
 		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 		len += attr->len;
 		count++;
-		attr->len =
-			bfa_os_htons(attr->len + sizeof(attr->type) +
+		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 					 sizeof(attr->len));
 	}
 
@@ -1745,18 +1708,14 @@
 	 * f/w Version = driver version
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_FW_VERSION);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_FW_VERSION);
 	attr->len = (u16) strlen(fcs_hba_attr->driver_version);
 	memcpy(attr->value, fcs_hba_attr->driver_version, attr->len);
-	attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							 *fields need
-							 *to be 4 byte
-							 *aligned */
+	attr->len = fc_roundup(attr->len, sizeof(u32));
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
@@ -1764,18 +1723,14 @@
 	 */
 	if (fcs_hba_attr->os_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
-		attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_OS_NAME);
+		attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_OS_NAME);
 		attr->len = (u16) strlen(fcs_hba_attr->os_name);
 		memcpy(attr->value, fcs_hba_attr->os_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							     *fields need
-							     *to be 4 byte
-							     *aligned */
+		attr->len = fc_roundup(attr->len, sizeof(u32));
 		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 		len += attr->len;
 		count++;
-		attr->len =
-			bfa_os_htons(attr->len + sizeof(attr->type) +
+		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 					sizeof(attr->len));
 	}
 
@@ -1783,22 +1738,20 @@
 	 * MAX_CT_PAYLOAD
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_HBA_ATTRIB_MAX_CT);
+	attr->type = cpu_to_be16(FDMI_HBA_ATTRIB_MAX_CT);
 	attr->len = sizeof(fcs_hba_attr->max_ct_pyld);
 	memcpy(attr->value, &fcs_hba_attr->max_ct_pyld, attr->len);
 	len += attr->len;
 	count++;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Update size of payload
 	 */
-	len += ((sizeof(attr->type) +
-		 sizeof(attr->len)) * count);
+	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 
-	rhba->hba_attr_blk.attr_count = bfa_os_htonl(count);
+	rhba->hba_attr_blk.attr_count = cpu_to_be32(count);
 	return len;
 }
 
@@ -1825,7 +1778,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -1837,7 +1790,7 @@
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
 *  RPRT : Register Port
  */
 static void
@@ -1861,7 +1814,7 @@
 	fdmi->fcxp = fcxp;
 
 	pyld = bfa_fcxp_get_reqbuf(fcxp);
-	bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+	memset(pyld, 0, FC_MAX_PDUSZ);
 
 	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
 				   FDMI_RPRT);
@@ -1879,7 +1832,7 @@
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_RPRT_SENT);
 }
 
-/**
+/*
  * This routine builds Port Attribute Block that used in RPA, RPRT commands.
  */
 static          u16
@@ -1909,56 +1862,54 @@
 	 * FC4 Types
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FC4_TYPES);
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FC4_TYPES);
 	attr->len = sizeof(fcs_port_attr.supp_fc4_types);
 	memcpy(attr->value, fcs_port_attr.supp_fc4_types, attr->len);
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	++count;
 	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+		cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * Supported Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_SUPP_SPEED);
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_SUPP_SPEED);
 	attr->len = sizeof(fcs_port_attr.supp_speed);
 	memcpy(attr->value, &fcs_port_attr.supp_speed, attr->len);
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	++count;
 	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+		cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * current Port Speed
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_PORT_SPEED);
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_PORT_SPEED);
 	attr->len = sizeof(fcs_port_attr.curr_speed);
 	memcpy(attr->value, &fcs_port_attr.curr_speed, attr->len);
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	++count;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
 	 * max frame size
 	 */
 	attr = (struct fdmi_attr_s *) curr_ptr;
-	attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_FRAME_SIZE);
+	attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_FRAME_SIZE);
 	attr->len = sizeof(fcs_port_attr.max_frm_size);
 	memcpy(attr->value, &fcs_port_attr.max_frm_size, attr->len);
 	curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 	len += attr->len;
 	++count;
-	attr->len =
-		bfa_os_htons(attr->len + sizeof(attr->type) +
+	attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 			     sizeof(attr->len));
 
 	/*
@@ -1966,18 +1917,14 @@
 	 */
 	if (fcs_port_attr.os_device_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
-		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_DEV_NAME);
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_DEV_NAME);
 		attr->len = (u16) strlen(fcs_port_attr.os_device_name);
 		memcpy(attr->value, fcs_port_attr.os_device_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							     *fields need
-							     *to be 4 byte
-							     *aligned */
+		attr->len = fc_roundup(attr->len, sizeof(u32));
 		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 		len += attr->len;
 		++count;
-		attr->len =
-			bfa_os_htons(attr->len + sizeof(attr->type) +
+		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 					sizeof(attr->len));
 	}
 	/*
@@ -1985,27 +1932,22 @@
 	 */
 	if (fcs_port_attr.host_name[0] != '\0') {
 		attr = (struct fdmi_attr_s *) curr_ptr;
-		attr->type = bfa_os_htons(FDMI_PORT_ATTRIB_HOST_NAME);
+		attr->type = cpu_to_be16(FDMI_PORT_ATTRIB_HOST_NAME);
 		attr->len = (u16) strlen(fcs_port_attr.host_name);
 		memcpy(attr->value, fcs_port_attr.host_name, attr->len);
-		attr->len = fc_roundup(attr->len, sizeof(u32));	/* variable
-							     *fields need
-							     *to be 4 byte
-							     *aligned */
+		attr->len = fc_roundup(attr->len, sizeof(u32));
 		curr_ptr += sizeof(attr->type) + sizeof(attr->len) + attr->len;
 		len += attr->len;
 		++count;
-		attr->len =
-			bfa_os_htons(attr->len + sizeof(attr->type) +
+		attr->len = cpu_to_be16(attr->len + sizeof(attr->type) +
 				sizeof(attr->len));
 	}
 
 	/*
 	 * Update size of payload
 	 */
-	port_attrib->attr_count = bfa_os_htonl(count);
-	len += ((sizeof(attr->type) +
-		 sizeof(attr->len)) * count);
+	port_attrib->attr_count = cpu_to_be32(count);
+	len += ((sizeof(attr->type) + sizeof(attr->len)) * count);
 	return len;
 }
 
@@ -2050,7 +1992,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2062,7 +2004,7 @@
 	bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
 *  RPA : Register Port Attributes.
  */
 static void
@@ -2086,15 +2028,13 @@
 	fdmi->fcxp = fcxp;
 
 	pyld = bfa_fcxp_get_reqbuf(fcxp);
-	bfa_os_memset(pyld, 0, FC_MAX_PDUSZ);
+	memset(pyld, 0, FC_MAX_PDUSZ);
 
 	len = fc_fdmi_reqhdr_build(&fchs, pyld, bfa_fcs_lport_get_fcid(port),
 				   FDMI_RPA);
 
-	attr_len =
-		bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
-					 (u8 *) ((struct ct_hdr_s *) pyld
-						      + 1));
+	attr_len = bfa_fcs_lport_fdmi_build_rpa_pyld(fdmi,
+				(u8 *) ((struct ct_hdr_s *) pyld + 1));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len + attr_len, &fchs,
@@ -2143,7 +2083,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		bfa_sm_send_event(fdmi, FDMISM_EVENT_RSP_OK);
@@ -2170,7 +2110,7 @@
 	struct bfa_fcs_lport_s *port = fdmi->ms->port;
 	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
 
-	bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
+	memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
 
 	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
 					hba_attr->manufacturer);
@@ -2204,7 +2144,7 @@
 				sizeof(driver_info->host_os_patch));
 	}
 
-	hba_attr->max_ct_pyld = bfa_os_htonl(FC_MAX_PDUSZ);
+	hba_attr->max_ct_pyld = cpu_to_be32(FC_MAX_PDUSZ);
 }
 
 void
@@ -2215,7 +2155,7 @@
 	struct bfa_fcs_driver_info_s  *driver_info = &port->fcs->driver_info;
 	struct bfa_port_attr_s pport_attr;
 
-	bfa_os_memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
+	memset(port_attr, 0, sizeof(struct bfa_fcs_fdmi_port_attr_s));
 
 	/*
 	 * get pport attributes from hal
@@ -2230,17 +2170,17 @@
 	/*
 	 * Supported Speeds
 	 */
-	port_attr->supp_speed = bfa_os_htonl(BFA_FCS_FDMI_SUPORTED_SPEEDS);
+	port_attr->supp_speed = cpu_to_be32(BFA_FCS_FDMI_SUPORTED_SPEEDS);
 
 	/*
 	 * Current Speed
 	 */
-	port_attr->curr_speed = bfa_os_htonl(pport_attr.speed);
+	port_attr->curr_speed = cpu_to_be32(pport_attr.speed);
 
 	/*
 	 * Max PDU Size.
 	 */
-	port_attr->max_frm_size = bfa_os_htonl(FC_MAX_PDUSZ);
+	port_attr->max_frm_size = cpu_to_be32(FC_MAX_PDUSZ);
 
 	/*
 	 * OS device Name
@@ -2321,11 +2261,11 @@
 					       u32 rsp_len,
 					       u32 resid_len,
 					       struct fchs_s *rsp_fchs);
-/**
+/*
  *  fcs_ms_sm FCS MS state machine
  */
 
-/**
+/*
  *  MS State Machine events
  */
 enum port_ms_event {
@@ -2360,7 +2300,7 @@
 					       enum port_ms_event event);
 static void     bfa_fcs_lport_ms_sm_online(struct bfa_fcs_lport_ms_s *ms,
 					  enum port_ms_event event);
-/**
+/*
  *	Start in offline state - awaiting NS to send start.
  */
 static void
@@ -2432,7 +2372,7 @@
 		 */
 		bfa_fcs_lport_fdmi_online(ms);
 
-		/**
+		/*
 		 * if this is a Vport, go to online state.
 		 */
 		if (ms->port->vport) {
@@ -2595,7 +2535,7 @@
 		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -2657,12 +2597,12 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		gmal_resp = (struct fcgs_gmal_resp_s *)(cthdr + 1);
 
-		num_entries = bfa_os_ntohl(gmal_resp->ms_len);
+		num_entries = be32_to_cpu(gmal_resp->ms_len);
 		if (num_entries == 0) {
 			bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
 			return;
@@ -2795,7 +2735,7 @@
 		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -2853,7 +2793,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		gfn_resp = (wwn_t *)(cthdr + 1);
@@ -2871,7 +2811,7 @@
 	bfa_sm_send_event(ms, MSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  *  ms_pvt MS local functions
  */
 
@@ -3017,7 +2957,7 @@
 		bfa_sm_send_event(ms, MSSM_EVENT_PORT_FABRIC_RSCN);
 }
 
-/**
+/*
  * @page ns_sm_info VPORT NS State Machine
  *
  * @section ns_sm_interactions VPORT NS State Machine Interactions
@@ -3080,11 +3020,11 @@
 				u32 *pid_buf, u32 n_pids);
 
 static void bfa_fcs_lport_ns_boot_target_disc(bfa_fcs_lport_t *port);
-/**
+/*
  *  fcs_ns_sm FCS nameserver interface state machine
  */
 
-/**
+/*
  * VPort NS State Machine events
  */
 enum vport_ns_event {
@@ -3139,7 +3079,7 @@
 						enum vport_ns_event event);
 static void     bfa_fcs_lport_ns_sm_online(struct bfa_fcs_lport_ns_s *ns,
 					  enum vport_ns_event event);
-/**
+/*
  *	Start in offline state - awaiting linkup
  */
 static void
@@ -3628,7 +3568,7 @@
 
 
 
-/**
+/*
  *  ns_pvt Nameserver local functions
  */
 
@@ -3724,7 +3664,7 @@
 	}
 }
 
-/**
+/*
  * Register the symbolic port name.
  */
 static void
@@ -3738,7 +3678,7 @@
 	u8         symbl[256];
 	u8         *psymbl = &symbl[0];
 
-	bfa_os_memset(symbl, 0, sizeof(symbl));
+	memset(symbl, 0, sizeof(symbl));
 
 	bfa_trc(port->fcs, port->port_cfg.pwwn);
 
@@ -3755,7 +3695,7 @@
 	 * for V-Port, form a Port Symbolic Name
 	 */
 	if (port->vport) {
-		/**
+		/*
 		 * For Vports, we append the vport's port symbolic name
 		 * to that of the base port.
 		 */
@@ -3815,7 +3755,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		port->stats.ns_rspnid_accepts++;
@@ -3829,7 +3769,7 @@
 	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  * Register FC4-Types
  */
 static void
@@ -3887,7 +3827,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		port->stats.ns_rftid_accepts++;
@@ -3901,7 +3841,7 @@
 	bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
 
-/**
+/*
  * Register FC4-Features : Should be done after RFT_ID
  */
 static void
@@ -3964,7 +3904,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		port->stats.ns_rffid_accepts++;
@@ -3982,7 +3922,7 @@
 	} else
 		bfa_sm_send_event(ns, NSSM_EVENT_RSP_ERROR);
 }
-/**
+/*
  * Query Fabric for FC4-Types Devices.
  *
 * TBD : Need to use a local (FCS private) response buffer, since the response
@@ -4058,7 +3998,7 @@
 	}
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	switch (cthdr->cmd_rsp_code) {
 
@@ -4102,7 +4042,7 @@
 	}
 }
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *	param[in]	port - pointer to bfa_fcs_lport_t.
@@ -4166,7 +4106,7 @@
 	}
 }
 
-/**
+/*
  *  fcs_ns_public FCS nameserver public interfaces
  */
 
@@ -4227,7 +4167,7 @@
 	}
 }
 
-/**
+/*
  * FCS SCN
  */
 
@@ -4250,11 +4190,11 @@
 					     struct fchs_s *rx_fchs);
 static void     bfa_fcs_lport_scn_timeout(void *arg);
 
-/**
+/*
  *  fcs_scm_sm FCS SCN state machine
  */
 
-/**
+/*
  * VPort SCN State Machine events
  */
 enum port_scn_event {
@@ -4278,7 +4218,7 @@
 static void     bfa_fcs_lport_scn_sm_online(struct bfa_fcs_lport_scn_s *scn,
 					   enum port_scn_event event);
 
-/**
+/*
  *	Starting state - awaiting link up.
  */
 static void
@@ -4382,11 +4322,11 @@
 
 
 
-/**
+/*
  *  fcs_scn_private FCS SCN private functions
  */
 
-/**
+/*
  * This routine will be called to send a SCR command.
  */
 static void
@@ -4499,7 +4439,7 @@
 			  FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *	param[in]	vport		- pointer to bfa_fcs_lport_t.
@@ -4522,7 +4462,7 @@
 
 
 
-/**
+/*
  *  fcs_scn_public FCS state change notification public interfaces
  */
 
@@ -4563,7 +4503,7 @@
 
 	bfa_trc(port->fcs, rpid);
 
-	/**
+	/*
 	 * If this is an unknown device, then it just came online.
 	 * Otherwise let rport handle the RSCN event.
 	 */
@@ -4579,7 +4519,7 @@
 		bfa_fcs_rport_scn(rport);
 }
 
-/**
+/*
  * rscn format based PID comparison
  */
 #define __fc_pid_match(__c0, __c1, __fmt)		\
@@ -4624,7 +4564,7 @@
 	int             i = 0, j;
 
 	num_entries =
-		(bfa_os_ntohs(rscn->payldlen) -
+		(be16_to_cpu(rscn->payldlen) -
 		 sizeof(u32)) / sizeof(rscn->event[0]);
 
 	bfa_trc(port->fcs, num_entries);
@@ -4691,18 +4631,18 @@
 		}
 	}
 
-	/**
-	 * If any of area, domain or fabric RSCN is received, do a fresh discovery
-	 * to find new devices.
+	/*
+	 * If any of area, domain or fabric RSCN is received, do a fresh
+	 * discovery to find new devices.
 	 */
 	if (nsquery)
 		bfa_fcs_lport_ns_query(port);
 }
 
-/**
+/*
  * BFA FCS port
  */
-/**
+/*
  *  fcs_port_api BFA FCS port API
  */
 struct bfa_fcs_lport_s *
@@ -4943,10 +4883,10 @@
 void
 bfa_fcs_lport_clear_stats(struct bfa_fcs_lport_s *fcs_port)
 {
-	bfa_os_memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
+	memset(&fcs_port->stats, 0, sizeof(struct bfa_lport_stats_s));
 }
 
-/**
+/*
  * FCS virtual port state machine
  */
 
@@ -4967,11 +4907,11 @@
 static void     bfa_fcs_vport_do_logo(struct bfa_fcs_vport_s *vport);
 static void     bfa_fcs_vport_free(struct bfa_fcs_vport_s *vport);
 
-/**
+/*
  *  fcs_vport_sm FCS virtual port state machine
  */
 
-/**
+/*
  * VPort State Machine events
  */
 enum bfa_fcs_vport_event {
@@ -5024,7 +4964,7 @@
 	{BFA_SM(bfa_fcs_vport_sm_error), BFA_FCS_VPORT_ERROR}
 };
 
-/**
+/*
  * Beginning state.
  */
 static void
@@ -5045,7 +4985,7 @@
 	}
 }
 
-/**
+/*
  * Created state - a start event is required to start up the state machine.
  */
 static void
@@ -5062,7 +5002,7 @@
 			bfa_sm_set_state(vport, bfa_fcs_vport_sm_fdisc);
 			bfa_fcs_vport_do_fdisc(vport);
 		} else {
-			/**
+			/*
 			 * Fabric is offline or not NPIV capable, stay in
 			 * offline state.
 			 */
@@ -5078,7 +5018,7 @@
 
 	case BFA_FCS_VPORT_SM_ONLINE:
 	case BFA_FCS_VPORT_SM_OFFLINE:
-		/**
+		/*
 		 * Ignore ONLINE/OFFLINE events from fabric
 		 * till vport is started.
 		 */
@@ -5089,7 +5029,7 @@
 	}
 }
 
-/**
+/*
  * Offline state - awaiting ONLINE event from fabric SM.
  */
 static void
@@ -5127,7 +5067,7 @@
 }
 
 
-/**
+/*
  * FDISC is sent and awaiting reply from fabric.
  */
 static void
@@ -5174,7 +5114,7 @@
 	}
 }
 
-/**
+/*
  * FDISC attempt failed - a timer is active to retry FDISC.
  */
 static void
@@ -5208,7 +5148,7 @@
 	}
 }
 
-/**
+/*
  * Vport is online (FDISC is complete).
  */
 static void
@@ -5235,7 +5175,7 @@
 	}
 }
 
-/**
+/*
  * Vport is being deleted - awaiting lport delete completion to send
  * LOGO to fabric.
  */
@@ -5264,7 +5204,7 @@
 	}
 }
 
-/**
+/*
  * Error State.
  * This state will be set when the Vport Creation fails due
  * to errors like Dup WWN. In this state only operation allowed
@@ -5288,7 +5228,7 @@
 	}
 }
 
-/**
+/*
  * Lport cleanup is in progress since vport is being deleted. Fabric is
  * offline, so no LOGO is needed to complete vport deletion.
  */
@@ -5313,7 +5253,7 @@
 	}
 }
 
-/**
+/*
  * LOGO is sent to fabric. Vport delete is in progress. Lport delete cleanup
  * is done.
  */
@@ -5347,10 +5287,10 @@
 
 
 
-/**
+/*
  *  fcs_vport_private FCS virtual port private functions
  */
-/**
+/*
  * This routine will be called to send a FDISC command.
  */
 static void
@@ -5397,7 +5337,7 @@
 	}
 }
 
-/**
+/*
  *	Called to send a logout to the fabric. Used when a V-Port is
  *	deleted/stopped.
  */
@@ -5411,7 +5351,7 @@
 }
 
 
-/**
+/*
  *     This routine will be called by bfa_timer on timer timeouts.
  *
  *	param[in]	vport		- pointer to bfa_fcs_vport_t.
@@ -5449,11 +5389,11 @@
 
 
 
-/**
+/*
  *  fcs_vport_public FCS virtual port public interfaces
  */
 
-/**
+/*
  * Online notification from fabric SM.
  */
 void
@@ -5463,7 +5403,7 @@
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
 }
 
-/**
+/*
  * Offline notification from fabric SM.
  */
 void
@@ -5473,7 +5413,7 @@
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
 }
 
-/**
+/*
  * Cleanup notification from fabric SM on link timer expiry.
  */
 void
@@ -5481,7 +5421,7 @@
 {
 	vport->vport_stats.fab_cleanup++;
 }
-/**
+/*
  * delete notification from fabric SM. To be invoked from within FCS.
  */
 void
@@ -5490,7 +5430,7 @@
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_DELETE);
 }
 
-/**
+/*
  * Delete completion callback from associated lport
  */
 void
@@ -5501,11 +5441,11 @@
 
 
 
-/**
+/*
  *  fcs_vport_api Virtual port API
  */
 
-/**
+/*
  *	Use this function to instantiate a new FCS vport object. This
  *	function will not trigger any HW initialization process (which will be
  *	done in vport_start() call)
@@ -5555,7 +5495,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  *	Use this function to instantiate a new FCS PBC vport object. This
  *	function will not trigger any HW initialization process (which will be
  *	done in vport_start() call)
@@ -5585,7 +5525,7 @@
 	return rc;
 }
 
-/**
+/*
  *	Use this function to findout if this is a pbc vport or not.
  *
  * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5603,7 +5543,7 @@
 
 }
 
-/**
+/*
  * Use this function initialize the vport.
  *
  * @param[in] vport - pointer to bfa_fcs_vport_t.
@@ -5618,7 +5558,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  *	Use this function quiese the vport object. This function will return
  *	immediately, when the vport is actually stopped, the
  *	bfa_drv_vport_stop_cb() will be called.
@@ -5635,7 +5575,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  *	Use this function to delete a vport object. Fabric object should
  *	be stopped before this function call.
  *
@@ -5657,7 +5597,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  *	Use this function to get vport's current status info.
  *
  *	param[in] vport		pointer to bfa_fcs_vport_t.
@@ -5672,13 +5612,13 @@
 	if (vport == NULL || attr == NULL)
 		return;
 
-	bfa_os_memset(attr, 0, sizeof(struct bfa_vport_attr_s));
+	memset(attr, 0, sizeof(struct bfa_vport_attr_s));
 
 	bfa_fcs_lport_get_attr(&vport->lport, &attr->port_attr);
 	attr->vport_state = bfa_sm_to_state(vport_sm_table, vport->sm);
 }
 
-/**
+/*
  *	Use this function to get vport's statistics.
  *
  *	param[in]	vport	pointer to bfa_fcs_vport_t.
@@ -5693,7 +5633,7 @@
 	*stats = vport->vport_stats;
 }
 
-/**
+/*
  *	Use this function to clear vport's statistics.
  *
  *	param[in]	vport	pointer to bfa_fcs_vport_t.
@@ -5703,10 +5643,10 @@
 void
 bfa_fcs_vport_clr_stats(struct bfa_fcs_vport_s *vport)
 {
-	bfa_os_memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
+	memset(&vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
 }
 
-/**
+/*
  *	Lookup a virtual port. Excludes base port from lookup.
  */
 struct bfa_fcs_vport_s *
@@ -5728,7 +5668,7 @@
 	return vport;
 }
 
-/**
+/*
  * FDISC Response
  */
 void
@@ -5784,7 +5724,7 @@
 	}
 }
 
-/**
+/*
  * LOGO response
  */
 void
@@ -5794,7 +5734,7 @@
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 }
 
-/**
+/*
  * Received clear virtual link
  */
 void
diff --git a/drivers/scsi/bfa/bfa_fcs_rport.c b/drivers/scsi/bfa/bfa_fcs_rport.c
index 635f0cd..47f35c0 100644
--- a/drivers/scsi/bfa/bfa_fcs_rport.c
+++ b/drivers/scsi/bfa/bfa_fcs_rport.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  rport.c Remote port implementation.
  */
 
@@ -75,7 +75,7 @@
 static void	bfa_fcs_rport_process_adisc(struct bfa_fcs_rport_s *rport,
 				struct fchs_s *rx_fchs, u16 len);
 static void bfa_fcs_rport_send_prlo_acc(struct bfa_fcs_rport_s *rport);
-/**
+/*
  *  fcs_rport_sm FCS rport state machine events
  */
 
@@ -172,7 +172,7 @@
 	{BFA_SM(bfa_fcs_rport_sm_nsdisc_sent), BFA_RPORT_NSDISC},
 };
 
-/**
+/*
  *		Beginning state.
  */
 static void
@@ -210,7 +210,7 @@
 	}
 }
 
-/**
+/*
  *		PLOGI is being sent.
  */
 static void
@@ -262,7 +262,7 @@
 	}
 }
 
-/**
+/*
  *		PLOGI is being sent.
  */
 static void
@@ -287,7 +287,7 @@
 
 	case RPSM_EVENT_PLOGI_RCVD:
 	case RPSM_EVENT_SCN:
-		/**
+		/*
 		 * Ignore, SCN is possibly online notification.
 		 */
 		break;
@@ -309,7 +309,7 @@
 		break;
 
 	case RPSM_EVENT_HCB_OFFLINE:
-		/**
+		/*
 		 * Ignore BFA callback, on a PLOGI receive we call bfa offline.
 		 */
 		break;
@@ -319,7 +319,7 @@
 	}
 }
 
-/**
+/*
  *		PLOGI is sent.
  */
 static void
@@ -380,7 +380,7 @@
 	}
 }
 
-/**
+/*
  *		PLOGI is sent.
  */
 static void
@@ -475,7 +475,7 @@
 	}
 }
 
-/**
+/*
  *		PLOGI is complete. Awaiting BFA rport online callback. FC-4s
  *		are offline.
  */
@@ -519,7 +519,7 @@
 		break;
 
 	case RPSM_EVENT_SCN:
-		/**
+		/*
 		 * @todo
 		 * Ignore SCN - PLOGI just completed, FC-4 login should detect
 		 * device failures.
@@ -531,7 +531,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is ONLINE. FC-4s active.
  */
 static void
@@ -580,7 +580,7 @@
 	}
 }
 
-/**
+/*
  *		An SCN event is received in ONLINE state. NS query is being sent
  *		prior to ADISC authentication with rport. FC-4s are paused.
  */
@@ -604,7 +604,7 @@
 		break;
 
 	case RPSM_EVENT_SCN:
-		/**
+		/*
 		 * ignore SCN, wait for response to query itself
 		 */
 		break;
@@ -638,7 +638,7 @@
 	}
 }
 
-/**
+/*
  *	An SCN event is received in ONLINE state. NS query is sent to rport.
  *	FC-4s are paused.
  */
@@ -697,7 +697,7 @@
 	}
 }
 
-/**
+/*
  *	An SCN event is received in ONLINE state. ADISC is being sent for
  *	authenticating with rport. FC-4s are paused.
  */
@@ -748,7 +748,7 @@
 	}
 }
 
-/**
+/*
 *		An SCN event is received in ONLINE state. ADISC is sent to rport.
  *		FC-4s are paused.
  */
@@ -765,7 +765,7 @@
 		break;
 
 	case RPSM_EVENT_PLOGI_RCVD:
-		/**
+		/*
 		 * Too complex to cleanup FC-4 & rport and then acc to PLOGI.
 		 * At least go offline when a PLOGI is received.
 		 */
@@ -787,7 +787,7 @@
 		break;
 
 	case RPSM_EVENT_SCN:
-		/**
+		/*
 		 * already processing RSCN
 		 */
 		break;
@@ -810,7 +810,7 @@
 	}
 }
 
-/**
+/*
  *		Rport has sent LOGO. Awaiting FC-4 offline completion callback.
  */
 static void
@@ -841,7 +841,7 @@
 	}
 }
 
-/**
+/*
  *		LOGO needs to be sent to rport. Awaiting FC-4 offline completion
  *		callback.
  */
@@ -864,7 +864,7 @@
 	}
 }
 
-/**
+/*
  *	Rport is going offline. Awaiting FC-4 offline completion callback.
  */
 static void
@@ -886,7 +886,7 @@
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
 	case RPSM_EVENT_ADDRESS_CHANGE:
-		/**
+		/*
 		 * rport is already going offline.
 		 * SCN - ignore and wait till transitioning to offline state
 		 */
@@ -901,7 +901,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is offline. FC-4s are offline. Awaiting BFA rport offline
  *		callback.
  */
@@ -945,7 +945,7 @@
 	case RPSM_EVENT_SCN:
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
-		/**
+		/*
 		 * Ignore, already offline.
 		 */
 		break;
@@ -955,7 +955,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is offline. FC-4s are offline. Awaiting BFA rport offline
  *		callback to send LOGO accept.
  */
@@ -1009,7 +1009,7 @@
 
 	case RPSM_EVENT_LOGO_RCVD:
 	case RPSM_EVENT_PRLO_RCVD:
-		/**
+		/*
 		 * Ignore - already processing a LOGO.
 		 */
 		break;
@@ -1019,7 +1019,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is being deleted. FC-4s are offline.
  *  Awaiting BFA rport offline
  *		callback to send LOGO.
@@ -1048,7 +1048,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is being deleted. FC-4s are offline. LOGO is being sent.
  */
 static void
@@ -1082,7 +1082,7 @@
 	}
 }
 
-/**
+/*
  *		Rport is offline. FC-4s are offline. BFA rport is offline.
  *		Timer active to delete stale rport.
  */
@@ -1142,7 +1142,7 @@
 	}
 }
 
-/**
+/*
  *	Rport address has changed. Nameserver discovery request is being sent.
  */
 static void
@@ -1199,7 +1199,7 @@
 	}
 }
 
-/**
+/*
  *		Nameserver discovery failed. Waiting for timeout to retry.
  */
 static void
@@ -1263,7 +1263,7 @@
 	}
 }
 
-/**
+/*
  *		Rport address has changed. Nameserver discovery request is sent.
  */
 static void
@@ -1329,13 +1329,13 @@
 		bfa_fcs_rport_send_prlo_acc(rport);
 		break;
 	case RPSM_EVENT_SCN:
-		/**
+		/*
 		 * ignore, wait for NS query response
 		 */
 		break;
 
 	case RPSM_EVENT_LOGO_RCVD:
-		/**
+		/*
 		 * Not logged-in yet. Accept LOGO.
 		 */
 		bfa_fcs_rport_send_logo_acc(rport);
@@ -1354,7 +1354,7 @@
 
 
 
-/**
+/*
 *  fcs_rport_private FCS RPORT private functions
  */
 
@@ -1415,7 +1415,7 @@
 
 	plogi_rsp = (struct fc_logi_s *) BFA_FCXP_RSP_PLD(fcxp);
 
-	/**
+	/*
 	 * Check for failure first.
 	 */
 	if (plogi_rsp->els_cmd.els_code != FC_ELS_ACC) {
@@ -1436,7 +1436,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * PLOGI is complete. Make sure this device is not one of the known
	 * devices with a new FC port address.
 	 */
@@ -1468,7 +1468,7 @@
 		}
 	}
 
-	/**
+	/*
 	 * Normal login path -- no evil twins.
 	 */
 	rport->stats.plogi_accs++;
@@ -1621,7 +1621,7 @@
 	bfa_trc(rport->fcs, rport->pwwn);
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		/* Check if the pid is the same as before. */
@@ -1691,7 +1691,7 @@
 	bfa_trc(rport->fcs, rport->pwwn);
 
 	cthdr = (struct ct_hdr_s *) BFA_FCXP_RSP_PLD(fcxp);
-	cthdr->cmd_rsp_code = bfa_os_ntohs(cthdr->cmd_rsp_code);
+	cthdr->cmd_rsp_code = be16_to_cpu(cthdr->cmd_rsp_code);
 
 	if (cthdr->cmd_rsp_code == CT_RSP_ACCEPT) {
 		bfa_sm_send_event(rport, RPSM_EVENT_ACCEPTED);
@@ -1722,7 +1722,7 @@
 	}
 }
 
-/**
+/*
  *	Called to send a logout to the rport.
  */
 static void
@@ -1759,7 +1759,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_FCXP_SENT);
 }
 
-/**
+/*
  *	Send ACC for a LOGO received.
  */
 static void
@@ -1788,7 +1788,7 @@
 			FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  *	brief
  *	This routine will be called by bfa_timer on timer timeouts.
  *
@@ -1961,7 +1961,7 @@
 	struct bfa_fcs_rport_s *rport;
 	struct bfad_rport_s	*rport_drv;
 
-	/**
+	/*
 	 * allocate rport
 	 */
 	if (bfa_fcb_rport_alloc(fcs->bfad, &rport, &rport_drv)
@@ -1979,7 +1979,7 @@
 	rport->pid = rpid;
 	rport->pwwn = pwwn;
 
-	/**
+	/*
 	 * allocate BFA rport
 	 */
 	rport->bfa_rport = bfa_rport_create(port->fcs->bfa, rport);
@@ -1989,7 +1989,7 @@
 		return NULL;
 	}
 
-	/**
+	/*
 	 * allocate FC-4s
 	 */
 	bfa_assert(bfa_fcs_lport_is_initiator(port));
@@ -2021,7 +2021,7 @@
 {
 	struct bfa_fcs_lport_s *port = rport->port;
 
-	/**
+	/*
 	 * - delete FC-4s
 	 * - delete BFA rport
 	 * - remove from queue of rports
@@ -2093,7 +2093,7 @@
 	}
 }
 
-/**
+/*
  * Update rport parameters from PLOGI or PLOGI accept.
  */
 static void
@@ -2101,14 +2101,14 @@
 {
 	bfa_fcs_lport_t *port = rport->port;
 
-	/**
+	/*
 	 * - port name
 	 * - node name
 	 */
 	rport->pwwn = plogi->port_name;
 	rport->nwwn = plogi->node_name;
 
-	/**
+	/*
 	 * - class of service
 	 */
 	rport->fc_cos = 0;
@@ -2118,16 +2118,16 @@
 	if (plogi->class2.class_valid)
 		rport->fc_cos |= FC_CLASS_2;
 
-	/**
+	/*
 	 * - CISC
 	 * - MAX receive frame size
 	 */
 	rport->cisc = plogi->csp.cisc;
-	rport->maxfrsize = bfa_os_ntohs(plogi->class3.rxsz);
+	rport->maxfrsize = be16_to_cpu(plogi->class3.rxsz);
 
-	bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+	bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
 	bfa_trc(port->fcs, port->fabric->bb_credit);
-	/**
+	/*
 	 * Direct Attach P2P mode :
 	 * This is to handle a bug (233476) in IBM targets in Direct Attach
 	 *  Mode. Basically, in FLOGI Accept the target would have
@@ -2136,19 +2136,19 @@
 	 * in PLOGI.
 	 */
 	if ((!bfa_fcs_fabric_is_switched(port->fabric))	 &&
-		(bfa_os_ntohs(plogi->csp.bbcred) < port->fabric->bb_credit)) {
+		(be16_to_cpu(plogi->csp.bbcred) < port->fabric->bb_credit)) {
 
-		bfa_trc(port->fcs, bfa_os_ntohs(plogi->csp.bbcred));
+		bfa_trc(port->fcs, be16_to_cpu(plogi->csp.bbcred));
 		bfa_trc(port->fcs, port->fabric->bb_credit);
 
-		port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
+		port->fabric->bb_credit = be16_to_cpu(plogi->csp.bbcred);
 		bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
 					  port->fabric->bb_credit);
 	}
 
 }
 
-/**
+/*
  *	Called to handle LOGO received from an existing remote port.
  */
 static void
@@ -2164,11 +2164,11 @@
 
 
 
-/**
+/*
  *  fcs_rport_public FCS rport public interfaces
  */
 
-/**
+/*
  *	Called by bport/vport to create a remote port instance for a discovered
  *	remote device.
  *
@@ -2191,7 +2191,7 @@
 	return rport;
 }
 
-/**
+/*
  * Called to create a rport for which only the wwn is known.
  *
  * @param[in] port	- base port
@@ -2211,7 +2211,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_ADDRESS_DISC);
 	return rport;
 }
-/**
+/*
  * Called by bport in private loop topology to indicate that a
  * rport has been discovered and plogi has been completed.
  *
@@ -2233,7 +2233,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_COMP);
 }
 
-/**
+/*
  *	Called by bport/vport to handle PLOGI received from a new remote port.
  *	If an existing rport does a plogi, it will be handled separately.
  */
@@ -2272,7 +2272,7 @@
 	return 0;
 }
 
-/**
+/*
  *	Called by bport/vport to handle PLOGI received from an existing
  *	 remote port.
  */
@@ -2280,7 +2280,7 @@
 bfa_fcs_rport_plogi(struct bfa_fcs_rport_s *rport, struct fchs_s *rx_fchs,
 			struct fc_logi_s *plogi)
 {
-	/**
+	/*
 	 * @todo Handle P2P and initiator-initiator.
 	 */
 
@@ -2289,7 +2289,7 @@
 	rport->reply_oxid = rx_fchs->ox_id;
 	bfa_trc(rport->fcs, rport->reply_oxid);
 
-	/**
+	/*
 	 * In Switched fabric topology,
 	 * PLOGI to each other. If our pwwn is smaller, ignore it,
 	 * if it is not a well known address.
@@ -2307,7 +2307,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_RCVD);
 }
 
-/**
+/*
  * Called by bport/vport to delete a remote port instance.
  *
  * Rport delete is called under the following conditions:
@@ -2321,7 +2321,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_DELETE);
 }
 
-/**
+/*
 * Called by bport/vport when a target goes offline.
  *
  */
@@ -2331,7 +2331,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 }
 
-/**
+/*
  * Called by bport in n2n when a target (attached port) becomes online.
  *
  */
@@ -2340,7 +2340,7 @@
 {
 	bfa_sm_send_event(rport, RPSM_EVENT_PLOGI_SEND);
 }
-/**
+/*
  *	Called by bport/vport to notify SCN for the remote port
  */
 void
@@ -2350,7 +2350,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_SCN);
 }
 
-/**
+/*
  *	Called by	fcpim to notify that the ITN cleanup is done.
  */
 void
@@ -2359,7 +2359,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
 }
 
-/**
+/*
  *	Called by fcptm to notify that the ITN cleanup is done.
  */
 void
@@ -2368,7 +2368,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_FC4_OFFLINE);
 }
 
-/**
+/*
  *	brief
  *	This routine BFA callback for bfa_rport_online() call.
  *
@@ -2391,7 +2391,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_HCB_ONLINE);
 }
 
-/**
+/*
  *	brief
  *	This routine BFA callback for bfa_rport_offline() call.
  *
@@ -2413,7 +2413,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_HCB_OFFLINE);
 }
 
-/**
+/*
  *	brief
  *	This routine is a static BFA callback when there is a QoS flow_id
  *	change notification
@@ -2437,7 +2437,7 @@
 	bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/**
+/*
  *	brief
  *	This routine is a static BFA callback when there is a QoS priority
  *	change notification
@@ -2461,7 +2461,7 @@
 	bfa_trc(rport->fcs, rport->pwwn);
 }
 
-/**
+/*
 *		Called to process any unsolicited frames from this remote port
  */
 void
@@ -2470,7 +2470,7 @@
 	bfa_sm_send_event(rport, RPSM_EVENT_LOGO_IMP);
 }
 
-/**
+/*
 *		Called to process any unsolicited frames from this remote port
  */
 void
@@ -2577,7 +2577,7 @@
 			FC_MAX_PDUSZ, 0);
 }
 
-/**
+/*
  * Return state of rport.
  */
 int
@@ -2586,7 +2586,7 @@
 	return bfa_sm_to_state(rport_sm_table, rport->sm);
 }
 
-/**
+/*
  *	brief
  *		 Called by the Driver to set rport delete/ageout timeout
  *
@@ -2613,15 +2613,15 @@
 
 
 
-/**
+/*
  * Remote port implementation.
  */
 
-/**
+/*
  *  fcs_rport_api FCS rport API.
  */
 
-/**
+/*
  *	Direct API to add a target by port wwn. This interface is used, for
  *	example, by bios when target pwwn is known from boot lun configuration.
  */
@@ -2634,7 +2634,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  *	Direct API to remove a target and its associated resources. This
  *	interface is used, for example, by driver to remove target
  *	ports from the target list for a VM.
@@ -2663,7 +2663,7 @@
 
 }
 
-/**
+/*
  *	Remote device status for display/debug.
  */
 void
@@ -2674,7 +2674,7 @@
 	bfa_fcs_lport_t *port = rport->port;
 	bfa_port_speed_t rport_speed = rport->rpf.rpsc_speed;
 
-	bfa_os_memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
+	memset(rport_attr, 0, sizeof(struct bfa_rport_attr_s));
 
 	rport_attr->pid = rport->pid;
 	rport_attr->pwwn = rport->pwwn;
@@ -2704,7 +2704,7 @@
 	}
 }
 
-/**
+/*
  *	Per remote device statistics.
  */
 void
@@ -2717,7 +2717,7 @@
 void
 bfa_fcs_rport_clear_stats(struct bfa_fcs_rport_s *rport)
 {
-	bfa_os_memset((char *)&rport->stats, 0,
+	memset((char *)&rport->stats, 0,
 			sizeof(struct bfa_rport_stats_s));
 }
 
@@ -2767,7 +2767,7 @@
 
 
 
-/**
+/*
  * Remote port features (RPF) implementation.
  */
 
@@ -2786,7 +2786,7 @@
 
 static void     bfa_fcs_rpf_timeout(void *arg);
 
-/**
+/*
  *  fcs_rport_ftrs_sm FCS rport state machine events
  */
 
@@ -2981,7 +2981,7 @@
 		bfa_sm_fault(rport->fcs, event);
 	}
 }
-/**
+/*
  * Called when Rport is created.
  */
 void
@@ -2995,7 +2995,7 @@
 	bfa_sm_set_state(rpf, bfa_fcs_rpf_sm_uninit);
 }
 
-/**
+/*
  * Called when Rport becomes online
  */
 void
@@ -3010,7 +3010,7 @@
 		bfa_sm_send_event(&rport->rpf, RPFSM_EVENT_RPORT_ONLINE);
 }
 
-/**
+/*
  * Called when Rport becomes offline
  */
 void
@@ -3090,16 +3090,16 @@
 	rpsc2_acc = (struct fc_rpsc2_acc_s *) BFA_FCXP_RSP_PLD(fcxp);
 	if (rpsc2_acc->els_cmd == FC_ELS_ACC) {
 		rport->stats.rpsc_accs++;
-		num_ents = bfa_os_ntohs(rpsc2_acc->num_pids);
+		num_ents = be16_to_cpu(rpsc2_acc->num_pids);
 		bfa_trc(rport->fcs, num_ents);
 		if (num_ents > 0) {
 			bfa_assert(rpsc2_acc->port_info[0].pid != rport->pid);
 			bfa_trc(rport->fcs,
-				bfa_os_ntohs(rpsc2_acc->port_info[0].pid));
+				be16_to_cpu(rpsc2_acc->port_info[0].pid));
 			bfa_trc(rport->fcs,
-				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+				be16_to_cpu(rpsc2_acc->port_info[0].speed));
 			bfa_trc(rport->fcs,
-				bfa_os_ntohs(rpsc2_acc->port_info[0].index));
+				be16_to_cpu(rpsc2_acc->port_info[0].index));
 			bfa_trc(rport->fcs,
 				rpsc2_acc->port_info[0].type);
 
@@ -3109,7 +3109,7 @@
 			}
 
 			rpf->rpsc_speed = fc_rpsc_operspeed_to_bfa_speed(
-				bfa_os_ntohs(rpsc2_acc->port_info[0].speed));
+				be16_to_cpu(rpsc2_acc->port_info[0].speed));
 
 			bfa_sm_send_event(rpf, RPFSM_EVENT_RPSC_COMP);
 		}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index c787d3a..d8464ae 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -22,7 +22,7 @@
 bfa_hwcb_reginit(struct bfa_s *bfa)
 {
 	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
-	bfa_os_addr_t		kva = bfa_ioc_bar0(&bfa->ioc);
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
 	int			i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
 	if (fn == 0) {
@@ -60,8 +60,8 @@
 static void
 bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
 {
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
-		__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
+	writel(__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq),
+			bfa->iocfc.bfa_regs.intr_status);
 }
 
 void
@@ -72,8 +72,8 @@
 static void
 bfa_hwcb_rspq_ack_msix(struct bfa_s *bfa, int rspq)
 {
-	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
-		__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq));
+	writel(__HFN_INT_RME_Q0 << RME_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), rspq),
+			bfa->iocfc.bfa_regs.intr_status);
 }
 
 void
@@ -102,7 +102,7 @@
 	*num_vecs = __HFN_NUMINTS;
 }
 
-/**
+/*
  * No special setup required for crossbow -- vector assignments are implicit.
  */
 void
@@ -129,7 +129,7 @@
 		bfa->msix.handler[i] = bfa_msix_lpu_err;
 }
 
-/**
+/*
  * Crossbow -- dummy, interrupts are masked
  */
 void
@@ -142,7 +142,7 @@
 {
 }
 
-/**
+/*
  * No special enable/disable -- vector assignments are implicit.
  */
 void
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index c97ebaf..b0efbc7 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -31,15 +31,15 @@
 bfa_hwct_msix_lpu_err_set(struct bfa_s *bfa, bfa_boolean_t msix, int vec)
 {
 	int fn = bfa_ioc_pcifn(&bfa->ioc);
-	bfa_os_addr_t kva = bfa_ioc_bar0(&bfa->ioc);
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
 
 	if (msix)
-		bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], vec);
+		writel(vec, kva + __ct_msix_err_vec_reg[fn]);
 	else
-		bfa_reg_write(kva + __ct_msix_err_vec_reg[fn], 0);
+		writel(0, kva + __ct_msix_err_vec_reg[fn]);
 }
 
-/**
+/*
  * Dummy interrupt handler for handling spurious interrupt during chip-reinit.
  */
 static void
@@ -51,7 +51,7 @@
 bfa_hwct_reginit(struct bfa_s *bfa)
 {
 	struct bfa_iocfc_regs_s	*bfa_regs = &bfa->iocfc.bfa_regs;
-	bfa_os_addr_t		kva = bfa_ioc_bar0(&bfa->ioc);
+	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
 	int			i, q, fn = bfa_ioc_pcifn(&bfa->ioc);
 
 	if (fn == 0) {
@@ -88,8 +88,8 @@
 {
 	u32	r32;
 
-	r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
-	bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
+	r32 = readl(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+	writel(r32, bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
 }
 
 void
@@ -97,8 +97,8 @@
 {
 	u32	r32;
 
-	r32 = bfa_reg_read(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
-	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq], r32);
+	r32 = readl(bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
+	writel(r32, bfa->iocfc.bfa_regs.rme_q_ctrl[rspq]);
 }
 
 void
@@ -110,7 +110,7 @@
 	*num_vecs = BFA_MSIX_CT_MAX;
 }
 
-/**
+/*
  * Setup MSI-X vector for catapult
  */
 void
@@ -156,7 +156,7 @@
 		bfa->msix.handler[i] = bfa_hwct_msix_dummy;
 }
 
-/**
+/*
  * Enable MSI-X vectors
  */
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 6795b24..54475b5 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -23,7 +23,7 @@
 
 BFA_TRC_FILE(CNA, IOC);
 
-/**
+/*
  * IOC local definitions
  */
 #define BFA_IOC_TOV		3000	/* msecs */
@@ -49,7 +49,7 @@
 	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
 
-/**
+/*
  * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
  */
 
@@ -73,7 +73,7 @@
 
 #define bfa_ioc_mbox_cmd_pending(__ioc)		\
 			(!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
-			bfa_reg_read((__ioc)->ioc_regs.hfn_mbox_cmd))
+			readl((__ioc)->ioc_regs.hfn_mbox_cmd))
 
 bfa_boolean_t bfa_auto_recover = BFA_TRUE;
 
@@ -101,11 +101,11 @@
 static void bfa_ioc_pf_failed(struct bfa_ioc_s *ioc);
 static void bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc);
 
-/**
+/*
  *  hal_ioc_sm
  */
 
-/**
+/*
  * IOC state machine definitions/declarations
  */
 enum ioc_event {
@@ -144,7 +144,7 @@
 	{BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
 };
 
-/**
+/*
  * IOCPF state machine definitions/declarations
  */
 
@@ -174,7 +174,7 @@
 static void bfa_iocpf_timeout(void *ioc_arg);
 static void bfa_iocpf_sem_timeout(void *ioc_arg);
 
-/**
+/*
  * IOCPF state machine events
  */
 enum iocpf_event {
@@ -191,7 +191,7 @@
 	IOCPF_E_TIMEOUT		= 11,	/*  f/w response timeout	*/
 };
 
-/**
+/*
  * IOCPF states
  */
 enum bfa_iocpf_state {
@@ -232,11 +232,11 @@
 	{BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
 };
 
-/**
+/*
  * IOC State Machine
  */
 
-/**
+/*
  * Beginning state. IOC uninit state.
  */
 
@@ -245,7 +245,7 @@
 {
 }
 
-/**
+/*
  * IOC is in uninit state.
  */
 static void
@@ -262,7 +262,7 @@
 		bfa_sm_fault(ioc, event);
 	}
 }
-/**
+/*
  * Reset entry actions -- initialize state machine
  */
 static void
@@ -271,7 +271,7 @@
 	bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
 }
 
-/**
+/*
  * IOC is in reset state.
  */
 static void
@@ -304,7 +304,7 @@
 	bfa_iocpf_enable(ioc);
 }
 
-/**
+/*
  * Host IOC function is being enabled, awaiting response from firmware.
  * Semaphore is acquired.
  */
@@ -352,7 +352,7 @@
 	bfa_ioc_send_getattr(ioc);
 }
 
-/**
+/*
  * IOC configuration in progress. Timer is active.
  */
 static void
@@ -447,7 +447,7 @@
 	BFA_LOG(KERN_INFO, bfad, log_level, "IOC disabled\n");
 }
 
-/**
+/*
  * IOC is being disabled
  */
 static void
@@ -474,7 +474,7 @@
 	}
 }
 
-/**
+/*
  * IOC disable completion entry.
  */
 static void
@@ -514,7 +514,7 @@
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
 }
 
-/**
+/*
  * Hardware initialization failed.
  */
 static void
@@ -528,7 +528,7 @@
 		break;
 
 	case IOC_E_FAILED:
-		/**
+		/*
 		 * Initialization failure during iocpf init retry.
 		 */
 		ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -556,7 +556,7 @@
 	struct bfa_ioc_hbfail_notify_s	*notify;
 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
 
-	/**
+	/*
 	 * Notify driver and common modules registered for notification.
 	 */
 	ioc->cbfn->hbfail_cbfn(ioc->bfa);
@@ -569,7 +569,7 @@
 		"Heart Beat of IOC has failed\n");
 }
 
-/**
+/*
  * IOC failure.
  */
 static void
@@ -580,7 +580,7 @@
 	switch (event) {
 
 	case IOC_E_FAILED:
-		/**
+		/*
 		 * Initialization failure during iocpf recovery.
 		 * !!! Fall through !!!
 		 */
@@ -608,12 +608,12 @@
 
 
 
-/**
+/*
  * IOCPF State Machine
  */
 
 
-/**
+/*
  * Reset entry actions -- initialize state machine
  */
 static void
@@ -623,7 +623,7 @@
 	iocpf->auto_recover = bfa_auto_recover;
 }
 
-/**
+/*
  * Beginning state. IOC is in reset state.
  */
 static void
@@ -646,7 +646,7 @@
 	}
 }
 
-/**
+/*
  * Semaphore should be acquired for version check.
  */
 static void
@@ -655,7 +655,7 @@
 	bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
-/**
+/*
  * Awaiting h/w semaphore to continue with version check.
  */
 static void
@@ -692,7 +692,7 @@
 	}
 }
 
-/**
+/*
  * Notify enable completion callback.
  */
 static void
@@ -708,7 +708,7 @@
 	bfa_iocpf_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * Awaiting firmware version match.
  */
 static void
@@ -739,7 +739,7 @@
 	}
 }
 
-/**
+/*
  * Request for semaphore.
  */
 static void
@@ -748,7 +748,7 @@
 	bfa_ioc_hw_sem_get(iocpf->ioc);
 }
 
-/**
+/*
 * Awaiting semaphore for h/w initialization.
  */
 static void
@@ -782,7 +782,7 @@
 	bfa_ioc_reset(iocpf->ioc, BFA_FALSE);
 }
 
-/**
+/*
  * Hardware is being initialized. Interrupts are enabled.
  * Holding hardware semaphore lock.
  */
@@ -839,7 +839,7 @@
 	bfa_ioc_send_enable(iocpf->ioc);
 }
 
-/**
+/*
  * Host IOC function is being enabled, awaiting response from firmware.
  * Semaphore is acquired.
  */
@@ -866,8 +866,7 @@
 	case IOCPF_E_TIMEOUT:
 		iocpf->retry_count++;
 		if (iocpf->retry_count < BFA_IOC_HWINIT_MAX) {
-			bfa_reg_write(ioc->ioc_regs.ioc_fwstate,
-				      BFI_IOC_UNINIT);
+			writel(BFI_IOC_UNINIT, ioc->ioc_regs.ioc_fwstate);
 			bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
 			break;
 		}
@@ -944,7 +943,7 @@
 	bfa_ioc_send_disable(iocpf->ioc);
 }
 
-/**
+/*
  * IOC is being disabled
  */
 static void
@@ -968,7 +967,7 @@
 		 */
 
 	case IOCPF_E_TIMEOUT:
-		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+		writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
 		bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
 		break;
 
@@ -980,7 +979,7 @@
 	}
 }
 
-/**
+/*
  * IOC disable completion entry.
  */
 static void
@@ -1018,7 +1017,7 @@
 	bfa_iocpf_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * Hardware initialization failed.
  */
 static void
@@ -1053,18 +1052,18 @@
 static void
 bfa_iocpf_sm_fail_entry(struct bfa_iocpf_s *iocpf)
 {
-	/**
+	/*
 	 * Mark IOC as failed in hardware and stop firmware.
 	 */
 	bfa_ioc_lpu_stop(iocpf->ioc);
-	bfa_reg_write(iocpf->ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
+	writel(BFI_IOC_FAIL, iocpf->ioc->ioc_regs.ioc_fwstate);
 
-	/**
+	/*
 	 * Notify other functions on HB failure.
 	 */
 	bfa_ioc_notify_hbfail(iocpf->ioc);
 
-	/**
+	/*
 	 * Flush any queued up mailbox requests.
 	 */
 	bfa_ioc_mbox_hbfail(iocpf->ioc);
@@ -1073,7 +1072,7 @@
 		bfa_iocpf_recovery_timer_start(iocpf->ioc);
 }
 
-/**
+/*
  * IOC is in failed state.
  */
 static void
@@ -1101,7 +1100,7 @@
 
 
 
-/**
+/*
  *  hal_ioc_pvt BFA IOC private functions
  */
 
@@ -1113,7 +1112,7 @@
 
 	ioc->cbfn->disable_cbfn(ioc->bfa);
 
-	/**
+	/*
 	 * Notify common modules registered for notification.
 	 */
 	list_for_each(qe, &ioc->hb_notify_q) {
@@ -1123,18 +1122,18 @@
 }
 
 bfa_boolean_t
-bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_get(void __iomem *sem_reg)
 {
 	u32 r32;
 	int cnt = 0;
 #define BFA_SEM_SPINCNT	3000
 
-	r32 = bfa_reg_read(sem_reg);
+	r32 = readl(sem_reg);
 
 	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
 		cnt++;
-		bfa_os_udelay(2);
-		r32 = bfa_reg_read(sem_reg);
+		udelay(2);
+		r32 = readl(sem_reg);
 	}
 
 	if (r32 == 0)
@@ -1145,9 +1144,9 @@
 }
 
 void
-bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
+bfa_ioc_sem_release(void __iomem *sem_reg)
 {
-	bfa_reg_write(sem_reg, 1);
+	writel(1, sem_reg);
 }
 
 static void
@@ -1155,11 +1154,11 @@
 {
 	u32	r32;
 
-	/**
+	/*
 	 * First read to the semaphore register will return 0, subsequent reads
 	 * will return 1. Semaphore is released by writing 1 to the register
 	 */
-	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	r32 = readl(ioc->ioc_regs.ioc_sem_reg);
 	if (r32 == 0) {
 		bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
 		return;
@@ -1171,7 +1170,7 @@
 void
 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
 {
-	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
+	writel(1, ioc->ioc_regs.ioc_sem_reg);
 }
 
 static void
@@ -1180,7 +1179,7 @@
 	bfa_sem_timer_stop(ioc);
 }
 
-/**
+/*
  * Initialize LPU local memory (aka secondary memory / SRAM)
  */
 static void
@@ -1190,7 +1189,7 @@
 	int		i;
 #define PSS_LMEM_INIT_TIME  10000
 
-	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
 	pss_ctl &= ~__PSS_LMEM_RESET;
 	pss_ctl |= __PSS_LMEM_INIT_EN;
 
@@ -1198,18 +1197,18 @@
 	 * i2c workaround 12.5khz clock
 	 */
 	pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
-	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 
-	/**
+	/*
 	 * wait for memory initialization to be complete
 	 */
 	i = 0;
 	do {
-		pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+		pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
 		i++;
 	} while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
 
-	/**
+	/*
 	 * If memory initialization is not successful, IOC timeout will catch
 	 * such failures.
 	 */
@@ -1217,7 +1216,7 @@
 	bfa_trc(ioc, pss_ctl);
 
 	pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
-	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
 static void
@@ -1225,13 +1224,13 @@
 {
 	u32	pss_ctl;
 
-	/**
+	/*
 	 * Take processor out of reset.
 	 */
-	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
 	pss_ctl &= ~__PSS_LPU0_RESET;
 
-	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
 static void
@@ -1239,16 +1238,16 @@
 {
 	u32	pss_ctl;
 
-	/**
+	/*
 	 * Put processors in reset.
 	 */
-	pss_ctl = bfa_reg_read(ioc->ioc_regs.pss_ctl_reg);
+	pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
 	pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
 
-	bfa_reg_write(ioc->ioc_regs.pss_ctl_reg, pss_ctl);
+	writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
 }
 
-/**
+/*
  * Get driver and firmware versions.
  */
 void
@@ -1261,7 +1260,7 @@
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr_s) / sizeof(u32));
 	     i++) {
@@ -1271,7 +1270,7 @@
 	}
 }
 
-/**
+/*
  * Returns TRUE if same.
  */
 bfa_boolean_t
@@ -1296,7 +1295,7 @@
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Return true if current running version is valid. Firmware signature and
  * execution context (driver/bios) must match.
  */
@@ -1305,7 +1304,7 @@
 {
 	struct bfi_ioc_image_hdr_s fwhdr, *drv_fwhdr;
 
-	/**
+	/*
 	 * If bios/efi boot (flash based) -- return true
 	 */
 	if (bfa_ioc_is_bios_optrom(ioc))
@@ -1321,7 +1320,7 @@
 		return BFA_FALSE;
 	}
 
-	if (bfa_os_swap32(fwhdr.param) != boot_env) {
+	if (swab32(fwhdr.param) != boot_env) {
 		bfa_trc(ioc, fwhdr.param);
 		bfa_trc(ioc, boot_env);
 		return BFA_FALSE;
@@ -1330,7 +1329,7 @@
 	return bfa_ioc_fwver_cmp(ioc, &fwhdr);
 }
 
-/**
+/*
  * Conditionally flush any pending message from firmware at start.
  */
 static void
@@ -1338,9 +1337,9 @@
 {
 	u32	r32;
 
-	r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+	r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
 	if (r32)
-		bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
+		writel(1, ioc->ioc_regs.lpu_mbox_cmd);
 }
 
 
@@ -1352,7 +1351,7 @@
 	u32 boot_type;
 	u32 boot_env;
 
-	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 
 	if (force)
 		ioc_fwstate = BFI_IOC_UNINIT;
@@ -1362,7 +1361,7 @@
 	boot_type = BFI_BOOT_TYPE_NORMAL;
 	boot_env = BFI_BOOT_LOADER_OS;
 
-	/**
+	/*
 	 * Flash based firmware boot BIOS env.
 	 */
 	if (bfa_ioc_is_bios_optrom(ioc)) {
@@ -1370,7 +1369,7 @@
 		boot_env = BFI_BOOT_LOADER_BIOS;
 	}
 
-	/**
+	/*
 	 * Flash based firmware boot UEFI env.
 	 */
 	if (bfa_ioc_is_uefi(ioc)) {
@@ -1378,7 +1377,7 @@
 		boot_env = BFI_BOOT_LOADER_UEFI;
 	}
 
-	/**
+	/*
 	 * check if firmware is valid
 	 */
 	fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
@@ -1389,7 +1388,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * If hardware initialization is in progress (initialized by other IOC),
 	 * just wait for an initialization completion interrupt.
 	 */
@@ -1398,7 +1397,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * If IOC function is disabled and firmware version is same,
 	 * just re-enable IOC.
 	 *
@@ -1409,7 +1408,7 @@
 	if (ioc_fwstate == BFI_IOC_DISABLED ||
 	    (!bfa_ioc_is_bios_optrom(ioc) && ioc_fwstate == BFI_IOC_OP)) {
 
-		/**
+		/*
 		 * When using MSI-X any pending firmware ready event should
 		 * be flushed. Otherwise MSI-X interrupts are not delivered.
 		 */
@@ -1419,7 +1418,7 @@
 		return;
 	}
 
-	/**
+	/*
 	 * Initialize the h/w for any other states.
 	 */
 	bfa_ioc_boot(ioc, boot_type, boot_env);
@@ -1449,17 +1448,17 @@
 	 * first write msg to mailbox registers
 	 */
 	for (i = 0; i < len / sizeof(u32); i++)
-		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32),
-			      bfa_os_wtole(msgp[i]));
+		writel(cpu_to_le32(msgp[i]),
+			ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
 
 	for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
-		bfa_reg_write(ioc->ioc_regs.hfn_mbox + i * sizeof(u32), 0);
+		writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
 
 	/*
 	 * write 1 to mailbox CMD to trigger LPU event
 	 */
-	bfa_reg_write(ioc->ioc_regs.hfn_mbox_cmd, 1);
-	(void) bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+	writel(1, ioc->ioc_regs.hfn_mbox_cmd);
+	(void) readl(ioc->ioc_regs.hfn_mbox_cmd);
 }
 
 static void
@@ -1472,7 +1471,7 @@
 		    bfa_ioc_portid(ioc));
 	enable_req.ioc_class = ioc->ioc_mc;
 	bfa_os_gettimeofday(&tv);
-	enable_req.tv_sec = bfa_os_ntohl(tv.tv_sec);
+	enable_req.tv_sec = be32_to_cpu(tv.tv_sec);
 	bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req_s));
 }
 
@@ -1503,7 +1502,7 @@
 	struct bfa_ioc_s  *ioc = cbarg;
 	u32	hb_count;
 
-	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+	hb_count = readl(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
 		printk(KERN_CRIT "Firmware heartbeat failure at %d", hb_count);
 		bfa_ioc_recover(ioc);
@@ -1519,7 +1518,7 @@
 static void
 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
 {
-	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
+	ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
 	bfa_hb_timer_start(ioc);
 }
 
@@ -1530,7 +1529,7 @@
 }
 
 
-/**
+/*
  *	Initiate a full firmware download.
  */
 static void
@@ -1543,7 +1542,7 @@
 	u32 chunkno = 0;
 	u32 i;
 
-	/**
+	/*
 	 * Initialize LMEM first before code download
 	 */
 	bfa_ioc_lmem_init(ioc);
@@ -1554,7 +1553,7 @@
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
 
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
 
@@ -1564,7 +1563,7 @@
 					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
 		}
 
-		/**
+		/*
 		 * write smem
 		 */
 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
@@ -1572,27 +1571,25 @@
 
 		loff += sizeof(u32);
 
-		/**
+		/*
 		 * handle page offset wrap around
 		 */
 		loff = PSS_SMEM_PGOFF(loff);
 		if (loff == 0) {
 			pgnum++;
-			bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-				      pgnum);
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 		}
 	}
 
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-		      bfa_ioc_smem_pgnum(ioc, 0));
+	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
 
 	/*
 	 * Set boot type and boot param at the end.
 	*/
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
-			bfa_os_swap32(boot_type));
+			swab32(boot_type));
 	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_LOADER_OFF,
-			bfa_os_swap32(boot_env));
+			swab32(boot_env));
 }
 
 static void
@@ -1601,7 +1598,7 @@
 	bfa_ioc_hwinit(ioc, force);
 }
 
-/**
+/*
  * Update BFA configuration from firmware configuration.
  */
 static void
@@ -1609,14 +1606,14 @@
 {
 	struct bfi_ioc_attr_s	*attr = ioc->attr;
 
-	attr->adapter_prop  = bfa_os_ntohl(attr->adapter_prop);
-	attr->card_type     = bfa_os_ntohl(attr->card_type);
-	attr->maxfrsize	    = bfa_os_ntohs(attr->maxfrsize);
+	attr->adapter_prop  = be32_to_cpu(attr->adapter_prop);
+	attr->card_type     = be32_to_cpu(attr->card_type);
+	attr->maxfrsize	    = be16_to_cpu(attr->maxfrsize);
 
 	bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
 }
 
-/**
+/*
  * Attach time initialization of mbox logic.
  */
 static void
@@ -1632,7 +1629,7 @@
 	}
 }
 
-/**
+/*
  * Mbox poll timer -- restarts any pending mailbox requests.
  */
 static void
@@ -1642,27 +1639,27 @@
 	struct bfa_mbox_cmd_s		*cmd;
 	u32			stat;
 
-	/**
+	/*
 	 * If no command pending, do nothing
 	 */
 	if (list_empty(&mod->cmd_q))
 		return;
 
-	/**
+	/*
 	 * If previous command is not yet fetched by firmware, do nothing
 	 */
-	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
 	if (stat)
 		return;
 
-	/**
+	/*
 	 * Enqueue command to firmware.
 	 */
 	bfa_q_deq(&mod->cmd_q, &cmd);
 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 }
 
-/**
+/*
  * Cleanup any pending requests.
  */
 static void
@@ -1675,7 +1672,7 @@
 		bfa_q_deq(&mod->cmd_q, &cmd);
 }
 
-/**
+/*
  * Read data from SMEM to host through PCI memmap
  *
  * @param[in]	ioc	memory for IOC
@@ -1704,26 +1701,25 @@
 		return BFA_STATUS_FAILED;
 	}
 
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	len = sz/sizeof(u32);
 	bfa_trc(ioc, len);
 	for (i = 0; i < len; i++) {
 		r32 = bfa_mem_read(ioc->ioc_regs.smem_page_start, loff);
-		buf[i] = bfa_os_ntohl(r32);
+		buf[i] = be32_to_cpu(r32);
 		loff += sizeof(u32);
 
-		/**
+		/*
 		 * handle page offset wrap around
 		 */
 		loff = PSS_SMEM_PGOFF(loff);
 		if (loff == 0) {
 			pgnum++;
-			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 		}
 	}
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-		      bfa_ioc_smem_pgnum(ioc, 0));
+	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
 	/*
 	 *  release semaphore.
 	 */
@@ -1733,7 +1729,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Clear SMEM data from host through PCI memmap
  *
  * @param[in]	ioc	memory for IOC
@@ -1760,7 +1756,7 @@
 		return BFA_STATUS_FAILED;
 	}
 
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+	writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 
 	len = sz/sizeof(u32); /* len in words */
 	bfa_trc(ioc, len);
@@ -1768,17 +1764,16 @@
 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff, 0);
 		loff += sizeof(u32);
 
-		/**
+		/*
 		 * handle page offset wrap around
 		 */
 		loff = PSS_SMEM_PGOFF(loff);
 		if (loff == 0) {
 			pgnum++;
-			bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
+			writel(pgnum, ioc->ioc_regs.host_page_num_fn);
 		}
 	}
-	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
-		      bfa_ioc_smem_pgnum(ioc, 0));
+	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
 
 	/*
 	 *  release semaphore.
@@ -1788,7 +1783,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * hal iocpf to ioc interface
  */
 static void
@@ -1813,7 +1808,7 @@
 bfa_ioc_pf_fwmismatch(struct bfa_ioc_s *ioc)
 {
 	struct bfad_s *bfad = (struct bfad_s *)ioc->bfa->bfad;
-	/**
+	/*
 	 * Provide enable completion callback.
 	 */
 	ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
@@ -1824,7 +1819,7 @@
 
 
 
-/**
+/*
  *  hal_ioc_public
  */
 
@@ -1848,43 +1843,43 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
 void
 bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_env)
 {
-	bfa_os_addr_t	rb;
+	void __iomem *rb;
 
 	bfa_ioc_stats(ioc, ioc_boots);
 
 	if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
 		return;
 
-	/**
+	/*
 	 * Initialize IOC state of all functions on a chip reset.
 	 */
 	rb = ioc->pcidev.pci_bar_kva;
 	if (boot_type == BFI_BOOT_TYPE_MEMTEST) {
-		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_MEMTEST);
-		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_MEMTEST);
+		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
+		writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
 	} else {
-		bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_INITING);
-		bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_INITING);
+		writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
+		writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
 	}
 
 	bfa_ioc_msgflush(ioc);
 	bfa_ioc_download_fw(ioc, boot_type, boot_env);
 
-	/**
+	/*
 	 * Enable interrupts just before starting LPU
 	 */
 	ioc->cbfn->reset_cbfn(ioc->bfa);
 	bfa_ioc_lpu_start(ioc);
 }
 
-/**
+/*
  * Enable/disable IOC failure auto recovery.
  */
 void
@@ -1904,7 +1899,7 @@
 bfa_boolean_t
 bfa_ioc_is_initialized(struct bfa_ioc_s *ioc)
 {
-	u32 r32 = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+	u32 r32 = readl(ioc->ioc_regs.ioc_fwstate);
 
 	return ((r32 != BFI_IOC_UNINIT) &&
 		(r32 != BFI_IOC_INITING) &&
@@ -1918,21 +1913,21 @@
 	u32	r32;
 	int		i;
 
-	/**
+	/*
 	 * read the MBOX msg
 	 */
 	for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
 	     i++) {
-		r32 = bfa_reg_read(ioc->ioc_regs.lpu_mbox +
+		r32 = readl(ioc->ioc_regs.lpu_mbox +
 				   i * sizeof(u32));
-		msgp[i] = bfa_os_htonl(r32);
+		msgp[i] = cpu_to_be32(r32);
 	}
 
-	/**
+	/*
 	 * turn off mailbox interrupt by clearing mailbox status
 	 */
-	bfa_reg_write(ioc->ioc_regs.lpu_mbox_cmd, 1);
-	bfa_reg_read(ioc->ioc_regs.lpu_mbox_cmd);
+	writel(1, ioc->ioc_regs.lpu_mbox_cmd);
+	readl(ioc->ioc_regs.lpu_mbox_cmd);
 }
 
 void
@@ -1971,7 +1966,7 @@
 	}
 }
 
-/**
+/*
  * IOC attach time initialization and setup.
  *
  * @param[in]	ioc	memory for IOC
@@ -1996,7 +1991,7 @@
 	bfa_fsm_send_event(ioc, IOC_E_RESET);
 }
 
-/**
+/*
  * Driver detach time IOC cleanup.
  */
 void
@@ -2005,7 +2000,7 @@
 	bfa_fsm_send_event(ioc, IOC_E_DETACH);
 }
 
-/**
+/*
  * Setup IOC PCI properties.
  *
  * @param[in]	pcidev	PCI device information for this IOC
@@ -2019,7 +2014,7 @@
 	ioc->ctdev	= bfa_asic_id_ct(ioc->pcidev.device_id);
 	ioc->cna	= ioc->ctdev && !ioc->fcmode;
 
-	/**
+	/*
 	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
 	 */
 	if (ioc->ctdev)
@@ -2031,7 +2026,7 @@
 	bfa_ioc_reg_init(ioc);
 }
 
-/**
+/*
  * Initialize IOC dma memory
  *
  * @param[in]	dm_kva	kernel virtual address of IOC dma memory
@@ -2040,7 +2035,7 @@
 void
 bfa_ioc_mem_claim(struct bfa_ioc_s *ioc,  u8 *dm_kva, u64 dm_pa)
 {
-	/**
+	/*
 	 * dma memory for firmware attribute
 	 */
 	ioc->attr_dma.kva = dm_kva;
@@ -2048,7 +2043,7 @@
 	ioc->attr = (struct bfi_ioc_attr_s *) dm_kva;
 }
 
-/**
+/*
  * Return size of dma memory required.
  */
 u32
@@ -2073,7 +2068,7 @@
 	bfa_fsm_send_event(ioc, IOC_E_DISABLE);
 }
 
-/**
+/*
  * Returns memory required for saving firmware trace in case of crash.
  * Driver must call this interface to allocate memory required for
  * automatic saving of firmware trace. Driver should call
@@ -2086,7 +2081,7 @@
 	return (auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
 }
 
-/**
+/*
  * Initialize memory for saving firmware trace. Driver must initialize
  * trace memory before call bfa_ioc_enable().
  */
@@ -2109,7 +2104,7 @@
 	return PSS_SMEM_PGOFF(fmaddr);
 }
 
-/**
+/*
  * Register mailbox message handler functions
  *
  * @param[in]	ioc		IOC instance
@@ -2125,7 +2120,7 @@
 		mod->mbhdlr[mc].cbfn = mcfuncs[mc];
 }
 
-/**
+/*
  * Register mailbox message handler function, to be called by common modules
  */
 void
@@ -2138,7 +2133,7 @@
 	mod->mbhdlr[mc].cbarg	= cbarg;
 }
 
-/**
+/*
  * Queue a mailbox command request to firmware. Waits if mailbox is busy.
  * Responsibility of caller to serialize
  *
@@ -2151,7 +2146,7 @@
 	struct bfa_ioc_mbox_mod_s	*mod = &ioc->mbox_mod;
 	u32			stat;
 
-	/**
+	/*
 	 * If a previous command is pending, queue new command
 	 */
 	if (!list_empty(&mod->cmd_q)) {
@@ -2159,22 +2154,22 @@
 		return;
 	}
 
-	/**
+	/*
 	 * If mailbox is busy, queue command for poll timer
 	 */
-	stat = bfa_reg_read(ioc->ioc_regs.hfn_mbox_cmd);
+	stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
 	if (stat) {
 		list_add_tail(&cmd->qe, &mod->cmd_q);
 		return;
 	}
 
-	/**
+	/*
 	 * mailbox is free -- queue command to firmware
 	 */
 	bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
 }
 
-/**
+/*
  * Handle mailbox interrupts
  */
 void
@@ -2186,7 +2181,7 @@
 
 	bfa_ioc_msgget(ioc, &m);
 
-	/**
+	/*
 	 * Treat IOC message class as special.
 	 */
 	mc = m.mh.msg_class;
@@ -2214,7 +2209,7 @@
 	ioc->port_id = bfa_ioc_pcifn(ioc);
 }
 
-/**
+/*
  * return true if IOC is disabled
  */
 bfa_boolean_t
@@ -2224,7 +2219,7 @@
 		bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled);
 }
 
-/**
+/*
  * return true if IOC firmware is different.
  */
 bfa_boolean_t
@@ -2243,7 +2238,7 @@
 	 ((__sm) == BFI_IOC_FAIL) ||		\
 	 ((__sm) == BFI_IOC_CFG_DISABLED))
 
-/**
+/*
  * Check if adapter is disabled -- both IOCs should be in a disabled
  * state.
  */
@@ -2251,17 +2246,17 @@
 bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc)
 {
 	u32	ioc_state;
-	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
 
 	if (!bfa_fsm_cmp_state(ioc, bfa_ioc_sm_disabled))
 		return BFA_FALSE;
 
-	ioc_state = bfa_reg_read(rb + BFA_IOC0_STATE_REG);
+	ioc_state = readl(rb + BFA_IOC0_STATE_REG);
 	if (!bfa_ioc_state_disabled(ioc_state))
 		return BFA_FALSE;
 
 	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_FC_8G1P) {
-		ioc_state = bfa_reg_read(rb + BFA_IOC1_STATE_REG);
+		ioc_state = readl(rb + BFA_IOC1_STATE_REG);
 		if (!bfa_ioc_state_disabled(ioc_state))
 			return BFA_FALSE;
 	}
@@ -2269,7 +2264,7 @@
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Add to IOC heartbeat failure notification queue. To be used by common
  * modules such as cee, port, diag.
  */
@@ -2293,7 +2288,7 @@
 	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
 	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
 	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
-	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
+	memcpy(&ad_attr->vpd, &ioc_attr->vpd,
 		      sizeof(struct bfa_mfg_vpd_s));
 
 	ad_attr->nports = bfa_ioc_get_nports(ioc);
@@ -2343,8 +2338,8 @@
 void
 bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
 {
-	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
-	bfa_os_memcpy((void *)serial_num,
+	memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+	memcpy((void *)serial_num,
 			(void *)ioc->attr->brcd_serialnum,
 			BFA_ADAPTER_SERIAL_NUM_LEN);
 }
@@ -2352,8 +2347,8 @@
 void
 bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
 {
-	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
-	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+	memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+	memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
 }
 
 void
@@ -2361,7 +2356,7 @@
 {
 	bfa_assert(chip_rev);
 
-	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+	memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
 
 	chip_rev[0] = 'R';
 	chip_rev[1] = 'e';
@@ -2374,16 +2369,16 @@
 void
 bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
 {
-	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
-	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
+	memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+	memcpy(optrom_ver, ioc->attr->optrom_version,
 		      BFA_VERSION_LEN);
 }
 
 void
 bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
 {
-	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
-	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+	memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+	memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
 }
 
 void
@@ -2392,14 +2387,14 @@
 	struct bfi_ioc_attr_s	*ioc_attr;
 
 	bfa_assert(model);
-	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+	memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
 
 	ioc_attr = ioc->attr;
 
-	/**
+	/*
 	 * model name
 	 */
-	bfa_os_snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
+	snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
 		BFA_MFG_NAME, ioc_attr->card_type);
 }
 
@@ -2446,7 +2441,7 @@
 void
 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 {
-	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
+	memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
 
 	ioc_attr->state = bfa_ioc_get_state(ioc);
 	ioc_attr->port_id = ioc->port_id;
@@ -2460,7 +2455,7 @@
 	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
-/**
+/*
  *  hal_wwn_public
  */
 wwn_t
@@ -2526,7 +2521,7 @@
 	return ioc->fcmode || !bfa_asic_id_ct(ioc->pcidev.device_id);
 }
 
-/**
+/*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2541,12 +2536,12 @@
 	if (tlen > ioc->dbg_fwsave_len)
 		tlen = ioc->dbg_fwsave_len;
 
-	bfa_os_memcpy(trcdata, ioc->dbg_fwsave, tlen);
+	memcpy(trcdata, ioc->dbg_fwsave, tlen);
 	*trclen = tlen;
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Clear saved firmware trace
  */
 void
@@ -2555,7 +2550,7 @@
 	ioc->dbg_fwsave_once = BFA_TRUE;
 }
 
-/**
+/*
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2595,7 +2590,7 @@
 
 	bfa_ioc_send_fwsync(ioc);
 
-	/**
+	/*
 	 * After sending a fw sync mbox command wait for it to
 	 * take effect.  We will not wait for a response because
 	 *    1. fw_sync mbox cmd doesn't have a response.
@@ -2610,7 +2605,7 @@
 		fwsync_iter--;
 }
 
-/**
+/*
  * Dump firmware smem
  */
 bfa_status_t
@@ -2630,7 +2625,7 @@
 	loff = *offset;
 	dlen = *buflen;
 
-	/**
+	/*
 	 * First smem read, sync smem before proceeding
 	 * No need to sync before reading every chunk.
 	 */
@@ -2657,7 +2652,7 @@
 	return status;
 }
 
-/**
+/*
  * Firmware statistics
  */
 bfa_status_t
@@ -2702,7 +2697,7 @@
 	return status;
 }
 
-/**
+/*
  * Save firmware trace if configured.
  */
 static void
@@ -2716,7 +2711,7 @@
 	}
 }
 
-/**
+/*
  * Firmware failure detected. Start recovery actions.
  */
 static void
@@ -2738,7 +2733,7 @@
 		return;
 }
 
-/**
+/*
  *  hal_iocpf_pvt BFA IOC PF private functions
  */
 
@@ -2795,7 +2790,7 @@
 	bfa_ioc_hw_sem_get(ioc);
 }
 
-/**
+/*
  *  bfa timer function
  */
 void
@@ -2840,7 +2835,7 @@
 	}
 }
 
-/**
+/*
  * Should be called with lock protection
  */
 void
@@ -2858,7 +2853,7 @@
 	list_add_tail(&timer->qe, &mod->timer_q);
 }
 
-/**
+/*
  * Should be called with lock protection
  */
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 288c580..9c407a8 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -22,29 +22,29 @@
 #include "bfa_cs.h"
 #include "bfi.h"
 
-/**
+/*
  * BFA timer declarations
  */
 typedef void (*bfa_timer_cbfn_t)(void *);
 
-/**
+/*
  * BFA timer data structure
  */
 struct bfa_timer_s {
 	struct list_head	qe;
 	bfa_timer_cbfn_t timercb;
 	void		*arg;
-	int		timeout;	/**< in millisecs. */
+	int		timeout;	/* in millisecs */
 };
 
-/**
+/*
  * Timer module structure
  */
 struct bfa_timer_mod_s {
 	struct list_head timer_q;
 };
 
-#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
+#define BFA_TIMER_FREQ 200 /* specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
 void bfa_timer_init(struct bfa_timer_mod_s *mod);
@@ -53,7 +53,7 @@
 			unsigned int timeout);
 void bfa_timer_stop(struct bfa_timer_s *timer);
 
-/**
+/*
  * Generic Scatter Gather Element used by driver
  */
 struct bfa_sge_s {
@@ -62,9 +62,9 @@
 };
 
 #define bfa_sge_word_swap(__sge) do {					     \
-	((u32 *)(__sge))[0] = bfa_os_swap32(((u32 *)(__sge))[0]);      \
-	((u32 *)(__sge))[1] = bfa_os_swap32(((u32 *)(__sge))[1]);      \
-	((u32 *)(__sge))[2] = bfa_os_swap32(((u32 *)(__sge))[2]);      \
+	((u32 *)(__sge))[0] = swab32(((u32 *)(__sge))[0]);      \
+	((u32 *)(__sge))[1] = swab32(((u32 *)(__sge))[1]);      \
+	((u32 *)(__sge))[2] = swab32(((u32 *)(__sge))[2]);      \
 } while (0)
 
 #define bfa_swap_words(_x)  (	\
@@ -80,17 +80,17 @@
 #define bfa_sgaddr_le(_x)	(_x)
 #endif
 
-/**
+/*
  * PCI device information required by IOC
  */
 struct bfa_pcidev_s {
 	int		pci_slot;
 	u8		pci_func;
-	u16	device_id;
-	bfa_os_addr_t   pci_bar_kva;
+	u16		device_id;
+	void __iomem	*pci_bar_kva;
 };
 
-/**
+/*
  * Structure used to remember the DMA-able memory block's KVA and Physical
  * Address
  */
@@ -102,7 +102,7 @@
 #define BFA_DMA_ALIGN_SZ	256
 #define BFA_ROUNDUP(_l, _s)	(((_l) + ((_s) - 1)) & ~((_s) - 1))
 
-/**
+/*
  * smem size for Crossbow and Catapult
  */
 #define BFI_SMEM_CB_SIZE	0x200000U	/* ! 2MB for crossbow	*/
@@ -125,40 +125,38 @@
 static inline void
 __bfa_dma_be_addr_set(union bfi_addr_u *dma_addr, u64 pa)
 {
-	dma_addr->a32.addr_lo = (u32) bfa_os_htonl(pa);
-	dma_addr->a32.addr_hi = (u32) bfa_os_htonl(bfa_os_u32(pa));
+	dma_addr->a32.addr_lo = (u32) cpu_to_be32(pa);
+	dma_addr->a32.addr_hi = (u32) cpu_to_be32(bfa_os_u32(pa));
 }
 
 struct bfa_ioc_regs_s {
-	bfa_os_addr_t   hfn_mbox_cmd;
-	bfa_os_addr_t   hfn_mbox;
-	bfa_os_addr_t   lpu_mbox_cmd;
-	bfa_os_addr_t   lpu_mbox;
-	bfa_os_addr_t   pss_ctl_reg;
-	bfa_os_addr_t   pss_err_status_reg;
-	bfa_os_addr_t   app_pll_fast_ctl_reg;
-	bfa_os_addr_t   app_pll_slow_ctl_reg;
-	bfa_os_addr_t   ioc_sem_reg;
-	bfa_os_addr_t   ioc_usage_sem_reg;
-	bfa_os_addr_t   ioc_init_sem_reg;
-	bfa_os_addr_t   ioc_usage_reg;
-	bfa_os_addr_t   host_page_num_fn;
-	bfa_os_addr_t   heartbeat;
-	bfa_os_addr_t   ioc_fwstate;
-	bfa_os_addr_t   ll_halt;
-	bfa_os_addr_t   err_set;
-	bfa_os_addr_t   shirq_isr_next;
-	bfa_os_addr_t   shirq_msk_next;
-	bfa_os_addr_t   smem_page_start;
+	void __iomem *hfn_mbox_cmd;
+	void __iomem *hfn_mbox;
+	void __iomem *lpu_mbox_cmd;
+	void __iomem *lpu_mbox;
+	void __iomem *pss_ctl_reg;
+	void __iomem *pss_err_status_reg;
+	void __iomem *app_pll_fast_ctl_reg;
+	void __iomem *app_pll_slow_ctl_reg;
+	void __iomem *ioc_sem_reg;
+	void __iomem *ioc_usage_sem_reg;
+	void __iomem *ioc_init_sem_reg;
+	void __iomem *ioc_usage_reg;
+	void __iomem *host_page_num_fn;
+	void __iomem *heartbeat;
+	void __iomem *ioc_fwstate;
+	void __iomem *ll_halt;
+	void __iomem *err_set;
+	void __iomem *shirq_isr_next;
+	void __iomem *shirq_msk_next;
+	void __iomem *smem_page_start;
 	u32	smem_pg0;
 };
 
-#define bfa_reg_read(_raddr)	bfa_os_reg_read(_raddr)
-#define bfa_reg_write(_raddr, _val)	bfa_os_reg_write(_raddr, _val)
-#define bfa_mem_read(_raddr, _off)	bfa_os_mem_read(_raddr, _off)
+#define bfa_mem_read(_raddr, _off)	swab32(readl(((_raddr) + (_off))))
 #define bfa_mem_write(_raddr, _off, _val)	\
-					bfa_os_mem_write(_raddr, _off, _val)
-/**
+			writel(swab32((_val)), ((_raddr) + (_off)))
+/*
  * IOC Mailbox structures
  */
 struct bfa_mbox_cmd_s {
@@ -166,7 +164,7 @@
 	u32	msg[BFI_IOC_MSGSZ];
 };
 
-/**
+/*
  * IOC mailbox module
  */
 typedef void (*bfa_ioc_mbox_mcfunc_t)(void *cbarg, struct bfi_mbmsg_s *m);
@@ -179,7 +177,7 @@
 	} mbhdlr[BFI_MC_MAX];
 };
 
-/**
+/*
  * IOC callback function interfaces
  */
 typedef void (*bfa_ioc_enable_cbfn_t)(void *bfa, enum bfa_status status);
@@ -193,7 +191,7 @@
 	bfa_ioc_reset_cbfn_t	reset_cbfn;
 };
 
-/**
+/*
  * Heartbeat failure notification queue element.
  */
 struct bfa_ioc_hbfail_notify_s {
@@ -202,7 +200,7 @@
 	void			*cbarg;
 };
 
-/**
+/*
  * Initialize a heartbeat failure notification structure
  */
 #define bfa_ioc_hbfail_init(__notify, __cbfn, __cbarg) do {	\
@@ -249,7 +247,7 @@
 };
 
 struct bfa_ioc_hwif_s {
-	bfa_status_t (*ioc_pll_init) (bfa_os_addr_t rb, bfa_boolean_t fcmode);
+	bfa_status_t (*ioc_pll_init) (void __iomem *rb, bfa_boolean_t fcmode);
 	bfa_boolean_t	(*ioc_firmware_lock)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_firmware_unlock)	(struct bfa_ioc_s *ioc);
 	void		(*ioc_reg_init)	(struct bfa_ioc_s *ioc);
@@ -267,7 +265,7 @@
 #define bfa_ioc_fetch_stats(__ioc, __stats) \
 		(((__stats)->drv_stats) = (__ioc)->stats)
 #define bfa_ioc_clr_stats(__ioc)	\
-		bfa_os_memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
+		memset(&(__ioc)->stats, 0, sizeof((__ioc)->stats))
 #define bfa_ioc_maxfrsize(__ioc)	((__ioc)->attr->maxfrsize)
 #define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
@@ -287,7 +285,7 @@
 #define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)	(off % BFI_FLASH_CHUNK_SZ_WORDS)
 #define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
 
-/**
+/*
  * IOC mailbox interface
  */
 void bfa_ioc_mbox_queue(struct bfa_ioc_s *ioc, struct bfa_mbox_cmd_s *cmd);
@@ -299,7 +297,7 @@
 void bfa_ioc_mbox_regisr(struct bfa_ioc_s *ioc, enum bfi_mclass mc,
 		bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg);
 
-/**
+/*
  * IOC interfaces
  */
 
@@ -308,9 +306,9 @@
 			   (__ioc)->fcmode))
 
 bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
-bfa_status_t bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
-bfa_boolean_t bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb);
-bfa_status_t bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode);
+bfa_status_t bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
+bfa_boolean_t bfa_ioc_ct_pll_init_complete(void __iomem *rb);
+bfa_status_t bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode);
 
 #define	bfa_ioc_isr_mode_set(__ioc, __msix)			\
 			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
@@ -370,8 +368,8 @@
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
 void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
 	struct bfa_ioc_hbfail_notify_s *notify);
-bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
-void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
+bfa_boolean_t bfa_ioc_sem_get(void __iomem *sem_reg);
+void bfa_ioc_sem_release(void __iomem *sem_reg);
 void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
 			struct bfi_ioc_image_hdr_s *fwhdr);
@@ -441,7 +439,7 @@
 	}
 }
 
-/**
+/*
  * CNA TRCMOD declaration
  */
 /*
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
index d7ac864..9099450 100644
--- a/drivers/scsi/bfa/bfa_ioc_cb.c
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -34,7 +34,7 @@
 
 struct bfa_ioc_hwif_s hwif_cb;
 
-/**
+/*
  * Called from bfa_ioc_attach() to map asic specific calls.
  */
 void
@@ -52,7 +52,7 @@
 	ioc->ioc_hwif = &hwif_cb;
 }
 
-/**
+/*
  * Return true if firmware of current driver matches the running firmware.
  */
 static bfa_boolean_t
@@ -66,17 +66,17 @@
 {
 }
 
-/**
+/*
  * Notify other functions on HB failure.
  */
 static void
 bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
 {
-	bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
-	bfa_reg_read(ioc->ioc_regs.err_set);
+	writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+	readl(ioc->ioc_regs.err_set);
 }
 
-/**
+/*
  * Host to LPU mailbox message addresses
  */
 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -84,7 +84,7 @@
 	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
@@ -96,7 +96,7 @@
 static void
 bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
 {
-	bfa_os_addr_t	rb;
+	void __iomem *rb;
 	int		pcifn = bfa_ioc_pcifn(ioc);
 
 	rb = bfa_ioc_bar0(ioc);
@@ -113,7 +113,7 @@
 		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
 	}
 
-	/**
+	/*
 	 * Host <-> LPU mailbox command/status registers
 	 */
 	ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
@@ -133,7 +133,7 @@
 	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 
-	/**
+	/*
 	 * sram memory access
 	 */
 	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -145,14 +145,14 @@
 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
-/**
+/*
  * Initialize IOC to port mapping.
  */
 
 static void
 bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
 {
-	/**
+	/*
 	 * For crossbow, port id is same as pci function.
 	 */
 	ioc->port_id = bfa_ioc_pcifn(ioc);
@@ -160,7 +160,7 @@
 	bfa_trc(ioc, ioc->port_id);
 }
 
-/**
+/*
  * Set interrupt mode for a function: INTX or MSIX
  */
 static void
@@ -168,7 +168,7 @@
 {
 }
 
-/**
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
@@ -180,14 +180,14 @@
 	 * before we clear it. If it is not locked, writing 1
 	 * will lock it instead of clearing it.
 	 */
-	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	readl(ioc->ioc_regs.ioc_sem_reg);
 	bfa_ioc_hw_sem_release(ioc);
 }
 
 
 
 bfa_status_t
-bfa_ioc_cb_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_cb_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
 {
 	u32	pll_sclk, pll_fclk;
 
@@ -199,38 +199,32 @@
 		__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
 		__APP_PLL_400_JITLMT0_1(3U) |
 		__APP_PLL_400_CNTLMT0_1(3U);
-	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
-	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-			  __APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-			  __APP_PLL_212_BYPASS |
-			  __APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-			  __APP_PLL_400_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-			  __APP_PLL_400_BYPASS |
-			  __APP_PLL_400_LOGIC_SOFT_RESET);
-	bfa_os_udelay(2);
-	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-			  __APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-			  __APP_PLL_400_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_212_CTL_REG,
-			  pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_400_CTL_REG,
-			  pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
-	bfa_os_udelay(2000);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + APP_PLL_212_CTL_REG), pll_sclk);
-	bfa_reg_write((rb + APP_PLL_400_CTL_REG), pll_fclk);
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+	writel(__APP_PLL_212_BYPASS | __APP_PLL_212_LOGIC_SOFT_RESET,
+			rb + APP_PLL_212_CTL_REG);
+	writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+	writel(__APP_PLL_400_BYPASS | __APP_PLL_400_LOGIC_SOFT_RESET,
+			rb + APP_PLL_400_CTL_REG);
+	udelay(2);
+	writel(__APP_PLL_212_LOGIC_SOFT_RESET, rb + APP_PLL_212_CTL_REG);
+	writel(__APP_PLL_400_LOGIC_SOFT_RESET, rb + APP_PLL_400_CTL_REG);
+	writel(pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET,
+			rb + APP_PLL_212_CTL_REG);
+	writel(pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET,
+			rb + APP_PLL_400_CTL_REG);
+	udelay(2000);
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(pll_sclk, (rb + APP_PLL_212_CTL_REG));
+	writel(pll_fclk, (rb + APP_PLL_400_CTL_REG));
 
 	return BFA_STATUS_OK;
 }
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
index f21b82c..115730c 100644
--- a/drivers/scsi/bfa/bfa_ioc_ct.c
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -34,7 +34,7 @@
 
 struct bfa_ioc_hwif_s hwif_ct;
 
-/**
+/*
  * Called from bfa_ioc_attach() to map asic specific calls.
  */
 void
@@ -52,7 +52,7 @@
 	ioc->ioc_hwif = &hwif_ct;
 }
 
-/**
+/*
  * Return true if firmware of current driver matches the running firmware.
  */
 static bfa_boolean_t
@@ -62,13 +62,13 @@
 	u32 usecnt;
 	struct bfi_ioc_image_hdr_s fwhdr;
 
-	/**
+	/*
 	 * Firmware match check is relevant only for CNA.
 	 */
 	if (!ioc->cna)
 		return BFA_TRUE;
 
-	/**
+	/*
 	 * If bios boot (flash based) -- do not increment usage count
 	 */
 	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
@@ -76,27 +76,27 @@
 		return BFA_TRUE;
 
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 
-	/**
+	/*
 	 * If usage count is 0, always return TRUE.
 	 */
 	if (usecnt == 0) {
-		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+		writel(1, ioc->ioc_regs.ioc_usage_reg);
 		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 		bfa_trc(ioc, usecnt);
 		return BFA_TRUE;
 	}
 
-	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+	ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
 	bfa_trc(ioc, ioc_fwstate);
 
-	/**
+	/*
 	 * Use count cannot be non-zero while the chip is in uninitialized state.
 	 */
 	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
 
-	/**
+	/*
 	 * Check if another driver with a different firmware is active
 	 */
 	bfa_ioc_fwver_get(ioc, &fwhdr);
@@ -106,11 +106,11 @@
 		return BFA_FALSE;
 	}
 
-	/**
+	/*
 	 * Same firmware version. Increment the reference count.
 	 */
 	usecnt++;
-	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	bfa_trc(ioc, usecnt);
 	return BFA_TRUE;
@@ -121,50 +121,50 @@
 {
 	u32 usecnt;
 
-	/**
+	/*
 	 * Firmware lock is relevant only for CNA.
 	 */
 	if (!ioc->cna)
 		return;
 
-	/**
+	/*
 	 * If bios boot (flash based) -- do not decrement usage count
 	 */
 	if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
 						BFA_IOC_FWIMG_MINSZ)
 		return;
 
-	/**
+	/*
 	 * decrement usage count
 	 */
 	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+	usecnt = readl(ioc->ioc_regs.ioc_usage_reg);
 	bfa_assert(usecnt > 0);
 
 	usecnt--;
-	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+	writel(usecnt, ioc->ioc_regs.ioc_usage_reg);
 	bfa_trc(ioc, usecnt);
 
 	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 }
 
-/**
+/*
  * Notify other functions on HB failure.
  */
 static void
 bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
 {
 	if (ioc->cna) {
-		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+		writel(__FW_INIT_HALT_P, ioc->ioc_regs.ll_halt);
 		/* Wait for halt to take effect */
-		bfa_reg_read(ioc->ioc_regs.ll_halt);
+		readl(ioc->ioc_regs.ll_halt);
 	} else {
-		bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
-		bfa_reg_read(ioc->ioc_regs.err_set);
+		writel(__PSS_ERR_STATUS_SET, ioc->ioc_regs.err_set);
+		readl(ioc->ioc_regs.err_set);
 	}
 }
 
-/**
+/*
  * Host to LPU mailbox message addresses
  */
 static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
@@ -174,7 +174,7 @@
 	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers - port 0
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
@@ -184,7 +184,7 @@
 	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
 };
 
-/**
+/*
  * Host <-> LPU mailbox command/status registers - port 1
  */
 static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
@@ -197,7 +197,7 @@
 static void
 bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
 {
-	bfa_os_addr_t	rb;
+	void __iomem *rb;
 	int		pcifn = bfa_ioc_pcifn(ioc);
 
 	rb = bfa_ioc_bar0(ioc);
@@ -236,7 +236,7 @@
 	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
 	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
 
-	/**
+	/*
 	 * sram memory access
 	 */
 	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
@@ -248,7 +248,7 @@
 	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
 }
 
-/**
+/*
  * Initialize IOC to port mapping.
  */
 
@@ -256,13 +256,13 @@
 static void
 bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
 {
-	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
 	u32	r32;
 
-	/**
+	/*
 	 * For catapult, base port id on personality register and IOC type
 	 */
-	r32 = bfa_reg_read(rb + FNC_PERS_REG);
+	r32 = readl(rb + FNC_PERS_REG);
 	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
 	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
 
@@ -270,22 +270,22 @@
 	bfa_trc(ioc, ioc->port_id);
 }
 
-/**
+/*
  * Set interrupt mode for a function: INTX or MSIX
  */
 static void
 bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
 {
-	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	void __iomem *rb = ioc->pcidev.pci_bar_kva;
 	u32	r32, mode;
 
-	r32 = bfa_reg_read(rb + FNC_PERS_REG);
+	r32 = readl(rb + FNC_PERS_REG);
 	bfa_trc(ioc, r32);
 
 	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
 		__F0_INTX_STATUS;
 
-	/**
+	/*
 	 * If already in desired mode, do not change anything
 	 */
 	if (!msix && mode)
@@ -300,10 +300,10 @@
 	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
 	bfa_trc(ioc, r32);
 
-	bfa_reg_write(rb + FNC_PERS_REG, r32);
+	writel(r32, rb + FNC_PERS_REG);
 }
 
-/**
+/*
  * Cleanup hw semaphore and usecnt registers
  */
 static void
@@ -312,7 +312,7 @@
 
 	if (ioc->cna) {
 		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
-		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+		writel(0, ioc->ioc_regs.ioc_usage_reg);
 		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
 	}
 
@@ -321,7 +321,7 @@
 	 * before we clear it. If it is not locked, writing 1
 	 * will lock it instead of clearing it.
 	 */
-	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	readl(ioc->ioc_regs.ioc_sem_reg);
 	bfa_ioc_hw_sem_release(ioc);
 }
 
@@ -331,17 +331,17 @@
  * Check the firmware state to know if pll_init has been completed already
  */
 bfa_boolean_t
-bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
+bfa_ioc_ct_pll_init_complete(void __iomem *rb)
 {
-	if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
-	  (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
+	if ((readl(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
+	  (readl(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
 		return BFA_TRUE;
 
 	return BFA_FALSE;
 }
 
 bfa_status_t
-bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
+bfa_ioc_ct_pll_init(void __iomem *rb, bfa_boolean_t fcmode)
 {
 	u32	pll_sclk, pll_fclk, r32;
 
@@ -354,56 +354,51 @@
 		__APP_PLL_425_JITLMT0_1(3U) |
 		__APP_PLL_425_CNTLMT0_1(1U);
 	if (fcmode) {
-		bfa_reg_write((rb + OP_MODE), 0);
-		bfa_reg_write((rb + ETH_MAC_SER_REG),
-				__APP_EMS_CMLCKSEL |
-				__APP_EMS_REFCKBUFEN2 |
-				__APP_EMS_CHANNEL_SEL);
+		writel(0, (rb + OP_MODE));
+		writel(__APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2 |
+			 __APP_EMS_CHANNEL_SEL, (rb + ETH_MAC_SER_REG));
 	} else {
-		bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
-		bfa_reg_write((rb + ETH_MAC_SER_REG),
-				__APP_EMS_REFCKBUFEN1);
+		writel(__GLOBAL_FCOE_MODE, (rb + OP_MODE));
+		writel(__APP_EMS_REFCKBUFEN1, (rb + ETH_MAC_SER_REG));
 	}
-	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
-	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-		__APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-		__APP_PLL_425_LOGIC_SOFT_RESET);
-	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
-	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
-	bfa_reg_read(rb + HOSTFN0_INT_MSK);
-	bfa_os_udelay(2000);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-	bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
-		__APP_PLL_312_ENABLE);
-	bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
-		__APP_PLL_425_ENABLE);
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC0_STATE_REG));
+	writel(BFI_IOC_UNINIT, (rb + BFA_IOC1_STATE_REG));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN0_INT_MSK));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_MSK));
+	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET,
+			rb + APP_PLL_312_CTL_REG);
+	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET,
+			rb + APP_PLL_425_CTL_REG);
+	writel(pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE,
+			rb + APP_PLL_312_CTL_REG);
+	writel(pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE,
+			rb + APP_PLL_425_CTL_REG);
+	readl(rb + HOSTFN0_INT_MSK);
+	udelay(2000);
+	writel(0xffffffffU, (rb + HOSTFN0_INT_STATUS));
+	writel(0xffffffffU, (rb + HOSTFN1_INT_STATUS));
+	writel(pll_sclk | __APP_PLL_312_ENABLE, rb + APP_PLL_312_CTL_REG);
+	writel(pll_fclk | __APP_PLL_425_ENABLE, rb + APP_PLL_425_CTL_REG);
 	if (!fcmode) {
-		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
-		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
+		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P0));
+		writel(__PMM_1T_RESET_P, (rb + PMM_1T_RESET_REG_P1));
 	}
-	r32 = bfa_reg_read((rb + PSS_CTL_REG));
+	r32 = readl((rb + PSS_CTL_REG));
 	r32 &= ~__PSS_LMEM_RESET;
-	bfa_reg_write((rb + PSS_CTL_REG), r32);
-	bfa_os_udelay(1000);
+	writel(r32, (rb + PSS_CTL_REG));
+	udelay(1000);
 	if (!fcmode) {
-		bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
-		bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
+		writel(0, (rb + PMM_1T_RESET_REG_P0));
+		writel(0, (rb + PMM_1T_RESET_REG_P1));
 	}
 
-	bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
-	bfa_os_udelay(1000);
-	r32 = bfa_reg_read((rb + MBIST_STAT_REG));
-	bfa_reg_write((rb + MBIST_CTL_REG), 0);
+	writel(__EDRAM_BISTR_START, (rb + MBIST_CTL_REG));
+	udelay(1000);
+	r32 = readl((rb + MBIST_STAT_REG));
+	writel(0, (rb + MBIST_CTL_REG));
 	return BFA_STATUS_OK;
 }
diff --git a/drivers/scsi/bfa/bfa_modules.h b/drivers/scsi/bfa/bfa_modules.h
index 2cd5273..15407ab 100644
--- a/drivers/scsi/bfa/bfa_modules.h
+++ b/drivers/scsi/bfa/bfa_modules.h
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_modules.h BFA modules
  */
 
@@ -52,7 +52,7 @@
 };
 
 
-/**
+/*
  * Macro to define a new BFA module
  */
 #define BFA_MODULE(__mod)						\
@@ -80,7 +80,7 @@
 
 #define BFA_CACHELINE_SZ	(256)
 
-/**
+/*
  * Structure used to interact between different BFA sub modules
  *
  * Each sub module needs to implement only the entry points relevant to it (and
diff --git a/drivers/scsi/bfa/bfa_os_inc.h b/drivers/scsi/bfa/bfa_os_inc.h
index 788a250..65df62e 100644
--- a/drivers/scsi/bfa/bfa_os_inc.h
+++ b/drivers/scsi/bfa/bfa_os_inc.h
@@ -15,10 +15,6 @@
  * General Public License for more details.
  */
 
-/**
- * Contains declarations all OS Specific files needed for BFA layer
- */
-
 #ifndef __BFA_OS_INC_H__
 #define __BFA_OS_INC_H__
 
@@ -44,11 +40,6 @@
 #define __BIGENDIAN
 #endif
 
-static inline u64 bfa_os_get_clock(void)
-{
-	return jiffies;
-}
-
 static inline u64 bfa_os_get_log_time(void)
 {
 	u64 system_time = 0;
@@ -63,13 +54,6 @@
 #define bfa_io_lat_clock_res_div HZ
 #define bfa_io_lat_clock_res_mul 1000
 
-#define BFA_ASSERT(p) do {						\
-	if (!(p)) {      \
-		printk(KERN_ERR "assert(%s) failed at %s:%d\n",		\
-		#p, __FILE__, __LINE__);      \
-	}								\
-} while (0)
-
 #define BFA_LOG(level, bfad, mask, fmt, arg...)				\
 do {									\
 	if (((mask) == 4) || (level[1] <= '4'))				\
@@ -81,22 +65,6 @@
 	((_x) & 0x00ff00) |			\
 	(((_x) & 0xff0000) >> 16))
 
-#define bfa_swap_8b(_x)					\
-	((((_x) & 0xff00000000000000ull) >> 56)		\
-	 | (((_x) & 0x00ff000000000000ull) >> 40)	\
-	 | (((_x) & 0x0000ff0000000000ull) >> 24)	\
-	 | (((_x) & 0x000000ff00000000ull) >> 8)	\
-	 | (((_x) & 0x00000000ff000000ull) << 8)	\
-	 | (((_x) & 0x0000000000ff0000ull) << 24)	\
-	 | (((_x) & 0x000000000000ff00ull) << 40)	\
-	 | (((_x) & 0x00000000000000ffull) << 56))
-
-#define bfa_os_swap32(_x)			\
-	((((_x) & 0xff) << 24)		|	\
-	(((_x) & 0x0000ff00) << 8)	|	\
-	(((_x) & 0x00ff0000) >> 8)	|	\
-	(((_x) & 0xff000000) >> 24))
-
 #define bfa_os_swap_sgaddr(_x)  ((u64)(                                 \
 	(((u64)(_x) & (u64)0x00000000000000ffull) << 32)        |       \
 	(((u64)(_x) & (u64)0x000000000000ff00ull) << 32)        |       \
@@ -108,59 +76,27 @@
 	(((u64)(_x) & (u64)0xff00000000000000ull) >> 32)))
 
 #ifndef __BIGENDIAN
-#define bfa_os_htons(_x) ((u16)((((_x) & 0xff00) >> 8) | \
-				 (((_x) & 0x00ff) << 8)))
-#define bfa_os_htonl(_x)	bfa_os_swap32(_x)
-#define bfa_os_htonll(_x)	bfa_swap_8b(_x)
-#define bfa_os_hton3b(_x)	bfa_swap_3b(_x)
-#define bfa_os_wtole(_x)   (_x)
+#define bfa_os_hton3b(_x)  bfa_swap_3b(_x)
 #define bfa_os_sgaddr(_x)  (_x)
-
 #else
-
-#define bfa_os_htons(_x)   (_x)
-#define bfa_os_htonl(_x)   (_x)
 #define bfa_os_hton3b(_x)  (_x)
-#define bfa_os_htonll(_x)  (_x)
-#define bfa_os_wtole(_x)   bfa_os_swap32(_x)
 #define bfa_os_sgaddr(_x)  bfa_os_swap_sgaddr(_x)
-
 #endif
 
-#define bfa_os_ntohs(_x)   bfa_os_htons(_x)
-#define bfa_os_ntohl(_x)   bfa_os_htonl(_x)
-#define bfa_os_ntohll(_x)  bfa_os_htonll(_x)
 #define bfa_os_ntoh3b(_x)  bfa_os_hton3b(_x)
-
 #define bfa_os_u32(__pa64) ((__pa64) >> 32)
 
-#define bfa_os_memset	memset
-#define bfa_os_memcpy	memcpy
-#define bfa_os_udelay	udelay
-#define bfa_os_vsprintf vsprintf
-#define bfa_os_snprintf snprintf
-
-#define bfa_os_assign(__t, __s) __t = __s
-#define bfa_os_addr_t void __iomem *
-
-#define bfa_os_reg_read(_raddr) readl(_raddr)
-#define bfa_os_reg_write(_raddr, _val) writel((_val), (_raddr))
-#define bfa_os_mem_read(_raddr, _off)					\
-	bfa_os_swap32(readl(((_raddr) + (_off))))
-#define bfa_os_mem_write(_raddr, _off, _val)				\
-	writel(bfa_os_swap32((_val)), ((_raddr) + (_off)))
-
-#define BFA_TRC_TS(_trcm)						\
-			({						\
-				struct timeval tv;			\
-									\
-				do_gettimeofday(&tv);      \
-				(tv.tv_sec*1000000+tv.tv_usec);      \
-			 })
+#define BFA_TRC_TS(_trcm)				\
+	({						\
+		struct timeval tv;			\
+							\
+		do_gettimeofday(&tv);			\
+		(tv.tv_sec*1000000+tv.tv_usec);		\
+	 })
 
 #define boolean_t int
 
-/**
+/*
  * For current time stamp, OS API will fill-in
  */
 struct bfa_timeval_s {
diff --git a/drivers/scsi/bfa/bfa_port.c b/drivers/scsi/bfa/bfa_port.c
index b6d170a..fff9622 100644
--- a/drivers/scsi/bfa/bfa_port.c
+++ b/drivers/scsi/bfa/bfa_port.c
@@ -37,16 +37,16 @@
 		t0 = dip[i];
 		t1 = dip[i + 1];
 #ifdef __BIGENDIAN
-		dip[i] = bfa_os_ntohl(t0);
-		dip[i + 1] = bfa_os_ntohl(t1);
+		dip[i] = be32_to_cpu(t0);
+		dip[i + 1] = be32_to_cpu(t1);
 #else
-		dip[i] = bfa_os_ntohl(t1);
-		dip[i + 1] = bfa_os_ntohl(t0);
+		dip[i] = be32_to_cpu(t1);
+		dip[i + 1] = be32_to_cpu(t0);
 #endif
 	}
 }
 
-/**
+/*
  * bfa_port_enable_isr()
  *
  *
@@ -63,7 +63,7 @@
 	port->endis_cbfn(port->endis_cbarg, status);
 }
 
-/**
+/*
  * bfa_port_disable_isr()
  *
  *
@@ -80,7 +80,7 @@
 	port->endis_cbfn(port->endis_cbarg, status);
 }
 
-/**
+/*
  * bfa_port_get_stats_isr()
  *
  *
@@ -112,7 +112,7 @@
 	}
 }
 
-/**
+/*
  * bfa_port_clear_stats_isr()
  *
  *
@@ -129,7 +129,7 @@
 	port->stats_status = status;
 	port->stats_busy   = BFA_FALSE;
 
-	/**
+	/*
 	* re-initialize time stamp for stats reset
 	*/
 	bfa_os_gettimeofday(&tv);
@@ -141,7 +141,7 @@
 	}
 }
 
-/**
+/*
  * bfa_port_isr()
  *
  *
@@ -189,7 +189,7 @@
 	}
 }
 
-/**
+/*
  * bfa_port_meminfo()
  *
  *
@@ -203,7 +203,7 @@
 	return BFA_ROUNDUP(sizeof(union bfa_port_stats_u), BFA_DMA_ALIGN_SZ);
 }
 
-/**
+/*
  * bfa_port_mem_claim()
  *
  *
@@ -220,7 +220,7 @@
 	port->stats_dma.pa  = dma_pa;
 }
 
-/**
+/*
  * bfa_port_enable()
  *
  *   Send the Port enable request to the f/w
@@ -264,7 +264,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_disable()
  *
  *   Send the Port disable request to the f/w
@@ -308,7 +308,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_get_stats()
  *
  *   Send the request to the f/w to fetch Port statistics.
@@ -348,7 +348,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_clear_stats()
  *
  *
@@ -385,7 +385,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * bfa_port_hbfail()
  *
  *
@@ -415,7 +415,7 @@
 	}
 }
 
-/**
+/*
  * bfa_port_attach()
  *
  *
@@ -449,7 +449,7 @@
 	bfa_ioc_hbfail_init(&port->hbfail, bfa_port_hbfail, port);
 	bfa_ioc_hbfail_register(port->ioc, &port->hbfail);
 
-	/**
+	/*
 	 * initialize time stamp for stats reset
 	 */
 	bfa_os_gettimeofday(&tv);
@@ -458,7 +458,7 @@
 	bfa_trc(port, 0);
 }
 
-/**
+/*
  * bfa_port_detach()
  *
  *
diff --git a/drivers/scsi/bfa/bfa_svc.c b/drivers/scsi/bfa/bfa_svc.c
index aa1dc74..c768143 100644
--- a/drivers/scsi/bfa/bfa_svc.c
+++ b/drivers/scsi/bfa/bfa_svc.c
@@ -29,7 +29,7 @@
 BFA_MODULE(rport);
 BFA_MODULE(uf);
 
-/**
+/*
  * LPS related definitions
  */
 #define BFA_LPS_MIN_LPORTS      (1)
@@ -41,7 +41,7 @@
 #define BFA_LPS_MAX_VPORTS_SUPP_CB  255
 #define BFA_LPS_MAX_VPORTS_SUPP_CT  190
 
-/**
+/*
  *  lps_pvt BFA LPS private functions
  */
 
@@ -55,7 +55,7 @@
 	BFA_LPS_SM_RX_CVL	= 7,	/* Rx clear virtual link	*/
 };
 
-/**
+/*
  * FC PORT related definitions
  */
 /*
@@ -67,7 +67,7 @@
 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
 
 
-/**
+/*
  * BFA port state machine events
  */
 enum bfa_fcport_sm_event {
@@ -82,7 +82,7 @@
 	BFA_FCPORT_SM_HWFAIL	= 9,	/*  IOC h/w failure		*/
 };
 
-/**
+/*
  * BFA port link notification state machine events
  */
 
@@ -92,7 +92,7 @@
 	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/*  done notification	*/
 };
 
-/**
+/*
  * RPORT related definitions
  */
 #define bfa_rport_offline_cb(__rp) do {					\
@@ -126,7 +126,7 @@
 	BFA_RPORT_SM_QRESUME	= 9,	/*  space in requeue queue	*/
 };
 
-/**
+/*
  * forward declarations for FCXP related functions
  */
 static void	__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
@@ -138,7 +138,7 @@
 static void	bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
 				struct bfi_fcxp_send_req_s *send_req);
 
-/**
+/*
  * forward declarations for LPS functions
  */
 static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
@@ -163,7 +163,7 @@
 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
 static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 
-/**
+/*
  * forward declaration for LPS state machine
  */
 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -175,7 +175,7 @@
 static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
 					event);
 
-/**
+/*
  * forward declaration for FC Port functions
  */
 static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
@@ -193,7 +193,7 @@
 static void bfa_fcport_stats_clr_timeout(void *cbarg);
 static void bfa_trunk_iocdisable(struct bfa_s *bfa);
 
-/**
+/*
  * forward declaration for FC PORT state machine
  */
 static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
@@ -252,7 +252,7 @@
 };
 
 
-/**
+/*
  * forward declaration for RPORT related functions
  */
 static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
@@ -265,7 +265,7 @@
 static void		__bfa_cb_rport_offline(void *cbarg,
 						bfa_boolean_t complete);
 
-/**
+/*
  * forward declaration for RPORT state machine
  */
 static void     bfa_rport_sm_uninit(struct bfa_rport_s *rp,
@@ -295,7 +295,7 @@
 static void     bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
 					enum bfa_rport_event event);
 
-/**
+/*
  * PLOG related definitions
  */
 static int
@@ -330,7 +330,7 @@
 
 	pl_recp = &(plog->plog_recs[tail]);
 
-	bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
+	memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
 
 	pl_recp->tv = bfa_os_get_log_time();
 	BFA_PL_LOG_REC_INCR(plog->tail);
@@ -342,9 +342,9 @@
 void
 bfa_plog_init(struct bfa_plog_s *plog)
 {
-	bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
+	memset((char *)plog, 0, sizeof(struct bfa_plog_s));
 
-	bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
+	memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
 	plog->head = plog->tail = 0;
 	plog->plog_enabled = 1;
 }
@@ -357,7 +357,7 @@
 	struct bfa_plog_rec_s  lp;
 
 	if (plog->plog_enabled) {
-		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 		lp.mid = mid;
 		lp.eid = event;
 		lp.log_type = BFA_PL_LOG_TYPE_STRING;
@@ -381,15 +381,14 @@
 		num_ints = BFA_PL_INT_LOG_SZ;
 
 	if (plog->plog_enabled) {
-		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 		lp.mid = mid;
 		lp.eid = event;
 		lp.log_type = BFA_PL_LOG_TYPE_INT;
 		lp.misc = misc;
 
 		for (i = 0; i < num_ints; i++)
-			bfa_os_assign(lp.log_entry.int_log[i],
-					intarr[i]);
+			lp.log_entry.int_log[i] = intarr[i];
 
 		lp.log_num_ints = (u8) num_ints;
 
@@ -407,7 +406,7 @@
 	u32	ints[BFA_PL_INT_LOG_SZ];
 
 	if (plog->plog_enabled) {
-		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 
 		ints[0] = tmp_int[0];
 		ints[1] = tmp_int[1];
@@ -427,7 +426,7 @@
 	u32	ints[BFA_PL_INT_LOG_SZ];
 
 	if (plog->plog_enabled) {
-		bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
+		memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
 
 		ints[0] = tmp_int[0];
 		ints[1] = tmp_int[1];
@@ -462,7 +461,7 @@
 	return (bfa_boolean_t)plog->plog_enabled;
 }
 
-/**
+/*
  *  fcxp_pvt BFA FCXP private functions
  */
 
@@ -485,7 +484,7 @@
 	mod->req_pld_list_pa = dm_pa;
 	dm_kva += buf_pool_sz;
 	dm_pa += buf_pool_sz;
-	bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
+	memset(mod->req_pld_list_kva, 0, buf_pool_sz);
 
 	/*
 	 * Initialize the fcxp rsp payload list
@@ -495,7 +494,7 @@
 	mod->rsp_pld_list_pa = dm_pa;
 	dm_kva += buf_pool_sz;
 	dm_pa += buf_pool_sz;
-	bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
+	memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
 
 	bfa_meminfo_dma_virt(mi) = dm_kva;
 	bfa_meminfo_dma_phys(mi) = dm_pa;
@@ -508,7 +507,7 @@
 	struct bfa_fcxp_s *fcxp;
 
 	fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
-	bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
+	memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
 
 	INIT_LIST_HEAD(&mod->fcxp_free_q);
 	INIT_LIST_HEAD(&mod->fcxp_active_q);
@@ -559,11 +558,11 @@
 {
 	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
 
-	bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
+	memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
 	mod->bfa = bfa;
 	mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
 
-	/**
+	/*
 	 * Initialize FCXP request and response payload sizes.
 	 */
 	mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
@@ -741,20 +740,20 @@
 {
 	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
 	struct bfa_fcxp_s	*fcxp;
-	u16		fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);
+	u16		fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);
 
 	bfa_trc(bfa, fcxp_tag);
 
-	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);
+	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);
 
-	/**
+	/*
 	 * @todo f/w should not set residue to non-0 when everything
 	 *	 is received.
 	 */
 	if (fcxp_rsp->req_status == BFA_STATUS_OK)
 		fcxp_rsp->residue_len = 0;
 	else
-		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);
+		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);
 
 	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);
 
@@ -856,7 +855,7 @@
 	}
 }
 
-/**
+/*
  * Handler to resume sending fcxp when space is available in cpe queue.
  */
 static void
@@ -871,7 +870,7 @@
 	bfa_fcxp_queue(fcxp, send_req);
 }
 
-/**
+/*
  * Queue fcxp send request to firmware.
  */
 static void
@@ -885,26 +884,26 @@
 	bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
 		    bfa_lpuid(bfa));
 
-	send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
+	send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
 	if (rport) {
 		send_req->rport_fw_hndl = rport->fw_handle;
-		send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
+		send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
 		if (send_req->max_frmsz == 0)
-			send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+			send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 	} else {
 		send_req->rport_fw_hndl = 0;
-		send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
+		send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
 	}
 
-	send_req->vf_id = bfa_os_htons(reqi->vf_id);
+	send_req->vf_id = cpu_to_be16(reqi->vf_id);
 	send_req->lp_tag = reqi->lp_tag;
 	send_req->class = reqi->class;
 	send_req->rsp_timeout = rspi->rsp_timeout;
 	send_req->cts = reqi->cts;
 	send_req->fchs = reqi->fchs;
 
-	send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
-	send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
+	send_req->req_len = cpu_to_be32(reqi->req_tot_len);
+	send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
 
 	/*
 	 * setup req sgles
@@ -955,11 +954,11 @@
 	bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
 }
 
-/**
+/*
  *  hal_fcxp_api BFA FCXP API
  */
 
-/**
+/*
  * Allocate an FCXP instance to send a response or to send a request
  * that has a response. Request/response buffers are allocated by caller.
  *
@@ -1005,7 +1004,7 @@
 	return fcxp;
 }
 
-/**
+/*
  * Get the internal request buffer pointer
  *
  * @param[in]	fcxp	BFA fcxp pointer
@@ -1032,7 +1031,7 @@
 	return mod->req_pld_sz;
 }
 
-/**
+/*
  * Get the internal response buffer pointer
  *
  * @param[in]	fcxp	BFA fcxp pointer
@@ -1052,7 +1051,7 @@
 	return rspbuf;
 }
 
-/**
+/*
  *		Free the BFA FCXP
  *
  * @param[in]	fcxp			BFA fcxp pointer
@@ -1069,7 +1068,7 @@
 	bfa_fcxp_put(fcxp);
 }
 
-/**
+/*
  * Send a FCXP request
  *
  * @param[in]	fcxp	BFA fcxp pointer
@@ -1103,7 +1102,7 @@
 
 	bfa_trc(bfa, fcxp->fcxp_tag);
 
-	/**
+	/*
 	 * setup request/response info
 	 */
 	reqi->bfa_rport = rport;
@@ -1118,7 +1117,7 @@
 	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
 	fcxp->send_cbarg = cbarg;
 
-	/**
+	/*
 	 * If no room in CPE queue, wait for space in request queue
 	 */
 	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
@@ -1132,7 +1131,7 @@
 	bfa_fcxp_queue(fcxp, send_req);
 }
 
-/**
+/*
  * Abort a BFA FCXP
  *
  * @param[in]	fcxp	BFA fcxp pointer
@@ -1186,7 +1185,7 @@
 void
 bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
 {
-	/**
+	/*
 	 * If waiting for room in request queue, cancel reqq wait
 	 * and free fcxp.
 	 */
@@ -1202,7 +1201,7 @@
 
 
 
-/**
+/*
  *  hal_fcxp_public BFA FCXP public functions
  */
 
@@ -1229,11 +1228,11 @@
 }
 
 
-/**
+/*
  *  BFA LPS state machine functions
  */
 
-/**
+/*
  * Init state -- no login
  */
 static void
@@ -1285,7 +1284,7 @@
 	}
 }
 
-/**
+/*
  * login is in progress -- awaiting response from firmware
  */
 static void
@@ -1327,7 +1326,7 @@
 	}
 }
 
-/**
+/*
  * login pending - awaiting space in request queue
  */
 static void
@@ -1359,7 +1358,7 @@
 	}
 }
 
-/**
+/*
  * login complete
  */
 static void
@@ -1400,7 +1399,7 @@
 	}
 }
 
-/**
+/*
  * logout in progress - awaiting firmware response
  */
 static void
@@ -1424,7 +1423,7 @@
 	}
 }
 
-/**
+/*
  * logout pending -- awaiting space in request queue
  */
 static void
@@ -1451,11 +1450,11 @@
 
 
 
-/**
+/*
  *  lps_pvt BFA LPS private functions
  */
 
-/**
+/*
  * return memory requirement
  */
 static void
@@ -1468,7 +1467,7 @@
 		*ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
 }
 
-/**
+/*
  * bfa module attach at initialization time
  */
 static void
@@ -1479,7 +1478,7 @@
 	struct bfa_lps_s	*lps;
 	int			i;
 
-	bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
+	memset(mod, 0, sizeof(struct bfa_lps_mod_s));
 	mod->num_lps = BFA_LPS_MAX_LPORTS;
 	if (cfg->drvcfg.min_cfg)
 		mod->num_lps = BFA_LPS_MIN_LPORTS;
@@ -1516,7 +1515,7 @@
 {
 }
 
-/**
+/*
  * IOC in disabled state -- consider all lps offline
  */
 static void
@@ -1532,7 +1531,7 @@
 	}
 }
 
-/**
+/*
  * Firmware login response
  */
 static void
@@ -1550,7 +1549,7 @@
 		lps->fport	= rsp->f_port;
 		lps->npiv_en	= rsp->npiv_en;
 		lps->lp_pid	= rsp->lp_pid;
-		lps->pr_bbcred	= bfa_os_ntohs(rsp->bb_credit);
+		lps->pr_bbcred	= be16_to_cpu(rsp->bb_credit);
 		lps->pr_pwwn	= rsp->port_name;
 		lps->pr_nwwn	= rsp->node_name;
 		lps->auth_req	= rsp->auth_req;
@@ -1579,7 +1578,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
-/**
+/*
  * Firmware logout response
  */
 static void
@@ -1594,7 +1593,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
 }
 
-/**
+/*
  * Firmware received a Clear virtual link request (for FCoE)
  */
 static void
@@ -1608,7 +1607,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
 }
 
-/**
+/*
  * Space is available in request queue, resume queueing request to firmware.
  */
 static void
@@ -1619,7 +1618,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
 }
 
-/**
+/*
  * lps is freed -- triggered by vport delete
  */
 static void
@@ -1632,7 +1631,7 @@
 	list_add_tail(&lps->qe, &mod->lps_free_q);
 }
 
-/**
+/*
  * send login request to firmware
  */
 static void
@@ -1648,7 +1647,7 @@
 
 	m->lp_tag	= lps->lp_tag;
 	m->alpa		= lps->alpa;
-	m->pdu_size	= bfa_os_htons(lps->pdusz);
+	m->pdu_size	= cpu_to_be16(lps->pdusz);
 	m->pwwn		= lps->pwwn;
 	m->nwwn		= lps->nwwn;
 	m->fdisc	= lps->fdisc;
@@ -1657,7 +1656,7 @@
 	bfa_reqq_produce(lps->bfa, lps->reqq);
 }
 
-/**
+/*
  * send logout request to firmware
  */
 static void
@@ -1676,7 +1675,7 @@
 	bfa_reqq_produce(lps->bfa, lps->reqq);
 }
 
-/**
+/*
  * Indirect login completion handler for non-fcs
  */
 static void
@@ -1693,7 +1692,7 @@
 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
 }
 
-/**
+/*
  * Login completion handler -- direct call for fcs, queue for others
  */
 static void
@@ -1711,7 +1710,7 @@
 		bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
 }
 
-/**
+/*
  * Indirect logout completion handler for non-fcs
  */
 static void
@@ -1726,7 +1725,7 @@
 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Logout completion handler -- direct call for fcs, queue for others
  */
 static void
@@ -1741,7 +1740,7 @@
 		bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Clear virtual link completion handler for non-fcs
  */
 static void
@@ -1757,7 +1756,7 @@
 		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
 }
 
-/**
+/*
  * Received Clear virtual link event --direct call for fcs,
  * queue for others
  */
@@ -1777,7 +1776,7 @@
 
 
 
-/**
+/*
  *  lps_public BFA LPS public functions
  */
 
@@ -1790,7 +1789,7 @@
 		return BFA_LPS_MAX_VPORTS_SUPP_CB;
 }
 
-/**
+/*
  * Allocate a lport service tag.
  */
 struct bfa_lps_s  *
@@ -1810,7 +1809,7 @@
 	return lps;
 }
 
-/**
+/*
  * Free lport service tag. This can be called anytime after an alloc.
  * No need to wait for any pending login/logout completions.
  */
@@ -1820,7 +1819,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
 }
 
-/**
+/*
  * Initiate a lport login.
  */
 void
@@ -1837,7 +1836,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
-/**
+/*
  * Initiate a lport fdisc login.
  */
 void
@@ -1854,7 +1853,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
 }
 
-/**
+/*
  * Initiate a lport logout (flogi).
  */
 void
@@ -1863,7 +1862,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
-/**
+/*
  * Initiate a lport FDISC logout.
  */
 void
@@ -1872,7 +1871,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
 }
 
-/**
+/*
  * Discard a pending login request -- should be called only for
  * link down handling.
  */
@@ -1882,7 +1881,7 @@
 	bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
 }
 
-/**
+/*
  * Return lport services tag
  */
 u8
@@ -1891,7 +1890,7 @@
 	return lps->lp_tag;
 }
 
-/**
+/*
  * Return lport services tag given the pid
  */
 u8
@@ -1910,7 +1909,7 @@
 	return 0;
 }
 
-/**
+/*
  * return if fabric login indicates support for NPIV
  */
 bfa_boolean_t
@@ -1919,7 +1918,7 @@
 	return lps->npiv_en;
 }
 
-/**
+/*
  * Return TRUE if attached to F-Port, else return FALSE
  */
 bfa_boolean_t
@@ -1928,7 +1927,7 @@
 	return lps->fport;
 }
 
-/**
+/*
  * Return TRUE if attached to a Brocade Fabric
  */
 bfa_boolean_t
@@ -1936,7 +1935,7 @@
 {
 	return lps->brcd_switch;
 }
-/**
+/*
  * return TRUE if authentication is required
  */
 bfa_boolean_t
@@ -1951,7 +1950,7 @@
 	return lps->ext_status;
 }
 
-/**
+/*
  * return port id assigned to the lport
  */
 u32
@@ -1960,7 +1959,7 @@
 	return lps->lp_pid;
 }
 
-/**
+/*
  * return port id assigned to the base lport
  */
 u32
@@ -1971,7 +1970,7 @@
 	return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
 }
 
-/**
+/*
  * Return bb_credit assigned in FLOGI response
  */
 u16
@@ -1980,7 +1979,7 @@
 	return lps->pr_bbcred;
 }
 
-/**
+/*
  * Return peer port name
  */
 wwn_t
@@ -1989,7 +1988,7 @@
 	return lps->pr_pwwn;
 }
 
-/**
+/*
  * Return peer node name
  */
 wwn_t
@@ -1998,7 +1997,7 @@
 	return lps->pr_nwwn;
 }
 
-/**
+/*
  * return reason code if login request is rejected
  */
 u8
@@ -2007,7 +2006,7 @@
 	return lps->lsrjt_rsn;
 }
 
-/**
+/*
  * return explanation code if login request is rejected
  */
 u8
@@ -2016,7 +2015,7 @@
 	return lps->lsrjt_expl;
 }
 
-/**
+/*
  * Return fpma/spma MAC for lport
  */
 mac_t
@@ -2025,7 +2024,7 @@
 	return lps->lp_mac;
 }
 
-/**
+/*
  * LPS firmware message class handler.
  */
 void
@@ -2055,7 +2054,7 @@
 	}
 }
 
-/**
+/*
  * FC PORT state machine functions
  */
 static void
@@ -2066,7 +2065,7 @@
 
 	switch (event) {
 	case BFA_FCPORT_SM_START:
-		/**
+		/*
 		 * Start event after IOC is configured and BFA is started.
 		 */
 		if (bfa_fcport_send_enable(fcport)) {
@@ -2080,7 +2079,7 @@
 		break;
 
 	case BFA_FCPORT_SM_ENABLE:
-		/**
+		/*
 		 * Port is persistently configured to be in enabled state. Do
 		 * not change state. Port enabling is done when START event is
 		 * received.
@@ -2088,7 +2087,7 @@
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
-		/**
+		/*
 		 * If a port is persistently configured to be disabled, the
 		 * first event will be a port disable request.
 		 */
@@ -2124,13 +2123,13 @@
 		break;
 
 	case BFA_FCPORT_SM_ENABLE:
-		/**
+		/*
 		 * Already enable is in progress.
 		 */
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
-		/**
+		/*
 		 * Just send disable request to firmware when room becomes
 		 * available in request queue.
 		 */
@@ -2145,7 +2144,7 @@
 
 	case BFA_FCPORT_SM_LINKUP:
 	case BFA_FCPORT_SM_LINKDOWN:
-		/**
+		/*
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
@@ -2184,7 +2183,7 @@
 		break;
 
 	case BFA_FCPORT_SM_ENABLE:
-		/**
+		/*
 		 * Already being enabled.
 		 */
 		break;
@@ -2257,13 +2256,13 @@
 		break;
 
 	case BFA_FCPORT_SM_LINKDOWN:
-		/**
+		/*
 		 * Possible to get link down event.
 		 */
 		break;
 
 	case BFA_FCPORT_SM_ENABLE:
-		/**
+		/*
 		 * Already enabled.
 		 */
 		break;
@@ -2306,7 +2305,7 @@
 
 	switch (event) {
 	case BFA_FCPORT_SM_ENABLE:
-		/**
+		/*
 		 * Already enabled.
 		 */
 		break;
@@ -2399,14 +2398,14 @@
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
-		/**
+		/*
 		 * Already being disabled.
 		 */
 		break;
 
 	case BFA_FCPORT_SM_LINKUP:
 	case BFA_FCPORT_SM_LINKDOWN:
-		/**
+		/*
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
@@ -2453,7 +2452,7 @@
 
 	case BFA_FCPORT_SM_LINKUP:
 	case BFA_FCPORT_SM_LINKDOWN:
-		/**
+		/*
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
@@ -2483,7 +2482,7 @@
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
-		/**
+		/*
 		 * Already being disabled.
 		 */
 		break;
@@ -2508,7 +2507,7 @@
 
 	case BFA_FCPORT_SM_LINKUP:
 	case BFA_FCPORT_SM_LINKDOWN:
-		/**
+		/*
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
@@ -2533,7 +2532,7 @@
 
 	switch (event) {
 	case BFA_FCPORT_SM_START:
-		/**
+		/*
 		 * Ignore start event for a port that is disabled.
 		 */
 		break;
@@ -2557,7 +2556,7 @@
 		break;
 
 	case BFA_FCPORT_SM_DISABLE:
-		/**
+		/*
 		 * Already disabled.
 		 */
 		break;
@@ -2587,14 +2586,14 @@
 		break;
 
 	default:
-		/**
+		/*
 		 * Ignore all other events.
 		 */
 		;
 	}
 }
 
-/**
+/*
  * Port is enabled. IOC is down/failed.
  */
 static void
@@ -2613,14 +2612,14 @@
 		break;
 
 	default:
-		/**
+		/*
 		 * Ignore all events.
 		 */
 		;
 	}
 }
 
-/**
+/*
  * Port is disabled. IOC is down/failed.
  */
 static void
@@ -2639,14 +2638,14 @@
 		break;
 
 	default:
-		/**
+		/*
 		 * Ignore all events.
 		 */
 		;
 	}
 }
 
-/**
+/*
  * Link state is down
  */
 static void
@@ -2666,7 +2665,7 @@
 	}
 }
 
-/**
+/*
  * Link state is waiting for down notification
  */
 static void
@@ -2689,7 +2688,7 @@
 	}
 }
 
-/**
+/*
  * Link state is waiting for down notification and there is a pending up
  */
 static void
@@ -2713,7 +2712,7 @@
 	}
 }
 
-/**
+/*
  * Link state is up
  */
 static void
@@ -2733,7 +2732,7 @@
 	}
 }
 
-/**
+/*
  * Link state is waiting for up notification
  */
 static void
@@ -2756,7 +2755,7 @@
 	}
 }
 
-/**
+/*
  * Link state is waiting for up notification and there is a pending down
  */
 static void
@@ -2780,7 +2779,7 @@
 	}
 }
 
-/**
+/*
  * Link state is waiting for up notification and there are pending down and up
  */
 static void
@@ -2806,7 +2805,7 @@
 
 
 
-/**
+/*
  *  hal_port_private
  */
 
@@ -2821,7 +2820,7 @@
 		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
 }
 
-/**
+/*
  * Send SCN notification to upper layers.
  * trunk - false if caller is fcport to ignore fcport event in trunked mode
  */
@@ -2897,7 +2896,7 @@
 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
 }
 
-/**
+/*
  * Memory initialization.
  */
 static void
@@ -2909,7 +2908,7 @@
 	struct bfa_fcport_ln_s *ln = &fcport->ln;
 	struct bfa_timeval_s tv;
 
-	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+	memset(fcport, 0, sizeof(struct bfa_fcport_s));
 	fcport->bfa = bfa;
 	ln->fcport = fcport;
 
@@ -2918,13 +2917,13 @@
 	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
 	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
 
-	/**
+	/*
 	 * initialize time stamp for stats reset
 	 */
 	bfa_os_gettimeofday(&tv);
 	fcport->stats_reset_time = tv.tv_sec;
 
-	/**
+	/*
 	 * initialize and set default configuration
 	 */
 	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
@@ -2942,7 +2941,7 @@
 {
 }
 
-/**
+/*
  * Called when IOC is ready.
  */
 static void
@@ -2951,7 +2950,7 @@
 	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
 }
 
-/**
+/*
  * Called before IOC is stopped.
  */
 static void
@@ -2961,7 +2960,7 @@
 	bfa_trunk_iocdisable(bfa);
 }
 
-/**
+/*
  * Called when IOC failure is detected.
  */
 static void
@@ -2986,18 +2985,17 @@
 		fcport->myalpa = 0;
 
 	/* QoS Details */
-	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
-	bfa_os_assign(fcport->qos_vc_attr,
-		pevent->link_state.vc_fcf.qos_vc_attr);
+	fcport->qos_attr = pevent->link_state.qos_attr;
+	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;
 
-	/**
+	/*
 	 * update trunk state if applicable
 	 */
 	if (!fcport->cfg.trunked)
 		trunk->attr.state = BFA_TRUNK_DISABLED;
 
 	/* update FCoE specific */
-	fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);
+	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);
 
 	bfa_trc(fcport->bfa, fcport->speed);
 	bfa_trc(fcport->bfa, fcport->topology);
@@ -3010,7 +3008,7 @@
 	fcport->topology = BFA_PORT_TOPOLOGY_NONE;
 }
 
-/**
+/*
  * Send port enable message to firmware.
  */
 static bfa_boolean_t
@@ -3018,13 +3016,13 @@
 {
 	struct bfi_fcport_enable_req_s *m;
 
-	/**
+	/*
 	 * Increment message tag before queue check, so that responses to old
 	 * requests are discarded.
 	 */
 	fcport->msgtag++;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3040,19 +3038,19 @@
 	m->pwwn = fcport->pwwn;
 	m->port_cfg = fcport->cfg;
 	m->msgtag = fcport->msgtag;
-	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
 	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
 	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 	return BFA_TRUE;
 }
 
-/**
+/*
  * Send port disable message to firmware.
  */
 static	bfa_boolean_t
@@ -3060,13 +3058,13 @@
 {
 	struct bfi_fcport_req_s *m;
 
-	/**
+	/*
 	 * Increment message tag before queue check, so that responses to old
 	 * requests are discarded.
 	 */
 	fcport->msgtag++;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3080,7 +3078,7 @@
 			bfa_lpuid(fcport->bfa));
 	m->msgtag = fcport->msgtag;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3105,7 +3103,7 @@
 	struct bfa_fcport_s *fcport = port_cbarg;
 	struct bfi_fcport_set_svc_params_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
@@ -3116,9 +3114,9 @@
 
 	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
 			bfa_lpuid(fcport->bfa));
-	m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);
+	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3134,7 +3132,7 @@
 
 	/* Now swap the 32 bit fields */
 	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
-		dip[i] = bfa_os_ntohl(sip[i]);
+		dip[i] = be32_to_cpu(sip[i]);
 }
 
 static void
@@ -3148,11 +3146,11 @@
 	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
 	     i = i + 2) {
 #ifdef __BIGENDIAN
-		dip[i] = bfa_os_ntohl(sip[i]);
-		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+		dip[i] = be32_to_cpu(sip[i]);
+		dip[i + 1] = be32_to_cpu(sip[i + 1]);
 #else
-		dip[i] = bfa_os_ntohl(sip[i + 1]);
-		dip[i + 1] = bfa_os_ntohl(sip[i]);
+		dip[i] = be32_to_cpu(sip[i + 1]);
+		dip[i + 1] = be32_to_cpu(sip[i]);
 #endif
 	}
 }
@@ -3223,7 +3221,7 @@
 	}
 	fcport->stats_qfull = BFA_FALSE;
 
-	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
 			bfa_lpuid(fcport->bfa));
 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
@@ -3237,7 +3235,7 @@
 	if (complete) {
 		struct bfa_timeval_s tv;
 
-		/**
+		/*
 		 * re-initialize time stamp for stats reset
 		 */
 		bfa_os_gettimeofday(&tv);
@@ -3285,13 +3283,13 @@
 	}
 	fcport->stats_qfull = BFA_FALSE;
 
-	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
 	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
 			bfa_lpuid(fcport->bfa));
 	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 }
 
-/**
+/*
  * Handle trunk SCN event from firmware.
  */
 static void
@@ -3312,7 +3310,7 @@
 	bfa_trc(fcport->bfa, scn->trunk_state);
 	bfa_trc(fcport->bfa, scn->trunk_speed);
 
-	/**
+	/*
 	 * Save off new state for trunk attribute query
 	 */
 	state_prev = trunk->attr.state;
@@ -3327,7 +3325,7 @@
 		lattr->trunk_wwn  = tlink->trunk_wwn;
 		lattr->fctl	  = tlink->fctl;
 		lattr->speed	  = tlink->speed;
-		lattr->deskew	  = bfa_os_ntohl(tlink->deskew);
+		lattr->deskew	  = be32_to_cpu(tlink->deskew);
 
 		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
 			fcport->speed	 = tlink->speed;
@@ -3360,7 +3358,7 @@
 			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
 	}
 
-	/**
+	/*
 	 * Notify upper layers if trunk state changed.
 	 */
 	if ((state_prev != trunk->attr.state) ||
@@ -3376,7 +3374,7 @@
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 	int i = 0;
 
-	/**
+	/*
 	 * In trunked mode, notify upper layers that link is down
 	 */
 	if (fcport->cfg.trunked) {
@@ -3400,11 +3398,11 @@
 
 
 
-/**
+/*
  *  hal_port_public
  */
 
-/**
+/*
  * Called to initialize port attributes
  */
 void
@@ -3412,7 +3410,7 @@
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	/**
+	/*
 	 * Initialize port attributes from IOC hardware data.
 	 */
 	bfa_fcport_set_wwns(fcport);
@@ -3426,7 +3424,7 @@
 	bfa_assert(fcport->speed_sup);
 }
 
-/**
+/*
  * Firmware message handler.
  */
 void
@@ -3507,11 +3505,11 @@
 
 
 
-/**
+/*
  *  hal_port_api
  */
 
-/**
+/*
  * Registered callback for port events.
  */
 void
@@ -3552,7 +3550,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Configure port speed.
  */
 bfa_status_t
@@ -3574,7 +3572,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get current speed.
  */
 enum bfa_port_speed
@@ -3585,7 +3583,7 @@
 	return fcport->speed;
 }
 
-/**
+/*
  * Configure port topology.
  */
 bfa_status_t
@@ -3610,7 +3608,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get current topology.
  */
 enum bfa_port_topology
@@ -3710,7 +3708,7 @@
 	bfa_fcport_send_txcredit(fcport);
 }
 
-/**
+/*
  * Get port attributes.
  */
 
@@ -3729,7 +3727,7 @@
 {
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));
+	memset(attr, 0, sizeof(struct bfa_port_attr_s));
 
 	attr->nwwn = fcport->nwwn;
 	attr->pwwn = fcport->pwwn;
@@ -3737,7 +3735,7 @@
 	attr->factorypwwn =  bfa_ioc_get_mfg_pwwn(&bfa->ioc);
 	attr->factorynwwn =  bfa_ioc_get_mfg_nwwn(&bfa->ioc);
 
-	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
+	memcpy(&attr->pport_cfg, &fcport->cfg,
 		sizeof(struct bfa_port_cfg_s));
 	/* speed attributes */
 	attr->pport_cfg.speed = fcport->cfg.speed;
@@ -3770,7 +3768,7 @@
 
 #define BFA_FCPORT_STATS_TOV	1000
 
-/**
+/*
  * Fetch port statistics (FCQoS or FCoE).
  */
 bfa_status_t
@@ -3796,7 +3794,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Reset port statistics (FCQoS or FCoE).
  */
 bfa_status_t
@@ -3820,7 +3818,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Fetch FCQoS port statistics
  */
 bfa_status_t
@@ -3833,7 +3831,7 @@
 	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
 }
 
-/**
+/*
  * Reset FCoE port statistics
  */
 bfa_status_t
@@ -3845,7 +3843,7 @@
 	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
 }
 
-/**
+/*
  * Fetch FCQoS port statistics
  */
 bfa_status_t
@@ -3858,7 +3856,7 @@
 	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
 }
 
-/**
+/*
  * Reset FCoE port statistics
  */
 bfa_status_t
@@ -3876,7 +3874,7 @@
 	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	qos_attr->state = fcport->qos_attr.state;
-	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
+	qos_attr->total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
 }
 
 void
@@ -3887,10 +3885,10 @@
 	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
 	u32 i = 0;
 
-	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
-	qos_vc_attr->shared_credit  = bfa_os_ntohs(bfa_vc_attr->shared_credit);
+	qos_vc_attr->total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
+	qos_vc_attr->shared_credit  = be16_to_cpu(bfa_vc_attr->shared_credit);
 	qos_vc_attr->elp_opmode_flags  =
-			bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
+			be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
 
 	/* Individual VC info */
 	while (i < qos_vc_attr->total_vc_count) {
@@ -3904,7 +3902,7 @@
 	}
 }
 
-/**
+/*
  * Fetch port attributes.
  */
 bfa_boolean_t
@@ -3939,7 +3937,7 @@
 
 	if (ioc_type == BFA_IOC_TYPE_FC) {
 		fcport->cfg.qos_enabled = on_off;
-		/**
+		/*
 		 * Notify fcpim of the change in QoS state
 		 */
 		bfa_fcpim_update_ioredirect(bfa);
@@ -3959,7 +3957,7 @@
 		fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
 }
 
-/**
+/*
  * Configure default minimum ratelim speed
  */
 bfa_status_t
@@ -3980,7 +3978,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Get default minimum ratelim speed
  */
 enum bfa_port_speed
@@ -4095,10 +4093,10 @@
 }
 
 
-/**
+/*
  * Rport State machine functions
  */
-/**
+/*
  * Beginning state, only online event expected.
  */
 static void
@@ -4151,7 +4149,7 @@
 	}
 }
 
-/**
+/*
  * Waiting for rport create response from firmware.
  */
 static void
@@ -4188,7 +4186,7 @@
 	}
 }
 
-/**
+/*
  * Request queue is full, awaiting queue resume to send create request.
  */
 static void
@@ -4229,7 +4227,7 @@
 	}
 }
 
-/**
+/*
  * Online state - normal parking state.
  */
 static void
@@ -4275,9 +4273,9 @@
 		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);
 
 		qos_scn->old_qos_attr.qos_flow_id  =
-			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
+			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
 		qos_scn->new_qos_attr.qos_flow_id  =
-			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);
+			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);
 
 		if (qos_scn->old_qos_attr.qos_flow_id !=
 			qos_scn->new_qos_attr.qos_flow_id)
@@ -4297,7 +4295,7 @@
 	}
 }
 
-/**
+/*
  * Firmware rport is being deleted - awaiting f/w response.
  */
 static void
@@ -4360,7 +4358,7 @@
 	}
 }
 
-/**
+/*
  * Offline state.
  */
 static void
@@ -4395,7 +4393,7 @@
 	}
 }
 
-/**
+/*
  * Rport is deleted, waiting for firmware response to delete.
  */
 static void
@@ -4447,7 +4445,7 @@
 	}
 }
 
-/**
+/*
  * Waiting for rport create response from firmware. A delete is pending.
  */
 static void
@@ -4478,7 +4476,7 @@
 	}
 }
 
-/**
+/*
  * Waiting for rport create response from firmware. Rport offline is pending.
  */
 static void
@@ -4513,7 +4511,7 @@
 	}
 }
 
-/**
+/*
  * IOC h/w failed.
  */
 static void
@@ -4553,7 +4551,7 @@
 
 
 
-/**
+/*
  *  bfa_rport_private BFA rport private functions
  */
 
@@ -4612,12 +4610,12 @@
 		   !(mod->num_rports & (mod->num_rports - 1)));
 
 	for (i = 0; i < mod->num_rports; i++, rp++) {
-		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
+		memset(rp, 0, sizeof(struct bfa_rport_s));
 		rp->bfa = bfa;
 		rp->rport_tag = i;
 		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
 
-		/**
+		/*
 		 *  - is unused
 		 */
 		if (i)
@@ -4626,7 +4624,7 @@
 		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
 	}
 
-	/**
+	/*
 	 * consume memory
 	 */
 	bfa_meminfo_kva(meminfo) = (u8 *) rp;
@@ -4687,7 +4685,7 @@
 {
 	struct bfi_rport_create_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4699,7 +4697,7 @@
 	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
 			bfa_lpuid(rp->bfa));
 	m->bfa_handle = rp->rport_tag;
-	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
+	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
 	m->pid = rp->rport_info.pid;
 	m->lp_tag = rp->rport_info.lp_tag;
 	m->local_pid = rp->rport_info.local_pid;
@@ -4708,7 +4706,7 @@
 	m->vf_id = rp->rport_info.vf_id;
 	m->cisc = rp->rport_info.cisc;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4720,7 +4718,7 @@
 {
 	struct bfi_rport_delete_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4733,7 +4731,7 @@
 			bfa_lpuid(rp->bfa));
 	m->fw_handle = rp->fw_handle;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4745,7 +4743,7 @@
 {
 	struct bfa_rport_speed_req_s *m;
 
-	/**
+	/*
 	 * check for room in queue to send request now
 	 */
 	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
@@ -4759,7 +4757,7 @@
 	m->fw_handle = rp->fw_handle;
 	m->speed = (u8)rp->rport_info.speed;
 
-	/**
+	/*
 	 * queue I/O message to firmware
 	 */
 	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
@@ -4768,11 +4766,11 @@
 
 
 
-/**
+/*
  *  bfa_rport_public
  */
 
-/**
+/*
  * Rport interrupt processing.
  */
 void
@@ -4814,7 +4812,7 @@
 
 
 
-/**
+/*
  *  bfa_rport_api
  */
 
@@ -4849,7 +4847,7 @@
 {
 	bfa_assert(rport_info->max_frmsz != 0);
 
-	/**
+	/*
 	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
 	 * responses. Default to minimum size.
 	 */
@@ -4858,7 +4856,7 @@
 		rport_info->max_frmsz = FC_MIN_PDUSZ;
 	}
 
-	bfa_os_assign(rport->rport_info, *rport_info);
+	rport->rport_info = *rport_info;
 	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
 }
 
@@ -4890,22 +4888,22 @@
 					struct bfa_rport_qos_attr_s *qos_attr)
 {
 	qos_attr->qos_priority  = rport->qos_attr.qos_priority;
-	qos_attr->qos_flow_id  = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
+	qos_attr->qos_flow_id  = be32_to_cpu(rport->qos_attr.qos_flow_id);
 
 }
 
 void
 bfa_rport_clear_stats(struct bfa_rport_s *rport)
 {
-	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
+	memset(&rport->stats, 0, sizeof(rport->stats));
 }
 
 
-/**
+/*
  * SGPG related functions
  */
 
-/**
+/*
  * Compute and return memory needed by FCP(im) module.
  */
 static void
@@ -4957,8 +4955,8 @@
 	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));
 
 	for (i = 0; i < mod->num_sgpgs; i++) {
-		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
-		bfa_os_memset(sgpg, 0, sizeof(*sgpg));
+		memset(hsgpg, 0, sizeof(*hsgpg));
+		memset(sgpg, 0, sizeof(*sgpg));
 
 		hsgpg->sgpg = sgpg;
 		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
@@ -4997,7 +4995,7 @@
 
 
 
-/**
+/*
  *  hal_sgpg_public BFA SGPG public functions
  */
 
@@ -5039,7 +5037,7 @@
 	if (list_empty(&mod->sgpg_wait_q))
 		return;
 
-	/**
+	/*
 	 * satisfy as many waiting requests as possible
 	 */
 	do {
@@ -5067,11 +5065,11 @@
 
 	wqe->nsgpg_total = wqe->nsgpg = nsgpg;
 
-	/**
+	/*
 	 * allocate any left to this one first
 	 */
 	if (mod->free_sgpgs) {
-		/**
+		/*
 		 * no one else is waiting for SGPG
 		 */
 		bfa_assert(list_empty(&mod->sgpg_wait_q));
@@ -5105,7 +5103,7 @@
 	wqe->cbarg = cbarg;
 }
 
-/**
+/*
  *  UF related functions
  */
 /*
@@ -5136,7 +5134,7 @@
 	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
 	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;
 
-	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
+	memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
 }
 
 static void
@@ -5153,11 +5151,11 @@
 
 	for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
 	     i++, uf_bp_msg++) {
-		bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
+		memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
 
 		uf_bp_msg->buf_tag = i;
 		buf_len = sizeof(struct bfa_uf_buf_s);
-		uf_bp_msg->buf_len = bfa_os_htons(buf_len);
+		uf_bp_msg->buf_len = cpu_to_be16(buf_len);
 		bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
 			    bfa_lpuid(ufm->bfa));
 
@@ -5173,7 +5171,7 @@
 		bfa_sge_to_be(&sge[1]);
 	}
 
-	/**
+	/*
 	 * advance pointer beyond consumed memory
 	 */
 	bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
@@ -5194,7 +5192,7 @@
 	 * Initialize UFs and queue it in UF free queue
 	 */
 	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
-		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
+		memset(uf, 0, sizeof(struct bfa_uf_s));
 		uf->bfa = ufm->bfa;
 		uf->uf_tag = i;
 		uf->pb_len = sizeof(struct bfa_uf_buf_s);
@@ -5203,7 +5201,7 @@
 		list_add_tail(&uf->qe, &ufm->uf_free_q);
 	}
 
-	/**
+	/*
 	 * advance memory pointer
 	 */
 	bfa_meminfo_kva(mi) = (u8 *) uf;
@@ -5241,7 +5239,7 @@
 {
 	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
 
-	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
+	memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
 	ufm->bfa = bfa;
 	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
 	INIT_LIST_HEAD(&ufm->uf_free_q);
@@ -5279,7 +5277,7 @@
 	if (!uf_post_msg)
 		return BFA_STATUS_FAILED;
 
-	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
+	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
 		      sizeof(struct bfi_uf_buf_post_s));
 	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);
 
@@ -5310,8 +5308,8 @@
 	u8 *buf = &uf_buf->d[0];
 	struct fchs_s *fchs;
 
-	m->frm_len = bfa_os_ntohs(m->frm_len);
-	m->xfr_len = bfa_os_ntohs(m->xfr_len);
+	m->frm_len = be16_to_cpu(m->frm_len);
+	m->xfr_len = be16_to_cpu(m->xfr_len);
 
 	fchs = (struct fchs_s *)uf_buf;
 
@@ -5365,11 +5363,11 @@
 
 
 
-/**
+/*
  *  hal_uf_api
  */
 
-/**
+/*
  * Register handler for all unsolicited receive frames.
  *
  * @param[in]	bfa		BFA instance
@@ -5385,7 +5383,7 @@
 	ufm->cbarg = cbarg;
 }
 
-/**
+/*
  *	Free an unsolicited frame back to BFA.
  *
  * @param[in]		uf		unsolicited frame to be freed
@@ -5401,7 +5399,7 @@
 
 
 
-/**
+/*
  *  uf_pub BFA uf module public functions
  */
 void
diff --git a/drivers/scsi/bfa/bfa_svc.h b/drivers/scsi/bfa/bfa_svc.h
index 9921dad..e2349d5 100644
--- a/drivers/scsi/bfa/bfa_svc.h
+++ b/drivers/scsi/bfa/bfa_svc.h
@@ -22,12 +22,12 @@
 #include "bfi_ms.h"
 
 
-/**
+/*
  * Scatter-gather DMA related defines
  */
 #define BFA_SGPG_MIN	(16)
 
-/**
+/*
  * Alignment macro for SG page allocation
  */
 #define BFA_SGPG_ROUNDUP(_l) (((_l) + (sizeof(struct bfi_sgpg_s) - 1))	\
@@ -48,7 +48,7 @@
 	union bfi_addr_u sgpg_pa;	/*  pa of SG page		*/
 };
 
-/**
+/*
  * Given number of SG elements, BFA_SGPG_NPAGE() returns the number of
  * SG pages required.
  */
@@ -75,7 +75,7 @@
 void bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe);
 
 
-/**
+/*
  * FCXP related defines
  */
 #define BFA_FCXP_MIN		(1)
@@ -115,12 +115,12 @@
 
 
 
-/**
+/*
  * Information needed for a FCXP request
  */
 struct bfa_fcxp_req_info_s {
 	struct bfa_rport_s *bfa_rport;
-					/** Pointer to the bfa rport that was
+					/* Pointer to the bfa rport that was
 					 * returned from bfa_rport_create().
 					 * This could be left NULL for WKA or
 					 * for FCXP interactions before the
@@ -137,11 +137,10 @@
 
 struct bfa_fcxp_rsp_info_s {
 	struct fchs_s	rsp_fchs;
-				/** !< Response frame's FC header will
+				/* Response frame's FC header will
 				 * be sent back in this field */
 	u8		rsp_timeout;
-				/** !< timeout in seconds, 0-no response
-				 */
+				/* timeout in seconds, 0-no response */
 	u8		rsvd2[3];
 	u32	rsp_maxlen;	/*  max response length expected */
 };
@@ -218,7 +217,7 @@
 void	bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
 
-/**
+/*
  * RPORT related defines
  */
 #define BFA_RPORT_MIN	4
@@ -232,7 +231,7 @@
 
 #define BFA_RPORT_MOD(__bfa)	(&(__bfa)->modules.rport_mod)
 
-/**
+/*
  * Convert rport tag to RPORT
  */
 #define BFA_RPORT_FROM_TAG(__bfa, _tag)				\
@@ -244,7 +243,7 @@
  */
 void	bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
-/**
+/*
  *	BFA rport information.
  */
 struct bfa_rport_info_s {
@@ -259,7 +258,7 @@
 	enum bfa_port_speed speed;	/*  Rport's current speed	    */
 };
 
-/**
+/*
  * BFA rport data structure
  */
 struct bfa_rport_s {
@@ -282,7 +281,7 @@
 #define BFA_RPORT_FC_COS(_rport)	((_rport)->rport_info.fc_class)
 
 
-/**
+/*
  * UF - unsolicited receive related defines
  */
 
@@ -305,7 +304,7 @@
 	struct bfa_sge_s sges[BFI_SGE_INLINE_MAX];
 };
 
-/**
+/*
  *      Callback prototype for unsolicited frame receive handler.
  *
  * @param[in]           cbarg           callback arg for receive handler
@@ -338,7 +337,7 @@
 
 #define BFA_UF_BUFSZ	(2 * 1024 + 256)
 
-/**
+/*
  * @todo private
  */
 struct bfa_uf_buf_s {
@@ -346,7 +345,7 @@
 };
 
 
-/**
+/*
  * LPS - bfa lport login/logout service interface
  */
 struct bfa_lps_s {
@@ -397,14 +396,14 @@
 void	bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 
 
-/**
+/*
  * FCPORT related defines
  */
 
 #define BFA_FCPORT(_bfa)	(&((_bfa)->modules.port))
 typedef void (*bfa_cb_port_t) (void *cbarg, enum bfa_status status);
 
-/**
+/*
  * Link notification data structure
  */
 struct bfa_fcport_ln_s {
@@ -418,7 +417,7 @@
 	struct bfa_trunk_attr_s	attr;
 };
 
-/**
+/*
  * BFA FC port data structure
  */
 struct bfa_fcport_s {
@@ -613,7 +612,7 @@
 			  void *cbarg);
 void bfa_uf_free(struct bfa_uf_s *uf);
 
-/**
+/*
  * bfa lport service api
  */
 
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index 4d8784e..1f93897 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfad.c Linux driver PCI interface module.
  */
 #include <linux/module.h>
@@ -151,7 +151,7 @@
 static void
 bfad_sm_fcs_exit(struct bfad_s *bfad, enum bfad_sm_event event);
 
-/**
+/*
  * Beginning state for the driver instance, awaiting the pci_probe event
  */
 static void
@@ -181,7 +181,7 @@
 	}
 }
 
-/**
+/*
  * Driver Instance is created, awaiting event INIT to initialize the bfad
  */
 static void
@@ -364,7 +364,7 @@
 	}
 }
 
-/**
+/*
  *  BFA callbacks
  */
 void
@@ -376,7 +376,7 @@
 	complete(&fcomp->comp);
 }
 
-/**
+/*
  * bfa_init callback
  */
 void
@@ -401,7 +401,7 @@
 	complete(&bfad->comp);
 }
 
-/**
+/*
  *  BFA_FCS callbacks
  */
 struct bfad_port_s *
@@ -457,7 +457,7 @@
 	}
 }
 
-/**
+/*
  * FCS RPORT alloc callback, after successful PLOGI by FCS
  */
 bfa_status_t
@@ -478,7 +478,7 @@
 	return rc;
 }
 
-/**
+/*
  * FCS PBC VPORT Create
  */
 void
@@ -663,7 +663,7 @@
 	return rc;
 }
 
-/**
+/*
  * Create a vport under a vf.
  */
 bfa_status_t
@@ -716,30 +716,6 @@
 	return rc;
 }
 
-/**
- * Create a vf and its base vport implicitely.
- */
-bfa_status_t
-bfad_vf_create(struct bfad_s *bfad, u16 vf_id,
-	       struct bfa_lport_cfg_s *port_cfg)
-{
-	struct bfad_vf_s      *vf;
-	int		rc = BFA_STATUS_OK;
-
-	vf = kzalloc(sizeof(struct bfad_vf_s), GFP_KERNEL);
-	if (!vf) {
-		rc = BFA_STATUS_FAILED;
-		goto ext;
-	}
-
-	rc = bfa_fcs_vf_create(&vf->fcs_vf, &bfad->bfa_fcs, vf_id, port_cfg,
-			       vf);
-	if (rc != BFA_STATUS_OK)
-		kfree(vf);
-ext:
-	return rc;
-}
-
 void
 bfad_bfa_tmo(unsigned long data)
 {
@@ -885,20 +861,6 @@
 	pci_set_drvdata(pdev, NULL);
 }
 
-void
-bfad_fcs_port_cfg(struct bfad_s *bfad)
-{
-	struct bfa_lport_cfg_s  port_cfg;
-	struct bfa_port_attr_s attr;
-	char		symname[BFA_SYMNAME_MAXLEN];
-
-	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
-	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
-	bfa_fcport_get_attr(&bfad->bfa, &attr);
-	port_cfg.nwwn = attr.nwwn;
-	port_cfg.pwwn = attr.pwwn;
-}
-
 bfa_status_t
 bfad_drv_init(struct bfad_s *bfad)
 {
@@ -1089,9 +1051,6 @@
 	bfa_fcs_init(&bfad->bfa_fcs);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
-	/* PPORT FCS config */
-	bfad_fcs_port_cfg(bfad);
-
 	retval = bfad_cfg_pport(bfad, BFA_LPORT_ROLE_FCP_IM);
 	if (retval != BFA_STATUS_OK) {
 		if (bfa_sm_cmp_state(bfad, bfad_sm_initializing))
@@ -1181,7 +1140,7 @@
 	return 0;
 }
 
-/**
+/*
  *  BFA driver interrupt functions
  */
 irqreturn_t
@@ -1240,7 +1199,7 @@
 	return IRQ_HANDLED;
 }
 
-/**
+/*
  * Initialize the MSIX entry table.
  */
 static void
@@ -1293,7 +1252,7 @@
 	return 0;
 }
 
-/**
+/*
  * Setup MSIX based interrupt.
  */
 int
@@ -1374,7 +1333,7 @@
 	}
 }
 
-/**
+/*
  * PCI probe entry.
  */
 int
@@ -1460,7 +1419,7 @@
 	return error;
 }
 
-/**
+/*
  * PCI remove entry.
  */
 void
@@ -1541,7 +1500,7 @@
 	.remove = __devexit_p(bfad_pci_remove),
 };
 
-/**
+/*
  * Driver module init.
  */
 static int __init
@@ -1581,7 +1540,7 @@
 	return error;
 }
 
-/**
+/*
  * Driver module exit.
  */
 static void __exit
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index d884372..ed9fff4 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -15,14 +15,14 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfa_attr.c Linux driver configuration interface module.
  */
 
 #include "bfad_drv.h"
 #include "bfad_im.h"
 
-/**
+/*
  * FC transport template entry, get SCSI target port ID.
  */
 void
@@ -48,7 +48,7 @@
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI target nwwn.
  */
 void
@@ -70,11 +70,11 @@
 	if (itnim)
 		node_name = bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim);
 
-	fc_starget_node_name(starget) = bfa_os_htonll(node_name);
+	fc_starget_node_name(starget) = cpu_to_be64(node_name);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI target pwwn.
  */
 void
@@ -96,11 +96,11 @@
 	if (itnim)
 		port_name = bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim);
 
-	fc_starget_port_name(starget) = bfa_os_htonll(port_name);
+	fc_starget_port_name(starget) = cpu_to_be64(port_name);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port ID.
  */
 void
@@ -114,7 +114,7 @@
 			bfa_os_hton3b(bfa_fcs_lport_get_fcid(port->fcs_port));
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port type.
  */
 static void
@@ -146,7 +146,7 @@
 	}
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port state.
  */
 static void
@@ -183,7 +183,7 @@
 	}
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host active fc4s.
  */
 static void
@@ -202,7 +202,7 @@
 	fc_host_active_fc4s(shost)[7] = 1;
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host link speed.
  */
 static void
@@ -236,7 +236,7 @@
 	}
 }
 
-/**
+/*
  * FC transport template entry, get SCSI host port type.
  */
 static void
@@ -249,11 +249,11 @@
 
 	fabric_nwwn = bfa_fcs_lport_get_fabric_name(port->fcs_port);
 
-	fc_host_fabric_name(shost) = bfa_os_htonll(fabric_nwwn);
+	fc_host_fabric_name(shost) = cpu_to_be64(fabric_nwwn);
 
 }
 
-/**
+/*
  * FC transport template entry, get BFAD statistics.
  */
 static struct fc_host_statistics *
@@ -304,7 +304,7 @@
 	return hstats;
 }
 
-/**
+/*
  * FC transport template entry, reset BFAD statistics.
  */
 static void
@@ -331,7 +331,7 @@
 	return;
 }
 
-/**
+/*
  * FC transport template entry, get rport loss timeout.
  */
 static void
@@ -347,7 +347,7 @@
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * FC transport template entry, set rport loss timeout.
  */
 static void
@@ -633,7 +633,7 @@
 	.set_rport_dev_loss_tmo = bfad_im_set_rport_loss_tmo,
 };
 
-/**
+/*
  *  Scsi_Host_attrs SCSI host attributes
  */
 static ssize_t
@@ -733,7 +733,7 @@
 	u64        nwwn;
 
 	nwwn = bfa_fcs_lport_get_nwwn(port->fcs_port);
-	return snprintf(buf, PAGE_SIZE, "0x%llx\n", bfa_os_htonll(nwwn));
+	return snprintf(buf, PAGE_SIZE, "0x%llx\n", cpu_to_be64(nwwn));
 }
 
 static ssize_t
diff --git a/drivers/scsi/bfa/bfad_debugfs.c b/drivers/scsi/bfa/bfad_debugfs.c
index 69ed1c4..1fedeeb 100644
--- a/drivers/scsi/bfa/bfad_debugfs.c
+++ b/drivers/scsi/bfa/bfad_debugfs.c
@@ -318,7 +318,7 @@
 	regbuf =  (u32 *)bfad->regdata;
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	for (i = 0; i < len; i++) {
-		*regbuf = bfa_reg_read(reg_addr);
+		*regbuf = readl(reg_addr);
 		regbuf++;
 		reg_addr += sizeof(u32);
 	}
@@ -361,7 +361,7 @@
 
 	reg_addr = (u32 *) ((u8 *) bfa_ioc_bar0(ioc) + addr);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	bfa_reg_write(reg_addr, val);
+	writel(val, reg_addr);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	return nbytes;
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 98420bb..97f9b6c 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -15,11 +15,11 @@
  * General Public License for more details.
  */
 
-/**
+/*
  * Contains base driver definitions.
  */
 
-/**
+/*
  *  bfa_drv.h Linux driver data structures.
  */
 
@@ -309,7 +309,6 @@
 void		bfad_init_timer(struct bfad_s *bfad);
 int		bfad_pci_init(struct pci_dev *pdev, struct bfad_s *bfad);
 void		bfad_pci_uninit(struct pci_dev *pdev, struct bfad_s *bfad);
-void		bfad_fcs_port_cfg(struct bfad_s *bfad);
 void		bfad_drv_uninit(struct bfad_s *bfad);
 int		bfad_worker(void *ptr);
 void		bfad_debugfs_init(struct bfad_port_s *port);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index d950ee4..8daa716 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 
-/**
+/*
  *  bfad_im.c Linux driver IM module.
  */
 
@@ -164,10 +164,10 @@
 		wake_up(wq);
 }
 
-/**
+/*
  *  Scsi_Host_template SCSI host template
  */
-/**
+/*
  * Scsi_Host template entry, returns BFAD PCI info.
  */
 static const char *
@@ -196,7 +196,7 @@
 	return bfa_buf;
 }
 
-/**
+/*
  * Scsi_Host template entry, aborts the specified SCSI command.
  *
  * Returns: SUCCESS or FAILED.
@@ -280,7 +280,7 @@
 	return rc;
 }
 
-/**
+/*
  * Scsi_Host template entry, resets a LUN and abort its all commands.
  *
  * Returns: SUCCESS or FAILED.
@@ -319,7 +319,7 @@
 		goto out;
 	}
 
-	/**
+	/*
 	 * Set host_scribble to NULL to avoid aborting a task command
 	 * if it happens.
 	 */
@@ -346,7 +346,7 @@
 	return rc;
 }
 
-/**
+/*
  * Scsi_Host template entry, resets the bus and abort all commands.
  */
 static int
@@ -396,7 +396,7 @@
 	return SUCCESS;
 }
 
-/**
+/*
  * Scsi_Host template entry slave_destroy.
  */
 static void
@@ -406,11 +406,11 @@
 	return;
 }
 
-/**
+/*
  *  BFA FCS itnim callbacks
  */
 
-/**
+/*
  * BFA FCS itnim alloc callback, after successful PRLI
  * Context: Interrupt
  */
@@ -433,7 +433,7 @@
 	bfad->bfad_flags |= BFAD_RPORT_ONLINE;
 }
 
-/**
+/*
  * BFA FCS itnim free callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -471,7 +471,7 @@
 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * BFA FCS itnim online callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -492,7 +492,7 @@
 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * BFA FCS itnim offline callback.
  * Context: Interrupt. bfad_lock is held
  */
@@ -519,7 +519,7 @@
 		queue_work(im->drv_workq, &itnim_drv->itnim_work);
 }
 
-/**
+/*
  * Allocate a Scsi_Host for a port.
  */
 int
@@ -751,7 +751,7 @@
 	return BFA_STATUS_OK;
 }
 
-/**
+/*
  * Scsi_Host template entry.
  *
  * Description:
@@ -896,7 +896,7 @@
 	return NULL;
 }
 
-/**
+/*
  * Scsi_Host template entry slave_alloc
  */
 static int
@@ -915,12 +915,16 @@
 static u32
 bfad_im_supported_speeds(struct bfa_s *bfa)
 {
-	struct bfa_ioc_attr_s ioc_attr;
+	struct bfa_ioc_attr_s *ioc_attr;
 	u32 supported_speed = 0;
 
-	bfa_get_attr(bfa, &ioc_attr);
-	if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
-		if (ioc_attr.adapter_attr.is_mezz) {
+	ioc_attr = kzalloc(sizeof(struct bfa_ioc_attr_s), GFP_KERNEL);
+	if (!ioc_attr)
+		return 0;
+
+	bfa_get_attr(bfa, ioc_attr);
+	if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_8GBPS) {
+		if (ioc_attr->adapter_attr.is_mezz) {
 			supported_speed |= FC_PORTSPEED_8GBIT |
 				FC_PORTSPEED_4GBIT |
 				FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
@@ -929,12 +933,13 @@
 				FC_PORTSPEED_4GBIT |
 				FC_PORTSPEED_2GBIT;
 		}
-	} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
+	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_4GBPS) {
 		supported_speed |=  FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
 				FC_PORTSPEED_1GBIT;
-	} else if (ioc_attr.adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
+	} else if (ioc_attr->adapter_attr.max_speed == BFA_PORT_SPEED_10GBPS) {
 		supported_speed |= FC_PORTSPEED_10GBIT;
 	}
+	kfree(ioc_attr);
 	return supported_speed;
 }
 
@@ -944,14 +949,13 @@
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_port_s    *port = im_port->port;
-	struct bfa_port_attr_s pattr;
-	struct bfa_lport_attr_s port_attr;
 	char symname[BFA_SYMNAME_MAXLEN];
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
 
 	fc_host_node_name(host) =
-		bfa_os_htonll((bfa_fcs_lport_get_nwwn(port->fcs_port)));
+		cpu_to_be64((bfa_fcs_lport_get_nwwn(port->fcs_port)));
 	fc_host_port_name(host) =
-		bfa_os_htonll((bfa_fcs_lport_get_pwwn(port->fcs_port)));
+		cpu_to_be64((bfa_fcs_lport_get_pwwn(port->fcs_port)));
 	fc_host_max_npiv_vports(host) = bfa_lps_get_max_vport(&bfad->bfa);
 
 	fc_host_supported_classes(host) = FC_COS_CLASS3;
@@ -964,15 +968,12 @@
 	/* For fibre channel services type 0x20 */
 	fc_host_supported_fc4s(host)[7] = 1;
 
-	bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
-	strncpy(symname, port_attr.port_cfg.sym_name.symname,
+	strncpy(symname, bfad->bfa_fcs.fabric.bport.port_cfg.sym_name.symname,
 		BFA_SYMNAME_MAXLEN);
 	sprintf(fc_host_symbolic_name(host), "%s", symname);
 
 	fc_host_supported_speeds(host) = bfad_im_supported_speeds(&bfad->bfa);
-
-	bfa_fcport_get_attr(&bfad->bfa, &pattr);
-	fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
+	fc_host_maxframe_size(host) = fcport->cfg.maxfrsize;
 }
 
 static void
@@ -983,9 +984,9 @@
 	struct bfad_itnim_data_s *itnim_data;
 
 	rport_ids.node_name =
-		bfa_os_htonll(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
+		cpu_to_be64(bfa_fcs_itnim_get_nwwn(&itnim->fcs_itnim));
 	rport_ids.port_name =
-		bfa_os_htonll(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
+		cpu_to_be64(bfa_fcs_itnim_get_pwwn(&itnim->fcs_itnim));
 	rport_ids.port_id =
 		bfa_os_hton3b(bfa_fcs_itnim_get_fcid(&itnim->fcs_itnim));
 	rport_ids.roles = FC_RPORT_ROLE_UNKNOWN;
@@ -1015,7 +1016,7 @@
 	return;
 }
 
-/**
+/*
  * Work queue handler using FC transport service
 * Context: kernel
  */
@@ -1115,7 +1116,7 @@
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 }
 
-/**
+/*
  * Scsi_Host template entry, queue a SCSI command to the BFAD.
  */
 static int
diff --git a/drivers/scsi/bfa/bfi.h b/drivers/scsi/bfa/bfi.h
index 85f2224..58796d1 100644
--- a/drivers/scsi/bfa/bfi.h
+++ b/drivers/scsi/bfa/bfi.h
@@ -23,7 +23,7 @@
 
 #pragma pack(1)
 
-/**
+/*
  * BFI FW image type
  */
 #define	BFI_FLASH_CHUNK_SZ			256	/*  Flash chunk size */
@@ -35,7 +35,7 @@
 	BFI_IMAGE_MAX,
 };
 
-/**
+/*
  * Msg header common to all msgs
  */
 struct bfi_mhdr_s {
@@ -68,7 +68,7 @@
 #define BFI_I2H_OPCODE_BASE	128
 #define BFA_I2HM(_x)		((_x) + BFI_I2H_OPCODE_BASE)
 
-/**
+/*
  ****************************************************************************
  *
  * Scatter Gather Element and Page definition
@@ -79,7 +79,7 @@
 #define BFI_SGE_INLINE	1
 #define BFI_SGE_INLINE_MAX	(BFI_SGE_INLINE + 1)
 
-/**
+/*
  * SG Flags
  */
 enum {
@@ -90,7 +90,7 @@
 	BFI_SGE_PGDLEN		= 2,	/*  cumulative data length for page */
 };
 
-/**
+/*
  * DMA addresses
  */
 union bfi_addr_u {
@@ -100,7 +100,7 @@
 	} a32;
 };
 
-/**
+/*
  * Scatter Gather Element
  */
 struct bfi_sge_s {
@@ -116,7 +116,7 @@
 	union bfi_addr_u sga;
 };
 
-/**
+/*
  * Scatter Gather Page
  */
 #define BFI_SGPG_DATA_SGES		7
@@ -139,7 +139,7 @@
 	u32	pl[BFI_LMSG_PL_WSZ];
 };
 
-/**
+/*
  * Mailbox message structure
  */
 #define BFI_MBMSG_SZ		7
@@ -148,7 +148,7 @@
 	u32		pl[BFI_MBMSG_SZ];
 };
 
-/**
+/*
  * Message Classes
  */
 enum bfi_mclass {
@@ -186,7 +186,7 @@
 #define BFI_BOOT_LOADER_BIOS		1
 #define BFI_BOOT_LOADER_UEFI		2
 
-/**
+/*
  *----------------------------------------------------------------------
  *				IOC
  *----------------------------------------------------------------------
@@ -208,7 +208,7 @@
 	BFI_IOC_I2H_HBEAT		= BFA_I2HM(5),
 };
 
-/**
+/*
  * BFI_IOC_H2I_GETATTR_REQ message
  */
 struct bfi_ioc_getattr_req_s {
@@ -242,7 +242,7 @@
 	u32	card_type;	/*  card type			*/
 };
 
-/**
+/*
  * BFI_IOC_I2H_GETATTR_REPLY message
  */
 struct bfi_ioc_getattr_reply_s {
@@ -251,19 +251,19 @@
 	u8			rsvd[3];
 };
 
-/**
+/*
  * Firmware memory page offsets
  */
 #define BFI_IOC_SMEM_PG0_CB	(0x40)
 #define BFI_IOC_SMEM_PG0_CT	(0x180)
 
-/**
+/*
  * Firmware statistic offset
  */
 #define BFI_IOC_FWSTATS_OFF	(0x6B40)
 #define BFI_IOC_FWSTATS_SZ	(4096)
 
-/**
+/*
  * Firmware trace offset
  */
 #define BFI_IOC_TRC_OFF		(0x4b00)
@@ -280,7 +280,7 @@
 	u32	md5sum[BFI_IOC_MD5SUM_SZ];
 };
 
-/**
+/*
  *  BFI_IOC_I2H_READY_EVENT message
  */
 struct bfi_ioc_rdy_event_s {
@@ -294,7 +294,7 @@
 	u32	   hb_count;	/*  current heart beat count	*/
 };
 
-/**
+/*
  * IOC hardware/firmware state
  */
 enum bfi_ioc_state {
@@ -340,7 +340,7 @@
 	((__adap_type) & (BFI_ADAPTER_TTV | BFI_ADAPTER_PROTO |	\
 			BFI_ADAPTER_UNSUPP))
 
-/**
+/*
  * BFI_IOC_H2I_ENABLE_REQ & BFI_IOC_H2I_DISABLE_REQ messages
  */
 struct bfi_ioc_ctrl_req_s {
@@ -352,7 +352,7 @@
 #define bfi_ioc_enable_req_t struct bfi_ioc_ctrl_req_s;
 #define bfi_ioc_disable_req_t struct bfi_ioc_ctrl_req_s;
 
-/**
+/*
  * BFI_IOC_I2H_ENABLE_REPLY & BFI_IOC_I2H_DISABLE_REPLY messages
  */
 struct bfi_ioc_ctrl_reply_s {
@@ -364,7 +364,7 @@
 #define bfi_ioc_disable_reply_t struct bfi_ioc_ctrl_reply_s;
 
 #define BFI_IOC_MSGSZ   8
-/**
+/*
  * H2I Messages
  */
 union bfi_ioc_h2i_msg_u {
@@ -375,7 +375,7 @@
 	u32			mboxmsg[BFI_IOC_MSGSZ];
 };
 
-/**
+/*
  * I2H Messages
  */
 union bfi_ioc_i2h_msg_u {
@@ -385,7 +385,7 @@
 };
 
 
-/**
+/*
  *----------------------------------------------------------------------
  *				PBC
  *----------------------------------------------------------------------
@@ -394,7 +394,7 @@
 #define BFI_PBC_MAX_BLUNS	8
 #define BFI_PBC_MAX_VPORTS	16
 
-/**
+/*
  * PBC boot lun configuration
  */
 struct bfi_pbc_blun_s {
@@ -402,7 +402,7 @@
 	lun_t		tgt_lun;
 };
 
-/**
+/*
  * PBC virtual port configuration
  */
 struct bfi_pbc_vport_s {
@@ -410,7 +410,7 @@
 	wwn_t		vp_nwwn;
 };
 
-/**
+/*
  * BFI pre-boot configuration information
  */
 struct bfi_pbc_s {
@@ -427,7 +427,7 @@
 	struct bfi_pbc_vport_s vport[BFI_PBC_MAX_VPORTS];
 };
 
-/**
+/*
  *----------------------------------------------------------------------
  *				MSGQ
  *----------------------------------------------------------------------
@@ -531,7 +531,7 @@
 	BFI_PORT_I2H_CLEAR_STATS_RSP    = BFA_I2HM(4),
 };
 
-/**
+/*
  * Generic REQ type
  */
 struct bfi_port_generic_req_s {
@@ -540,7 +540,7 @@
 	u32     rsvd;
 };
 
-/**
+/*
  * Generic RSP type
  */
 struct bfi_port_generic_rsp_s {
@@ -550,7 +550,7 @@
 	u32     msgtag;         /*  msgtag for reply                */
 };
 
-/**
+/*
  * BFI_PORT_H2I_GET_STATS_REQ
  */
 struct bfi_port_get_stats_req_s {
diff --git a/drivers/scsi/bfa/bfi_ms.h b/drivers/scsi/bfa/bfi_ms.h
index 69ac85f..fa9f6fb 100644
--- a/drivers/scsi/bfa/bfi_ms.h
+++ b/drivers/scsi/bfa/bfi_ms.h
@@ -41,7 +41,7 @@
 	u16	rsvd_1;
 	u32	endian_sig;	/*  endian signature of host     */
 
-	/**
+	/*
 	 * Request and response circular queue base addresses, size and
 	 * shadow index pointers.
 	 */
@@ -58,7 +58,7 @@
 	struct bfa_iocfc_intr_attr_s intr_attr; /*  IOC interrupt attributes */
 };
 
-/**
+/*
  * Boot target wwn information for this port. This contains either the stored
  * or discovered boot target port wwns for the port.
  */
@@ -75,7 +75,7 @@
 	struct bfi_pbc_s		pbc_cfg;
 };
 
-/**
+/*
  * BFI_IOCFC_H2I_CFG_REQ message
  */
 struct bfi_iocfc_cfg_req_s {
@@ -84,7 +84,7 @@
 };
 
 
-/**
+/*
  * BFI_IOCFC_I2H_CFG_REPLY message
  */
 struct bfi_iocfc_cfg_reply_s {
@@ -95,7 +95,7 @@
 };
 
 
-/**
+/*
  * BFI_IOCFC_H2I_SET_INTR_REQ message
  */
 struct bfi_iocfc_set_intr_req_s {
@@ -107,7 +107,7 @@
 };
 
 
-/**
+/*
  * BFI_IOCFC_H2I_UPDATEQ_REQ message
  */
 struct bfi_iocfc_updateq_req_s {
@@ -119,7 +119,7 @@
 };
 
 
-/**
+/*
  * BFI_IOCFC_I2H_UPDATEQ_RSP message
  */
 struct bfi_iocfc_updateq_rsp_s {
@@ -129,7 +129,7 @@
 };
 
 
-/**
+/*
  * H2I Messages
  */
 union bfi_iocfc_h2i_msg_u {
@@ -140,7 +140,7 @@
 };
 
 
-/**
+/*
  * I2H Messages
  */
 union bfi_iocfc_i2h_msg_u {
@@ -173,7 +173,7 @@
 };
 
 
-/**
+/*
  * Generic REQ type
  */
 struct bfi_fcport_req_s {
@@ -181,7 +181,7 @@
 	u32	   msgtag;	/*  msgtag for reply		    */
 };
 
-/**
+/*
  * Generic RSP type
  */
 struct bfi_fcport_rsp_s {
@@ -191,7 +191,7 @@
 	u32	   msgtag;	/*  msgtag for reply		    */
 };
 
-/**
+/*
  * BFI_FCPORT_H2I_ENABLE_REQ
  */
 struct bfi_fcport_enable_req_s {
@@ -205,7 +205,7 @@
 	u32	   rsvd2;
 };
 
-/**
+/*
  * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
  */
 struct bfi_fcport_set_svc_params_req_s {
@@ -214,7 +214,7 @@
 	u16	   rsvd;
 };
 
-/**
+/*
  * BFI_FCPORT_I2H_EVENT
  */
 struct bfi_fcport_event_s {
@@ -222,7 +222,7 @@
 	struct bfa_port_link_s	link_state;
 };
 
-/**
+/*
  * BFI_FCPORT_I2H_TRUNK_SCN
  */
 struct bfi_fcport_trunk_link_s {
@@ -243,7 +243,7 @@
 	struct bfi_fcport_trunk_link_s tlink[BFI_FCPORT_MAX_LINKS];
 };
 
-/**
+/*
  * fcport H2I message
  */
 union bfi_fcport_h2i_msg_u {
@@ -255,7 +255,7 @@
 	struct bfi_fcport_req_s			*pstatsclear;
 };
 
-/**
+/*
  * fcport I2H message
  */
 union bfi_fcport_i2h_msg_u {
@@ -279,7 +279,7 @@
 
 #define BFA_FCXP_MAX_SGES	2
 
-/**
+/*
  * FCXP send request structure
  */
 struct bfi_fcxp_send_req_s {
@@ -299,7 +299,7 @@
 	struct bfi_sge_s   rsp_sge[BFA_FCXP_MAX_SGES];	/*  response buf   */
 };
 
-/**
+/*
  * FCXP send response structure
  */
 struct bfi_fcxp_send_rsp_s {
@@ -565,14 +565,14 @@
 	BFI_IOIM_I2H_IOABORT_RSP = BFA_I2HM(2),	/*  ABORT rsp	 */
 };
 
-/**
+/*
  * IO command DIF info
  */
 struct bfi_ioim_dif_s {
 	u32	dif_info[4];
 };
 
-/**
+/*
  * FCP IO messages overview
  *
  * @note
@@ -587,7 +587,7 @@
 	u16	rport_hdl;	/*  itnim/rport firmware handle */
 	struct fcp_cmnd_s	cmnd;	/*  IO request info	*/
 
-	/**
+	/*
 	 * SG elements array within the IO request must be double word
 	 * aligned. This alignment is required to optimize SGM setup for the IO.
 	 */
@@ -598,7 +598,7 @@
 	struct bfi_ioim_dif_s  dif;
 };
 
-/**
+/*
  *	This table shows various IO status codes from firmware and their
  *	meaning. Host driver can use these status codes to further process
  *	IO completions.
@@ -684,7 +684,7 @@
 };
 
 #define BFI_IOIM_SNSLEN	(256)
-/**
+/*
  * I/O response message
  */
 struct bfi_ioim_rsp_s {
@@ -746,7 +746,7 @@
 	BFI_TSKIM_STS_NOT_SUPP = 4,
 	BFI_TSKIM_STS_FAILED	= 5,
 
-	/**
+	/*
 	 * Defined by BFA
 	 */
 	BFI_TSKIM_STS_TIMEOUT  = 10,	/*  TM request timedout	*/
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 99f2b8c..8c04fad 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -692,6 +692,9 @@
 		&csk->daddr.sin_addr.s_addr, ntohs(csk->daddr.sin_port),
 		atid, tid, status, csk, csk->state, csk->flags);
 
+	if (status == CPL_ERR_RTX_NEG_ADVICE)
+		goto rel_skb;
+
 	if (status && status != CPL_ERR_TCAM_FULL &&
 	    status != CPL_ERR_CONN_EXIST &&
 	    status != CPL_ERR_ARP_MISS)
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index b9bcfa4..5be3ae1 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -773,6 +773,8 @@
 	{"ENGENIO", "INF-01-00"},
 	{"STK", "FLEXLINE 380"},
 	{"SUN", "CSM100_R_FC"},
+	{"SUN", "STK6580_6780"},
+	{"SUN", "SUN_6180"},
 	{NULL, NULL},
 };
 
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 844d618..d23a538 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -117,7 +117,7 @@
 
 static void fcoe_get_lesb(struct fc_lport *, struct fc_els_lesb *);
 
-module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_AUTO, S_IWUSR);
+module_param_call(create, fcoe_create, NULL, (void *)FIP_MODE_FABRIC, S_IWUSR);
 __MODULE_PARM_TYPE(create, "string");
 MODULE_PARM_DESC(create, " Creates fcoe instance on a ethernet interface");
 module_param_call(create_vn2vn, fcoe_create, NULL,
@@ -1243,7 +1243,6 @@
 	struct fcoe_interface *fcoe;
 	struct fc_frame_header *fh;
 	struct fcoe_percpu_s *fps;
-	struct fcoe_port *port;
 	struct ethhdr *eh;
 	unsigned int cpu;
 
@@ -1262,16 +1261,7 @@
 			skb_tail_pointer(skb), skb_end_pointer(skb),
 			skb->csum, skb->dev ? skb->dev->name : "<NULL>");
 
-	/* check for mac addresses */
 	eh = eth_hdr(skb);
-	port = lport_priv(lport);
-	if (compare_ether_addr(eh->h_dest, port->data_src_addr) &&
-	    compare_ether_addr(eh->h_dest, fcoe->ctlr.ctl_src_addr) &&
-	    compare_ether_addr(eh->h_dest, (u8[6])FC_FCOE_FLOGI_MAC)) {
-		FCOE_NETDEV_DBG(netdev, "wrong destination mac address:%pM\n",
-				eh->h_dest);
-		goto err;
-	}
 
 	if (is_fip_mode(&fcoe->ctlr) &&
 	    compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) {
@@ -1291,6 +1281,12 @@
 	skb_set_transport_header(skb, sizeof(struct fcoe_hdr));
 	fh = (struct fc_frame_header *) skb_transport_header(skb);
 
+	if (ntoh24(&eh->h_dest[3]) != ntoh24(fh->fh_d_id)) {
+		FCOE_NETDEV_DBG(netdev, "FC frame d_id mismatch with MAC:%pM\n",
+				eh->h_dest);
+		goto err;
+	}
+
 	fr = fcoe_dev_from_skb(skb);
 	fr->fr_dev = lport;
 	fr->ptype = ptype;
diff --git a/drivers/scsi/fcoe/libfcoe.c b/drivers/scsi/fcoe/libfcoe.c
index aa503d8..bc17c71 100644
--- a/drivers/scsi/fcoe/libfcoe.c
+++ b/drivers/scsi/fcoe/libfcoe.c
@@ -2296,7 +2296,7 @@
 {
 	struct fip_header *fiph;
 	enum fip_vn2vn_subcode sub;
-	union {
+	struct {
 		struct fc_rport_priv rdata;
 		struct fcoe_rport frport;
 	} buf;
diff --git a/drivers/scsi/gdth.c b/drivers/scsi/gdth.c
index 5a3f931..8411018 100644
--- a/drivers/scsi/gdth.c
+++ b/drivers/scsi/gdth.c
@@ -4177,6 +4177,14 @@
     ha = gdth_find_ha(gen.ionode);
     if (!ha)
         return -EFAULT;
+
+    if (gen.data_len > INT_MAX)
+        return -EINVAL;
+    if (gen.sense_len > INT_MAX)
+        return -EINVAL;
+    if (gen.data_len + gen.sense_len > INT_MAX)
+        return -EINVAL;
+
     if (gen.data_len + gen.sense_len != 0) {
         if (!(buf = gdth_ioctl_alloc(ha, gen.data_len + gen.sense_len,
                                      FALSE, &paddr)))
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index df9a12c..fa60d7d 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -9025,6 +9025,8 @@
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C4, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index aa8bb2f..b28a00f 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -82,6 +82,7 @@
 
 #define IPR_SUBS_DEV_ID_57B4    0x033B
 #define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C4    0x0354
 #define IPR_SUBS_DEV_ID_57C6    0x0357
 #define IPR_SUBS_DEV_ID_57CC    0x035C
 
diff --git a/drivers/scsi/libfc/fc_disc.c b/drivers/scsi/libfc/fc_disc.c
index 32f67c4..911b273 100644
--- a/drivers/scsi/libfc/fc_disc.c
+++ b/drivers/scsi/libfc/fc_disc.c
@@ -684,10 +684,9 @@
 {
 	struct fc_disc *disc = &lport->disc;
 
-	if (disc) {
+	if (disc->pending)
 		cancel_delayed_work_sync(&disc->disc_work);
-		fc_disc_stop_rports(disc);
-	}
+	fc_disc_stop_rports(disc);
 }
 
 /**
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c797f6b..e340373 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -58,8 +58,7 @@
 #define FC_SRB_WRITE		(1 << 0)
 
 /*
- * The SCp.ptr should be tested and set under the host lock. NULL indicates
- * that the command has been retruned to the scsi layer.
+ * The SCp.ptr should be tested and set under the scsi_pkt_queue lock
  */
 #define CMD_SP(Cmnd)		    ((struct fc_fcp_pkt *)(Cmnd)->SCp.ptr)
 #define CMD_ENTRY_STATUS(Cmnd)	    ((Cmnd)->SCp.have_data_in)
@@ -1880,8 +1879,6 @@
 
 	lport = fsp->lp;
 	si = fc_get_scsi_internal(lport);
-	if (!fsp->cmd)
-		return;
 
 	/*
 	 * if can_queue ramp down is done then try can_queue ramp up
@@ -1891,11 +1888,6 @@
 		fc_fcp_can_queue_ramp_up(lport);
 
 	sc_cmd = fsp->cmd;
-	fsp->cmd = NULL;
-
-	if (!sc_cmd->SCp.ptr)
-		return;
-
 	CMD_SCSI_STATUS(sc_cmd) = fsp->cdb_status;
 	switch (fsp->status_code) {
 	case FC_COMPLETE:
@@ -1971,15 +1963,13 @@
 		break;
 	}
 
-	if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE) {
-		sc_cmd->result = (DID_REQUEUE << 16);
-		FC_FCP_DBG(fsp, "Returning DID_REQUEUE to scsi-ml\n");
-	}
+	if (lport->state != LPORT_ST_READY && fsp->status_code != FC_COMPLETE)
+		sc_cmd->result = (DID_TRANSPORT_DISRUPTED << 16);
 
 	spin_lock_irqsave(&si->scsi_queue_lock, flags);
 	list_del(&fsp->list);
-	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	sc_cmd->SCp.ptr = NULL;
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 	sc_cmd->scsi_done(sc_cmd);
 
 	/* release ref from initial allocation in queue command */
@@ -1997,6 +1987,7 @@
 {
 	struct fc_fcp_pkt *fsp;
 	struct fc_lport *lport;
+	struct fc_fcp_internal *si;
 	int rc = FAILED;
 	unsigned long flags;
 
@@ -2006,7 +1997,8 @@
 	else if (!lport->link_up)
 		return rc;
 
-	spin_lock_irqsave(lport->host->host_lock, flags);
+	si = fc_get_scsi_internal(lport);
+	spin_lock_irqsave(&si->scsi_queue_lock, flags);
 	fsp = CMD_SP(sc_cmd);
 	if (!fsp) {
 		/* command completed while scsi eh was setting up */
@@ -2015,7 +2007,7 @@
 	}
 	/* grab a ref so the fsp and sc_cmd cannot be released from under us */
 	fc_fcp_pkt_hold(fsp);
-	spin_unlock_irqrestore(lport->host->host_lock, flags);
+	spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
 
 	if (fc_fcp_lock_pkt(fsp)) {
 		/* completed while we were waiting for timer to be deleted */
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index d9b6e11..9be63ed 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1447,13 +1447,7 @@
 	}
 
 	did = fc_frame_did(fp);
-
-	if (!did) {
-		FC_LPORT_DBG(lport, "Bad FLOGI response\n");
-		goto out;
-	}
-
-	if (fc_frame_payload_op(fp) == ELS_LS_ACC) {
+	if (fc_frame_payload_op(fp) == ELS_LS_ACC && did) {
 		flp = fc_frame_payload_get(fp, sizeof(*flp));
 		if (flp) {
 			mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1492,8 +1486,10 @@
 				fc_lport_enter_dns(lport);
 			}
 		}
-	} else
+	} else {
+		FC_LPORT_DBG(lport, "FLOGI RJT or bad response\n");
 		fc_lport_error(lport, fp);
+	}
 
 out:
 	fc_frame_free(fp);
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index b9f2286..a84ef13 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -196,9 +196,9 @@
 void fc_set_rport_loss_tmo(struct fc_rport *rport, u32 timeout)
 {
 	if (timeout)
-		rport->dev_loss_tmo = timeout + 5;
+		rport->dev_loss_tmo = timeout;
 	else
-		rport->dev_loss_tmo = 30;
+		rport->dev_loss_tmo = 1;
 }
 EXPORT_SYMBOL(fc_set_rport_loss_tmo);
 
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index a50aa03..196de40 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -202,9 +202,12 @@
 	uint32_t elsRcvPRLO;
 	uint32_t elsRcvPRLI;
 	uint32_t elsRcvLIRR;
+	uint32_t elsRcvRLS;
 	uint32_t elsRcvRPS;
 	uint32_t elsRcvRPL;
 	uint32_t elsRcvRRQ;
+	uint32_t elsRcvRTV;
+	uint32_t elsRcvECHO;
 	uint32_t elsXmitFLOGI;
 	uint32_t elsXmitFDISC;
 	uint32_t elsXmitPLOGI;
@@ -549,9 +552,11 @@
 #define ELS_XRI_ABORT_EVENT	0x40
 #define ASYNC_EVENT		0x80
 #define LINK_DISABLED		0x100 /* Link disabled by user */
-#define FCF_DISC_INPROGRESS	0x200 /* FCF discovery in progress */
-#define HBA_FIP_SUPPORT		0x400 /* FIP support in HBA */
-#define HBA_AER_ENABLED		0x800 /* AER enabled with HBA */
+#define FCF_TS_INPROG           0x200 /* FCF table scan in progress */
+#define FCF_RR_INPROG           0x400 /* FCF roundrobin flogi in progress */
+#define HBA_FIP_SUPPORT		0x800 /* FIP support in HBA */
+#define HBA_AER_ENABLED		0x1000 /* AER enabled with HBA */
+#define HBA_DEVLOSS_TMO         0x2000 /* HBA in devloss timeout */
 	uint32_t fcp_ring_in_use; /* When polling test if intr-hndlr active*/
 	struct lpfc_dmabuf slim2p;
 
@@ -573,6 +578,7 @@
 	/* These fields used to be binfo */
 	uint32_t fc_pref_DID;	/* preferred D_ID */
 	uint8_t  fc_pref_ALPA;	/* preferred AL_PA */
+	uint32_t fc_edtovResol; /* E_D_TOV timer resolution */
 	uint32_t fc_edtov;	/* E_D_TOV timer value */
 	uint32_t fc_arbtov;	/* ARB_TOV timer value */
 	uint32_t fc_ratov;	/* R_A_TOV timer value */
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index f681eea..c1cbec0 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -3789,8 +3789,13 @@
 			break;
 		case MBX_SECURITY_MGMT:
 		case MBX_AUTH_PORT:
-			if (phba->pci_dev_grp == LPFC_PCI_DEV_OC)
+			if (phba->pci_dev_grp == LPFC_PCI_DEV_OC) {
+				printk(KERN_WARNING "mbox_read:Command 0x%x "
+				       "is not permitted\n", pmb->mbxCommand);
+				sysfs_mbox_idle(phba);
+				spin_unlock_irq(&phba->hbalock);
 				return -EPERM;
+			}
 			break;
 		case MBX_READ_SPARM64:
 		case MBX_READ_LA:
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f5d60b5..7260c3a 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -3142,12 +3142,12 @@
 	job = menlo->set_job;
 	job->dd_data = NULL; /* so timeout handler does not reply */
 
-	spin_lock_irqsave(&phba->hbalock, flags);
+	spin_lock(&phba->hbalock);
 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
 	if (cmdiocbq->context2 && rspiocbq)
 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
 		       &rspiocbq->iocb, sizeof(IOCB_t));
-	spin_unlock_irqrestore(&phba->hbalock, flags);
+	spin_unlock(&phba->hbalock);
 
 	bmp = menlo->bmp;
 	rspiocbq = menlo->rspiocbq;
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 03f4ddc..a5f5a09 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -44,6 +44,8 @@
 void lpfc_set_var(struct lpfc_hba *, LPFC_MBOXQ_t *, uint32_t, uint32_t);
 void lpfc_unreg_login(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
 void lpfc_unreg_did(struct lpfc_hba *, uint16_t, uint32_t, LPFC_MBOXQ_t *);
+void lpfc_sli4_unreg_all_rpis(struct lpfc_vport *);
+
 void lpfc_reg_vpi(struct lpfc_vport *, LPFC_MBOXQ_t *);
 void lpfc_register_new_vport(struct lpfc_hba *, struct lpfc_vport *,
 			struct lpfc_nodelist *);
@@ -229,6 +231,7 @@
 uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
 int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
 void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -271,6 +274,7 @@
 void lpfc_sli_pcimem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_bemem_bcopy(void *, void *, uint32_t);
 void lpfc_sli_abort_iocb_ring(struct lpfc_hba *, struct lpfc_sli_ring *);
+void lpfc_sli_hba_iocb_abort(struct lpfc_hba *);
 void lpfc_sli_flush_fcp_rings(struct lpfc_hba *);
 int lpfc_sli_ringpostbuf_put(struct lpfc_hba *, struct lpfc_sli_ring *,
 			     struct lpfc_dmabuf *);
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index e6ca12f..884f4d3 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -177,15 +177,18 @@
 		 (elscmd == ELS_CMD_LOGO)))
 		switch (elscmd) {
 		case ELS_CMD_FLOGI:
-		elsiocb->iocb_flag |= ((ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FLOGI << LPFC_FIP_ELS_ID_SHIFT)
 					& LPFC_FIP_ELS_ID_MASK);
 		break;
 		case ELS_CMD_FDISC:
-		elsiocb->iocb_flag |= ((ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_FDISC << LPFC_FIP_ELS_ID_SHIFT)
 					& LPFC_FIP_ELS_ID_MASK);
 		break;
 		case ELS_CMD_LOGO:
-		elsiocb->iocb_flag |= ((ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
+		elsiocb->iocb_flag |=
+			((LPFC_ELS_ID_LOGO << LPFC_FIP_ELS_ID_SHIFT)
 					& LPFC_FIP_ELS_ID_MASK);
 		break;
 		}
@@ -517,18 +520,13 @@
 	if (sp->cmn.edtovResolution)	/* E_D_TOV ticks are in nanoseconds */
 		phba->fc_edtov = (phba->fc_edtov + 999999) / 1000000;
 
+	phba->fc_edtovResol = sp->cmn.edtovResolution;
 	phba->fc_ratov = (be32_to_cpu(sp->cmn.w2.r_a_tov) + 999) / 1000;
 
 	if (phba->fc_topology == TOPOLOGY_LOOP) {
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_PUBLIC_LOOP;
 		spin_unlock_irq(shost->host_lock);
-	} else {
-		/*
-		 * If we are a N-port connected to a Fabric, fixup sparam's so
-		 * logins to devices on remote loops work.
-		 */
-		vport->fc_sparam.cmn.altBbCredit = 1;
 	}
 
 	vport->fc_myDID = irsp->un.ulpWord[4] & Mask_DID;
@@ -585,6 +583,10 @@
 			lpfc_unreg_rpi(vport, np);
 		}
 		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
 		if (phba->sli3_options & LPFC_SLI3_NPIV_ENABLED) {
 			lpfc_mbx_unreg_vpi(vport);
 			spin_lock_irq(shost->host_lock);
@@ -800,7 +802,7 @@
 
 	if (irsp->ulpStatus) {
 		/*
-		 * In case of FIP mode, perform round robin FCF failover
+		 * In case of FIP mode, perform roundrobin FCF failover
 		 * due to new FCF discovery
 		 */
 		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
@@ -808,48 +810,16 @@
 		    (irsp->ulpStatus != IOSTAT_LOCAL_REJECT) &&
 		    (irsp->un.ulpWord[4] != IOERR_SLI_ABORTED)) {
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
-					"2611 FLOGI failed on registered "
-					"FCF record fcf_index(%d), status: "
-					"x%x/x%x, tmo:x%x, trying to perform "
-					"round robin failover\n",
+					"2611 FLOGI failed on FCF (x%x), "
+					"status:x%x/x%x, tmo:x%x, perform "
+					"roundrobin FCF failover\n",
 					phba->fcf.current_rec.fcf_indx,
 					irsp->ulpStatus, irsp->un.ulpWord[4],
 					irsp->ulpTimeout);
 			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
-			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
-				/*
-				 * Exhausted the eligible FCF record list,
-				 * fail through to retry FLOGI on current
-				 * FCF record.
-				 */
-				lpfc_printf_log(phba, KERN_WARNING,
-						LOG_FIP | LOG_ELS,
-						"2760 Completed one round "
-						"of FLOGI FCF round robin "
-						"failover list, retry FLOGI "
-						"on currently registered "
-						"FCF index:%d\n",
-						phba->fcf.current_rec.fcf_indx);
-			} else {
-				lpfc_printf_log(phba, KERN_INFO,
-						LOG_FIP | LOG_ELS,
-						"2794 FLOGI FCF round robin "
-						"failover to FCF index x%x\n",
-						fcf_index);
-				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
-								   fcf_index);
-				if (rc)
-					lpfc_printf_log(phba, KERN_WARNING,
-							LOG_FIP | LOG_ELS,
-							"2761 FLOGI round "
-							"robin FCF failover "
-							"read FCF failed "
-							"rc:x%x, fcf_index:"
-							"%d\n", rc,
-						phba->fcf.current_rec.fcf_indx);
-				else
-					goto out;
-			}
+			rc = lpfc_sli4_fcf_rr_next_proc(vport, fcf_index);
+			if (rc)
+				goto out;
 		}
 
 		/* FLOGI failure */
@@ -939,6 +909,7 @@
 			lpfc_nlp_put(ndlp);
 			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
@@ -947,13 +918,12 @@
 			if (phba->hba_flag & HBA_FIP_SUPPORT)
 				lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP |
 						LOG_ELS,
-						"2769 FLOGI successful on FCF "
-						"record: current_fcf_index:"
-						"x%x, terminate FCF round "
-						"robin failover process\n",
+						"2769 FLOGI to FCF (x%x) "
+						"completed successfully\n",
 						phba->fcf.current_rec.fcf_indx);
 			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			phba->hba_flag &= ~(FCF_RR_INPROG | HBA_DEVLOSS_TMO);
 			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
@@ -1175,12 +1145,13 @@
 			return 0;
 	}
 
-	if (lpfc_issue_els_flogi(vport, ndlp, 0))
+	if (lpfc_issue_els_flogi(vport, ndlp, 0)) {
 		/* This decrement of reference count to node shall kick off
 		 * the release of the node.
 		 */
 		lpfc_nlp_put(ndlp);
-
+		return 0;
+	}
 	return 1;
 }
 
@@ -1645,6 +1616,13 @@
 	memcpy(pcmd, &vport->fc_sparam, sizeof(struct serv_parm));
 	sp = (struct serv_parm *) pcmd;
 
+	/*
+	 * If we are an N_Port connected to a Fabric, fix up the service
+	 * parameters so logins to devices on remote loops work.
+	 */
+	if ((vport->fc_flag & FC_FABRIC) && !(vport->fc_flag & FC_PUBLIC_LOOP))
+		sp->cmn.altBbCredit = 1;
+
 	if (sp->cmn.fcphLow < FC_PH_4_3)
 		sp->cmn.fcphLow = FC_PH_4_3;
 
@@ -3926,6 +3904,64 @@
 }
 
 /**
+ * lpfc_els_rsp_echo_acc - Issue echo acc response
+ * @vport: pointer to a virtual N_Port data structure.
+ * @data: pointer to echo data to return in the accept.
+ * @oldiocb: pointer to the original lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully issued acc echo response
+ *   1 - Failed to issue acc echo response
+ **/
+static int
+lpfc_els_rsp_echo_acc(struct lpfc_vport *vport, uint8_t *data,
+		      struct lpfc_iocbq *oldiocb, struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba  *phba = vport->phba;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_sli *psli;
+	uint8_t *pcmd;
+	uint16_t cmdsize;
+	int rc;
+
+	psli = &phba->sli;
+	cmdsize = oldiocb->iocb.unsli3.rcvsli3.acc_len;
+
+	elsiocb = lpfc_prep_els_iocb(vport, 0, cmdsize, oldiocb->retry, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+	if (!elsiocb)
+		return 1;
+
+	elsiocb->iocb.ulpContext = oldiocb->iocb.ulpContext;	/* Xri */
+	/* Xmit ECHO ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
+			 "2876 Xmit ECHO ACC response tag x%x xri x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext);
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t);
+	memcpy(pcmd, data, cmdsize - sizeof(uint32_t));
+
+	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_RSP,
+		"Issue ACC ECHO:  did:x%x flg:x%x",
+		ndlp->nlp_DID, ndlp->nlp_flag, 0);
+
+	phba->fc_stat.elsXmitACC++;
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	lpfc_nlp_put(ndlp);
+	elsiocb->context1 = NULL;  /* Don't need ndlp for cmpl,
+				    * it could be freed */
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0);
+	if (rc == IOCB_ERROR) {
+		lpfc_els_free_iocb(phba, elsiocb);
+		return 1;
+	}
+	return 0;
+}
+
+/**
  * lpfc_els_disc_adisc - Issue remaining adisc iocbs to npr nodes of a vport
  * @vport: pointer to a host virtual N_Port data structure.
  *
@@ -4684,6 +4720,30 @@
 }
 
 /**
+ * lpfc_els_rcv_echo - Process an unsolicited echo iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * Return code
+ *   0 - Successfully processed echo iocb (currently always return 0)
+ **/
+static int
+lpfc_els_rcv_echo(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		  struct lpfc_nodelist *ndlp)
+{
+	uint8_t *pcmd;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) cmdiocb->context2)->virt);
+
+	/* skip over first word of echo command to find echo data */
+	pcmd += sizeof(uint32_t);
+
+	lpfc_els_rsp_echo_acc(vport, pcmd, cmdiocb, ndlp);
+	return 0;
+}
+
+/**
  * lpfc_els_rcv_lirr - Process an unsolicited lirr iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
@@ -4735,6 +4795,89 @@
 }
 
 /**
+ * lpfc_els_rsp_rls_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
+ * @phba: pointer to lpfc hba data structure.
+ * @pmb: pointer to the driver internal queue element for mailbox command.
+ *
+ * This routine is the completion callback function for the MBX_READ_LNK_STAT
+ * mailbox command. This callback function is to actually send the Accept
+ * (ACC) response to a Read Link Status (RLS) unsolicited IOCB event. It
+ * collects the link statistics from the completion of the MBX_READ_LNK_STAT
+ * mailbox command, constructs the RLS response with the link statistics
+ * collected, and then invokes the lpfc_sli_issue_iocb() routine to send the
+ * ACC response to the RLS.
+ *
+ * Note that, in lpfc_prep_els_iocb() routine, the reference count of ndlp
+ * will be incremented by 1 for holding the ndlp and the reference to ndlp
+ * will be stored into the context1 field of the IOCB for the completion
+ * callback function to the RLS Accept Response ELS IOCB command.
+ *
+ **/
+static void
+lpfc_els_rsp_rls_acc(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb)
+{
+	MAILBOX_t *mb;
+	IOCB_t *icmd;
+	struct RLS_RSP *rls_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	struct lpfc_nodelist *ndlp;
+	uint16_t xri;
+	uint32_t cmdsize;
+
+	mb = &pmb->u.mb;
+
+	ndlp = (struct lpfc_nodelist *) pmb->context2;
+	xri = (uint16_t) ((unsigned long)(pmb->context1));
+	pmb->context1 = NULL;
+	pmb->context2 = NULL;
+
+	if (mb->mbxStatus) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return;
+	}
+
+	cmdsize = sizeof(struct RLS_RSP) + sizeof(uint32_t);
+	mempool_free(pmb, phba->mbox_mem_pool);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	/* Decrement the ndlp reference count from previous mbox command */
+	lpfc_nlp_put(ndlp);
+
+	if (!elsiocb)
+		return;
+
+	icmd = &elsiocb->iocb;
+	icmd->ulpContext = xri;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+	rls_rsp = (struct RLS_RSP *)pcmd;
+
+	rls_rsp->linkFailureCnt = cpu_to_be32(mb->un.varRdLnk.linkFailureCnt);
+	rls_rsp->lossSyncCnt = cpu_to_be32(mb->un.varRdLnk.lossSyncCnt);
+	rls_rsp->lossSignalCnt = cpu_to_be32(mb->un.varRdLnk.lossSignalCnt);
+	rls_rsp->primSeqErrCnt = cpu_to_be32(mb->un.varRdLnk.primSeqErrCnt);
+	rls_rsp->invalidXmitWord = cpu_to_be32(mb->un.varRdLnk.invalidXmitWord);
+	rls_rsp->crcCnt = cpu_to_be32(mb->un.varRdLnk.crcCnt);
+
+	/* Xmit ELS RLS ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2874 Xmit ELS RLS ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+}
+
+/**
  * lpfc_els_rsp_rps_acc - Completion callbk func for MBX_READ_LNK_STAT mbox cmd
  * @phba: pointer to lpfc hba data structure.
  * @pmb: pointer to the driver internal queue element for mailbox command.
@@ -4827,7 +4970,155 @@
 }
 
 /**
- * lpfc_els_rcv_rps - Process an unsolicited rps iocb
+ * lpfc_els_rcv_rls - Process an unsolicited rls iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Link Status (RLS) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE state or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it issues the MBX_READ_LNK_STAT mailbox command
+ * to read the HBA link statistics. The callback function,
+ * lpfc_els_rsp_rls_acc(), set on the MBX_READ_LNK_STAT mailbox command,
+ * actually sends out the RLS Accept (ACC) response.
+ *
+ * Return codes
+ *   0 - Successfully processed rls iocb (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_rls(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	LPFC_MBOXQ_t *mbox;
+	struct lpfc_dmabuf *pcmd;
+	struct ls_rjt stat;
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RLS request and done with it */
+		goto reject_out;
+
+	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_ATOMIC);
+	if (mbox) {
+		lpfc_read_lnk_stat(phba, mbox);
+		mbox->context1 =
+		    (void *)((unsigned long) cmdiocb->iocb.ulpContext);
+		mbox->context2 = lpfc_nlp_get(ndlp);
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_els_rsp_rls_acc;
+		if (lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT)
+			!= MBX_NOT_FINISHED)
+			/* Mbox completion will send ELS Response */
+			return 0;
+		/* Decrement reference count used for the failed mbox
+		 * command.
+		 */
+		lpfc_nlp_put(ndlp);
+		mempool_free(mbox, phba->mbox_mem_pool);
+	}
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rtv - Process an unsolicited rtv iocb
+ * @vport: pointer to a host virtual N_Port data structure.
+ * @cmdiocb: pointer to lpfc command iocb data structure.
+ * @ndlp: pointer to a node-list data structure.
+ *
+ * This routine processes a Read Timeout Value (RTV) IOCB received as an
+ * ELS unsolicited event. It first checks the remote port state. If the
+ * remote port is not in NLP_STE_UNMAPPED_NODE or NLP_STE_MAPPED_NODE
+ * state, it invokes the lpfc_els_rsp_reject() routine to send the reject
+ * response. Otherwise, it sends the Accept (ACC) response to the Read
+ * Timeout Value (RTV) unsolicited IOCB event.
+ *
+ * Note that the lpfc_prep_els_iocb() routine increments the reference count
+ * of ndlp by 1 for holding the ndlp, and that the reference to ndlp is stored
+ * into the context1 field of the IOCB for the completion callback function
+ * of the RTV Accept Response ELS IOCB command.
+ *
+ * Return codes
+ *   0 - Successfully processed rtv iocb (currently always returns 0)
+ **/
+static int
+lpfc_els_rcv_rtv(struct lpfc_vport *vport, struct lpfc_iocbq *cmdiocb,
+		 struct lpfc_nodelist *ndlp)
+{
+	struct lpfc_hba *phba = vport->phba;
+	struct ls_rjt stat;
+	struct RTV_RSP *rtv_rsp;
+	uint8_t *pcmd;
+	struct lpfc_iocbq *elsiocb;
+	uint32_t cmdsize;
+
+
+	if ((ndlp->nlp_state != NLP_STE_UNMAPPED_NODE) &&
+	    (ndlp->nlp_state != NLP_STE_MAPPED_NODE))
+		/* reject the unsolicited RTV request and done with it */
+		goto reject_out;
+
+	cmdsize = sizeof(struct RTV_RSP) + sizeof(uint32_t);
+	elsiocb = lpfc_prep_els_iocb(phba->pport, 0, cmdsize,
+				     lpfc_max_els_tries, ndlp,
+				     ndlp->nlp_DID, ELS_CMD_ACC);
+
+	if (!elsiocb)
+		return 1;
+
+	pcmd = (uint8_t *) (((struct lpfc_dmabuf *) elsiocb->context2)->virt);
+	*((uint32_t *) (pcmd)) = ELS_CMD_ACC;
+	pcmd += sizeof(uint32_t); /* Skip past command */
+
+	/* use the command's xri in the response */
+	elsiocb->iocb.ulpContext = cmdiocb->iocb.ulpContext;
+
+	rtv_rsp = (struct RTV_RSP *)pcmd;
+
+	/* populate RTV payload */
+	rtv_rsp->ratov = cpu_to_be32(phba->fc_ratov * 1000); /* report msecs */
+	rtv_rsp->edtov = cpu_to_be32(phba->fc_edtov);
+	bf_set(qtov_edtovres, rtv_rsp, phba->fc_edtovResol ? 1 : 0);
+	bf_set(qtov_rttov, rtv_rsp, 0); /* Field is for FC ONLY */
+	rtv_rsp->qtov = cpu_to_be32(rtv_rsp->qtov);
+
+	/* Xmit ELS RTV ACC response tag <ulpIoTag> */
+	lpfc_printf_vlog(ndlp->vport, KERN_INFO, LOG_ELS,
+			 "2875 Xmit ELS RTV ACC response tag x%x xri x%x, "
+			 "did x%x, nlp_flag x%x, nlp_state x%x, rpi x%x, "
+			 "Data: x%x x%x x%x\n",
+			 elsiocb->iotag, elsiocb->iocb.ulpContext,
+			 ndlp->nlp_DID, ndlp->nlp_flag, ndlp->nlp_state,
+			 ndlp->nlp_rpi,
+			rtv_rsp->ratov, rtv_rsp->edtov, rtv_rsp->qtov);
+	elsiocb->iocb_cmpl = lpfc_cmpl_els_rsp;
+	phba->fc_stat.elsXmitACC++;
+	if (lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, elsiocb, 0) == IOCB_ERROR)
+		lpfc_els_free_iocb(phba, elsiocb);
+	return 0;
+
+reject_out:
+	/* issue rejection response */
+	stat.un.b.lsRjtRsvd0 = 0;
+	stat.un.b.lsRjtRsnCode = LSRJT_UNABLE_TPC;
+	stat.un.b.lsRjtRsnCodeExp = LSEXP_CANT_GIVE_DATA;
+	stat.un.b.vendorUnique = 0;
+	lpfc_els_rsp_reject(vport, stat.un.lsRjtError, cmdiocb, ndlp, NULL);
+	return 0;
+}
+
+/**
+ * lpfc_els_rcv_rps - Process an unsolicited rps iocb
  * @vport: pointer to a host virtual N_Port data structure.
  * @cmdiocb: pointer to lpfc command iocb data structure.
  * @ndlp: pointer to a node-list data structure.
@@ -5017,7 +5308,6 @@
 	pcmd = (struct lpfc_dmabuf *) cmdiocb->context2;
 	lp = (uint32_t *) pcmd->virt;
 	rpl = (RPL *) (lp + 1);
-
 	maxsize = be32_to_cpu(rpl->maxsize);
 
 	/* We support only one port */
@@ -5836,6 +6126,16 @@
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_RLS:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RLS:         did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvRLS++;
+		lpfc_els_rcv_rls(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	case ELS_CMD_RPS:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV RPS:         did:x%x/ste:x%x flg:x%x",
@@ -5866,6 +6166,15 @@
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_RTV:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV RTV:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+		phba->fc_stat.elsRcvRTV++;
+		lpfc_els_rcv_rtv(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	case ELS_CMD_RRQ:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV RRQ:         did:x%x/ste:x%x flg:x%x",
@@ -5876,6 +6185,16 @@
 		if (newnode)
 			lpfc_nlp_put(ndlp);
 		break;
+	case ELS_CMD_ECHO:
+		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
+			"RCV ECHO:        did:x%x/ste:x%x flg:x%x",
+			did, vport->port_state, ndlp->nlp_flag);
+
+		phba->fc_stat.elsRcvECHO++;
+		lpfc_els_rcv_echo(vport, elsiocb, ndlp);
+		if (newnode)
+			lpfc_nlp_put(ndlp);
+		break;
 	default:
 		lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_ELS_UNSOL,
 			"RCV ELS cmd:     cmd:x%x did:x%x/ste:x%x",
@@ -6170,6 +6489,8 @@
 
 		default:
 			/* Try to recover from this error */
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vport);
 			lpfc_mbx_unreg_vpi(vport);
 			spin_lock_irq(shost->host_lock);
 			vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6437,6 +6758,10 @@
 			lpfc_unreg_rpi(vport, np);
 		}
 		lpfc_cleanup_pending_mbox(vport);
+
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
+
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -6452,7 +6777,7 @@
 		 * to update the MAC address.
 		 */
 		lpfc_register_new_vport(phba, vport, ndlp);
-		return ;
+		goto out;
 	}
 
 	if (vport->fc_flag & FC_VPORT_NEEDS_INIT_VPI)
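Note, as an illustration rather than part of the patch: the RLS and RTV accepts built in the lpfc_els.c hunk above follow the usual ELS ACC layout, a 32-bit ELS_CMD_ACC command word followed by the big-endian response structure (RLS_RSP or RTV_RSP). The userspace sketch below shows that packing with htonl() standing in for cpu_to_be32(); the ELS_CMD_ACC value and the counter numbers are assumptions for demonstration only.

/* Illustrative only -- not driver code. Shows the ACC + RLS_RSP packing
 * that lpfc_els_rsp_rls_acc() performs, using htonl() in place of
 * cpu_to_be32(). The ELS_CMD_ACC encoding below is an assumed value.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>

struct rls_rsp {                 /* big-endian on the wire */
	uint32_t linkFailureCnt;
	uint32_t lossSyncCnt;
	uint32_t lossSignalCnt;
	uint32_t primSeqErrCnt;
	uint32_t invalidXmitWord;
	uint32_t crcCnt;
};

#define ELS_CMD_ACC 0x02000000	/* assumed wire-order ACC command word */

int main(void)
{
	uint8_t payload[sizeof(uint32_t) + sizeof(struct rls_rsp)];
	uint32_t acc = ELS_CMD_ACC;
	struct rls_rsp rsp;

	/* word 0: ACC command, words 1-6: link error status block */
	memcpy(payload, &acc, sizeof(acc));
	rsp.linkFailureCnt  = htonl(3);   /* example counter values */
	rsp.lossSyncCnt     = htonl(1);
	rsp.lossSignalCnt   = htonl(0);
	rsp.primSeqErrCnt   = htonl(0);
	rsp.invalidXmitWord = htonl(7);
	rsp.crcCnt          = htonl(2);
	memcpy(payload + sizeof(acc), &rsp, sizeof(rsp));

	printf("RLS ACC payload is %zu bytes\n", sizeof(payload));
	return 0;
}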
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index a345dde..a5d1695 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -20,6 +20,7 @@
  *******************************************************************/
 
 #include <linux/blkdev.h>
+#include <linux/delay.h>
 #include <linux/slab.h>
 #include <linux/pci.h>
 #include <linux/kthread.h>
@@ -63,6 +64,7 @@
 static void lpfc_disc_timeout_handler(struct lpfc_vport *);
 static void lpfc_disc_flush_list(struct lpfc_vport *vport);
 static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+static int lpfc_fcf_inuse(struct lpfc_hba *);
 
 void
 lpfc_terminate_rport_io(struct fc_rport *rport)
@@ -160,11 +162,17 @@
 	return;
 }
 
-/*
- * This function is called from the worker thread when dev_loss_tmo
- * expire.
- */
-static void
+/**
+ * lpfc_dev_loss_tmo_handler - Remote node devloss timeout handler
+ * @ndlp: Pointer to remote node object.
+ *
+ * This function is called from the worker thread when the devloss timeout
+ * timer expires. For an SLI4 host, this routine shall return 1 when at least
+ * one remote node, including this @ndlp, is still in use of the FCF;
+ * otherwise, it shall return 0 when no remote node is still in use of the
+ * FCF when the devloss timeout happened to this @ndlp.
+ **/
+static int
 lpfc_dev_loss_tmo_handler(struct lpfc_nodelist *ndlp)
 {
 	struct lpfc_rport_data *rdata;
@@ -175,17 +183,21 @@
 	int  put_node;
 	int  put_rport;
 	int warn_on = 0;
+	int fcf_inuse = 0;
 
 	rport = ndlp->rport;
 
 	if (!rport)
-		return;
+		return fcf_inuse;
 
 	rdata = rport->dd_data;
 	name = (uint8_t *) &ndlp->nlp_portname;
 	vport = ndlp->vport;
 	phba  = vport->phba;
 
+	if (phba->sli_rev == LPFC_SLI_REV4)
+		fcf_inuse = lpfc_fcf_inuse(phba);
+
 	lpfc_debugfs_disc_trc(vport, LPFC_DISC_TRC_RPORT,
 		"rport devlosstmo:did:x%x type:x%x id:x%x",
 		ndlp->nlp_DID, ndlp->nlp_type, rport->scsi_target_id);
@@ -209,7 +221,7 @@
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
 			put_device(&rport->dev);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_state == NLP_STE_MAPPED_NODE) {
@@ -220,7 +232,7 @@
 				 *name, *(name+1), *(name+2), *(name+3),
 				 *(name+4), *(name+5), *(name+6), *(name+7),
 				 ndlp->nlp_DID);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_type & NLP_FABRIC) {
@@ -233,7 +245,7 @@
 			lpfc_nlp_put(ndlp);
 		if (put_rport)
 			put_device(&rport->dev);
-		return;
+		return fcf_inuse;
 	}
 
 	if (ndlp->nlp_sid != NLP_NO_SID) {
@@ -280,6 +292,74 @@
 	    (ndlp->nlp_state != NLP_STE_PRLI_ISSUE))
 		lpfc_disc_state_machine(vport, ndlp, NULL, NLP_EVT_DEVICE_RM);
 
+	return fcf_inuse;
+}
+
+/**
+ * lpfc_sli4_post_dev_loss_tmo_handler - SLI4 post devloss timeout handler
+ * @phba: Pointer to hba context object.
+ * @fcf_inuse: SLI4 FCF in-use state reported from devloss timeout handler.
+ * @nlp_did: remote node identifier with devloss timeout.
+ *
+ * This function is called from the worker thread after invoking the devloss
+ * timeout handler and releasing the reference count for the ndlp with
+ * which the devloss timeout was handled for an SLI4 host. For the devloss
+ * timeout of the last remote node which had been in use of the FCF, when
+ * this routine is invoked, it is guaranteed that none of the remote nodes
+ * are still in use of the FCF. On devloss timeout of the last remote node
+ * using the FCF, if the FIP engine is neither in the FCF table scan process
+ * nor in the roundrobin failover process, the in-use FCF shall be
+ * unregistered. If the FIP engine is in the FCF discovery process, the
+ * devloss timeout state shall be set for either the FCF table scan process
+ * or the roundrobin failover process to unregister the in-use FCF.
+ **/
+static void
+lpfc_sli4_post_dev_loss_tmo_handler(struct lpfc_hba *phba, int fcf_inuse,
+				    uint32_t nlp_did)
+{
+	/* If the devloss timeout happened to a remote node when the FCF was
+	 * no longer in use, do nothing.
+	 */
+	if (!fcf_inuse)
+		return;
+
+	if ((phba->hba_flag & HBA_FIP_SUPPORT) && !lpfc_fcf_inuse(phba)) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+				spin_unlock_irq(&phba->hbalock);
+				return;
+			}
+			phba->hba_flag |= HBA_DEVLOSS_TMO;
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2847 Last remote node (x%x) using "
+					"FCF devloss tmo\n", nlp_did);
+		}
+		if (phba->fcf.fcf_flag & FCF_REDISC_PROG) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2868 Devloss tmo to FCF rediscovery "
+					"in progress\n");
+			return;
+		}
+		if (!(phba->hba_flag & (FCF_TS_INPROG | FCF_RR_INPROG))) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2869 Devloss tmo to idle FIP engine, "
+					"unreg in-use FCF and rescan.\n");
+			/* Unregister in-use FCF and rescan */
+			lpfc_unregister_fcf_rescan(phba);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
+		if (phba->hba_flag & FCF_TS_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2870 FCF table scan in progress\n");
+		if (phba->hba_flag & FCF_RR_INPROG)
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2871 FLOGI roundrobin FCF failover "
+					"in progress\n");
+	}
 	lpfc_unregister_unused_fcf(phba);
 }
 
@@ -408,6 +488,8 @@
 	struct lpfc_work_evt  *evtp = NULL;
 	struct lpfc_nodelist  *ndlp;
 	int free_evt;
+	int fcf_inuse;
+	uint32_t nlp_did;
 
 	spin_lock_irq(&phba->hbalock);
 	while (!list_empty(&phba->work_list)) {
@@ -427,12 +509,17 @@
 			break;
 		case LPFC_EVT_DEV_LOSS:
 			ndlp = (struct lpfc_nodelist *)(evtp->evt_arg1);
-			lpfc_dev_loss_tmo_handler(ndlp);
+			fcf_inuse = lpfc_dev_loss_tmo_handler(ndlp);
 			free_evt = 0;
 			/* decrement the node reference count held for
 			 * this queued work
 			 */
+			nlp_did = ndlp->nlp_DID;
 			lpfc_nlp_put(ndlp);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_post_dev_loss_tmo_handler(phba,
+								    fcf_inuse,
+								    nlp_did);
 			break;
 		case LPFC_EVT_ONLINE:
 			if (phba->link_state < LPFC_LINK_DOWN)
@@ -707,6 +794,8 @@
 					     : NLP_EVT_DEVICE_RECOVERY);
 	}
 	if (phba->sli3_options & LPFC_SLI3_VPORT_TEARDOWN) {
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			lpfc_sli4_unreg_all_rpis(vport);
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
@@ -1021,8 +1110,7 @@
 			 "2017 REG_FCFI mbxStatus error x%x "
 			 "HBA state x%x\n",
 			 mboxq->u.mb.mbxStatus, vport->port_state);
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		return;
+		goto fail_out;
 	}
 
 	/* Start FCoE discovery by sending a FLOGI. */
@@ -1031,20 +1119,30 @@
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag |= FCF_REGISTERED;
 	spin_unlock_irq(&phba->hbalock);
+
 	/* If there is a pending FCoE event, restart FCF table scan. */
-	if (lpfc_check_pending_fcoe_event(phba, 1)) {
-		mempool_free(mboxq, phba->mbox_mem_pool);
-		return;
-	}
+	if (lpfc_check_pending_fcoe_event(phba, LPFC_UNREG_FCF))
+		goto fail_out;
+
+	/* Mark successful completion of FCF table scan */
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-	phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-	spin_unlock_irq(&phba->hbalock);
-	if (vport->port_state != LPFC_FLOGI)
+	phba->hba_flag &= ~FCF_TS_INPROG;
+	if (vport->port_state != LPFC_FLOGI) {
+		phba->hba_flag |= FCF_RR_INPROG;
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_initial_flogi(vport);
+		goto out;
+	}
+	spin_unlock_irq(&phba->hbalock);
+	goto out;
 
+fail_out:
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag &= ~FCF_RR_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+out:
 	mempool_free(mboxq, phba->mbox_mem_pool);
-	return;
 }
 
 /**
@@ -1241,10 +1339,9 @@
 	int rc;
 
 	spin_lock_irq(&phba->hbalock);
-
 	/* If the FCF is not availabe do nothing. */
 	if (!(phba->fcf.fcf_flag & FCF_AVAILABLE)) {
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -1252,19 +1349,22 @@
 	/* The FCF is already registered, start discovery */
 	if (phba->fcf.fcf_flag & FCF_REGISTERED) {
 		phba->fcf.fcf_flag |= (FCF_SCAN_DONE | FCF_IN_USE);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		spin_unlock_irq(&phba->hbalock);
-		if (phba->pport->port_state != LPFC_FLOGI)
+		phba->hba_flag &= ~FCF_TS_INPROG;
+		if (phba->pport->port_state != LPFC_FLOGI) {
+			phba->hba_flag |= FCF_RR_INPROG;
+			spin_unlock_irq(&phba->hbalock);
 			lpfc_initial_flogi(phba->pport);
+			return;
+		}
+		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
 	spin_unlock_irq(&phba->hbalock);
 
-	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool,
-		GFP_KERNEL);
+	fcf_mbxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!fcf_mbxq) {
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		return;
 	}
@@ -1275,7 +1375,7 @@
 	rc = lpfc_sli_issue_mbox(phba, fcf_mbxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED) {
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		spin_unlock_irq(&phba->hbalock);
 		mempool_free(fcf_mbxq, phba->mbox_mem_pool);
 	}
@@ -1493,7 +1593,7 @@
 	 * FCF discovery, no need to restart FCF discovery.
 	 */
 	if ((phba->link_state  >= LPFC_LINK_UP) &&
-		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
+	    (phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
 		return 0;
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
@@ -1517,14 +1617,14 @@
 		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	} else {
 		/*
-		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
+		 * Do not continue FCF discovery and clear FCF_TS_INPROG
 		 * flag
 		 */
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
 				"2833 Stop FCF discovery process due to link "
 				"state change (x%x)\n", phba->link_state);
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~(FCF_TS_INPROG | FCF_RR_INPROG);
 		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
 		spin_unlock_irq(&phba->hbalock);
 	}
@@ -1729,6 +1829,65 @@
 }
 
 /**
+ * lpfc_sli4_fcf_rr_next_proc - process the next roundrobin fcf
+ * @vport: Pointer to vport object.
+ * @fcf_index: index to next fcf.
+ *
+ * This function processes the roundrobin fcf failover to the next fcf index.
+ * When this function is invoked, there will be a current fcf registered
+ * for flogi.
+ * Return: 0 - continue retrying flogi on the currently registered fcf;
+ *         1 - stop flogi on the currently registered fcf.
+ */
+int lpfc_sli4_fcf_rr_next_proc(struct lpfc_vport *vport, uint16_t fcf_index)
+{
+	struct lpfc_hba *phba = vport->phba;
+	int rc;
+
+	if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+		spin_lock_irq(&phba->hbalock);
+		if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+					"2872 Devloss tmo with no eligible "
+					"FCF, unregister in-use FCF (x%x) "
+					"and rescan FCF table\n",
+					phba->fcf.current_rec.fcf_indx);
+			lpfc_unregister_fcf_rescan(phba);
+			goto stop_flogi_current_fcf;
+		}
+		/* Mark the end to FLOGI roundrobin failover */
+		phba->hba_flag &= ~FCF_RR_INPROG;
+		/* Allow action to new fcf asynchronous event */
+		phba->fcf.fcf_flag &= ~(FCF_AVAILABLE | FCF_SCAN_DONE);
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2865 No FCF available, stop roundrobin FCF "
+				"failover and change port state:x%x/x%x\n",
+				phba->pport->port_state, LPFC_VPORT_UNKNOWN);
+		phba->pport->port_state = LPFC_VPORT_UNKNOWN;
+		goto stop_flogi_current_fcf;
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_ELS,
+				"2794 Try FLOGI roundrobin FCF failover to "
+				"(x%x)\n", fcf_index);
+		rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba, fcf_index);
+		if (rc)
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2761 FLOGI roundrobin FCF failover "
+					"failed (rc:x%x) to read FCF (x%x)\n",
+					rc, phba->fcf.current_rec.fcf_indx);
+		else
+			goto stop_flogi_current_fcf;
+	}
+	return 0;
+
+stop_flogi_current_fcf:
+	lpfc_can_disctmo(vport);
+	return 1;
+}
+
+/**
  * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
@@ -1756,7 +1915,7 @@
 	int rc;
 
 	/* If there is pending FCoE event restart FCF table scan */
-	if (lpfc_check_pending_fcoe_event(phba, 0)) {
+	if (lpfc_check_pending_fcoe_event(phba, LPFC_SKIP_UNREG_FCF)) {
 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		return;
 	}
@@ -1765,12 +1924,12 @@
 	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
 						      &next_fcf_index);
 	if (!new_fcf_record) {
-		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2765 Mailbox command READ_FCF_RECORD "
 				"failed to retrieve a FCF record.\n");
 		/* Let next new FCF event trigger fast failover */
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~FCF_TS_INPROG;
 		spin_unlock_irq(&phba->hbalock);
 		lpfc_sli4_mbox_cmd_free(phba, mboxq);
 		return;
@@ -1787,13 +1946,12 @@
 	/*
 	 * If the fcf record does not match with connect list entries
 	 * read the next entry; otherwise, this is an eligible FCF
-	 * record for round robin FCF failover.
+	 * record for roundrobin FCF failover.
 	 */
 	if (!rc) {
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-				"2781 FCF record (x%x) failed FCF "
-				"connection list check, fcf_avail:x%x, "
-				"fcf_valid:x%x\n",
+				"2781 FCF (x%x) failed connection "
+				"list check: (x%x/x%x)\n",
 				bf_get(lpfc_fcf_record_fcf_index,
 				       new_fcf_record),
 				bf_get(lpfc_fcf_record_fcf_avail,
@@ -1803,6 +1961,16 @@
 		if ((phba->fcf.fcf_flag & FCF_IN_USE) &&
 		    lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
 		    new_fcf_record, LPFC_FCOE_IGNORE_VID)) {
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) !=
+			    phba->fcf.current_rec.fcf_indx) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2862 FCF (x%x) matches property "
+					"of in-use FCF (x%x)\n",
+					bf_get(lpfc_fcf_record_fcf_index,
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
+				goto read_next_fcf;
+			}
 			/*
 			 * In case the current in-use FCF record becomes
 			 * invalid/unavailable during FCF discovery that
@@ -1813,9 +1981,8 @@
 			    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 						"2835 Invalid in-use FCF "
-						"record (x%x) reported, "
-						"entering fast FCF failover "
-						"mode scanning.\n",
+						"(x%x), enter FCF failover "
+						"table scan.\n",
 						phba->fcf.current_rec.fcf_indx);
 				spin_lock_irq(&phba->hbalock);
 				phba->fcf.fcf_flag |= FCF_REDISC_FOV;
@@ -1844,22 +2011,29 @@
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
 		if (lpfc_sli4_fcf_record_match(phba, &phba->fcf.current_rec,
 		    new_fcf_record, vlan_id)) {
-			phba->fcf.fcf_flag |= FCF_AVAILABLE;
-			if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
-				/* Stop FCF redisc wait timer if pending */
-				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
-			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
-				/* If in fast failover, mark it's completed */
-				phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-			spin_unlock_irq(&phba->hbalock);
-			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-					"2836 The new FCF record (x%x) "
-					"matches the in-use FCF record "
-					"(x%x)\n",
-					phba->fcf.current_rec.fcf_indx,
+			if (bf_get(lpfc_fcf_record_fcf_index, new_fcf_record) ==
+			    phba->fcf.current_rec.fcf_indx) {
+				phba->fcf.fcf_flag |= FCF_AVAILABLE;
+				if (phba->fcf.fcf_flag & FCF_REDISC_PEND)
+					/* Stop FCF redisc wait timer */
+					__lpfc_sli4_stop_fcf_redisc_wait_timer(
+									phba);
+				else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+					/* Fast failover, mark completed */
+					phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+				spin_unlock_irq(&phba->hbalock);
+				lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+						"2836 New FCF matches in-use "
+						"FCF (x%x)\n",
+						phba->fcf.current_rec.fcf_indx);
+				goto out;
+			} else
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+					"2863 New FCF (x%x) matches "
+					"property of in-use FCF (x%x)\n",
 					bf_get(lpfc_fcf_record_fcf_index,
-					       new_fcf_record));
-			goto out;
+					       new_fcf_record),
+					phba->fcf.current_rec.fcf_indx);
 		}
 		/*
 		 * Read next FCF record from HBA searching for the matching
@@ -1953,8 +2127,8 @@
 	 */
 	if (fcf_rec) {
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-				"2840 Update current FCF record "
-				"with initial FCF record (x%x)\n",
+				"2840 Update initial FCF candidate "
+				"with FCF (x%x)\n",
 				bf_get(lpfc_fcf_record_fcf_index,
 				       new_fcf_record));
 		__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
@@ -1984,20 +2158,28 @@
 			 */
 			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
 				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-					       "2782 No suitable FCF record "
-					       "found during this round of "
-					       "post FCF rediscovery scan: "
-					       "fcf_evt_tag:x%x, fcf_index: "
-					       "x%x\n",
+					       "2782 No suitable FCF found: "
+					       "(x%x/x%x)\n",
 					       phba->fcoe_eventtag_at_fcf_scan,
 					       bf_get(lpfc_fcf_record_fcf_index,
 						      new_fcf_record));
-				/*
-				 * Let next new FCF event trigger fast
-				 * failover
-				 */
 				spin_lock_irq(&phba->hbalock);
-				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+				if (phba->hba_flag & HBA_DEVLOSS_TMO) {
+					phba->hba_flag &= ~FCF_TS_INPROG;
+					spin_unlock_irq(&phba->hbalock);
+					/* Unregister in-use FCF and rescan */
+					lpfc_printf_log(phba, KERN_INFO,
+							LOG_FIP,
+							"2864 On devloss tmo "
+							"unreg in-use FCF and "
+							"rescan FCF table\n");
+					lpfc_unregister_fcf_rescan(phba);
+					return;
+				}
+				/*
+				 * Let next new FCF event trigger fast failover
+				 */
+				phba->hba_flag &= ~FCF_TS_INPROG;
 				spin_unlock_irq(&phba->hbalock);
 				return;
 			}
@@ -2015,9 +2197,8 @@
 
 			/* Replace in-use record with the new record */
 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-					"2842 Replace the current in-use "
-					"FCF record (x%x) with failover FCF "
-					"record (x%x)\n",
+					"2842 Replace in-use FCF (x%x) "
+					"with failover FCF (x%x)\n",
 					phba->fcf.current_rec.fcf_indx,
 					phba->fcf.failover_rec.fcf_indx);
 			memcpy(&phba->fcf.current_rec,
@@ -2029,15 +2210,8 @@
 			 * FCF failover.
 			 */
 			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &=
-					~(FCF_REDISC_FOV | FCF_REDISC_RRU);
+			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
 			spin_unlock_irq(&phba->hbalock);
-			/*
-			 * Set up the initial registered FCF index for FLOGI
-			 * round robin FCF failover.
-			 */
-			phba->fcf.fcf_rr_init_indx =
-					phba->fcf.failover_rec.fcf_indx;
 			/* Register to the new FCF record */
 			lpfc_register_fcf(phba);
 		} else {
@@ -2069,28 +2243,6 @@
 						LPFC_FCOE_FCF_GET_FIRST);
 				return;
 			}
-
-			/*
-			 * Otherwise, initial scan or post linkdown rescan,
-			 * register with the best FCF record found so far
-			 * through the FCF scanning process.
-			 */
-
-			/*
-			 * Mark the initial FCF discovery completed and
-			 * the start of the first round of the roundrobin
-			 * FCF failover.
-			 */
-			spin_lock_irq(&phba->hbalock);
-			phba->fcf.fcf_flag &=
-					~(FCF_INIT_DISC | FCF_REDISC_RRU);
-			spin_unlock_irq(&phba->hbalock);
-			/*
-			 * Set up the initial registered FCF index for FLOGI
-			 * round robin FCF failover
-			 */
-			phba->fcf.fcf_rr_init_indx =
-					phba->fcf.current_rec.fcf_indx;
 			/* Register to the new FCF record */
 			lpfc_register_fcf(phba);
 		}
@@ -2106,11 +2258,11 @@
 }
 
 /**
- * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf roundrobin read_fcf mbox cmpl hdler
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
  *
- * This is the callback function for FLOGI failure round robin FCF failover
+ * This is the callback function for FLOGI failure roundrobin FCF failover
  * read FCF record mailbox command from the eligible FCF record bmask for
  * performing the failover. If the FCF read back is not valid/available, it
  * fails through to retrying FLOGI to the currently registered FCF again.
@@ -2125,17 +2277,18 @@
 {
 	struct fcf_record *new_fcf_record;
 	uint32_t boot_flag, addr_mode;
-	uint16_t next_fcf_index;
+	uint16_t next_fcf_index, fcf_index;
 	uint16_t current_fcf_index;
 	uint16_t vlan_id;
+	int rc;
 
-	/* If link state is not up, stop the round robin failover process */
+	/* If link state is not up, stop the roundrobin failover process */
 	if (phba->link_state < LPFC_LINK_UP) {
 		spin_lock_irq(&phba->hbalock);
 		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+		phba->hba_flag &= ~FCF_RR_INPROG;
 		spin_unlock_irq(&phba->hbalock);
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return;
+		goto out;
 	}
 
 	/* Parse the FCF record from the non-embedded mailbox command */
@@ -2145,23 +2298,47 @@
 		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
 				"2766 Mailbox command READ_FCF_RECORD "
 				"failed to retrieve a FCF record.\n");
-		goto out;
+		goto error_out;
 	}
 
 	/* Get the needed parameters from FCF record */
-	lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
-				 &addr_mode, &vlan_id);
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
 
 	/* Log the FCF record information if turned on */
 	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
 				      next_fcf_index);
 
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2848 Remove ineligible FCF (x%x) from "
+				"roundrobin bmask\n", fcf_index);
+		/* Clear roundrobin bmask bit for ineligible FCF */
+		lpfc_sli4_fcf_rr_index_clear(phba, fcf_index);
+		/* Perform next round of roundrobin FCF failover */
+		fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+		rc = lpfc_sli4_fcf_rr_next_proc(phba->pport, fcf_index);
+		if (rc)
+			goto out;
+		goto error_out;
+	}
+
+	if (fcf_index == phba->fcf.current_rec.fcf_indx) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2760 Perform FLOGI roundrobin FCF failover: "
+				"FCF (x%x) back to FCF (x%x)\n",
+				phba->fcf.current_rec.fcf_indx, fcf_index);
+		/* Wait 500 ms before retrying FLOGI to current FCF */
+		msleep(500);
+		lpfc_initial_flogi(phba->pport);
+		goto out;
+	}
+
 	/* Upload new FCF record to the failover FCF record */
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2834 Update the current FCF record (x%x) "
-			"with the next FCF record (x%x)\n",
-			phba->fcf.failover_rec.fcf_indx,
-			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+			"2834 Update current FCF (x%x) with new FCF (x%x)\n",
+			phba->fcf.failover_rec.fcf_indx, fcf_index);
 	spin_lock_irq(&phba->hbalock);
 	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
 				 new_fcf_record, addr_mode, vlan_id,
@@ -2178,14 +2355,13 @@
 	       sizeof(struct lpfc_fcf_rec));
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2783 FLOGI round robin FCF failover from FCF "
-			"(x%x) to FCF (x%x).\n",
-			current_fcf_index,
-			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+			"2783 Perform FLOGI roundrobin FCF failover: FCF "
+			"(x%x) to FCF (x%x)\n", current_fcf_index, fcf_index);
 
+error_out:
+	lpfc_register_fcf(phba);
 out:
 	lpfc_sli4_mbox_cmd_free(phba, mboxq);
-	lpfc_register_fcf(phba);
 }
 
 /**
@@ -2194,10 +2370,10 @@
  * @mboxq: pointer to mailbox object.
  *
  * This is the callback function of read FCF record mailbox command for
- * updating the eligible FCF bmask for FLOGI failure round robin FCF
+ * updating the eligible FCF bmask for FLOGI failure roundrobin FCF
  * failover when a new FCF event happened. If the FCF read back is
  * valid/available and it passes the connection list check, it updates
- * the bmask for the eligible FCF record for round robin failover.
+ * the bmask for the eligible FCF record for roundrobin failover.
  */
 void
 lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
@@ -2639,7 +2815,7 @@
 		 * and get the FCF Table.
 		 */
 		spin_lock_irq(&phba->hbalock);
-		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+		if (phba->hba_flag & FCF_TS_INPROG) {
 			spin_unlock_irq(&phba->hbalock);
 			return;
 		}
@@ -3906,6 +4082,11 @@
 	LPFC_MBOXQ_t     *mbox;
 	int rc;
 
+	if (phba->sli_rev == LPFC_SLI_REV4) {
+		lpfc_sli4_unreg_all_rpis(vport);
+		return;
+	}
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (mbox) {
 		lpfc_unreg_login(phba, vport->vpi, 0xffff, mbox);
@@ -3992,6 +4173,16 @@
 	}
 
 	spin_lock_irq(&phba->hbalock);
+	/* Cleanup REG_LOGIN completions which are not yet processed */
+	list_for_each_entry(mb, &phba->sli.mboxq_cmpl, list) {
+		if ((mb->u.mb.mbxCommand != MBX_REG_LOGIN64) ||
+			(ndlp != (struct lpfc_nodelist *) mb->context2))
+			continue;
+
+		mb->context2 = NULL;
+		mb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+	}
+
 	list_for_each_entry_safe(mb, nextmb, &phba->sli.mboxq, list) {
 		if ((mb->u.mb.mbxCommand == MBX_REG_LOGIN64) &&
 		    (ndlp == (struct lpfc_nodelist *) mb->context2)) {
@@ -5170,6 +5361,8 @@
 			if (ndlp)
 				lpfc_cancel_retry_delay_tmo(vports[i], ndlp);
 			lpfc_cleanup_pending_mbox(vports[i]);
+			if (phba->sli_rev == LPFC_SLI_REV4)
+				lpfc_sli4_unreg_all_rpis(vports[i]);
 			lpfc_mbx_unreg_vpi(vports[i]);
 			shost = lpfc_shost_from_vport(vports[i]);
 			spin_lock_irq(shost->host_lock);
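Note, as an illustration rather than part of the patch: the roundrobin failover path above relies on lpfc_sli4_fcf_rr_next_index_get() and lpfc_sli4_fcf_rr_index_clear(), which are not in this hunk. The sketch below shows the general technique those names suggest, a wrap-around scan of an eligibility bitmap starting after the last used index, returning a sentinel when nothing is left; the table size, sentinel value, and helper names here are hypothetical, and the driver's actual bookkeeping may differ.

/* Illustrative only -- a generic wrap-around roundrobin pick over a bitmap,
 * in the spirit of the FCF roundrobin bmask used above. Not the lpfc code.
 */
#include <stdint.h>
#include <stdio.h>

#define FCF_TBL_MAX   64
#define FCF_NEXT_NONE 0xFFFF          /* sentinel: no eligible entry left */

static uint64_t rr_bmask;             /* bit i set => index i is eligible */

static uint16_t rr_next_index(uint16_t last)
{
	/* scan (last, MAX) and then wrap around back to last */
	for (unsigned int n = 1; n <= FCF_TBL_MAX; n++) {
		uint16_t idx = (uint16_t)((last + n) % FCF_TBL_MAX);
		if (rr_bmask & (1ULL << idx))
			return idx;
	}
	return FCF_NEXT_NONE;
}

int main(void)
{
	rr_bmask = (1ULL << 3) | (1ULL << 10) | (1ULL << 42);
	uint16_t idx = 10;               /* pretend index 10 is in use */

	idx = rr_next_index(idx);        /* picks 42 */
	printf("next FCF index: %u\n", idx);
	rr_bmask &= ~(1ULL << idx);      /* analogous to clearing the bmask bit */
	idx = rr_next_index(idx);        /* wraps around and picks 3 */
	printf("next FCF index: %u\n", idx);
	return 0;
}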
diff --git a/drivers/scsi/lpfc/lpfc_hw.h b/drivers/scsi/lpfc/lpfc_hw.h
index a631647..9b83334 100644
--- a/drivers/scsi/lpfc/lpfc_hw.h
+++ b/drivers/scsi/lpfc/lpfc_hw.h
@@ -861,6 +861,47 @@
 	uint32_t crcCnt;
 } RPS_RSP;
 
+struct RLS {			/* Structure is in Big Endian format */
+	uint32_t rls;
+#define rls_rsvd_SHIFT		24
+#define rls_rsvd_MASK		0x000000ff
+#define rls_rsvd_WORD		rls
+#define rls_did_SHIFT		0
+#define rls_did_MASK		0x00ffffff
+#define rls_did_WORD		rls
+};
+
+struct  RLS_RSP {		/* Structure is in Big Endian format */
+	uint32_t linkFailureCnt;
+	uint32_t lossSyncCnt;
+	uint32_t lossSignalCnt;
+	uint32_t primSeqErrCnt;
+	uint32_t invalidXmitWord;
+	uint32_t crcCnt;
+};
+
+struct RTV_RSP {		/* Structure is in Big Endian format */
+	uint32_t ratov;
+	uint32_t edtov;
+	uint32_t qtov;
+#define qtov_rsvd0_SHIFT	28
+#define qtov_rsvd0_MASK		0x0000000f
+#define qtov_rsvd0_WORD		qtov		/* reserved */
+#define qtov_edtovres_SHIFT	27
+#define qtov_edtovres_MASK	0x00000001
+#define qtov_edtovres_WORD	qtov		/* E_D_TOV Resolution */
+#define qtov_rsvd1_SHIFT	19
+#define qtov_rsvd1_MASK		0x0000003f
+#define qtov_rsvd1_WORD		qtov		/* reserved */
+#define qtov_rttov_SHIFT	18
+#define qtov_rttov_MASK		0x00000001
+#define qtov_rttov_WORD		qtov		/* R_T_TOV value */
+#define qtov_rsvd2_SHIFT	0
+#define qtov_rsvd2_MASK		0x0003ffff
+#define qtov_rsvd2_WORD		qtov		/* reserved */
+};
+
+
 typedef struct  _RPL {		/* Structure is in Big Endian format */
 	uint32_t maxsize;
 	uint32_t index;
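Note, as an illustration rather than part of the patch: the qtov_*_SHIFT/_MASK/_WORD triplets in struct RTV_RSP above are consumed by the driver's bf_set()/bf_get() helpers, which are defined elsewhere (in lpfc_hw4.h) and are not shown in this hunk. The sketch below uses a plausible reconstruction of those macros to show how a field such as qtov_edtovres is packed into the qtov word; the macro bodies are an assumption for demonstration, not a verbatim copy.

/* Illustrative only. Reconstructed bf_set()/bf_get() consuming the
 * name##_SHIFT/_MASK/_WORD macro convention used by struct RTV_RSP.
 */
#include <stdint.h>
#include <stdio.h>

#define bf_set(name, ptr, value) \
	((ptr)->name##_WORD = ((((value) & name##_MASK) << name##_SHIFT) | \
			       ((ptr)->name##_WORD & ~(name##_MASK << name##_SHIFT))))
#define bf_get(name, ptr) \
	(((ptr)->name##_WORD >> name##_SHIFT) & name##_MASK)

struct rtv_rsp {
	uint32_t qtov;
#define qtov_edtovres_SHIFT	27
#define qtov_edtovres_MASK	0x00000001
#define qtov_edtovres_WORD	qtov
#define qtov_rttov_SHIFT	18
#define qtov_rttov_MASK		0x00000001
#define qtov_rttov_WORD		qtov
};

int main(void)
{
	struct rtv_rsp rsp = { .qtov = 0 };

	bf_set(qtov_edtovres, &rsp, 1);	/* E_D_TOV resolution flag */
	bf_set(qtov_rttov, &rsp, 0);	/* R_T_TOV value not reported */
	printf("qtov word: 0x%08x, edtovres=%u\n",
	       rsp.qtov, bf_get(qtov_edtovres, &rsp));
	return 0;
}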
diff --git a/drivers/scsi/lpfc/lpfc_hw4.h b/drivers/scsi/lpfc/lpfc_hw4.h
index bbdcf96..6e4bc34 100644
--- a/drivers/scsi/lpfc/lpfc_hw4.h
+++ b/drivers/scsi/lpfc/lpfc_hw4.h
@@ -424,79 +424,6 @@
 #define FCOE_SOFn3	0x36
 };
 
-struct lpfc_wqe_generic{
-	struct ulp_bde64 bde;
-	uint32_t word3;
-	uint32_t word4;
-	uint32_t word5;
-	uint32_t word6;
-#define lpfc_wqe_gen_context_SHIFT	16
-#define lpfc_wqe_gen_context_MASK	0x0000FFFF
-#define lpfc_wqe_gen_context_WORD	word6
-#define lpfc_wqe_gen_xri_SHIFT		0
-#define lpfc_wqe_gen_xri_MASK		0x0000FFFF
-#define lpfc_wqe_gen_xri_WORD		word6
-	uint32_t word7;
-#define lpfc_wqe_gen_lnk_SHIFT		23
-#define lpfc_wqe_gen_lnk_MASK		0x00000001
-#define lpfc_wqe_gen_lnk_WORD		word7
-#define lpfc_wqe_gen_erp_SHIFT		22
-#define lpfc_wqe_gen_erp_MASK		0x00000001
-#define lpfc_wqe_gen_erp_WORD		word7
-#define lpfc_wqe_gen_pu_SHIFT		20
-#define lpfc_wqe_gen_pu_MASK		0x00000003
-#define lpfc_wqe_gen_pu_WORD		word7
-#define lpfc_wqe_gen_class_SHIFT	16
-#define lpfc_wqe_gen_class_MASK		0x00000007
-#define lpfc_wqe_gen_class_WORD		word7
-#define lpfc_wqe_gen_command_SHIFT	8
-#define lpfc_wqe_gen_command_MASK	0x000000FF
-#define lpfc_wqe_gen_command_WORD	word7
-#define lpfc_wqe_gen_status_SHIFT	4
-#define lpfc_wqe_gen_status_MASK	0x0000000F
-#define lpfc_wqe_gen_status_WORD	word7
-#define lpfc_wqe_gen_ct_SHIFT		2
-#define lpfc_wqe_gen_ct_MASK		0x00000003
-#define lpfc_wqe_gen_ct_WORD		word7
-	uint32_t abort_tag;
-	uint32_t word9;
-#define lpfc_wqe_gen_request_tag_SHIFT	0
-#define lpfc_wqe_gen_request_tag_MASK	0x0000FFFF
-#define lpfc_wqe_gen_request_tag_WORD	word9
-	uint32_t word10;
-#define lpfc_wqe_gen_ccp_SHIFT		24
-#define lpfc_wqe_gen_ccp_MASK		0x000000FF
-#define lpfc_wqe_gen_ccp_WORD		word10
-#define lpfc_wqe_gen_ccpe_SHIFT		23
-#define lpfc_wqe_gen_ccpe_MASK		0x00000001
-#define lpfc_wqe_gen_ccpe_WORD		word10
-#define lpfc_wqe_gen_pv_SHIFT		19
-#define lpfc_wqe_gen_pv_MASK		0x00000001
-#define lpfc_wqe_gen_pv_WORD		word10
-#define lpfc_wqe_gen_pri_SHIFT		16
-#define lpfc_wqe_gen_pri_MASK		0x00000007
-#define lpfc_wqe_gen_pri_WORD		word10
-	uint32_t word11;
-#define lpfc_wqe_gen_cq_id_SHIFT	16
-#define lpfc_wqe_gen_cq_id_MASK		0x0000FFFF
-#define lpfc_wqe_gen_cq_id_WORD		word11
-#define LPFC_WQE_CQ_ID_DEFAULT	0xffff
-#define lpfc_wqe_gen_wqec_SHIFT		7
-#define lpfc_wqe_gen_wqec_MASK		0x00000001
-#define lpfc_wqe_gen_wqec_WORD		word11
-#define ELS_ID_FLOGI 3
-#define ELS_ID_FDISC 2
-#define ELS_ID_LOGO  1
-#define ELS_ID_DEFAULT 0
-#define lpfc_wqe_gen_els_id_SHIFT	4
-#define lpfc_wqe_gen_els_id_MASK	0x00000003
-#define lpfc_wqe_gen_els_id_WORD	word11
-#define lpfc_wqe_gen_cmd_type_SHIFT	0
-#define lpfc_wqe_gen_cmd_type_MASK	0x0000000F
-#define lpfc_wqe_gen_cmd_type_WORD	word11
-	uint32_t payload[4];
-};
-
 struct lpfc_rqe {
 	uint32_t address_hi;
 	uint32_t address_lo;
@@ -2279,9 +2206,36 @@
 #define wqe_reqtag_MASK       0x0000FFFF
 #define wqe_reqtag_WORD       word9
 #define wqe_rcvoxid_SHIFT     16
-#define wqe_rcvoxid_MASK       0x0000FFFF
-#define wqe_rcvoxid_WORD       word9
+#define wqe_rcvoxid_MASK      0x0000FFFF
+#define wqe_rcvoxid_WORD      word9
 	uint32_t word10;
+#define wqe_ebde_cnt_SHIFT    0
+#define wqe_ebde_cnt_MASK     0x00000007
+#define wqe_ebde_cnt_WORD     word10
+#define wqe_lenloc_SHIFT      7
+#define wqe_lenloc_MASK       0x00000003
+#define wqe_lenloc_WORD       word10
+#define LPFC_WQE_LENLOC_NONE		0
+#define LPFC_WQE_LENLOC_WORD3	1
+#define LPFC_WQE_LENLOC_WORD12	2
+#define LPFC_WQE_LENLOC_WORD4	3
+#define wqe_qosd_SHIFT        9
+#define wqe_qosd_MASK         0x00000001
+#define wqe_qosd_WORD         word10
+#define wqe_xbl_SHIFT         11
+#define wqe_xbl_MASK          0x00000001
+#define wqe_xbl_WORD          word10
+#define wqe_iod_SHIFT         13
+#define wqe_iod_MASK          0x00000001
+#define wqe_iod_WORD          word10
+#define LPFC_WQE_IOD_WRITE	0
+#define LPFC_WQE_IOD_READ	1
+#define wqe_dbde_SHIFT        14
+#define wqe_dbde_MASK         0x00000001
+#define wqe_dbde_WORD         word10
+#define wqe_wqes_SHIFT        15
+#define wqe_wqes_MASK         0x00000001
+#define wqe_wqes_WORD         word10
 #define wqe_pri_SHIFT         16
 #define wqe_pri_MASK          0x00000007
 #define wqe_pri_WORD          word10
@@ -2295,18 +2249,26 @@
 #define wqe_ccpe_MASK         0x00000001
 #define wqe_ccpe_WORD         word10
 #define wqe_ccp_SHIFT         24
-#define wqe_ccp_MASK         0x000000ff
-#define wqe_ccp_WORD         word10
+#define wqe_ccp_MASK          0x000000ff
+#define wqe_ccp_WORD          word10
 	uint32_t word11;
-#define wqe_cmd_type_SHIFT  0
-#define wqe_cmd_type_MASK   0x0000000f
-#define wqe_cmd_type_WORD   word11
-#define wqe_wqec_SHIFT      7
-#define wqe_wqec_MASK       0x00000001
-#define wqe_wqec_WORD       word11
-#define wqe_cqid_SHIFT      16
-#define wqe_cqid_MASK       0x0000ffff
-#define wqe_cqid_WORD       word11
+#define wqe_cmd_type_SHIFT    0
+#define wqe_cmd_type_MASK     0x0000000f
+#define wqe_cmd_type_WORD     word11
+#define wqe_els_id_SHIFT      4
+#define wqe_els_id_MASK       0x00000003
+#define wqe_els_id_WORD       word11
+#define LPFC_ELS_ID_FLOGI	3
+#define LPFC_ELS_ID_FDISC	2
+#define LPFC_ELS_ID_LOGO	1
+#define LPFC_ELS_ID_DEFAULT	0
+#define wqe_wqec_SHIFT        7
+#define wqe_wqec_MASK         0x00000001
+#define wqe_wqec_WORD         word11
+#define wqe_cqid_SHIFT        16
+#define wqe_cqid_MASK         0x0000ffff
+#define wqe_cqid_WORD         word11
+#define LPFC_WQE_CQ_ID_DEFAULT	0xffff
 };
 
 struct wqe_did {
@@ -2325,6 +2287,15 @@
 #define wqe_xmit_bls_xo_WORD          word5
 };
 
+struct lpfc_wqe_generic{
+	struct ulp_bde64 bde;
+	uint32_t word3;
+	uint32_t word4;
+	uint32_t word5;
+	struct wqe_common wqe_com;
+	uint32_t payload[4];
+};
+
 struct els_request64_wqe {
 	struct ulp_bde64 bde;
 	uint32_t payload_len;
@@ -2356,9 +2327,9 @@
 
 struct xmit_els_rsp64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t rsvd3;
+	uint32_t response_payload_len;
 	uint32_t rsvd4;
-	struct wqe_did	wqe_dest;
+	struct wqe_did wqe_dest;
 	struct wqe_common wqe_com; /* words 6-11 */
 	uint32_t rsvd_12_15[4];
 };
@@ -2427,7 +2398,7 @@
 
 struct xmit_seq64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t paylaod_offset;
+	uint32_t rsvd3;
 	uint32_t relative_offset;
 	struct wqe_rctl_dfctl wge_ctl;
 	struct wqe_common wqe_com; /* words 6-11 */
@@ -2437,7 +2408,7 @@
 };
 struct xmit_bcast64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t paylaod_len;
+	uint32_t seq_payload_len;
 	uint32_t rsvd4;
 	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
@@ -2446,8 +2417,8 @@
 
 struct gen_req64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t command_len;
-	uint32_t payload_len;
+	uint32_t request_payload_len;
+	uint32_t relative_offset;
 	struct wqe_rctl_dfctl wge_ctl; /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
 	uint32_t rsvd_12_15[4];
@@ -2480,7 +2451,7 @@
 
 struct fcp_iwrite64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t payload_len;
+	uint32_t payload_offset_len;
 	uint32_t total_xfer_len;
 	uint32_t initial_xfer_len;
 	struct wqe_common wqe_com;     /* words 6-11 */
@@ -2489,7 +2460,7 @@
 
 struct fcp_iread64_wqe {
 	struct ulp_bde64 bde;
-	uint32_t payload_len;          /* word 3 */
+	uint32_t payload_offset_len;   /* word 3 */
 	uint32_t total_xfer_len;       /* word 4 */
 	uint32_t rsrvd5;               /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
@@ -2497,10 +2468,12 @@
 };
 
 struct fcp_icmnd64_wqe {
-	struct ulp_bde64 bde;	 /* words 0-2 */
-	uint32_t rsrvd[3];             /* words 3-5 */
+	struct ulp_bde64 bde;          /* words 0-2 */
+	uint32_t rsrvd3;               /* word 3 */
+	uint32_t rsrvd4;               /* word 4 */
+	uint32_t rsrvd5;               /* word 5 */
 	struct wqe_common wqe_com;     /* words 6-11 */
-	uint32_t rsvd_12_15[4];         /* word 12-15 */
+	uint32_t rsvd_12_15[4];        /* word 12-15 */
 };
 
 
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index 295c7dd..b306579 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -813,6 +813,7 @@
 
 	return 0;
 }
+
 /**
  * lpfc_hba_down_post_s4 - Perform lpfc uninitialization after HBA reset
  * @phba: pointer to lpfc HBA data structure.
@@ -2234,10 +2235,9 @@
 void
 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
-	/* Clear pending FCF rediscovery wait and failover in progress flags */
-	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
-				FCF_DEAD_DISC |
-				FCF_ACVL_DISC);
+	/* Clear pending FCF rediscovery wait flag */
+	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+
 	/* Now, try to stop the timer */
 	del_timer(&phba->fcf.redisc_wait);
 }
@@ -2261,6 +2261,8 @@
 		return;
 	}
 	__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
+	/* Clear failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_DEAD_DISC | FCF_ACVL_DISC);
 	spin_unlock_irq(&phba->hbalock);
 }
 
@@ -2935,8 +2937,7 @@
 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
 	spin_unlock_irq(&phba->hbalock);
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2776 FCF rediscover wait timer expired, post "
-			"a worker thread event for FCF table scan\n");
+			"2776 FCF rediscover quiescent timer expired\n");
 	/* wake up worker thread */
 	lpfc_worker_wake_up(phba);
 }
@@ -3311,35 +3312,34 @@
 		if (event_type == LPFC_FCOE_EVENT_TYPE_NEW_FCF)
 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
 					LOG_DISCOVERY,
-					"2546 New FCF found event: "
-					"evt_tag:x%x, fcf_index:x%x\n",
+					"2546 New FCF event, evt_tag:x%x, "
+					"index:x%x\n",
 					acqe_fcoe->event_tag,
 					acqe_fcoe->index);
 		else
 			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP |
 					LOG_DISCOVERY,
-					"2788 FCF parameter modified event: "
-					"evt_tag:x%x, fcf_index:x%x\n",
+					"2788 FCF param modified event, "
+					"evt_tag:x%x, index:x%x\n",
 					acqe_fcoe->event_tag,
 					acqe_fcoe->index);
 		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
 			/*
 			 * During period of FCF discovery, read the FCF
 			 * table record indexed by the event to update
-			 * FCF round robin failover eligible FCF bmask.
+			 * FCF roundrobin failover eligible FCF bmask.
 			 */
 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
 					LOG_DISCOVERY,
-					"2779 Read new FCF record with "
-					"fcf_index:x%x for updating FCF "
-					"round robin failover bmask\n",
+					"2779 Read FCF (x%x) for updating "
+					"roundrobin FCF failover bmask\n",
 					acqe_fcoe->index);
 			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
 		}
 
 		/* If the FCF discovery is in progress, do nothing. */
 		spin_lock_irq(&phba->hbalock);
-		if (phba->hba_flag & FCF_DISC_INPROGRESS) {
+		if (phba->hba_flag & FCF_TS_INPROG) {
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
@@ -3358,15 +3358,15 @@
 
 		/* Otherwise, scan the entire FCF table and re-discover SAN */
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-				"2770 Start FCF table scan due to new FCF "
-				"event: evt_tag:x%x, fcf_index:x%x\n",
+				"2770 Start FCF table scan per async FCF "
+				"event, evt_tag:x%x, index:x%x\n",
 				acqe_fcoe->event_tag, acqe_fcoe->index);
 		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
 						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
 			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 					"2547 Issue FCF scan read FCF mailbox "
-					"command failed 0x%x\n", rc);
+					"command failed (x%x)\n", rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3378,9 +3378,8 @@
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
-			"2549 FCF disconnected from network index 0x%x"
-			" tag 0x%x\n", acqe_fcoe->index,
-			acqe_fcoe->event_tag);
+			"2549 FCF (x%x) disconnected from network, "
+			"tag:x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 		/*
 		 * If we are in the middle of FCF failover process, clear
 		 * the corresponding FCF bit in the roundrobin bitmap.
@@ -3494,9 +3493,8 @@
 			spin_unlock_irq(&phba->hbalock);
 			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
 					LOG_DISCOVERY,
-					"2773 Start FCF fast failover due "
-					"to CVL event: evt_tag:x%x\n",
-					acqe_fcoe->event_tag);
+					"2773 Start FCF failover per CVL, "
+					"evt_tag:x%x\n", acqe_fcoe->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
 			if (rc) {
 				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
@@ -3646,8 +3644,7 @@
 
 	/* Scan FCF table from the first entry to re-discover SAN */
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
-			"2777 Start FCF table scan after FCF "
-			"rediscovery quiescent period over\n");
+			"2777 Start post-quiescent FCF table scan\n");
 	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	if (rc)
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
@@ -4165,7 +4162,7 @@
 		goto out_free_active_sgl;
 	}
 
-	/* Allocate eligible FCF bmask memory for FCF round robin failover */
+	/* Allocate eligible FCF bmask memory for FCF roundrobin failover */
 	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
 	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
 					 GFP_KERNEL);
@@ -7271,6 +7268,51 @@
 }
 
 /**
+ * lpfc_sli4_xri_exchange_busy_wait - Wait for device XRI exchange busy
+ * @phba: Pointer to HBA context object.
+ *
+ * This function is called in the SLI4 code path to wait for completion of
+ * the device's XRI exchange busy conditions. It checks for XRI exchange
+ * busy on outstanding FCP and ELS I/Os every 10 ms for up to 10 seconds;
+ * after that, it keeps checking every 30 seconds, logging an error message
+ * each time, and waits indefinitely. Only when all XRI exchange busy
+ * conditions have completed shall the driver unload proceed with invoking
+ * the function reset ioctl mailbox command to the CNA and releasing the
+ * rest of the driver unload resources.
+ **/
+static void
+lpfc_sli4_xri_exchange_busy_wait(struct lpfc_hba *phba)
+{
+	int wait_time = 0;
+	int fcp_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+	int els_xri_cmpl = list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+
+	while (!fcp_xri_cmpl || !els_xri_cmpl) {
+		if (wait_time > LPFC_XRI_EXCH_BUSY_WAIT_TMO) {
+			if (!fcp_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2877 FCP XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			if (!els_xri_cmpl)
+				lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+						"2878 ELS XRI exchange busy "
+						"wait time: %d seconds.\n",
+						wait_time/1000);
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T2);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T2;
+		} else {
+			msleep(LPFC_XRI_EXCH_BUSY_WAIT_T1);
+			wait_time += LPFC_XRI_EXCH_BUSY_WAIT_T1;
+		}
+		fcp_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_scsi_buf_list);
+		els_xri_cmpl =
+			list_empty(&phba->sli4_hba.lpfc_abts_els_sgl_list);
+	}
+}
+
+/**
  * lpfc_sli4_hba_unset - Unset the fcoe hba
  * @phba: Pointer to HBA context object.
  *
@@ -7315,6 +7357,12 @@
 		spin_unlock_irq(&phba->hbalock);
 	}
 
+	/* Abort all iocbs associated with the hba */
+	lpfc_sli_hba_iocb_abort(phba);
+
+	/* Wait for completion of device XRI exchange busy */
+	lpfc_sli4_xri_exchange_busy_wait(phba);
+
 	/* Disable PCI subsystem interrupt */
 	lpfc_sli4_disable_intr(phba);
 
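Note, as an illustration rather than part of the patch: lpfc_sli4_xri_exchange_busy_wait() above polls using LPFC_XRI_EXCH_BUSY_WAIT_T1/T2/TMO, whose definitions are outside this hunk. The standalone sketch below shows the same two-phase polling pattern with interval values assumed from the kernel-doc comment (10 ms polls up to 10 s, then 30 s polls with a logged warning), using poll() as a portable millisecond sleep.

/* Illustrative two-phase polling wait; not lpfc code. Interval and threshold
 * values are assumptions taken from the comment above.
 */
#include <poll.h>
#include <stdbool.h>
#include <stdio.h>

#define WAIT_T1_MS   10       /* short poll interval */
#define WAIT_T2_MS   30000    /* long poll interval once past the threshold */
#define WAIT_TMO_MS  10000    /* threshold before switching to long polls */

static bool still_busy(int elapsed_ms)
{
	return elapsed_ms < 50;   /* dummy condition so the example terminates */
}

int main(void)
{
	int wait_ms = 0;

	while (still_busy(wait_ms)) {
		if (wait_ms > WAIT_TMO_MS) {
			fprintf(stderr, "still busy after %d seconds\n",
				wait_ms / 1000);
			poll(NULL, 0, WAIT_T2_MS);   /* sleep ~30 s */
			wait_ms += WAIT_T2_MS;
		} else {
			poll(NULL, 0, WAIT_T1_MS);   /* sleep ~10 ms */
			wait_ms += WAIT_T1_MS;
		}
	}
	printf("exchange-busy cleared after ~%d ms\n", wait_ms);
	return 0;
}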
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 0dfa310..62d0957 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -797,6 +797,34 @@
 }
 
 /**
+ * lpfc_sli4_unreg_all_rpis - unregister all RPIs for a vport on SLI4 HBA.
+ * @vport: pointer to a vport object.
+ *
+ * This routine sends a mailbox command to unregister all active RPIs for
+ * a vport.
+ **/
+void
+lpfc_sli4_unreg_all_rpis(struct lpfc_vport *vport)
+{
+	struct lpfc_hba  *phba  = vport->phba;
+	LPFC_MBOXQ_t     *mbox;
+	int rc;
+
+	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (mbox) {
+		lpfc_unreg_login(phba, vport->vpi,
+			vport->vpi + phba->vpi_base, mbox);
+		mbox->u.mb.un.varUnregLogin.rsvd1 = 0x4000;
+		mbox->vport = vport;
+		mbox->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
+		mbox->context1 = NULL;
+		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
+		if (rc == MBX_NOT_FINISHED)
+			mempool_free(mbox, phba->mbox_mem_pool);
+	}
+}
+
+/**
  * lpfc_reg_vpi - Prepare a mailbox command for registering vport identifier
  * @phba: pointer to lpfc hba data structure.
  * @vpi: virtual N_Port identifier.
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 3a65895..f64b65a 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -169,6 +169,7 @@
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (!vport->stat_data_enabled ||
 		vport->stat_data_blocked ||
+		!pnode ||
 		!pnode->lat_data ||
 		(phba->bucket_type == LPFC_NO_BUCKET)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
@@ -2040,6 +2041,9 @@
 	struct lpfc_nodelist *pnode = lpfc_cmd->rdata->pnode;
 	unsigned long flags;
 
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
+		return;
+
 	/* If there is queuefull or busy condition send a scsi event */
 	if ((cmnd->result == SAM_STAT_TASK_SET_FULL) ||
 		(cmnd->result == SAM_STAT_BUSY)) {
@@ -3226,10 +3230,11 @@
 	struct lpfc_scsi_buf *lpfc_cmd;
 	struct lpfc_iocbq *iocbq;
 	struct lpfc_iocbq *iocbqrsp;
+	struct lpfc_nodelist *pnode = rdata->pnode;
 	int ret;
 	int status;
 
-	if (!rdata->pnode || !NLP_CHK_NODE_ACT(rdata->pnode))
+	if (!pnode || !NLP_CHK_NODE_ACT(pnode))
 		return FAILED;
 
 	lpfc_cmd = lpfc_get_scsi_buf(phba);
@@ -3256,7 +3261,7 @@
 			 "0702 Issue %s to TGT %d LUN %d "
 			 "rpi x%x nlp_flag x%x\n",
 			 lpfc_taskmgmt_name(task_mgmt_cmd), tgt_id, lun_id,
-			 rdata->pnode->nlp_rpi, rdata->pnode->nlp_flag);
+			 pnode->nlp_rpi, pnode->nlp_flag);
 
 	status = lpfc_sli_issue_iocb_wait(phba, LPFC_FCP_RING,
 					  iocbq, iocbqrsp, lpfc_cmd->timeout);
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 0d1e187..554efa6 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -95,7 +95,7 @@
 		return -ENOMEM;
 	/* set consumption flag every once in a while */
 	if (!((q->host_index + 1) % LPFC_RELEASE_NOTIFICATION_INTERVAL))
-		bf_set(lpfc_wqe_gen_wqec, &wqe->generic, 1);
+		bf_set(wqe_wqec, &wqe->generic.wqe_com, 1);
 
 	lpfc_sli_pcimem_bcopy(wqe, temp_wqe, q->entry_size);
 
@@ -1735,6 +1735,7 @@
 	struct lpfc_vport  *vport = pmb->vport;
 	struct lpfc_dmabuf *mp;
 	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
 	uint16_t rpi, vpi;
 	int rc;
 
@@ -1746,7 +1747,8 @@
 	}
 
 	if ((pmb->u.mb.mbxCommand == MBX_UNREG_LOGIN) &&
-	    (phba->sli_rev == LPFC_SLI_REV4))
+	    (phba->sli_rev == LPFC_SLI_REV4) &&
+	    (pmb->u.mb.un.varUnregLogin.rsvd1 == 0x0))
 		lpfc_sli4_free_rpi(phba, pmb->u.mb.un.varUnregLogin.rpi);
 
 	/*
@@ -1765,16 +1767,14 @@
 			return;
 	}
 
-	/* Unreg VPI, if the REG_VPI succeed after VLink failure */
 	if ((pmb->u.mb.mbxCommand == MBX_REG_VPI) &&
 		!(phba->pport->load_flag & FC_UNLOADING) &&
 		!pmb->u.mb.mbxStatus) {
-		lpfc_unreg_vpi(phba, pmb->u.mb.un.varRegVpi.vpi, pmb);
-		pmb->vport = vport;
-		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
-		rc = lpfc_sli_issue_mbox(phba, pmb, MBX_NOWAIT);
-		if (rc != MBX_NOT_FINISHED)
-			return;
+		shost = lpfc_shost_from_vport(vport);
+		spin_lock_irq(shost->host_lock);
+		vport->vpi_state |= LPFC_VPI_REGISTERED;
+		vport->fc_flag &= ~FC_VPORT_NEEDS_REG_VPI;
+		spin_unlock_irq(shost->host_lock);
 	}
 
 	if (pmb->u.mb.mbxCommand == MBX_REG_LOGIN64) {
@@ -5921,7 +5921,7 @@
  * lpfc_sli4_scmd_to_wqidx_distr - scsi command to SLI4 WQ index distribution
  * @phba: Pointer to HBA context object.
  *
- * This routine performs a round robin SCSI command to SLI4 FCP WQ index
+ * This routine performs a roundrobin SCSI command to SLI4 FCP WQ index
  * distribution.  This is called by __lpfc_sli_issue_iocb_s4() with the hbalock
  * held.
  *
@@ -5965,7 +5965,7 @@
 	uint16_t abrt_iotag;
 	struct lpfc_iocbq *abrtiocbq;
 	struct ulp_bde64 *bpl = NULL;
-	uint32_t els_id = ELS_ID_DEFAULT;
+	uint32_t els_id = LPFC_ELS_ID_DEFAULT;
 	int numBdes, i;
 	struct ulp_bde64 bde;
 
@@ -5982,7 +5982,7 @@
 	memcpy(wqe, &iocbq->iocb, sizeof(union lpfc_wqe));
 	abort_tag = (uint32_t) iocbq->iotag;
 	xritag = iocbq->sli4_xritag;
-	wqe->words[7] = 0; /* The ct field has moved so reset */
+	wqe->generic.wqe_com.word7 = 0; /* The ct field has moved so reset */
 	/* words0-2 bpl convert bde */
 	if (iocbq->iocb.un.genreq64.bdl.bdeFlags == BUFF_TYPE_BLP_64) {
 		numBdes = iocbq->iocb.un.genreq64.bdl.bdeSize /
@@ -6033,109 +6033,117 @@
 		 * contains the FCFI and remote N_Port_ID is
 		 * in word 5.
 		 */
-
 		ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
-		bf_set(lpfc_wqe_gen_context, &wqe->generic,
-				iocbq->iocb.ulpContext);
-
-		bf_set(lpfc_wqe_gen_ct, &wqe->generic, ct);
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		bf_set(wqe_ctxt_tag, &wqe->els_req.wqe_com,
+		       iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->els_req.wqe_com, ct);
+		bf_set(wqe_pu, &wqe->els_req.wqe_com, 0);
 		/* CCP CCPE PV PRI in word10 were set in the memcpy */
-
 		if (command_type == ELS_COMMAND_FIP) {
 			els_id = ((iocbq->iocb_flag & LPFC_FIP_ELS_ID_MASK)
 					>> LPFC_FIP_ELS_ID_SHIFT);
 		}
-		bf_set(lpfc_wqe_gen_els_id, &wqe->generic, els_id);
-
+		bf_set(wqe_els_id, &wqe->els_req.wqe_com, els_id);
+		bf_set(wqe_dbde, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->els_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->els_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->els_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->els_req.wqe_com, 0);
 	break;
 	case CMD_XMIT_SEQUENCE64_CX:
-		bf_set(lpfc_wqe_gen_context, &wqe->generic,
-					iocbq->iocb.un.ulpWord[3]);
-		wqe->generic.word3 = 0;
-		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		bf_set(wqe_ctxt_tag, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.un.ulpWord[3]);
+		bf_set(wqe_rcvoxid, &wqe->xmit_sequence.wqe_com,
+		       iocbq->iocb.ulpContext);
 		/* The entire sequence is transmitted for this IOCB */
 		xmit_len = total_len;
 		cmnd = CMD_XMIT_SEQUENCE64_CR;
 	case CMD_XMIT_SEQUENCE64_CR:
-		/* word3 iocb=io_tag32 wqe=payload_offset */
-		/* payload offset used for multilpe outstanding
-		 * sequences on the same exchange
-		 */
-		wqe->words[3] = 0;
+		/* word3 iocb=io_tag32 wqe=reserved */
+		wqe->xmit_sequence.rsvd3 = 0;
 		/* word4 relative_offset memcpy */
 		/* word5 r_ctl/df_ctl memcpy */
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		bf_set(wqe_pu, &wqe->xmit_sequence.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->xmit_sequence.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_sequence.wqe_com,
+		       LPFC_WQE_LENLOC_WORD12);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_sequence.wqe_com, 0);
 		wqe->xmit_sequence.xmit_len = xmit_len;
 		command_type = OTHER_COMMAND;
 	break;
 	case CMD_XMIT_BCAST64_CN:
-		/* word3 iocb=iotag32 wqe=payload_len */
-		wqe->words[3] = 0; /* no definition for this in wqe */
+		/* word3 iocb=iotag32 wqe=seq_payload_len */
+		wqe->xmit_bcast64.seq_payload_len = xmit_len;
 		/* word4 iocb=rsvd wqe=rsvd */
 		/* word5 iocb=rctl/type/df_ctl wqe=rctl/type/df_ctl memcpy */
 		/* word6 iocb=ctxt_tag/io_tag wqe=ctxt_tag/xri */
-		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+		bf_set(wqe_ct, &wqe->xmit_bcast64.wqe_com,
 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_dbde, &wqe->xmit_bcast64.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_bcast64.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->xmit_bcast64.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_bcast64.wqe_com, 0);
 	break;
 	case CMD_FCP_IWRITE64_CR:
 		command_type = FCP_COMMAND_DATA_OUT;
-		/* The struct for wqe fcp_iwrite has 3 fields that are somewhat
-		 * confusing.
-		 * word3 is payload_len: byte offset to the sgl entry for the
-		 * fcp_command.
-		 * word4 is total xfer len, same as the IOCB->ulpParameter.
-		 * word5 is initial xfer len 0 = wait for xfer-ready
-		 */
-
-		/* Always wait for xfer-ready before sending data */
-		wqe->fcp_iwrite.initial_xfer_len = 0;
-		/* word 4 (xfer length) should have been set on the memcpy */
-
-	/* allow write to fall through to read */
-	case CMD_FCP_IREAD64_CR:
-		/* FCP_CMD is always the 1st sgl entry */
-		wqe->fcp_iread.payload_len =
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		wqe->fcp_iwrite.payload_offset_len =
 			xmit_len + sizeof(struct fcp_rsp);
-
-		/* word 4 (xfer length) should have been set on the memcpy */
-
-		bf_set(lpfc_wqe_gen_erp, &wqe->generic,
-			iocbq->iocb.ulpFCP2Rcvy);
-		bf_set(lpfc_wqe_gen_lnk, &wqe->generic, iocbq->iocb.ulpXS);
-		/* The XC bit and the XS bit are similar. The driver never
-		 * tracked whether or not the exchange was previouslly open.
-		 * XC = Exchange create, 0 is create. 1 is already open.
-		 * XS = link cmd: 1 do not close the exchange after command.
-		 * XS = 0 close exchange when command completes.
-		 * The only time we would not set the XC bit is when the XS bit
-		 * is set and we are sending our 2nd or greater command on
-		 * this exchange.
-		 */
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iwrite.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpXS);
+		/* Always open the exchange */
+		bf_set(wqe_xc, &wqe->fcp_iwrite.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->fcp_iwrite.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_iwrite.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_lenloc, &wqe->fcp_iwrite.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_iwrite.wqe_com, 0);
+		bf_set(wqe_pu, &wqe->fcp_iwrite.wqe_com, iocbq->iocb.ulpPU);
+	break;
+	case CMD_FCP_IREAD64_CR:
+		/* word3 iocb=iotag wqe=payload_offset_len */
+		/* Add the FCP_CMD and FCP_RSP sizes to get the offset */
+		wqe->fcp_iread.payload_offset_len =
+			xmit_len + sizeof(struct fcp_rsp);
+		/* word4 iocb=parameter wqe=total_xfer_length memcpy */
+		/* word5 iocb=initial_xfer_len wqe=initial_xfer_len memcpy */
+		bf_set(wqe_erp, &wqe->fcp_iread.wqe_com,
+		       iocbq->iocb.ulpFCP2Rcvy);
+		bf_set(wqe_lnk, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpXS);
 		/* Always open the exchange */
 		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
-		wqe->words[10] &= 0xffff0000; /* zero out ebde count */
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
-		break;
+		bf_set(wqe_dbde, &wqe->fcp_iread.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_iread.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_lenloc, &wqe->fcp_iread.wqe_com,
+		       LPFC_WQE_LENLOC_WORD4);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_iread.wqe_com, 0);
+		bf_set(wqe_pu, &wqe->fcp_iread.wqe_com, iocbq->iocb.ulpPU);
+	break;
 	case CMD_FCP_ICMND64_CR:
+		/* word3 iocb=IO_TAG wqe=reserved */
+		wqe->fcp_icmd.rsrvd3 = 0;
+		bf_set(wqe_pu, &wqe->fcp_icmd.wqe_com, 0);
 		/* Always open the exchange */
-		bf_set(wqe_xc, &wqe->fcp_iread.wqe_com, 0);
-
-		wqe->words[4] = 0;
-		wqe->words[10] &= 0xffff0000; /* zero out ebde count */
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, 0);
+		bf_set(wqe_xc, &wqe->fcp_icmd.wqe_com, 0);
+		bf_set(wqe_dbde, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->fcp_icmd.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->fcp_icmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->fcp_icmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->fcp_icmd.wqe_com, 0);
 	break;
 	case CMD_GEN_REQUEST64_CR:
-		/* word3 command length is described as byte offset to the
-		 * rsp_data. Would always be 16, sizeof(struct sli4_sge)
-		 * sgl[0] = cmnd
-		 * sgl[1] = rsp.
-		 *
-		 */
-		wqe->gen_req.command_len = xmit_len;
-		/* Word4 parameter  copied in the memcpy */
-		/* Word5 [rctl, type, df_ctl, la] copied in memcpy */
+		/* word3 iocb=IO_TAG wqe=request_payload_len */
+		wqe->gen_req.request_payload_len = xmit_len;
+		/* word4 iocb=parameter wqe=relative_offset memcpy */
+		/* word5 [rctl, type, df_ctl, la] copied in memcpy */
 		/* word6 context tag copied in memcpy */
 		if (iocbq->iocb.ulpCt_h  || iocbq->iocb.ulpCt_l) {
 			ct = ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l);
@@ -6144,31 +6152,39 @@
 				ct, iocbq->iocb.ulpCommand);
 			return IOCB_ERROR;
 		}
-		bf_set(lpfc_wqe_gen_ct, &wqe->generic, 0);
-		bf_set(wqe_tmo, &wqe->gen_req.wqe_com,
-			iocbq->iocb.ulpTimeout);
-
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
+		bf_set(wqe_ct, &wqe->gen_req.wqe_com, 0);
+		bf_set(wqe_tmo, &wqe->gen_req.wqe_com, iocbq->iocb.ulpTimeout);
+		bf_set(wqe_pu, &wqe->gen_req.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_dbde, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->gen_req.wqe_com, LPFC_WQE_IOD_READ);
+		bf_set(wqe_qosd, &wqe->gen_req.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->gen_req.wqe_com, LPFC_WQE_LENLOC_NONE);
+		bf_set(wqe_ebde_cnt, &wqe->gen_req.wqe_com, 0);
 		command_type = OTHER_COMMAND;
 	break;
 	case CMD_XMIT_ELS_RSP64_CX:
 		/* words0-2 BDE memcpy */
-		/* word3 iocb=iotag32 wqe=rsvd */
-		wqe->words[3] = 0;
+		/* word3 iocb=iotag32 wqe=response_payload_len */
+		wqe->xmit_els_rsp.response_payload_len = xmit_len;
 		/* word4 iocb=did wge=rsvd. */
-		wqe->words[4] = 0;
+		wqe->xmit_els_rsp.rsvd4 = 0;
 		/* word5 iocb=rsvd wge=did */
 		bf_set(wqe_els_did, &wqe->xmit_els_rsp.wqe_dest,
 			 iocbq->iocb.un.elsreq64.remoteID);
-
-		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
-			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
-
-		bf_set(lpfc_wqe_gen_pu, &wqe->generic, iocbq->iocb.ulpPU);
-		bf_set(wqe_rcvoxid, &wqe->generic, iocbq->iocb.ulpContext);
+		bf_set(wqe_ct, &wqe->xmit_els_rsp.wqe_com,
+		       ((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
+		bf_set(wqe_pu, &wqe->xmit_els_rsp.wqe_com, iocbq->iocb.ulpPU);
+		bf_set(wqe_rcvoxid, &wqe->xmit_els_rsp.wqe_com,
+		       iocbq->iocb.ulpContext);
 		if (!iocbq->iocb.ulpCt_h && iocbq->iocb.ulpCt_l)
-			bf_set(lpfc_wqe_gen_context, &wqe->generic,
+			bf_set(wqe_ctxt_tag, &wqe->xmit_els_rsp.wqe_com,
 			       iocbq->vport->vpi + phba->vpi_base);
+		bf_set(wqe_dbde, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_iod, &wqe->xmit_els_rsp.wqe_com, LPFC_WQE_IOD_WRITE);
+		bf_set(wqe_qosd, &wqe->xmit_els_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_els_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_WORD3);
+		bf_set(wqe_ebde_cnt, &wqe->xmit_els_rsp.wqe_com, 0);
 		command_type = OTHER_COMMAND;
 	break;
 	case CMD_CLOSE_XRI_CN:
@@ -6193,15 +6209,19 @@
 		else
 			bf_set(abort_cmd_ia, &wqe->abort_cmd, 0);
 		bf_set(abort_cmd_criteria, &wqe->abort_cmd, T_XRI_TAG);
-		wqe->words[5] = 0;
-		bf_set(lpfc_wqe_gen_ct, &wqe->generic,
+		/* word5 iocb=CONTEXT_TAG|IO_TAG wqe=reserved */
+		wqe->abort_cmd.rsrvd5 = 0;
+		bf_set(wqe_ct, &wqe->abort_cmd.wqe_com,
 			((iocbq->iocb.ulpCt_h << 1) | iocbq->iocb.ulpCt_l));
 		abort_tag = iocbq->iocb.un.acxri.abortIoTag;
 		/*
 		 * The abort handler will send us CMD_ABORT_XRI_CN or
 		 * CMD_CLOSE_XRI_CN and the fw only accepts CMD_ABORT_XRI_CX
 		 */
-		bf_set(lpfc_wqe_gen_command, &wqe->generic, CMD_ABORT_XRI_CX);
+		bf_set(wqe_cmnd, &wqe->abort_cmd.wqe_com, CMD_ABORT_XRI_CX);
+		bf_set(wqe_qosd, &wqe->abort_cmd.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->abort_cmd.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
 		cmnd = CMD_ABORT_XRI_CX;
 		command_type = OTHER_COMMAND;
 		xritag = 0;
@@ -6235,18 +6255,14 @@
 		bf_set(wqe_xmit_bls_pt, &wqe->xmit_bls_rsp.wqe_dest, 0x1);
 		bf_set(wqe_ctxt_tag, &wqe->xmit_bls_rsp.wqe_com,
 		       iocbq->iocb.ulpContext);
+		bf_set(wqe_qosd, &wqe->xmit_bls_rsp.wqe_com, 1);
+		bf_set(wqe_lenloc, &wqe->xmit_bls_rsp.wqe_com,
+		       LPFC_WQE_LENLOC_NONE);
 		/* Overwrite the pre-set comnd type with OTHER_COMMAND */
 		command_type = OTHER_COMMAND;
 	break;
 	case CMD_XRI_ABORTED_CX:
 	case CMD_CREATE_XRI_CR: /* Do we expect to use this? */
-		/* words0-2 are all 0's no bde */
-		/* word3 and word4 are rsvrd */
-		wqe->words[3] = 0;
-		wqe->words[4] = 0;
-		/* word5 iocb=rsvd wge=did */
-		/* There is no remote port id in the IOCB? */
-		/* Let this fall through and fail */
 	case CMD_IOCB_FCP_IBIDIR64_CR: /* bidirectional xfer */
 	case CMD_FCP_TSEND64_CX: /* Target mode send xfer-ready */
 	case CMD_FCP_TRSP64_CX: /* Target mode rcv */
@@ -6257,16 +6273,14 @@
 				iocbq->iocb.ulpCommand);
 		return IOCB_ERROR;
 	break;
-
 	}
-	bf_set(lpfc_wqe_gen_xri, &wqe->generic, xritag);
-	bf_set(lpfc_wqe_gen_request_tag, &wqe->generic, iocbq->iotag);
-	wqe->generic.abort_tag = abort_tag;
-	bf_set(lpfc_wqe_gen_cmd_type, &wqe->generic, command_type);
-	bf_set(lpfc_wqe_gen_command, &wqe->generic, cmnd);
-	bf_set(lpfc_wqe_gen_class, &wqe->generic, iocbq->iocb.ulpClass);
-	bf_set(lpfc_wqe_gen_cq_id, &wqe->generic, LPFC_WQE_CQ_ID_DEFAULT);
-
+	bf_set(wqe_xri_tag, &wqe->generic.wqe_com, xritag);
+	bf_set(wqe_reqtag, &wqe->generic.wqe_com, iocbq->iotag);
+	wqe->generic.wqe_com.abort_tag = abort_tag;
+	bf_set(wqe_cmd_type, &wqe->generic.wqe_com, command_type);
+	bf_set(wqe_cmnd, &wqe->generic.wqe_com, cmnd);
+	bf_set(wqe_class, &wqe->generic.wqe_com, iocbq->iocb.ulpClass);
+	bf_set(wqe_cqid, &wqe->generic.wqe_com, LPFC_WQE_CQ_ID_DEFAULT);
 	return 0;
 }
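
The hunks above replace raw writes to wqe->words[] with bf_set() accesses to named fields of the per-command wqe_com words. As a general illustration of that shift/mask bitfield technique (not the driver's actual macros or field layout), here is a minimal standalone sketch; the field names and bit positions are invented for the example.

/*
 * Standalone sketch of shift/mask bitfield accessors, in the spirit of the
 * bf_set()/bf_get() conversion above.  Field names and positions are made
 * up for illustration; they are not the lpfc definitions.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SET(word, shift, mask, val) \
	((word) = ((word) & ~((uint32_t)(mask) << (shift))) | \
		  (((uint32_t)(val) & (mask)) << (shift)))
#define FIELD_GET(word, shift, mask) \
	(((word) >> (shift)) & (mask))

/* hypothetical layout: ct at bits 2..3, pu at bits 4..5 of a command word */
#define CT_SHIFT 2
#define CT_MASK  0x3
#define PU_SHIFT 4
#define PU_MASK  0x3

int main(void)
{
	uint32_t word7 = 0;

	FIELD_SET(word7, CT_SHIFT, CT_MASK, 2);	/* set context type */
	FIELD_SET(word7, PU_SHIFT, PU_MASK, 0);	/* clear PU */
	printf("word7=0x%08x ct=%u pu=%u\n", word7,
	       FIELD_GET(word7, CT_SHIFT, CT_MASK),
	       FIELD_GET(word7, PU_SHIFT, PU_MASK));
	return 0;
}
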
 
@@ -7257,25 +7271,26 @@
 }
 
 /**
- * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * lpfc_sli_abort_iotag_issue - Issue abort for a command iocb
  * @phba: Pointer to HBA context object.
  * @pring: Pointer to driver SLI ring object.
  * @cmdiocb: Pointer to driver command iocb object.
  *
- * This function issues an abort iocb for the provided command
- * iocb. This function is called with hbalock held.
- * The function returns 0 when it fails due to memory allocation
- * failure or when the command iocb is an abort request.
+ * This function issues an abort iocb for the provided command iocb down to
+ * the port. Unless the outstanding command iocb is itself an abort request,
+ * the abort is issued unconditionally. This function is called with hbalock
+ * held. The function returns 0 when it fails due to memory allocation
+ * failure or when the command iocb is an abort request.
  **/
-int
-lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+static int
+lpfc_sli_abort_iotag_issue(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
 			   struct lpfc_iocbq *cmdiocb)
 {
 	struct lpfc_vport *vport = cmdiocb->vport;
 	struct lpfc_iocbq *abtsiocbp;
 	IOCB_t *icmd = NULL;
 	IOCB_t *iabt = NULL;
-	int retval = IOCB_ERROR;
+	int retval;
 
 	/*
 	 * There are certain command types we don't want to abort.  And we
@@ -7288,18 +7303,6 @@
 	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
 		return 0;
 
-	/* If we're unloading, don't abort iocb on the ELS ring, but change the
-	 * callback so that nothing happens when it finishes.
-	 */
-	if ((vport->load_flag & FC_UNLOADING) &&
-	    (pring->ringno == LPFC_ELS_RING)) {
-		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
-			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
-		else
-			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
-		goto abort_iotag_exit;
-	}
-
 	/* issue ABTS for this IOCB based on iotag */
 	abtsiocbp = __lpfc_sli_get_iocbq(phba);
 	if (abtsiocbp == NULL)
@@ -7344,6 +7347,63 @@
 
 	if (retval)
 		__lpfc_sli_release_iocbq(phba, abtsiocbp);
+
+	/*
+	 * Caller to this routine should check for IOCB_ERROR
+	 * and handle it properly.  This routine no longer removes
+	 * iocb off txcmplq and call compl in case of IOCB_ERROR.
+	 */
+	return retval;
+}
+
+/**
+ * lpfc_sli_issue_abort_iotag - Abort function for a command iocb
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ * @cmdiocb: Pointer to driver command iocb object.
+ *
+ * This function issues an abort iocb for the provided command iocb. If the
+ * driver is unloading, the abort iocb will not be issued for commands on the
+ * ELS ring; instead, their completion callbacks are replaced so that nothing
+ * happens when they finish. This function is called with hbalock held. The
+ * function returns 0 when the command iocb is an abort request.
+ **/
+int
+lpfc_sli_issue_abort_iotag(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
+			   struct lpfc_iocbq *cmdiocb)
+{
+	struct lpfc_vport *vport = cmdiocb->vport;
+	int retval = IOCB_ERROR;
+	IOCB_t *icmd = NULL;
+
+	/*
+	 * There are certain command types we don't want to abort.  And we
+	 * don't want to abort commands that are already in the process of
+	 * being aborted.
+	 */
+	icmd = &cmdiocb->iocb;
+	if (icmd->ulpCommand == CMD_ABORT_XRI_CN ||
+	    icmd->ulpCommand == CMD_CLOSE_XRI_CN ||
+	    (cmdiocb->iocb_flag & LPFC_DRIVER_ABORTED) != 0)
+		return 0;
+
+	/*
+	 * If we're unloading, don't abort iocb on the ELS ring, but change
+	 * the callback so that nothing happens when it finishes.
+	 */
+	if ((vport->load_flag & FC_UNLOADING) &&
+	    (pring->ringno == LPFC_ELS_RING)) {
+		if (cmdiocb->iocb_flag & LPFC_IO_FABRIC)
+			cmdiocb->fabric_iocb_cmpl = lpfc_ignore_els_cmpl;
+		else
+			cmdiocb->iocb_cmpl = lpfc_ignore_els_cmpl;
+		goto abort_iotag_exit;
+	}
+
+	/* Now, we try to issue the abort to the cmdiocb out */
+	retval = lpfc_sli_abort_iotag_issue(phba, pring, cmdiocb);
+
 abort_iotag_exit:
 	/*
 	 * Caller to this routine should check for IOCB_ERROR
@@ -7354,6 +7414,62 @@
 }
 
 /**
+ * lpfc_sli_iocb_ring_abort - Unconditionally abort all iocbs on an iocb ring
+ * @phba: Pointer to HBA context object.
+ * @pring: Pointer to driver SLI ring object.
+ *
+ * This function aborts all iocbs in the given ring and frees all the iocb
+ * objects in txq. This function issues abort iocbs unconditionally for all
+ * the iocb commands in txcmplq. The iocbs in the txcmplq are not guaranteed
+ * to complete before the return of this function. The caller is not required
+ * to hold any locks.
+ **/
+static void
+lpfc_sli_iocb_ring_abort(struct lpfc_hba *phba, struct lpfc_sli_ring *pring)
+{
+	LIST_HEAD(completions);
+	struct lpfc_iocbq *iocb, *next_iocb;
+
+	if (pring->ringno == LPFC_ELS_RING)
+		lpfc_fabric_abort_hba(phba);
+
+	spin_lock_irq(&phba->hbalock);
+
+	/* Take off all the iocbs on txq for cancelling */
+	list_splice_init(&pring->txq, &completions);
+	pring->txq_cnt = 0;
+
+	/* Next issue ABTS for everything on the txcmplq */
+	list_for_each_entry_safe(iocb, next_iocb, &pring->txcmplq, list)
+		lpfc_sli_abort_iotag_issue(phba, pring, iocb);
+
+	spin_unlock_irq(&phba->hbalock);
+
+	/* Cancel all the IOCBs from the completions list */
+	lpfc_sli_cancel_iocbs(phba, &completions, IOSTAT_LOCAL_REJECT,
+			      IOERR_SLI_ABORTED);
+}
+
+/**
+ * lpfc_sli_hba_iocb_abort - Abort all iocbs to an hba.
+ * @phba: pointer to lpfc HBA data structure.
+ *
+ * This routine will abort all pending and outstanding iocbs to an HBA.
+ **/
+void
+lpfc_sli_hba_iocb_abort(struct lpfc_hba *phba)
+{
+	struct lpfc_sli *psli = &phba->sli;
+	struct lpfc_sli_ring *pring;
+	int i;
+
+	for (i = 0; i < psli->num_rings; i++) {
+		pring = &psli->ring[i];
+		lpfc_sli_iocb_ring_abort(phba, pring);
+	}
+}
+
+/**
  * lpfc_sli_validate_fcp_iocb - find commands associated with a vport or LUN
  * @iocbq: Pointer to driver iocb object.
  * @vport: Pointer to driver virtual port object.
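
The three hunks above split the abort path into a static helper that actually builds and issues the ABTS and a public entry point that filters the unload/ELS special case; the helper's return value is now the caller's responsibility. Below is a hedged, self-contained sketch of that division of responsibility, using generic names and types rather than the lpfc structures.

/*
 * Standalone sketch of the "public filter + static issue helper" split used
 * above.  The struct, flags and return codes are illustrative only.
 */
#include <stdbool.h>
#include <stdio.h>

enum { IOCB_SUCCESS = 0, IOCB_ERROR = 1 };

struct cmd {
	bool already_aborted;
	bool on_els_ring;
};

/* helper: issues the abort unconditionally; caller must check the result */
static int abort_issue(struct cmd *c)
{
	(void)c;
	/* pretend allocating the abort request may fail */
	return IOCB_SUCCESS;
}

/* public entry: handles the special cases, then delegates to the helper */
static int abort_iotag(struct cmd *c, bool unloading)
{
	if (c->already_aborted)
		return 0;
	if (unloading && c->on_els_ring)
		return 0;	/* just neuter the completion, issue nothing */
	return abort_issue(c);
}

int main(void)
{
	struct cmd c = { .already_aborted = false, .on_els_ring = true };

	if (abort_iotag(&c, true) == IOCB_ERROR)
		fprintf(stderr, "abort failed, caller must recover\n");
	return 0;
}
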
@@ -12242,13 +12358,15 @@
 	/* Issue the mailbox command asynchronously */
 	mboxq->vport = phba->pport;
 	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
+
+	spin_lock_irq(&phba->hbalock);
+	phba->hba_flag |= FCF_TS_INPROG;
+	spin_unlock_irq(&phba->hbalock);
+
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
 		error = -EIO;
 	else {
-		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag |= FCF_DISC_INPROGRESS;
-		spin_unlock_irq(&phba->hbalock);
 		/* Reset eligible FCF count for new scan */
 		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
 			phba->fcf.eligible_fcf_cnt = 0;
@@ -12258,21 +12376,21 @@
 	if (error) {
 		if (mboxq)
 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		/* FCF scan failed, clear FCF_DISC_INPROGRESS flag */
+		/* FCF scan failed, clear FCF_TS_INPROG flag */
 		spin_lock_irq(&phba->hbalock);
-		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		phba->hba_flag &= ~FCF_TS_INPROG;
 		spin_unlock_irq(&phba->hbalock);
 	}
 	return error;
 }
 
 /**
- * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for roundrobin fcf.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: FCF table entry offset.
  *
  * This routine is invoked to read an FCF record indicated by @fcf_index
- * and to use it for FLOGI round robin FCF failover.
+ * and to use it for FLOGI roundrobin FCF failover.
  *
  * Return 0 if the mailbox command is submitted sucessfully, none 0
  * otherwise.
@@ -12318,7 +12436,7 @@
  * @fcf_index: FCF table entry offset.
  *
  * This routine is invoked to read an FCF record indicated by @fcf_index to
- * determine whether it's eligible for FLOGI round robin failover list.
+ * determine whether it's eligible for FLOGI roundrobin failover list.
  *
  * Return 0 if the mailbox command is submitted sucessfully, none 0
  * otherwise.
@@ -12364,7 +12482,7 @@
  *
  * This routine is to get the next eligible FCF record index in a round
  * robin fashion. If the next eligible FCF record index equals to the
- * initial round robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
+ * initial roundrobin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF)
  * shall be returned, otherwise, the next eligible FCF record's index
  * shall be returned.
  **/
@@ -12392,28 +12510,10 @@
 		return LPFC_FCOE_FCF_NEXT_NONE;
 	}
 
-	/* Check roundrobin failover index bmask stop condition */
-	if (next_fcf_index == phba->fcf.fcf_rr_init_indx) {
-		if (!(phba->fcf.fcf_flag & FCF_REDISC_RRU)) {
-			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
-					"2847 Round robin failover FCF index "
-					"search hit stop condition:x%x\n",
-					next_fcf_index);
-			return LPFC_FCOE_FCF_NEXT_NONE;
-		}
-		/* The roundrobin failover index bmask updated, start over */
-		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-				"2848 Round robin failover FCF index bmask "
-				"updated, start over\n");
-		spin_lock_irq(&phba->hbalock);
-		phba->fcf.fcf_flag &= ~FCF_REDISC_RRU;
-		spin_unlock_irq(&phba->hbalock);
-		return phba->fcf.fcf_rr_init_indx;
-	}
-
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2845 Get next round robin failover "
-			"FCF index x%x\n", next_fcf_index);
+			"2845 Get next roundrobin failover FCF (x%x)\n",
+			next_fcf_index);
+
 	return next_fcf_index;
 }
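
The hunk above simplifies the next-index lookup by dropping the FCF_REDISC_RRU stop-condition bookkeeping: the routine simply walks the eligible-FCF bitmap circularly from the current index. A standalone sketch of such a circular "next set bit" search is shown below; it uses a plain 64-bit word instead of the kernel's find_next_bit() helpers, and the sizes are illustrative.

/*
 * Standalone sketch of a circular next-set-bit lookup, similar in spirit to
 * the roundrobin FCF index search above.  Sizes and names are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define TBL_MAX   64
#define NEXT_NONE 0xFFFF

static uint64_t bmask;	/* bit i set => FCF index i is eligible */

static unsigned int next_eligible(unsigned int current_idx)
{
	unsigned int i, idx;

	for (i = 1; i <= TBL_MAX; i++) {
		idx = (current_idx + i) % TBL_MAX;	/* wrap around */
		if (bmask & (1ULL << idx))
			return idx;
	}
	return NEXT_NONE;	/* no eligible entry at all */
}

int main(void)
{
	bmask = (1ULL << 3) | (1ULL << 40);
	printf("after 3  -> %u\n", next_eligible(3));	/* 40 */
	printf("after 40 -> %u\n", next_eligible(40));	/* wraps back to 3 */
	return 0;
}
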
 
@@ -12422,7 +12522,7 @@
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine sets the FCF record index in to the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
  * does not go beyond the range of the driver allocated bmask dimension
  * before setting the bit.
  *
@@ -12434,22 +12534,16 @@
 {
 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
-				"2610 HBA FCF index reached driver's "
-				"book keeping dimension: fcf_index:%d, "
-				"driver_bmask_max:%d\n",
+				"2610 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
 		return -EINVAL;
 	}
 	/* Set the eligible FCF record index bmask */
 	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
 
-	/* Set the roundrobin index bmask updated */
-	spin_lock_irq(&phba->hbalock);
-	phba->fcf.fcf_flag |= FCF_REDISC_RRU;
-	spin_unlock_irq(&phba->hbalock);
-
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2790 Set FCF index x%x to round robin failover "
+			"2790 Set FCF (x%x) to roundrobin FCF failover "
 			"bmask\n", fcf_index);
 
 	return 0;
@@ -12460,7 +12554,7 @@
  * @phba: pointer to lpfc hba data structure.
  *
  * This routine clears the FCF record index from the eligible bmask for
- * round robin failover search. It checks to make sure that the index
+ * roundrobin failover search. It checks to make sure that the index
  * does not go beyond the range of the driver allocated bmask dimension
  * before clearing the bit.
  **/
@@ -12469,9 +12563,8 @@
 {
 	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
-				"2762 HBA FCF index goes beyond driver's "
-				"book keeping dimension: fcf_index:%d, "
-				"driver_bmask_max:%d\n",
+				"2762 FCF (x%x) reached driver's book "
+				"keeping dimension:x%x\n",
 				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
 		return;
 	}
@@ -12479,7 +12572,7 @@
 	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
 
 	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-			"2791 Clear FCF index x%x from round robin failover "
+			"2791 Clear FCF (x%x) from roundrobin failover "
 			"bmask\n", fcf_index);
 }
 
@@ -12530,8 +12623,7 @@
 		}
 	} else {
 		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
-				"2775 Start FCF rediscovery quiescent period "
-				"wait timer before scaning FCF table\n");
+				"2775 Start FCF rediscover quiescent timer\n");
 		/*
 		 * Start FCF rediscovery wait timer for pending FCF
 		 * before rescan FCF record table.
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index a0ca572..c4483fe 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -19,10 +19,16 @@
  *******************************************************************/
 
 #define LPFC_ACTIVE_MBOX_WAIT_CNT               100
+#define LPFC_XRI_EXCH_BUSY_WAIT_TMO		10000
+#define LPFC_XRI_EXCH_BUSY_WAIT_T1		10
+#define LPFC_XRI_EXCH_BUSY_WAIT_T2              30000
 #define LPFC_RELEASE_NOTIFICATION_INTERVAL	32
 #define LPFC_GET_QE_REL_INT			32
 #define LPFC_RPI_LOW_WATER_MARK			10
 
+#define LPFC_UNREG_FCF                          1
+#define LPFC_SKIP_UNREG_FCF                     0
+
 /* Amount of time in seconds for waiting FCF rediscovery to complete */
 #define LPFC_FCF_REDISCOVER_WAIT_TMO		2000 /* msec */
 
@@ -163,9 +169,8 @@
 #define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
 #define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
 #define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
-#define FCF_REDISC_RRU	0x400 /* Roundrobin bitmap updated */
+#define FCF_REDISC_PROG (FCF_REDISC_PEND | FCF_REDISC_EVT)
 	uint32_t addr_mode;
-	uint16_t fcf_rr_init_indx;
 	uint32_t eligible_fcf_cnt;
 	struct lpfc_fcf_rec current_rec;
 	struct lpfc_fcf_rec failover_rec;
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index f93120e..7a1b5b1 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.17"
+#define LPFC_DRIVER_VERSION "8.3.18"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/megaraid/megaraid_sas.c b/drivers/scsi/megaraid/megaraid_sas.c
index d3c9cde..eb29d50 100644
--- a/drivers/scsi/megaraid/megaraid_sas.c
+++ b/drivers/scsi/megaraid/megaraid_sas.c
@@ -10,7 +10,7 @@
  *	   2 of the License, or (at your option) any later version.
  *
  * FILE		: megaraid_sas.c
- * Version     : v00.00.04.17.1-rc1
+ * Version     : v00.00.04.31-rc1
  *
  * Authors:
  *	(email-id : megaraidlinux@lsi.com)
@@ -56,6 +56,15 @@
 MODULE_PARM_DESC(poll_mode_io,
 	"Complete cmds from IO path, (default=0)");
 
+/*
+ * Number of sectors per IO command
+ * Will be set in megasas_init_mfi if user does not provide
+ */
+static unsigned int max_sectors;
+module_param_named(max_sectors, max_sectors, int, 0);
+MODULE_PARM_DESC(max_sectors,
+	"Maximum number of sectors per IO command");
+
 MODULE_LICENSE("GPL");
 MODULE_VERSION(MEGASAS_VERSION);
 MODULE_AUTHOR("megaraidlinux@lsi.com");
@@ -103,6 +112,7 @@
 static DECLARE_WAIT_QUEUE_HEAD(megasas_poll_wait);
 static u32 support_poll_for_event;
 static u32 megasas_dbg_lvl;
+static u32 support_device_change;
 
 /* define lock for aen poll */
 spinlock_t poll_aen_lock;
@@ -718,6 +728,10 @@
 megasas_check_reset_gen2(struct megasas_instance *instance,
 		struct megasas_register_set __iomem *regs)
 {
+	if (instance->adprecovery != MEGASAS_HBA_OPERATIONAL) {
+		return 1;
+	}
+
 	return 0;
 }
 
@@ -930,6 +944,7 @@
 			mfi_sgl->sge_skinny[i].length = sg_dma_len(os_sgl);
 			mfi_sgl->sge_skinny[i].phys_addr =
 						sg_dma_address(os_sgl);
+			mfi_sgl->sge_skinny[i].flag = 0;
 		}
 	}
 	return sge_count;
@@ -1557,6 +1572,28 @@
 	}
 }
 
+static void
+megasas_internal_reset_defer_cmds(struct megasas_instance *instance);
+
+static void
+process_fw_state_change_wq(struct work_struct *work);
+
+void megasas_do_ocr(struct megasas_instance *instance)
+{
+	if ((instance->pdev->device == PCI_DEVICE_ID_LSI_SAS1064R) ||
+	(instance->pdev->device == PCI_DEVICE_ID_DELL_PERC5) ||
+	(instance->pdev->device == PCI_DEVICE_ID_LSI_VERDE_ZCR)) {
+		*instance->consumer     = MEGASAS_ADPRESET_INPROG_SIGN;
+	}
+	instance->instancet->disable_intr(instance->reg_set);
+	instance->adprecovery   = MEGASAS_ADPRESET_SM_INFAULT;
+	instance->issuepend_done = 0;
+
+	atomic_set(&instance->fw_outstanding, 0);
+	megasas_internal_reset_defer_cmds(instance);
+	process_fw_state_change_wq(&instance->work_init);
+}
+
 /**
  * megasas_wait_for_outstanding -	Wait for all outstanding cmds
  * @instance:				Adapter soft state
@@ -1574,6 +1611,8 @@
 	unsigned long flags;
 	struct list_head clist_local;
 	struct megasas_cmd *reset_cmd;
+	u32 fw_state;
+	u8 kill_adapter_flag;
 
 	spin_lock_irqsave(&instance->hba_lock, flags);
 	adprecovery = instance->adprecovery;
@@ -1659,7 +1698,45 @@
 		msleep(1000);
 	}
 
-	if (atomic_read(&instance->fw_outstanding)) {
+	i = 0;
+	kill_adapter_flag = 0;
+	do {
+		fw_state = instance->instancet->read_fw_status_reg(
+					instance->reg_set) & MFI_STATE_MASK;
+		if ((fw_state == MFI_STATE_FAULT) &&
+			(instance->disableOnlineCtrlReset == 0)) {
+			if (i == 3) {
+				kill_adapter_flag = 2;
+				break;
+			}
+			megasas_do_ocr(instance);
+			kill_adapter_flag = 1;
+
+			/* wait for 1 sec to let FW finish the pending cmds */
+			msleep(1000);
+		}
+		i++;
+	} while (i <= 3);
+
+	if (atomic_read(&instance->fw_outstanding) &&
+					!kill_adapter_flag) {
+		if (instance->disableOnlineCtrlReset == 0) {
+
+			megasas_do_ocr(instance);
+
+			/* wait for 5 secs to let FW finish the pending cmds */
+			for (i = 0; i < wait_time; i++) {
+				int outstanding =
+					atomic_read(&instance->fw_outstanding);
+				if (!outstanding)
+					return SUCCESS;
+				msleep(1000);
+			}
+		}
+	}
+
+	if (atomic_read(&instance->fw_outstanding) ||
+					(kill_adapter_flag == 2)) {
 		printk(KERN_NOTICE "megaraid_sas: pending cmds after reset\n");
 		/*
 		* Send signal to FW to stop processing any pending cmds.
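
The hunk above adds a bounded retry policy to megasas_wait_for_outstanding(): while the firmware reports a fault and online controller reset is permitted, the driver triggers OCR up to three times before marking the adapter for kill. A minimal standalone sketch of that policy follows; fw_faulted() and do_ocr() are stand-ins for reading MFI_STATE_FAULT and calling megasas_do_ocr(), not the real driver interfaces, and the sleeps between attempts are omitted.

/*
 * Standalone sketch of the bounded OCR retry policy added above.
 */
#include <stdbool.h>
#include <stdio.h>

#define MAX_OCR_RETRIES 3

static int fault_injections = 5;	/* pretend FW stays faulted */

static bool fw_faulted(void) { return fault_injections-- > 0; }
static void do_ocr(void)     { printf("issuing online controller reset\n"); }

int main(void)
{
	int tries = 0;
	bool kill_adapter = false;

	while (fw_faulted()) {
		if (tries++ == MAX_OCR_RETRIES) {
			kill_adapter = true;	/* last chance exhausted */
			break;
		}
		do_ocr();
	}
	printf(kill_adapter ? "kill adapter\n" : "FW recovered\n");
	return 0;
}
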
@@ -2669,6 +2746,7 @@
 			return -ENOMEM;
 		}
 
+		memset(cmd->frame, 0, total_sz);
 		cmd->frame->io.context = cmd->index;
 		cmd->frame->io.pad_0 = 0;
 	}
@@ -3585,6 +3663,27 @@
 			instance->max_fw_cmds - MEGASAS_INT_CMDS;
 	host->this_id = instance->init_id;
 	host->sg_tablesize = instance->max_num_sge;
+	/*
+	 * Check if the module parameter value for max_sectors can be used
+	 */
+	if (max_sectors && max_sectors < instance->max_sectors_per_req)
+		instance->max_sectors_per_req = max_sectors;
+	else {
+		if (max_sectors) {
+			if (((instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS1078GEN2) ||
+				(instance->pdev->device ==
+				PCI_DEVICE_ID_LSI_SAS0079GEN2)) &&
+				(max_sectors <= MEGASAS_MAX_SECTORS)) {
+				instance->max_sectors_per_req = max_sectors;
+			} else {
+				printk(KERN_INFO "megasas: max_sectors should be > 0 "
+					"and <= %d (or < 1MB for GEN2 controller)\n",
+					instance->max_sectors_per_req);
+			}
+		}
+	}
+
 	host->max_sectors = instance->max_sectors_per_req;
 	host->cmd_per_lun = 128;
 	host->max_channel = MEGASAS_MAX_CHANNELS - 1;
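
The hunk above accepts a user-supplied max_sectors only when it is below the firmware-reported limit or, for the GEN2 controllers, up to MEGASAS_MAX_SECTORS (2*1024 sectors of 512 bytes, i.e. 1 MB). A standalone sketch of that clamping decision follows; the is_gen2 flag stands in for the PCI device-ID checks and the names are illustrative only.

/*
 * Standalone sketch of the max_sectors validation above.
 */
#include <stdbool.h>
#include <stdio.h>

#define MEGASAS_MAX_SECTORS (2 * 1024)	/* 2048 * 512 bytes = 1 MB */

static unsigned int pick_max_sectors(unsigned int user, unsigned int fw_limit,
				     bool is_gen2)
{
	if (user && user < fw_limit)
		return user;			/* always safe below FW limit */
	if (user && is_gen2 && user <= MEGASAS_MAX_SECTORS)
		return user;			/* GEN2 may go up to 1 MB */
	return fw_limit;			/* otherwise keep FW default */
}

int main(void)
{
	printf("%u\n", pick_max_sectors(1024, 512, true));	/* 1024 */
	printf("%u\n", pick_max_sectors(4096, 512, true));	/* 512  */
	printf("%u\n", pick_max_sectors(0, 512, false));	/* 512  */
	return 0;
}
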
@@ -4658,6 +4757,15 @@
 static DRIVER_ATTR(support_poll_for_event, S_IRUGO,
 			megasas_sysfs_show_support_poll_for_event, NULL);
 
+static ssize_t
+megasas_sysfs_show_support_device_change(struct device_driver *dd, char *buf)
+{
+	return sprintf(buf, "%u\n", support_device_change);
+}
+
+static DRIVER_ATTR(support_device_change, S_IRUGO,
+			megasas_sysfs_show_support_device_change, NULL);
+
 static ssize_t
 megasas_sysfs_show_dbg_lvl(struct device_driver *dd, char *buf)
 {
@@ -4978,6 +5086,7 @@
 	       MEGASAS_EXT_VERSION);
 
 	support_poll_for_event = 2;
+	support_device_change = 1;
 
 	memset(&megasas_mgmt_info, 0, sizeof(megasas_mgmt_info));
 
@@ -5026,8 +5135,17 @@
 	if (rval)
 		goto err_dcf_poll_mode_io;
 
+	rval = driver_create_file(&megasas_pci_driver.driver,
+				&driver_attr_support_device_change);
+	if (rval)
+		goto err_dcf_support_device_change;
+
 	return rval;
 
+err_dcf_support_device_change:
+	driver_remove_file(&megasas_pci_driver.driver,
+		  &driver_attr_poll_mode_io);
+
 err_dcf_poll_mode_io:
 	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_dbg_lvl);
@@ -5058,6 +5176,10 @@
 	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_dbg_lvl);
 	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_poll_for_event);
+	driver_remove_file(&megasas_pci_driver.driver,
+			&driver_attr_support_device_change);
+	driver_remove_file(&megasas_pci_driver.driver,
 			   &driver_attr_release_date);
 	driver_remove_file(&megasas_pci_driver.driver, &driver_attr_version);
 
diff --git a/drivers/scsi/megaraid/megaraid_sas.h b/drivers/scsi/megaraid/megaraid_sas.h
index 16a4f68..ad16f5e 100644
--- a/drivers/scsi/megaraid/megaraid_sas.h
+++ b/drivers/scsi/megaraid/megaraid_sas.h
@@ -18,9 +18,9 @@
 /*
  * MegaRAID SAS Driver meta data
  */
-#define MEGASAS_VERSION			"00.00.04.17.1-rc1"
-#define MEGASAS_RELDATE			"Oct. 29, 2009"
-#define MEGASAS_EXT_VERSION		"Thu. Oct. 29, 11:41:51 PST 2009"
+#define MEGASAS_VERSION			"00.00.04.31-rc1"
+#define MEGASAS_RELDATE			"May 3, 2010"
+#define MEGASAS_EXT_VERSION		"Mon. May 3, 11:41:51 PST 2010"
 
 /*
  * Device IDs
@@ -706,6 +706,7 @@
 #define MEGASAS_MAX_LD_IDS			(MEGASAS_MAX_LD_CHANNELS * \
 						MEGASAS_MAX_DEV_PER_CHANNEL)
 
+#define MEGASAS_MAX_SECTORS                    (2*1024)
 #define MEGASAS_DBG_LVL				1
 
 #define MEGASAS_FW_BUSY				1
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index e88bbdd..0433ea6 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -452,10 +452,6 @@
 {
 	struct request *rq = or->request;
 
-	_osd_free_seg(or, &or->set_attr);
-	_osd_free_seg(or, &or->enc_get_attr);
-	_osd_free_seg(or, &or->get_attr);
-
 	if (rq) {
 		if (rq->next_rq) {
 			_put_request(rq->next_rq);
@@ -464,6 +460,12 @@
 
 		_put_request(rq);
 	}
+
+	_osd_free_seg(or, &or->get_attr);
+	_osd_free_seg(or, &or->enc_get_attr);
+	_osd_free_seg(or, &or->set_attr);
+	_osd_free_seg(or, &or->cdb_cont);
+
 	_osd_request_free(or);
 }
 EXPORT_SYMBOL(osd_end_request);
@@ -547,6 +549,12 @@
 	return 0;
 }
 
+static int _alloc_cdb_cont(struct osd_request *or, unsigned total_bytes)
+{
+	OSD_DEBUG("total_bytes=%d\n", total_bytes);
+	return _osd_realloc_seg(or, &or->cdb_cont, total_bytes);
+}
+
 static int _alloc_set_attr_list(struct osd_request *or,
 	const struct osd_attr *oa, unsigned nelem, unsigned add_bytes)
 {
@@ -885,6 +893,199 @@
 }
 EXPORT_SYMBOL(osd_req_read_kern);
 
+static int _add_sg_continuation_descriptor(struct osd_request *or,
+	const struct osd_sg_entry *sglist, unsigned numentries, u64 *len)
+{
+	struct osd_sg_continuation_descriptor *oscd;
+	u32 oscd_size;
+	unsigned i;
+	int ret;
+
+	oscd_size = sizeof(*oscd) + numentries * sizeof(oscd->entries[0]);
+
+	if (!or->cdb_cont.total_bytes) {
+		/* First time, jump over the header, we will write to:
+		 *	cdb_cont.buff + cdb_cont.total_bytes
+		 */
+		or->cdb_cont.total_bytes =
+				sizeof(struct osd_continuation_segment_header);
+	}
+
+	ret = _alloc_cdb_cont(or, or->cdb_cont.total_bytes + oscd_size);
+	if (unlikely(ret))
+		return ret;
+
+	oscd = or->cdb_cont.buff + or->cdb_cont.total_bytes;
+	oscd->hdr.type = cpu_to_be16(SCATTER_GATHER_LIST);
+	oscd->hdr.pad_length = 0;
+	oscd->hdr.length = cpu_to_be32(oscd_size - sizeof(*oscd));
+
+	*len = 0;
+	/* copy the sg entries and convert to network byte order */
+	for (i = 0; i < numentries; i++) {
+		oscd->entries[i].offset = cpu_to_be64(sglist[i].offset);
+		oscd->entries[i].len    = cpu_to_be64(sglist[i].len);
+		*len += sglist[i].len;
+	}
+
+	or->cdb_cont.total_bytes += oscd_size;
+	OSD_DEBUG("total_bytes=%d oscd_size=%d numentries=%d\n",
+		  or->cdb_cont.total_bytes, oscd_size, numentries);
+	return 0;
+}
+
+static int _osd_req_finalize_cdb_cont(struct osd_request *or, const u8 *cap_key)
+{
+	struct request_queue *req_q = osd_request_queue(or->osd_dev);
+	struct bio *bio;
+	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
+	struct osd_continuation_segment_header *cont_seg_hdr;
+
+	if (!or->cdb_cont.total_bytes)
+		return 0;
+
+	cont_seg_hdr = or->cdb_cont.buff;
+	cont_seg_hdr->format = CDB_CONTINUATION_FORMAT_V2;
+	cont_seg_hdr->service_action = cdbh->varlen_cdb.service_action;
+
+	/* create a bio for continuation segment */
+	bio = bio_map_kern(req_q, or->cdb_cont.buff, or->cdb_cont.total_bytes,
+			   GFP_KERNEL);
+	if (unlikely(!bio))
+		return -ENOMEM;
+
+	bio->bi_rw |= REQ_WRITE;
+
+	/* integrity check the continuation before the bio is linked
+	 * with the other data segments since the continuation
+	 * integrity is separate from the other data segments.
+	 */
+	osd_sec_sign_data(cont_seg_hdr->integrity_check, bio, cap_key);
+
+	cdbh->v2.cdb_continuation_length = cpu_to_be32(or->cdb_cont.total_bytes);
+
+	/* we can't use _req_append_segment, because we need to link in the
+	 * continuation bio to the head of the bio list - the
+	 * continuation segment (if it exists) is always the first segment in
+	 * the out data buffer.
+	 */
+	bio->bi_next = or->out.bio;
+	or->out.bio = bio;
+	or->out.total_bytes += or->cdb_cont.total_bytes;
+
+	return 0;
+}
+
+/* osd_req_write_sg: Takes a @bio that points to the data out buffer and an
+ * @sglist that has the scatter gather entries. Scatter-gather enables a write
+ * of multiple non-contiguous areas of an object, in a single call. The extents
+ * may overlap and/or be in any order. The only constraint is that:
+ *	total_bytes(sglist) >= total_bytes(bio)
+ */
+int osd_req_write_sg(struct osd_request *or,
+	const struct osd_obj_id *obj, struct bio *bio,
+	const struct osd_sg_entry *sglist, unsigned numentries)
+{
+	u64 len;
+	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+	if (ret)
+		return ret;
+	osd_req_write(or, obj, 0, bio, len);
+
+	return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg);
+
+/* osd_req_read_sg: Read multiple extents of an object into @bio
+ * See osd_req_write_sg
+ */
+int osd_req_read_sg(struct osd_request *or,
+	const struct osd_obj_id *obj, struct bio *bio,
+	const struct osd_sg_entry *sglist, unsigned numentries)
+{
+	u64 len;
+	int ret = _add_sg_continuation_descriptor(or, sglist, numentries, &len);
+
+	if (ret)
+		return ret;
+	osd_req_read(or, obj, 0, bio, len);
+
+	return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg);
+
+/* SG-list write/read Kern API
+ *
+ * osd_req_{write,read}_sg_kern takes an array of @buff pointers and an array
+ * of sg_entries. @numentries indicates how many pointers and sg_entries there
+ * are. By requiring an array of buff pointers, a caller can do a
+ * single write/read and scatter into multiple buffers.
+ * NOTE: Each buffer + len should not cross a page boundary.
+ */
+static struct bio *_create_sg_bios(struct osd_request *or,
+	void **buff, const struct osd_sg_entry *sglist, unsigned numentries)
+{
+	struct request_queue *q = osd_request_queue(or->osd_dev);
+	struct bio *bio;
+	unsigned i;
+
+	bio = bio_kmalloc(GFP_KERNEL, numentries);
+	if (unlikely(!bio)) {
+		OSD_DEBUG("Failed to allocate BIO size=%u\n", numentries);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	for (i = 0; i < numentries; i++) {
+		unsigned offset = offset_in_page(buff[i]);
+		struct page *page = virt_to_page(buff[i]);
+		unsigned len = sglist[i].len;
+		unsigned added_len;
+
+		BUG_ON(offset + len > PAGE_SIZE);
+		added_len = bio_add_pc_page(q, bio, page, len, offset);
+		if (unlikely(len != added_len)) {
+			OSD_DEBUG("bio_add_pc_page len(%d) != added_len(%d)\n",
+				  len, added_len);
+			bio_put(bio);
+			return ERR_PTR(-ENOMEM);
+		}
+	}
+
+	return bio;
+}
+
+int osd_req_write_sg_kern(struct osd_request *or,
+	const struct osd_obj_id *obj, void **buff,
+	const struct osd_sg_entry *sglist, unsigned numentries)
+{
+	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	bio->bi_rw |= REQ_WRITE;
+	osd_req_write_sg(or, obj, bio, sglist, numentries);
+
+	return 0;
+}
+EXPORT_SYMBOL(osd_req_write_sg_kern);
+
+int osd_req_read_sg_kern(struct osd_request *or,
+	const struct osd_obj_id *obj, void **buff,
+	const struct osd_sg_entry *sglist, unsigned numentries)
+{
+	struct bio *bio = _create_sg_bios(or, buff, sglist, numentries);
+	if (IS_ERR(bio))
+		return PTR_ERR(bio);
+
+	osd_req_read_sg(or, obj, bio, sglist, numentries);
+
+	return 0;
+}
+EXPORT_SYMBOL(osd_req_read_sg_kern);
+
+
+
 void osd_req_get_attributes(struct osd_request *or,
 	const struct osd_obj_id *obj)
 {
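
The osd_req_{write,read}_sg paths added above build a scatter-gather continuation descriptor: each (offset, len) entry is converted to big-endian byte order on the wire and the entry lengths are summed to obtain the total transfer length passed to osd_req_write()/osd_req_read(). A standalone sketch of that conversion and summation follows; it uses the glibc htobe64() helper rather than the kernel's cpu_to_be64(), and the struct names are illustrative.

/*
 * Standalone sketch of the per-entry conversion done in
 * _add_sg_continuation_descriptor() above: copy each (offset,len) pair in
 * big-endian order and accumulate the total length.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct sg_entry   { uint64_t offset; uint64_t len; };	/* host order */
struct wire_entry { uint64_t offset; uint64_t len; };	/* big-endian */

static uint64_t encode_sg(const struct sg_entry *in, struct wire_entry *out,
			  unsigned int n)
{
	uint64_t total = 0;
	unsigned int i;

	for (i = 0; i < n; i++) {
		out[i].offset = htobe64(in[i].offset);
		out[i].len    = htobe64(in[i].len);
		total += in[i].len;
	}
	return total;	/* must cover total_bytes(bio) per the API comment */
}

int main(void)
{
	struct sg_entry in[2] = { { 0, 4096 }, { 1 << 20, 512 } };
	struct wire_entry out[2];

	printf("total=%llu\n", (unsigned long long)encode_sg(in, out, 2));
	return 0;
}
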
@@ -1218,17 +1419,18 @@
 	or->get_attr.buff = attar_page;
 	or->get_attr.total_bytes = max_page_len;
 
-	or->set_attr.buff = set_one_attr->val_ptr;
-	or->set_attr.total_bytes = set_one_attr->len;
-
 	cdbh->attrs_page.get_attr_page = cpu_to_be32(page_id);
 	cdbh->attrs_page.get_attr_alloc_length = cpu_to_be32(max_page_len);
-	/* ocdb->attrs_page.get_attr_offset; */
+
+	if (!set_one_attr || !set_one_attr->attr_page)
+		return 0; /* The set is optional */
+
+	or->set_attr.buff = set_one_attr->val_ptr;
+	or->set_attr.total_bytes = set_one_attr->len;
 
 	cdbh->attrs_page.set_attr_page = cpu_to_be32(set_one_attr->attr_page);
 	cdbh->attrs_page.set_attr_id = cpu_to_be32(set_one_attr->attr_id);
 	cdbh->attrs_page.set_attr_length = cpu_to_be32(set_one_attr->len);
-	/* ocdb->attrs_page.set_attr_offset; */
 	return 0;
 }
 EXPORT_SYMBOL(osd_req_add_get_attr_page);
@@ -1248,11 +1450,14 @@
 	if (ret)
 		return ret;
 
+	if (or->set_attr.total_bytes == 0)
+		return 0;
+
 	/* set one value */
 	cdbh->attrs_page.set_attr_offset =
 		osd_req_encode_offset(or, or->out.total_bytes, &out_padding);
 
-	ret = _req_append_segment(or, out_padding, &or->enc_get_attr, NULL,
+	ret = _req_append_segment(or, out_padding, &or->set_attr, NULL,
 				  &or->out);
 	return ret;
 }
@@ -1276,7 +1481,8 @@
 }
 
 static int _osd_req_finalize_data_integrity(struct osd_request *or,
-	bool has_in, bool has_out, u64 out_data_bytes, const u8 *cap_key)
+	bool has_in, bool has_out, struct bio *out_data_bio, u64 out_data_bytes,
+	const u8 *cap_key)
 {
 	struct osd_security_parameters *sec_parms = _osd_req_sec_params(or);
 	int ret;
@@ -1307,7 +1513,7 @@
 		or->out.last_seg = NULL;
 
 		/* they are now all chained to request sign them all together */
-		osd_sec_sign_data(&or->out_data_integ, or->out.req->bio,
+		osd_sec_sign_data(&or->out_data_integ, out_data_bio,
 				  cap_key);
 	}
 
@@ -1403,6 +1609,8 @@
 {
 	struct osd_cdb_head *cdbh = osd_cdb_head(&or->cdb);
 	bool has_in, has_out;
+	 /* Save for data_integrity without the cdb_continuation */
+	struct bio *out_data_bio = or->out.bio;
 	u64 out_data_bytes = or->out.total_bytes;
 	int ret;
 
@@ -1418,9 +1626,14 @@
 	osd_set_caps(&or->cdb, cap);
 
 	has_in = or->in.bio || or->get_attr.total_bytes;
-	has_out = or->out.bio || or->set_attr.total_bytes ||
-		or->enc_get_attr.total_bytes;
+	has_out = or->out.bio || or->cdb_cont.total_bytes ||
+		or->set_attr.total_bytes || or->enc_get_attr.total_bytes;
 
+	ret = _osd_req_finalize_cdb_cont(or, cap_key);
+	if (ret) {
+		OSD_DEBUG("_osd_req_finalize_cdb_cont failed\n");
+		return ret;
+	}
 	ret = _init_blk_request(or, has_in, has_out);
 	if (ret) {
 		OSD_DEBUG("_init_blk_request failed\n");
@@ -1458,7 +1671,8 @@
 	}
 
 	ret = _osd_req_finalize_data_integrity(or, has_in, has_out,
-					       out_data_bytes, cap_key);
+					       out_data_bio, out_data_bytes,
+					       cap_key);
 	if (ret)
 		return ret;
 
diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
index 4b87657..cf89091 100644
--- a/drivers/scsi/pmcraid.c
+++ b/drivers/scsi/pmcraid.c
@@ -1594,10 +1594,12 @@
 	cfg_entry = &ccn_hcam->cfg_entry;
 	fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
 
-	pmcraid_info
-		("CCN(%x): %x type: %x lost: %x flags: %x res: %x:%x:%x:%x\n",
+	pmcraid_info("CCN(%x): %x timestamp: %llx type: %x lost: %x flags: %x "
+		"res: %x:%x:%x:%x\n",
 		 pinstance->ccn.hcam->ilid,
 		 pinstance->ccn.hcam->op_code,
+		((pinstance->ccn.hcam->timestamp1) |
+		((pinstance->ccn.hcam->timestamp2 & 0xffffffffLL) << 32)),
 		 pinstance->ccn.hcam->notification_type,
 		 pinstance->ccn.hcam->notification_lost,
 		 pinstance->ccn.hcam->flags,
@@ -1850,6 +1852,7 @@
  *   none
  */
 static void pmcraid_initiate_reset(struct pmcraid_instance *);
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd);
 
 static void pmcraid_process_ldn(struct pmcraid_cmd *cmd)
 {
@@ -1881,6 +1884,10 @@
 					       lock_flags);
 			return;
 		}
+		if (fd_ioasc == PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC) {
+			pinstance->timestamp_error = 1;
+			pmcraid_set_timestamp(cmd);
+		}
 	} else {
 		dev_info(&pinstance->pdev->dev,
 			"Host RCB(LDN) failed with IOASC: 0x%08X\n", ioasc);
@@ -3363,7 +3370,7 @@
 	sg_size = buflen;
 
 	for (i = 0; i < num_elem; i++) {
-		page = alloc_pages(GFP_KERNEL|GFP_DMA, order);
+		page = alloc_pages(GFP_KERNEL|GFP_DMA|__GFP_ZERO, order);
 		if (!page) {
 			for (j = i - 1; j >= 0; j--)
 				__free_pages(sg_page(&scatterlist[j]), order);
@@ -3739,6 +3746,7 @@
 	unsigned long request_buffer;
 	unsigned long request_offset;
 	unsigned long lock_flags;
+	void *ioasa;
 	u32 ioasc;
 	int request_size;
 	int buffer_size;
@@ -3780,6 +3788,11 @@
 	rc = __copy_from_user(buffer,
 			     (struct pmcraid_passthrough_ioctl_buffer *) arg,
 			     sizeof(struct pmcraid_passthrough_ioctl_buffer));
+
+	ioasa =
+	(void *)(arg +
+		offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
+
 	if (rc) {
 		pmcraid_err("ioctl: can't copy passthrough buffer\n");
 		rc = -EFAULT;
@@ -3947,22 +3960,14 @@
 	}
 
 out_handle_response:
-	/* If the command failed for any reason, copy entire IOASA buffer and
-	 * return IOCTL success. If copying IOASA to user-buffer fails, return
+	/* copy entire IOASA buffer and return IOCTL success.  If copying
+	 * IOASA to user-buffer fails, return EFAULT
 	 */
-	if (PMCRAID_IOASC_SENSE_KEY(le32_to_cpu(cmd->ioa_cb->ioasa.ioasc))) {
-		void *ioasa =
-		    (void *)(arg +
-		    offsetof(struct pmcraid_passthrough_ioctl_buffer, ioasa));
-
-		pmcraid_info("command failed with %x\n",
-			     le32_to_cpu(cmd->ioa_cb->ioasa.ioasc));
-		if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
-				 sizeof(struct pmcraid_ioasa))) {
-			pmcraid_err("failed to copy ioasa buffer to user\n");
-			rc = -EFAULT;
-		}
+	if (copy_to_user(ioasa, &cmd->ioa_cb->ioasa,
+		sizeof(struct pmcraid_ioasa))) {
+		pmcraid_err("failed to copy ioasa buffer to user\n");
+		rc = -EFAULT;
 	}
 
 	/* If the data transfer was from device, copy the data onto user
@@ -5147,6 +5152,16 @@
 		pinstance->inq_data = NULL;
 		pinstance->inq_data_baddr = 0;
 	}
+
+	if (pinstance->timestamp_data != NULL) {
+		pci_free_consistent(pinstance->pdev,
+				    sizeof(struct pmcraid_timestamp_data),
+				    pinstance->timestamp_data,
+				    pinstance->timestamp_data_baddr);
+
+		pinstance->timestamp_data = NULL;
+		pinstance->timestamp_data_baddr = 0;
+	}
 }
 
 /**
@@ -5205,6 +5220,20 @@
 		return -ENOMEM;
 	}
 
+	/* allocate DMAable memory for set timestamp data buffer */
+	pinstance->timestamp_data = pci_alloc_consistent(
+					pinstance->pdev,
+					sizeof(struct pmcraid_timestamp_data),
+					&pinstance->timestamp_data_baddr);
+
+	if (pinstance->timestamp_data == NULL) {
+		pmcraid_err("couldn't allocate DMA memory for "
+				"set time_stamp\n");
+		pmcraid_release_buffers(pinstance);
+		return -ENOMEM;
+	}
+
+
 	/* Initialize all the command blocks and add them to free pool. No
 	 * need to lock (free_pool_lock) as this is done in initialization
 	 * itself
@@ -5610,6 +5639,68 @@
 }
 
 /**
+ * pmcraid_set_timestamp - set the timestamp to IOAFP
+ *
+ * @cmd: pointer to pmcraid_cmd structure
+ *
+ * Return Value
+ *  none
+ */
+static void pmcraid_set_timestamp(struct pmcraid_cmd *cmd)
+{
+	struct pmcraid_instance *pinstance = cmd->drv_inst;
+	struct pmcraid_ioarcb *ioarcb = &cmd->ioa_cb->ioarcb;
+	__be32 time_stamp_len = cpu_to_be32(PMCRAID_TIMESTAMP_LEN);
+	struct pmcraid_ioadl_desc *ioadl = ioarcb->add_data.u.ioadl;
+
+	struct timeval tv;
+	__le64 timestamp;
+
+	do_gettimeofday(&tv);
+	timestamp = tv.tv_sec * 1000;
+
+	pinstance->timestamp_data->timestamp[0] = (__u8)(timestamp);
+	pinstance->timestamp_data->timestamp[1] = (__u8)((timestamp) >> 8);
+	pinstance->timestamp_data->timestamp[2] = (__u8)((timestamp) >> 16);
+	pinstance->timestamp_data->timestamp[3] = (__u8)((timestamp) >> 24);
+	pinstance->timestamp_data->timestamp[4] = (__u8)((timestamp) >> 32);
+	pinstance->timestamp_data->timestamp[5] = (__u8)((timestamp)  >> 40);
+
+	pmcraid_reinit_cmdblk(cmd);
+	ioarcb->request_type = REQ_TYPE_SCSI;
+	ioarcb->resource_handle = cpu_to_le32(PMCRAID_IOA_RES_HANDLE);
+	ioarcb->cdb[0] = PMCRAID_SCSI_SET_TIMESTAMP;
+	ioarcb->cdb[1] = PMCRAID_SCSI_SERVICE_ACTION;
+	memcpy(&(ioarcb->cdb[6]), &time_stamp_len, sizeof(time_stamp_len));
+
+	ioarcb->ioadl_bus_addr = cpu_to_le64((cmd->ioa_cb_bus_addr) +
+					offsetof(struct pmcraid_ioarcb,
+						add_data.u.ioadl[0]));
+	ioarcb->ioadl_length = cpu_to_le32(sizeof(struct pmcraid_ioadl_desc));
+	ioarcb->ioarcb_bus_addr &= ~(0x1FULL);
+
+	ioarcb->request_flags0 |= NO_LINK_DESCS;
+	ioarcb->request_flags0 |= TRANSFER_DIR_WRITE;
+	ioarcb->data_transfer_length =
+		cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+	ioadl = &(ioarcb->add_data.u.ioadl[0]);
+	ioadl->flags = IOADL_FLAGS_LAST_DESC;
+	ioadl->address = cpu_to_le64(pinstance->timestamp_data_baddr);
+	ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_timestamp_data));
+
+	if (!pinstance->timestamp_error) {
+		pinstance->timestamp_error = 0;
+		pmcraid_send_cmd(cmd, pmcraid_set_supported_devs,
+			 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+	} else {
+		pmcraid_send_cmd(cmd, pmcraid_return_cmd,
+			 PMCRAID_INTERNAL_TIMEOUT, pmcraid_timeout_handler);
+		return;
+	}
+}
+
+
+/**
  * pmcraid_init_res_table - Initialize the resource table
  * @cmd:  pointer to pmcraid command struct
  *
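
pmcraid_set_timestamp() above converts the current time to milliseconds and stores the low 48 bits, least-significant byte first, in the 6-byte timestamp field of the SET TIMESTAMP data buffer (CDB opcode 0xA4, service action 0x0F per the defines in pmcraid.h). Below is a standalone sketch of just that packing; time() stands in for the kernel's do_gettimeofday() and the layout mirrors the sequence of shifts in the patch.

/*
 * Standalone sketch of the 6-byte, LSB-first millisecond timestamp packing
 * used by pmcraid_set_timestamp() above.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	uint64_t ms = (uint64_t)time(NULL) * 1000;	/* seconds -> ms */
	uint8_t ts[6];
	int i;

	for (i = 0; i < 6; i++)
		ts[i] = (uint8_t)(ms >> (8 * i));	/* byte i = bits 8i..8i+7 */

	printf("ms=%llu bytes:", (unsigned long long)ms);
	for (i = 0; i < 6; i++)
		printf(" %02x", ts[i]);
	printf("\n");
	return 0;
}
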
@@ -5720,7 +5811,7 @@
 
 	/* release the resource list lock */
 	spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
-	pmcraid_set_supported_devs(cmd);
+	pmcraid_set_timestamp(cmd);
 }
 
 /**
@@ -6054,10 +6145,10 @@
 static void __exit pmcraid_exit(void)
 {
 	pmcraid_netlink_release();
-	class_destroy(pmcraid_class);
 	unregister_chrdev_region(MKDEV(pmcraid_major, 0),
 				 PMCRAID_MAX_ADAPTERS);
 	pci_unregister_driver(&pmcraid_driver);
+	class_destroy(pmcraid_class);
 }
 
 module_init(pmcraid_init);
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 6cfa014..1134279 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -42,7 +42,7 @@
  */
 #define PMCRAID_DRIVER_NAME		"PMC MaxRAID"
 #define PMCRAID_DEVFILE			"pmcsas"
-#define PMCRAID_DRIVER_VERSION		"2.0.2"
+#define PMCRAID_DRIVER_VERSION		"2.0.3"
 #define PMCRAID_DRIVER_DATE		__DATE__
 
 #define PMCRAID_FW_VERSION_1		0x002
@@ -184,6 +184,7 @@
 #define PMCRAID_IOASC_IR_INVALID_RESOURCE_HANDLE        0x05250000
 #define PMCRAID_IOASC_AC_TERMINATED_BY_HOST		0x0B5A0000
 #define PMCRAID_IOASC_UA_BUS_WAS_RESET			0x06290000
+#define PMCRAID_IOASC_TIME_STAMP_OUT_OF_SYNC		0x06908B00
 #define PMCRAID_IOASC_UA_BUS_WAS_RESET_BY_OTHER		0x06298000
 
 /* Driver defined IOASCs */
@@ -561,6 +562,17 @@
 	__u8	reserved3[16];
 };
 
+#define PMCRAID_TIMESTAMP_LEN		12
+#define PMCRAID_REQ_TM_STR_LEN		6
+#define PMCRAID_SCSI_SET_TIMESTAMP	0xA4
+#define PMCRAID_SCSI_SERVICE_ACTION	0x0F
+
+struct pmcraid_timestamp_data {
+	__u8 reserved1[4];
+	__u8 timestamp[PMCRAID_REQ_TM_STR_LEN];		/* current time value */
+	__u8 reserved2[2];
+};
+
 /* pmcraid_cmd - LLD representation of SCSI command */
 struct pmcraid_cmd {
 
@@ -568,7 +580,6 @@
 	struct pmcraid_control_block *ioa_cb;
 	dma_addr_t ioa_cb_bus_addr;
 	dma_addr_t dma_handle;
-	u8 *sense_buffer;
 
 	/* pointer to mid layer structure of SCSI commands */
 	struct scsi_cmnd *scsi_cmd;
@@ -705,6 +716,9 @@
 	struct pmcraid_inquiry_data *inq_data;
 	dma_addr_t  inq_data_baddr;
 
+	struct pmcraid_timestamp_data *timestamp_data;
+	dma_addr_t  timestamp_data_baddr;
+
 	/* size of configuration table entry, varies based on the firmware */
 	u32	config_table_entry_size;
 
@@ -791,6 +805,7 @@
 #define SHUTDOWN_NONE               0x0
 #define SHUTDOWN_NORMAL             0x1
 #define SHUTDOWN_ABBREV             0x2
+	u32 timestamp_error:1; /* indicate set timestamp for out of sync */
 
 };
 
@@ -1056,10 +1071,10 @@
 #define PMCRAID_PASSTHROUGH_IOCTL    'F'
 
 #define DRV_IOCTL(n, size) \
-    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
+	_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_DRIVER_IOCTL, (n), (size))
 
 #define FMW_IOCTL(n, size) \
-    _IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
+	_IOC(_IOC_READ|_IOC_WRITE, PMCRAID_PASSTHROUGH_IOCTL,  (n), (size))
 
 /*
  * _ARGSIZE: macro that gives size of the argument type passed to an IOCTL cmd.
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 2ff4342..bc8194f 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -1538,6 +1538,10 @@
 	if (!fcport)
 		return;
 
+	/* Now that the rport has been deleted, set the fcport state to
+	   FCS_DEVICE_DEAD */
+	atomic_set(&fcport->state, FCS_DEVICE_DEAD);
+
 	/*
 	 * Transport has effectively 'deleted' the rport, clear
 	 * all local references.
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index fdfbf83..31a4121 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -1307,6 +1307,125 @@
 }
 
 static int
+qla2x00_optrom_setup(struct fc_bsg_job *bsg_job, struct qla_hw_data *ha,
+	uint8_t is_update)
+{
+	uint32_t start = 0;
+	int valid = 0;
+
+	bsg_job->reply->reply_payload_rcv_len = 0;
+
+	if (unlikely(pci_channel_offline(ha->pdev)))
+		return -EINVAL;
+
+	start = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
+	if (start > ha->optrom_size)
+		return -EINVAL;
+
+	if (ha->optrom_state != QLA_SWAITING)
+		return -EBUSY;
+
+	ha->optrom_region_start = start;
+
+	if (is_update) {
+		if (ha->optrom_size == OPTROM_SIZE_2300 && start == 0)
+			valid = 1;
+		else if (start == (ha->flt_region_boot * 4) ||
+		    start == (ha->flt_region_fw * 4))
+			valid = 1;
+		else if (IS_QLA24XX_TYPE(ha) || IS_QLA25XX(ha) ||
+		    IS_QLA8XXX_TYPE(ha))
+			valid = 1;
+		if (!valid) {
+			qla_printk(KERN_WARNING, ha,
+			    "Invalid start region 0x%x/0x%x.\n",
+			    start, bsg_job->request_payload.payload_len);
+			return -EINVAL;
+		}
+
+		ha->optrom_region_size = start +
+		    bsg_job->request_payload.payload_len > ha->optrom_size ?
+		    ha->optrom_size - start :
+		    bsg_job->request_payload.payload_len;
+		ha->optrom_state = QLA_SWRITING;
+	} else {
+		ha->optrom_region_size = start +
+		    bsg_job->reply_payload.payload_len > ha->optrom_size ?
+		    ha->optrom_size - start :
+		    bsg_job->reply_payload.payload_len;
+		ha->optrom_state = QLA_SREADING;
+	}
+
+	ha->optrom_buffer = vmalloc(ha->optrom_region_size);
+	if (!ha->optrom_buffer) {
+		qla_printk(KERN_WARNING, ha,
+		    "Read: Unable to allocate memory for optrom retrieval "
+		    "(%x).\n", ha->optrom_region_size);
+
+		ha->optrom_state = QLA_SWAITING;
+		return -ENOMEM;
+	}
+
+	memset(ha->optrom_buffer, 0, ha->optrom_region_size);
+	return 0;
+}
+
+static int
+qla2x00_read_optrom(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+
+	rval = qla2x00_optrom_setup(bsg_job, ha, 0);
+	if (rval)
+		return rval;
+
+	ha->isp_ops->read_optrom(vha, ha->optrom_buffer,
+	    ha->optrom_region_start, ha->optrom_region_size);
+
+	sg_copy_from_buffer(bsg_job->reply_payload.sg_list,
+	    bsg_job->reply_payload.sg_cnt, ha->optrom_buffer,
+	    ha->optrom_region_size);
+
+	bsg_job->reply->reply_payload_rcv_len = ha->optrom_region_size;
+	bsg_job->reply->result = DID_OK;
+	vfree(ha->optrom_buffer);
+	ha->optrom_buffer = NULL;
+	ha->optrom_state = QLA_SWAITING;
+	bsg_job->job_done(bsg_job);
+	return rval;
+}
+
+static int
+qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
+{
+	struct Scsi_Host *host = bsg_job->shost;
+	scsi_qla_host_t *vha = shost_priv(host);
+	struct qla_hw_data *ha = vha->hw;
+	int rval = 0;
+
+	rval = qla2x00_optrom_setup(bsg_job, ha, 1);
+	if (rval)
+		return rval;
+
+	sg_copy_to_buffer(bsg_job->request_payload.sg_list,
+	    bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
+	    ha->optrom_region_size);
+
+	ha->isp_ops->write_optrom(vha, ha->optrom_buffer,
+	    ha->optrom_region_start, ha->optrom_region_size);
+
+	bsg_job->reply->result = DID_OK;
+	vfree(ha->optrom_buffer);
+	ha->optrom_buffer = NULL;
+	ha->optrom_state = QLA_SWAITING;
+	bsg_job->job_done(bsg_job);
+	return rval;
+}
+
+static int
 qla2x00_process_vendor_specific(struct fc_bsg_job *bsg_job)
 {
 	switch (bsg_job->request->rqst_data.h_vendor.vendor_cmd[0]) {
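
qla2x00_optrom_setup() above clamps the requested flash region so it never runs past the end of the option ROM: the region size is the caller's payload length unless start plus that length would exceed optrom_size, in which case only the bytes remaining after start are used. A standalone sketch of that clamp follows; the values are illustrative.

/*
 * Standalone sketch of the option-ROM region clamping done in
 * qla2x00_optrom_setup() above.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t region_size(uint32_t start, uint32_t payload_len,
			    uint32_t optrom_size)
{
	/* same shape as the driver: truncate to what is left after start */
	return (start + payload_len > optrom_size) ?
		optrom_size - start : payload_len;
}

int main(void)
{
	uint32_t optrom = 0x100000;	/* hypothetical 1 MB option ROM */

	printf("0x%x\n", region_size(0xF0000, 0x20000, optrom)); /* 0x10000 */
	printf("0x%x\n", region_size(0x10000, 0x8000, optrom));  /* 0x8000  */
	return 0;
}
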
@@ -1328,6 +1447,12 @@
 	case QL_VND_FCP_PRIO_CFG_CMD:
 		return qla24xx_proc_fcp_prio_cfg_cmd(bsg_job);
 
+	case QL_VND_READ_FLASH:
+		return qla2x00_read_optrom(bsg_job);
+
+	case QL_VND_UPDATE_FLASH:
+		return qla2x00_update_optrom(bsg_job);
+
 	default:
 		bsg_job->reply->result = (DID_ERROR << 16);
 		bsg_job->job_done(bsg_job);
diff --git a/drivers/scsi/qla2xxx/qla_bsg.h b/drivers/scsi/qla2xxx/qla_bsg.h
index cc7c52f..074a999 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.h
+++ b/drivers/scsi/qla2xxx/qla_bsg.h
@@ -14,6 +14,8 @@
 #define QL_VND_A84_MGMT_CMD	0x04
 #define QL_VND_IIDMA		0x05
 #define QL_VND_FCP_PRIO_CFG_CMD	0x06
+#define QL_VND_READ_FLASH	0x07
+#define QL_VND_UPDATE_FLASH	0x08
 
 /* BSG definations for interpreting CommandSent field */
 #define INT_DEF_LB_LOOPBACK_CMD         0
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e1d3ad40..3a22eff 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -1700,9 +1700,7 @@
 	atomic_t state;
 	uint32_t flags;
 
-	int port_login_retry_count;
 	int login_retry;
-	atomic_t port_down_timer;
 
 	struct fc_rport *rport, *drport;
 	u32 supported_classes;
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index c33dec8..9382a81 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -92,6 +92,7 @@
 extern int ql2xdbwr;
 extern int ql2xdontresethba;
 extern int ql2xasynctmfenable;
+extern int ql2xgffidenable;
 extern int ql2xenabledif;
 extern int ql2xenablehba_err_chk;
 extern int ql2xtargetreset;
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 3cafbef..259f511 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -71,7 +71,7 @@
 	struct srb_iocb *iocb = ctx->u.iocb_cmd;
 	struct scsi_qla_host *vha = sp->fcport->vha;
 
-	del_timer_sync(&iocb->timer);
+	del_timer(&iocb->timer);
 	kfree(iocb);
 	kfree(ctx);
 	mempool_free(sp, sp->fcport->vha->hw->srb_mempool);
@@ -1344,6 +1344,13 @@
 		qla_printk(KERN_WARNING, ha, "Unable to allocate (%d KB) for "
 		    "firmware dump!!!\n", dump_size / 1024);
 
+		if (ha->fce) {
+			dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+			    ha->fce_dma);
+			ha->fce = NULL;
+			ha->fce_dma = 0;
+		}
+
 		if (ha->eft) {
 			dma_free_coherent(&ha->pdev->dev, eft_size, ha->eft,
 			    ha->eft_dma);
@@ -1818,14 +1825,14 @@
 		qla2x00_init_response_q_entries(rsp);
 	}
 
-	spin_lock_irqsave(&ha->vport_slock, flags);
+	spin_lock(&ha->vport_slock);
 	/* Clear RSCN queue. */
 	list_for_each_entry(vp, &ha->vp_list, list) {
 		vp->rscn_in_ptr = 0;
 		vp->rscn_out_ptr = 0;
 	}
 
-	spin_unlock_irqrestore(&ha->vport_slock, flags);
+	spin_unlock(&ha->vport_slock);
 
 	ha->isp_ops->config_rings(vha);
 
@@ -2916,21 +2923,13 @@
 void
 qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
 {
-	struct qla_hw_data *ha = vha->hw;
-
 	fcport->vha = vha;
 	fcport->login_retry = 0;
-	fcport->port_login_retry_count = ha->port_down_retry_count *
-	    PORT_RETRY_TIME;
-	atomic_set(&fcport->port_down_timer, ha->port_down_retry_count *
-	    PORT_RETRY_TIME);
 	fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
 
 	qla2x00_iidma_fcport(vha, fcport);
-
-	atomic_set(&fcport->state, FCS_ONLINE);
-
 	qla2x00_reg_remote_port(vha, fcport);
+	atomic_set(&fcport->state, FCS_ONLINE);
 }
 
 /*
@@ -3292,8 +3291,9 @@
 			continue;
 
 		/* Bypass ports whose FCP-4 type is not FCP_SCSI */
-		if (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
-		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN)
+		if (ql2xgffidenable &&
+		    (new_fcport->fc4_type != FC4_TYPE_FCP_SCSI &&
+		    new_fcport->fc4_type != FC4_TYPE_UNKNOWN))
 			continue;
 
 		/* Locate matching device in database. */
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 579f028..5f94430 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -992,8 +992,8 @@
 	ha = vha->hw;
 
 	DEBUG18(printk(KERN_DEBUG
-	    "%s(%ld): Executing cmd sp %p, pid=%ld, prot_op=%u.\n", __func__,
-	    vha->host_no, sp, cmd->serial_number, scsi_get_prot_op(sp->cmd)));
+	    "%s(%ld): Executing cmd sp %p, prot_op=%u.\n", __func__,
+	    vha->host_no, sp, scsi_get_prot_op(sp->cmd)));
 
 	cmd_pkt->vp_index = sp->fcport->vp_idx;
 
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index e0e43d9..1f06ddd 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -1240,12 +1240,6 @@
 	case LSC_SCODE_NPORT_USED:
 		data[0] = MBS_LOOP_ID_USED;
 		break;
-	case LSC_SCODE_CMD_FAILED:
-		if ((iop[1] & 0xff) == 0x05) {
-			data[0] = MBS_NOT_LOGGED_IN;
-			break;
-		}
-		/* Fall through. */
 	default:
 		data[0] = MBS_COMMAND_ERROR;
 		break;
@@ -1431,9 +1425,8 @@
 		rsp->status_srb = sp;
 
 	DEBUG5(printk("%s(): Check condition Sense data, scsi(%ld:%d:%d:%d) "
-	    "cmd=%p pid=%ld\n", __func__, sp->fcport->vha->host_no,
-	    cp->device->channel, cp->device->id, cp->device->lun, cp,
-	    cp->serial_number));
+	    "cmd=%p\n", __func__, sp->fcport->vha->host_no,
+	    cp->device->channel, cp->device->id, cp->device->lun, cp));
 	if (sense_len)
 		DEBUG5(qla2x00_dump_buffer(cp->sense_buffer, sense_len));
 }
@@ -1757,6 +1750,8 @@
 	case CS_INCOMPLETE:
 	case CS_PORT_UNAVAILABLE:
 	case CS_TIMEOUT:
+	case CS_RESET:
+
 		/*
 		 * We are going to have the fc class block the rport
 		 * while we try to recover so instruct the mid layer
@@ -1781,10 +1776,6 @@
 			qla2x00_mark_device_lost(fcport->vha, fcport, 1, 1);
 		break;
 
-	case CS_RESET:
-		cp->result = DID_TRANSPORT_DISRUPTED << 16;
-		break;
-
 	case CS_ABORTED:
 		cp->result = DID_RESET << 16;
 		break;
@@ -1801,10 +1792,10 @@
 	if (logit)
 		DEBUG2(qla_printk(KERN_INFO, ha,
 		    "scsi(%ld:%d:%d) FCP command status: 0x%x-0x%x (0x%x) "
-		    "oxid=0x%x ser=0x%lx cdb=%02x%02x%02x len=0x%x "
+		    "oxid=0x%x cdb=%02x%02x%02x len=0x%x "
 		    "rsp_info=0x%x resid=0x%x fw_resid=0x%x\n", vha->host_no,
 		    cp->device->id, cp->device->lun, comp_status, scsi_status,
-		    cp->result, ox_id, cp->serial_number, cp->cmnd[0],
+		    cp->result, ox_id, cp->cmnd[0],
 		    cp->cmnd[1], cp->cmnd[2], scsi_bufflen(cp), rsp_info_len,
 		    resid_len, fw_resid_len));
 
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index 800ea92..1830e6e 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -160,6 +160,11 @@
 		 "Enable target reset."
 		 "Default is 1 - use hw defaults.");
 
+int ql2xgffidenable;
+module_param(ql2xgffidenable, int, S_IRUGO|S_IRUSR);
+MODULE_PARM_DESC(ql2xgffidenable,
+		"Enables GFF_ID checks of port type. "
+		"Default is 0 - Do not use GFF_ID information.");
 
 int ql2xasynctmfenable;
 module_param(ql2xasynctmfenable, int, S_IRUGO|S_IRUSR);
@@ -255,6 +260,7 @@
 
 static int qla2x00_mem_alloc(struct qla_hw_data *, uint16_t, uint16_t,
 	struct req_que **, struct rsp_que **);
+static void qla2x00_free_fw_dump(struct qla_hw_data *);
 static void qla2x00_mem_free(struct qla_hw_data *);
 static void qla2x00_sp_free_dma(srb_t *);
 
@@ -539,6 +545,7 @@
 	srb_t *sp;
 	int rval;
 
+	spin_unlock_irq(vha->host->host_lock);
 	if (ha->flags.eeh_busy) {
 		if (ha->flags.pci_channel_io_perm_failure)
 			cmd->result = DID_NO_CONNECT << 16;
@@ -553,10 +560,6 @@
 		goto qc24_fail_command;
 	}
 
-	/* Close window on fcport/rport state-transitioning. */
-	if (fcport->drport)
-		goto qc24_target_busy;
-
 	if (!vha->flags.difdix_supported &&
 		scsi_get_prot_op(cmd) != SCSI_PROT_NORMAL) {
 			DEBUG2(qla_printk(KERN_ERR, ha,
@@ -567,15 +570,14 @@
 	}
 	if (atomic_read(&fcport->state) != FCS_ONLINE) {
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD ||
-		    atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
+			atomic_read(&fcport->state) == FCS_DEVICE_LOST ||
+			atomic_read(&base_vha->loop_state) == LOOP_DEAD) {
 			cmd->result = DID_NO_CONNECT << 16;
 			goto qc24_fail_command;
 		}
 		goto qc24_target_busy;
 	}
 
-	spin_unlock_irq(vha->host->host_lock);
-
 	sp = qla2x00_get_new_sp(base_vha, fcport, cmd, done);
 	if (!sp)
 		goto qc24_host_busy_lock;
@@ -597,9 +599,11 @@
 	return SCSI_MLQUEUE_HOST_BUSY;
 
 qc24_target_busy:
+	spin_lock_irq(vha->host->host_lock);
 	return SCSI_MLQUEUE_TARGET_BUSY;
 
 qc24_fail_command:
+	spin_lock_irq(vha->host->host_lock);
 	done(cmd);
 
 	return 0;
@@ -824,81 +828,58 @@
 {
 	scsi_qla_host_t *vha = shost_priv(cmd->device->host);
 	srb_t *sp;
-	int ret, i;
+	int ret;
 	unsigned int id, lun;
-	unsigned long serial;
 	unsigned long flags;
 	int wait = 0;
 	struct qla_hw_data *ha = vha->hw;
-	struct req_que *req = vha->req;
-	srb_t *spt;
-	int got_ref = 0;
 
 	fc_block_scsi_eh(cmd);
 
 	if (!CMD_SP(cmd))
 		return SUCCESS;
 
-	ret = SUCCESS;
-
 	id = cmd->device->id;
 	lun = cmd->device->lun;
-	serial = cmd->serial_number;
-	spt = (srb_t *) CMD_SP(cmd);
-	if (!spt)
-		return SUCCESS;
 
-	/* Check active list for command command. */
 	spin_lock_irqsave(&ha->hardware_lock, flags);
-	for (i = 1; i < MAX_OUTSTANDING_COMMANDS; i++) {
-		sp = req->outstanding_cmds[i];
-
-		if (sp == NULL)
-			continue;
-		if ((sp->ctx) && !(sp->flags & SRB_FCP_CMND_DMA_VALID) &&
-		    !IS_PROT_IO(sp))
-			continue;
-		if (sp->cmd != cmd)
-			continue;
-
-		DEBUG2(printk("%s(%ld): aborting sp %p from RISC."
-		" pid=%ld.\n", __func__, vha->host_no, sp, serial));
-
-		/* Get a reference to the sp and drop the lock.*/
-		sp_get(sp);
-		got_ref++;
-
+	sp = (srb_t *) CMD_SP(cmd);
+	if (!sp) {
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
-		if (ha->isp_ops->abort_command(sp)) {
-			DEBUG2(printk("%s(%ld): abort_command "
-			"mbx failed.\n", __func__, vha->host_no));
-			ret = FAILED;
-		} else {
-			DEBUG3(printk("%s(%ld): abort_command "
-			"mbx success.\n", __func__, vha->host_no));
-			wait = 1;
-		}
-		spin_lock_irqsave(&ha->hardware_lock, flags);
-		break;
+		return SUCCESS;
 	}
+
+	DEBUG2(printk("%s(%ld): aborting sp %p from RISC.",
+	    __func__, vha->host_no, sp));
+
+	/* Get a reference to the sp and drop the lock.*/
+	sp_get(sp);
+
 	spin_unlock_irqrestore(&ha->hardware_lock, flags);
+	if (ha->isp_ops->abort_command(sp)) {
+		DEBUG2(printk("%s(%ld): abort_command "
+		"mbx failed.\n", __func__, vha->host_no));
+		ret = FAILED;
+	} else {
+		DEBUG3(printk("%s(%ld): abort_command "
+		"mbx success.\n", __func__, vha->host_no));
+		wait = 1;
+	}
+	qla2x00_sp_compl(ha, sp);
 
 	/* Wait for the command to be returned. */
 	if (wait) {
 		if (qla2x00_eh_wait_on_command(cmd) != QLA_SUCCESS) {
 			qla_printk(KERN_ERR, ha,
-			    "scsi(%ld:%d:%d): Abort handler timed out -- %lx "
-			    "%x.\n", vha->host_no, id, lun, serial, ret);
+			    "scsi(%ld:%d:%d): Abort handler timed out -- %x.\n",
+			    vha->host_no, id, lun, ret);
 			ret = FAILED;
 		}
 	}
 
-	if (got_ref)
-		qla2x00_sp_compl(ha, sp);
-
 	qla_printk(KERN_INFO, ha,
-	    "scsi(%ld:%d:%d): Abort command issued -- %d %lx %x.\n",
-	    vha->host_no, id, lun, wait, serial, ret);
+	    "scsi(%ld:%d:%d): Abort command issued -- %d %x.\n",
+	    vha->host_no, id, lun, wait, ret);
 
 	return ret;
 }
@@ -1043,13 +1024,11 @@
 	fc_port_t *fcport = (struct fc_port *) cmd->device->hostdata;
 	int ret = FAILED;
 	unsigned int id, lun;
-	unsigned long serial;
 
 	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
-	serial = cmd->serial_number;
 
 	if (!fcport)
 		return ret;
@@ -1104,14 +1083,12 @@
 	struct qla_hw_data *ha = vha->hw;
 	int ret = FAILED;
 	unsigned int id, lun;
-	unsigned long serial;
 	scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
 
 	fc_block_scsi_eh(cmd);
 
 	id = cmd->device->id;
 	lun = cmd->device->lun;
-	serial = cmd->serial_number;
 
 	if (!fcport)
 		return ret;
@@ -1974,6 +1951,7 @@
 	ha->bars = bars;
 	ha->mem_only = mem_only;
 	spin_lock_init(&ha->hardware_lock);
+	spin_lock_init(&ha->vport_slock);
 
 	/* Set ISP-type information. */
 	qla2x00_set_isp_flags(ha);
@@ -2342,6 +2320,42 @@
 }
 
 static void
+qla2x00_shutdown(struct pci_dev *pdev)
+{
+	scsi_qla_host_t *vha;
+	struct qla_hw_data  *ha;
+
+	vha = pci_get_drvdata(pdev);
+	ha = vha->hw;
+
+	/* Turn-off FCE trace */
+	if (ha->flags.fce_enabled) {
+		qla2x00_disable_fce_trace(vha, NULL, NULL);
+		ha->flags.fce_enabled = 0;
+	}
+
+	/* Turn-off EFT trace */
+	if (ha->eft)
+		qla2x00_disable_eft_trace(vha);
+
+	/* Stop currently executing firmware. */
+	qla2x00_try_to_stop_firmware(vha);
+
+	/* Turn adapter off line */
+	vha->flags.online = 0;
+
+	/* turn-off interrupts on the card */
+	if (ha->interrupts_on) {
+		vha->flags.init_done = 0;
+		ha->isp_ops->disable_intrs(ha);
+	}
+
+	qla2x00_free_irqs(vha);
+
+	qla2x00_free_fw_dump(ha);
+}
+
+static void
 qla2x00_remove_one(struct pci_dev *pdev)
 {
 	scsi_qla_host_t *base_vha, *vha;
@@ -2597,12 +2611,12 @@
 		if (atomic_read(&fcport->state) == FCS_DEVICE_DEAD)
 			continue;
 		if (atomic_read(&fcport->state) == FCS_ONLINE) {
+			atomic_set(&fcport->state, FCS_DEVICE_LOST);
 			if (defer)
 				qla2x00_schedule_rport_del(vha, fcport, defer);
 			else if (vha->vp_idx == fcport->vp_idx)
 				qla2x00_schedule_rport_del(vha, fcport, defer);
 		}
-		atomic_set(&fcport->state, FCS_DEVICE_LOST);
 	}
 }
 
@@ -2830,6 +2844,35 @@
 }
 
 /*
+* qla2x00_free_fw_dump
+*	Frees fw dump stuff.
+*
+* Input:
+*	ha = adapter block pointer.
+*/
+static void
+qla2x00_free_fw_dump(struct qla_hw_data *ha)
+{
+	if (ha->fce)
+		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
+		    ha->fce_dma);
+
+	if (ha->fw_dump) {
+		if (ha->eft)
+			dma_free_coherent(&ha->pdev->dev,
+			    ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
+		vfree(ha->fw_dump);
+	}
+	ha->fce = NULL;
+	ha->fce_dma = 0;
+	ha->eft = NULL;
+	ha->eft_dma = 0;
+	ha->fw_dump = NULL;
+	ha->fw_dumped = 0;
+	ha->fw_dump_reading = 0;
+}
+
+/*
 * qla2x00_mem_free
 *      Frees all adapter allocated memory.
 *
@@ -2839,20 +2882,11 @@
 static void
 qla2x00_mem_free(struct qla_hw_data *ha)
 {
+	qla2x00_free_fw_dump(ha);
+
 	if (ha->srb_mempool)
 		mempool_destroy(ha->srb_mempool);
 
-	if (ha->fce)
-		dma_free_coherent(&ha->pdev->dev, FCE_SIZE, ha->fce,
-		ha->fce_dma);
-
-	if (ha->fw_dump) {
-		if (ha->eft)
-			dma_free_coherent(&ha->pdev->dev,
-			ntohl(ha->fw_dump->eft_size), ha->eft, ha->eft_dma);
-		vfree(ha->fw_dump);
-	}
-
 	if (ha->dcbx_tlv)
 		dma_free_coherent(&ha->pdev->dev, DCBX_TLV_DATA_SIZE,
 		    ha->dcbx_tlv, ha->dcbx_tlv_dma);
@@ -2925,8 +2959,6 @@
 
 	ha->srb_mempool = NULL;
 	ha->ctx_mempool = NULL;
-	ha->eft = NULL;
-	ha->eft_dma = 0;
 	ha->sns_cmd = NULL;
 	ha->sns_cmd_dma = 0;
 	ha->ct_sns = NULL;
@@ -2946,10 +2978,6 @@
 
 	ha->gid_list = NULL;
 	ha->gid_list_dma = 0;
-
-	ha->fw_dump = NULL;
-	ha->fw_dumped = 0;
-	ha->fw_dump_reading = 0;
 }
 
 struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3547,11 +3575,9 @@
 qla2x00_timer(scsi_qla_host_t *vha)
 {
 	unsigned long	cpu_flags = 0;
-	fc_port_t	*fcport;
 	int		start_dpc = 0;
 	int		index;
 	srb_t		*sp;
-	int		t;
 	uint16_t        w;
 	struct qla_hw_data *ha = vha->hw;
 	struct req_que *req;
@@ -3567,34 +3593,6 @@
 	/* Hardware read to raise pending EEH errors during mailbox waits. */
 	if (!pci_channel_offline(ha->pdev))
 		pci_read_config_word(ha->pdev, PCI_VENDOR_ID, &w);
-	/*
-	 * Ports - Port down timer.
-	 *
-	 * Whenever, a port is in the LOST state we start decrementing its port
-	 * down timer every second until it reaches zero. Once  it reaches zero
-	 * the port it marked DEAD.
-	 */
-	t = 0;
-	list_for_each_entry(fcport, &vha->vp_fcports, list) {
-		if (fcport->port_type != FCT_TARGET)
-			continue;
-
-		if (atomic_read(&fcport->state) == FCS_DEVICE_LOST) {
-
-			if (atomic_read(&fcport->port_down_timer) == 0)
-				continue;
-
-			if (atomic_dec_and_test(&fcport->port_down_timer) != 0)
-				atomic_set(&fcport->state, FCS_DEVICE_DEAD);
-
-			DEBUG(printk("scsi(%ld): fcport-%d - port retry count: "
-			    "%d remaining\n",
-			    vha->host_no,
-			    t, atomic_read(&fcport->port_down_timer)));
-		}
-		t++;
-	} /* End of for fcport  */
-
 
 	/* Loop down handler. */
 	if (atomic_read(&vha->loop_down_timer) > 0 &&
@@ -4079,6 +4077,7 @@
 	.id_table	= qla2xxx_pci_tbl,
 	.probe		= qla2x00_probe_one,
 	.remove		= qla2x00_remove_one,
+	.shutdown	= qla2x00_shutdown,
 	.err_handler	= &qla2xxx_err_handler,
 };
 
diff --git a/drivers/scsi/qla4xxx/ql4_dbg.c b/drivers/scsi/qla4xxx/ql4_dbg.c
index cbceb0e..edcf048 100644
--- a/drivers/scsi/qla4xxx/ql4_dbg.c
+++ b/drivers/scsi/qla4xxx/ql4_dbg.c
@@ -30,3 +30,104 @@
 	printk(KERN_INFO "\n");
 }
 
+void qla4xxx_dump_registers(struct scsi_qla_host *ha)
+{
+	uint8_t i;
+
+	if (is_qla8022(ha)) {
+		for (i = 1; i < MBOX_REG_COUNT; i++)
+			printk(KERN_INFO "mailbox[%d]     = 0x%08X\n",
+			    i, readl(&ha->qla4_8xxx_reg->mailbox_in[i]));
+		return;
+	}
+
+	for (i = 0; i < MBOX_REG_COUNT; i++) {
+		printk(KERN_INFO "0x%02X mailbox[%d]      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, mailbox[i]), i,
+		    readw(&ha->reg->mailbox[i]));
+	}
+
+	printk(KERN_INFO "0x%02X flash_address            = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, flash_address),
+	    readw(&ha->reg->flash_address));
+	printk(KERN_INFO "0x%02X flash_data               = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, flash_data),
+	    readw(&ha->reg->flash_data));
+	printk(KERN_INFO "0x%02X ctrl_status              = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, ctrl_status),
+	    readw(&ha->reg->ctrl_status));
+
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4010.nvram),
+		    readw(&ha->reg->u1.isp4010.nvram));
+	} else if (is_qla4022(ha) || is_qla4032(ha)) {
+		printk(KERN_INFO "0x%02X intr_mask        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.intr_mask),
+		    readw(&ha->reg->u1.isp4022.intr_mask));
+		printk(KERN_INFO "0x%02X nvram            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.nvram),
+		    readw(&ha->reg->u1.isp4022.nvram));
+		printk(KERN_INFO "0x%02X semaphore	  = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u1.isp4022.semaphore),
+		    readw(&ha->reg->u1.isp4022.semaphore));
+	}
+	printk(KERN_INFO "0x%02X req_q_in                 = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, req_q_in),
+	    readw(&ha->reg->req_q_in));
+	printk(KERN_INFO "0x%02X rsp_q_out                = 0x%08X\n",
+	    (uint8_t) offsetof(struct isp_reg, rsp_q_out),
+	    readw(&ha->reg->rsp_q_out));
+
+	if (is_qla4010(ha)) {
+		printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.ext_hw_conf),
+		    readw(&ha->reg->u2.isp4010.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_ctrl),
+		    readw(&ha->reg->u2.isp4010.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status      = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.port_status),
+		    readw(&ha->reg->u2.isp4010.port_status));
+		printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.req_q_out),
+		    readw(&ha->reg->u2.isp4010.req_q_out));
+		printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_out),
+		    readw(&ha->reg->u2.isp4010.gp_out));
+		printk(KERN_INFO "0x%02X gp_in	          = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4010.gp_in),
+		    readw(&ha->reg->u2.isp4010.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4010.port_err_status),
+		    readw(&ha->reg->u2.isp4010.port_err_status));
+	} else if (is_qla4022(ha) || is_qla4032(ha)) {
+		printk(KERN_INFO "Page 0 Registers:\n");
+		printk(KERN_INFO "0x%02X ext_hw_conf      = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.ext_hw_conf),
+		    readw(&ha->reg->u2.isp4022.p0.ext_hw_conf));
+		printk(KERN_INFO "0x%02X port_ctrl        = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_ctrl),
+		    readw(&ha->reg->u2.isp4022.p0.port_ctrl));
+		printk(KERN_INFO "0x%02X port_status      = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_status),
+		    readw(&ha->reg->u2.isp4022.p0.port_status));
+		printk(KERN_INFO "0x%02X gp_out           = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_out),
+		    readw(&ha->reg->u2.isp4022.p0.gp_out));
+		printk(KERN_INFO "0x%02X gp_in            = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p0.gp_in),
+		    readw(&ha->reg->u2.isp4022.p0.gp_in));
+		printk(KERN_INFO "0x%02X port_err_status  = 0x%08X\n", (uint8_t)
+		    offsetof(struct isp_reg, u2.isp4022.p0.port_err_status),
+		    readw(&ha->reg->u2.isp4022.p0.port_err_status));
+		printk(KERN_INFO "Page 1 Registers:\n");
+		writel(HOST_MEM_CFG_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		    &ha->reg->ctrl_status);
+		printk(KERN_INFO "0x%02X req_q_out        = 0x%08X\n",
+		    (uint8_t) offsetof(struct isp_reg, u2.isp4022.p1.req_q_out),
+		    readw(&ha->reg->u2.isp4022.p1.req_q_out));
+		writel(PORT_CTRL_STAT_PAGE & set_rmask(CSR_SCSI_PAGE_SELECT),
+		    &ha->reg->ctrl_status);
+	}
+}
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 9dc0a66..0f3bfc3 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -24,6 +24,7 @@
 #include <linux/delay.h>
 #include <linux/interrupt.h>
 #include <linux/mutex.h>
+#include <linux/aer.h>
 
 #include <net/tcp.h>
 #include <scsi/scsi.h>
@@ -36,24 +37,6 @@
 #include "ql4_dbg.h"
 #include "ql4_nx.h"
 
-#if defined(CONFIG_PCIEAER)
-#include <linux/aer.h>
-#else
-/* AER releated */
-static inline int pci_enable_pcie_error_reporting(struct pci_dev *dev)
-{
-	return -EINVAL;
-}
-static inline int pci_disable_pcie_error_reporting(struct pci_dev *dev)
-{
-	return -EINVAL;
-}
-static inline int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
-{
-	return -EINVAL;
-}
-#endif
-
 #ifndef PCI_DEVICE_ID_QLOGIC_ISP4010
 #define PCI_DEVICE_ID_QLOGIC_ISP4010	0x4010
 #endif
@@ -179,6 +162,7 @@
 #define IOCB_TOV_MARGIN			10
 #define RELOGIN_TOV			18
 #define ISNS_DEREG_TOV			5
+#define HBA_ONLINE_TOV			30
 
 #define MAX_RESET_HA_RETRIES		2
 
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 0336c6d..5e757d7 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -416,6 +416,8 @@
 #define MBOX_ASTS_IPV6_ND_PREFIX_IGNORED	0x802C
 #define MBOX_ASTS_IPV6_LCL_PREFIX_IGNORED	0x802D
 #define MBOX_ASTS_ICMPV6_ERROR_MSG_RCVD		0x802E
+#define MBOX_ASTS_TXSCVR_INSERTED		0x8130
+#define MBOX_ASTS_TXSCVR_REMOVED		0x8131
 
 #define ISNS_EVENT_DATA_RECEIVED		0x0000
 #define ISNS_EVENT_CONNECTION_OPENED		0x0001
@@ -446,6 +448,7 @@
 #define	 FWOPT_SESSION_MODE		  0x0040
 #define	 FWOPT_INITIATOR_MODE		  0x0020
 #define	 FWOPT_TARGET_MODE		  0x0010
+#define	 FWOPT_ENABLE_CRBDB		  0x8000
 
 	uint16_t exec_throttle;	/* 04-05 */
 	uint8_t zio_count;	/* 06 */
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 95a26fb..6575a47 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -94,6 +94,7 @@
 void qla4xxx_wake_dpc(struct scsi_qla_host *ha);
 void qla4xxx_get_conn_event_log(struct scsi_qla_host *ha);
 void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha);
+void qla4xxx_dump_registers(struct scsi_qla_host *ha);
 
 void qla4_8xxx_pci_config(struct scsi_qla_host *);
 int qla4_8xxx_iospace_config(struct scsi_qla_host *ha);
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 4c9be77..dc01fa3 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -1207,8 +1207,8 @@
 			break;
 
 		DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot "
-			      "firmware to complete... ctrl_sts=0x%x\n",
-			      ha->host_no, __func__, ctrl_status));
+		    "firmware to complete... ctrl_sts=0x%x, remaining=%ld\n",
+		    ha->host_no, __func__, ctrl_status, max_wait_time));
 
 		msleep_interruptible(250);
 	} while (!time_after_eq(jiffies, max_wait_time));
@@ -1459,6 +1459,12 @@
 exit_init_online:
 	set_bit(AF_ONLINE, &ha->flags);
 exit_init_hba:
+	if (is_qla8022(ha) && (status == QLA_ERROR)) {
+		/* Since interrupts are registered in start_firmware for
+		 * 82xx, release them here if initialize_adapter fails */
+		qla4xxx_free_irqs(ha);
+	}
+
 	DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no,
 	    status == QLA_ERROR ? "FAILED" : "SUCCEDED"));
 	return status;
diff --git a/drivers/scsi/qla4xxx/ql4_iocb.c b/drivers/scsi/qla4xxx/ql4_iocb.c
index 4ef9ba1..5ae49fd 100644
--- a/drivers/scsi/qla4xxx/ql4_iocb.c
+++ b/drivers/scsi/qla4xxx/ql4_iocb.c
@@ -202,19 +202,11 @@
 void qla4_8xxx_queue_iocb(struct scsi_qla_host *ha)
 {
 	uint32_t dbval = 0;
-	unsigned long wtime;
 
 	dbval = 0x14 | (ha->func_num << 5);
 	dbval = dbval | (0 << 8) | (ha->request_in << 16);
-	writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
-	wmb();
 
-	wtime = jiffies + (2 * HZ);
-	while (readl((void __iomem *)ha->nx_db_rd_ptr) != dbval &&
-	    !time_after_eq(jiffies, wtime)) {
-		writel(dbval, (unsigned long __iomem *)ha->nx_db_wr_ptr);
-		wmb();
-	}
+	qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, ha->request_in);
 }
 
 /**
diff --git a/drivers/scsi/qla4xxx/ql4_isr.c b/drivers/scsi/qla4xxx/ql4_isr.c
index 2a1ab63..7c33fd5 100644
--- a/drivers/scsi/qla4xxx/ql4_isr.c
+++ b/drivers/scsi/qla4xxx/ql4_isr.c
@@ -72,7 +72,7 @@
 {
 	struct srb *srb = ha->status_srb;
 	struct scsi_cmnd *cmd;
-	uint8_t sense_len;
+	uint16_t sense_len;
 
 	if (srb == NULL)
 		return;
@@ -487,6 +487,8 @@
 		case MBOX_ASTS_SYSTEM_ERROR:
 			/* Log Mailbox registers */
 			ql4_printk(KERN_INFO, ha, "%s: System Err\n", __func__);
+			qla4xxx_dump_registers(ha);
+
 			if (ql4xdontresethba) {
 				DEBUG2(printk("scsi%ld: %s:Don't Reset HBA\n",
 				    ha->host_no, __func__));
@@ -621,6 +623,18 @@
 			}
 			break;
 
+		case MBOX_ASTS_TXSCVR_INSERTED:
+			DEBUG2(printk(KERN_WARNING
+			    "scsi%ld: AEN %04x Transceiver"
+			    " inserted\n",  ha->host_no, mbox_sts[0]));
+			break;
+
+		case MBOX_ASTS_TXSCVR_REMOVED:
+			DEBUG2(printk(KERN_WARNING
+			    "scsi%ld: AEN %04x Transceiver"
+			    " removed\n",  ha->host_no, mbox_sts[0]));
+			break;
+
 		default:
 			DEBUG2(printk(KERN_WARNING
 				      "scsi%ld: AEN %04x UNKNOWN\n",
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 9002170..2d2f9c8 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -299,6 +299,10 @@
 {
 	memset(mbox_cmd, 0, sizeof(mbox_cmd[0]) * MBOX_REG_COUNT);
 	memset(mbox_sts, 0, sizeof(mbox_sts[0]) * MBOX_REG_COUNT);
+
+	if (is_qla8022(ha))
+		qla4_8xxx_wr_32(ha, ha->nx_db_wr_ptr, 0);
+
 	mbox_cmd[0] = MBOX_CMD_INITIALIZE_FIRMWARE;
 	mbox_cmd[1] = 0;
 	mbox_cmd[2] = LSDW(init_fw_cb_dma);
@@ -472,6 +476,11 @@
 	init_fw_cb->fw_options |=
 		__constant_cpu_to_le16(FWOPT_SESSION_MODE |
 				       FWOPT_INITIATOR_MODE);
+
+	if (is_qla8022(ha))
+		init_fw_cb->fw_options |=
+		    __constant_cpu_to_le16(FWOPT_ENABLE_CRBDB);
+
 	init_fw_cb->fw_options &= __constant_cpu_to_le16(~FWOPT_TARGET_MODE);
 
 	if (qla4xxx_set_ifcb(ha, &mbox_cmd[0], &mbox_sts[0], init_fw_cb_dma)
@@ -592,7 +601,7 @@
 	}
 
 	ql4_printk(KERN_INFO, ha, "%ld firmare IOCBs available (%d).\n",
-	    ha->host_no, mbox_cmd[2]);
+	    ha->host_no, mbox_sts[2]);
 
 	return QLA_SUCCESS;
 }
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index 449256f..474b10d 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -839,8 +839,11 @@
 		done = qla4_8xxx_rd_32(ha, QLA82XX_PCIE_REG(PCIE_SEM2_LOCK));
 		if (done == 1)
 			break;
-		if (timeout >= qla4_8xxx_rom_lock_timeout)
+		if (timeout >= qla4_8xxx_rom_lock_timeout) {
+			ql4_printk(KERN_WARNING, ha,
+			    "%s: Failed to acquire rom lock", __func__);
 			return -1;
+		}
 
 		timeout++;
 
@@ -1078,21 +1081,6 @@
 	return 0;
 }
 
-static int qla4_8xxx_check_for_bad_spd(struct scsi_qla_host *ha)
-{
-	u32 val = 0;
-	val = qla4_8xxx_rd_32(ha, BOOT_LOADER_DIMM_STATUS) ;
-	val &= QLA82XX_BOOT_LOADER_MN_ISSUE;
-	if (val & QLA82XX_PEG_TUNE_MN_SPD_ZEROED) {
-		printk("Memory DIMM SPD not programmed.  Assumed valid.\n");
-		return 1;
-	} else if (val) {
-		printk("Memory DIMM type incorrect.  Info:%08X.\n", val);
-		return 2;
-	}
-	return 0;
-}
-
 static int
 qla4_8xxx_load_from_flash(struct scsi_qla_host *ha, uint32_t image_start)
 {
@@ -1377,8 +1365,6 @@
 
 		} while (--retries);
 
-		qla4_8xxx_check_for_bad_spd(ha);
-
 		if (!retries) {
 			pegtune_val = qla4_8xxx_rd_32(ha,
 				QLA82XX_ROMUSB_GLB_PEGTUNE_DONE);
@@ -1540,14 +1526,31 @@
 	ql4_printk(KERN_INFO, ha,
 	    "FW: Attempting to load firmware from flash...\n");
 	rval = qla4_8xxx_start_firmware(ha, ha->hw.flt_region_fw);
-	if (rval == QLA_SUCCESS)
-		return rval;
 
-	ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash FAILED...\n");
+	if (rval != QLA_SUCCESS) {
+		ql4_printk(KERN_ERR, ha, "FW: Load firmware from flash"
+		    " FAILED...\n");
+		return rval;
+	}
 
 	return rval;
 }
 
+static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
+{
+	if (qla4_8xxx_rom_lock(ha)) {
+		/* Someone else is holding the lock. */
+		dev_info(&ha->pdev->dev, "Resetting rom_lock\n");
+	}
+
+	/*
+	 * Either we got the lock, or someone
+	 * else died while holding it.
+	 * In either case, unlock.
+	 */
+	qla4_8xxx_rom_unlock(ha);
+}
+
 /**
  * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
  * @ha: pointer to adapter structure
@@ -1557,11 +1560,12 @@
 static int
 qla4_8xxx_device_bootstrap(struct scsi_qla_host *ha)
 {
-	int rval, i, timeout;
+	int rval = QLA_ERROR;
+	int i, timeout;
 	uint32_t old_count, count;
+	int need_reset = 0, peg_stuck = 1;
 
-	if (qla4_8xxx_need_reset(ha))
-		goto dev_initialize;
+	need_reset = qla4_8xxx_need_reset(ha);
 
 	old_count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
 
@@ -1570,12 +1574,30 @@
 		if (timeout) {
 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
 			   QLA82XX_DEV_FAILED);
-			return QLA_ERROR;
+			return rval;
 		}
 
 		count = qla4_8xxx_rd_32(ha, QLA82XX_PEG_ALIVE_COUNTER);
 		if (count != old_count)
+			peg_stuck = 0;
+	}
+
+	if (need_reset) {
+		/* We are trying to perform a recovery here. */
+		if (peg_stuck)
+			qla4_8xxx_rom_lock_recovery(ha);
+		goto dev_initialize;
+	} else {
+		/* Start of day for this ha context. */
+		if (peg_stuck) {
+			/* Either we are the first or recovery in progress. */
+			qla4_8xxx_rom_lock_recovery(ha);
+			goto dev_initialize;
+		} else {
+			/* Firmware already running. */
+			rval = QLA_SUCCESS;
 			goto dev_ready;
+		}
 	}
 
 dev_initialize:
@@ -1601,7 +1623,7 @@
 	ql4_printk(KERN_INFO, ha, "HW State: READY\n");
 	qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_READY);
 
-	return QLA_SUCCESS;
+	return rval;
 }
 
 /**
@@ -1764,20 +1786,9 @@
 	int retval;
 	retval = qla4_8xxx_device_state_handler(ha);
 
-	if (retval == QLA_SUCCESS &&
-	    !test_bit(AF_INIT_DONE, &ha->flags)) {
+	if (retval == QLA_SUCCESS && !test_bit(AF_INIT_DONE, &ha->flags))
 		retval = qla4xxx_request_irqs(ha);
-		if (retval != QLA_SUCCESS) {
-			ql4_printk(KERN_WARNING, ha,
-			    "Failed to reserve interrupt %d already in use.\n",
-			    ha->pdev->irq);
-		} else {
-			set_bit(AF_IRQ_ATTACHED, &ha->flags);
-			ha->host->irq = ha->pdev->irq;
-			ql4_printk(KERN_INFO, ha, "%s: irq %d attached\n",
-			    __func__, ha->pdev->irq);
-		}
-	}
+
 	return retval;
 }
 
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index 931ad3f..ff689bf 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -24,7 +24,6 @@
 
 #define CRB_CMDPEG_STATE		QLA82XX_REG(0x50)
 #define CRB_RCVPEG_STATE		QLA82XX_REG(0x13c)
-#define BOOT_LOADER_DIMM_STATUS		QLA82XX_REG(0x54)
 #define CRB_DMA_SHIFT			QLA82XX_REG(0xcc)
 
 #define QLA82XX_HW_H0_CH_HUB_ADR	0x05
@@ -529,12 +528,12 @@
 # define QLA82XX_CAM_RAM_BASE	(QLA82XX_CRB_CAM + 0x02000)
 # define QLA82XX_CAM_RAM(reg)	(QLA82XX_CAM_RAM_BASE + (reg))
 
-#define QLA82XX_PEG_TUNE_MN_SPD_ZEROED	0x80000000
-#define QLA82XX_BOOT_LOADER_MN_ISSUE	0xff00ffff
 #define QLA82XX_PORT_MODE_ADDR		(QLA82XX_CAM_RAM(0x24))
 #define QLA82XX_PEG_HALT_STATUS1	(QLA82XX_CAM_RAM(0xa8))
 #define QLA82XX_PEG_HALT_STATUS2	(QLA82XX_CAM_RAM(0xac))
 #define QLA82XX_PEG_ALIVE_COUNTER	(QLA82XX_CAM_RAM(0xb0))
+#define QLA82XX_CAM_RAM_DB1		(QLA82XX_CAM_RAM(0x1b0))
+#define QLA82XX_CAM_RAM_DB2		(QLA82XX_CAM_RAM(0x1b4))
 
 #define HALT_STATUS_UNRECOVERABLE	0x80000000
 #define HALT_STATUS_RECOVERABLE		0x40000000
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index 370d40f..f4cd846 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -167,8 +167,6 @@
 			      "of (%d) secs exhausted, marking device DEAD.\n",
 			      ha->host_no, __func__, ddb_entry->fw_ddb_index,
 			      QL4_SESS_RECOVERY_TMO));
-
-		qla4xxx_wake_dpc(ha);
 	}
 }
 
@@ -573,10 +571,6 @@
 		if (ha->nx_pcibase)
 			iounmap(
 			    (struct device_reg_82xx __iomem *)ha->nx_pcibase);
-
-		if (ha->nx_db_wr_ptr)
-			iounmap(
-			    (struct device_reg_82xx __iomem *)ha->nx_db_wr_ptr);
 	} else if (ha->reg)
 		iounmap(ha->reg);
 	pci_release_regions(ha->pdev);
@@ -692,7 +686,9 @@
 			qla4xxx_wake_dpc(ha);
 			qla4xxx_mailbox_premature_completion(ha);
 		}
-	}
+	} else
+		ha->seconds_since_last_heartbeat = 0;
+
 	ha->fw_heartbeat_counter = fw_heartbeat_counter;
 }
 
@@ -885,7 +881,13 @@
 		/* Find a command that hasn't completed. */
 		for (index = 0; index < ha->host->can_queue; index++) {
 			cmd = scsi_host_find_tag(ha->host, index);
-			if (cmd != NULL)
+			/*
+			 * We cannot just check if the index is valid,
+			 * because if we are run from the scsi eh, then
+			 * the scsi/block layer is going to prevent
+			 * the tag from being released.
+			 */
+			if (cmd != NULL && CMD_SP(cmd))
 				break;
 		}
 		spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -937,11 +939,14 @@
 {
 	uint32_t max_wait_time;
 	unsigned long flags = 0;
-	int status = QLA_ERROR;
+	int status;
 	uint32_t ctrl_status;
 
-	qla4xxx_hw_reset(ha);
+	status = qla4xxx_hw_reset(ha);
+	if (status != QLA_SUCCESS)
+		return status;
 
+	status = QLA_ERROR;
 	/* Wait until the Network Reset Intr bit is cleared */
 	max_wait_time = RESET_INTR_TOV;
 	do {
@@ -1101,7 +1106,8 @@
 		    ha->host_no, __func__));
 		status = ha->isp_ops->reset_firmware(ha);
 		if (status == QLA_SUCCESS) {
-			qla4xxx_cmd_wait(ha);
+			if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+				qla4xxx_cmd_wait(ha);
 			ha->isp_ops->disable_intrs(ha);
 			qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
 			qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
@@ -1118,7 +1124,8 @@
 	 * or if stop_firmware fails for ISP-82xx.
 	 * This is the default case for ISP-4xxx */
 	if (!is_qla8022(ha) || reset_chip) {
-		qla4xxx_cmd_wait(ha);
+		if (!test_bit(AF_FW_RECOVERY, &ha->flags))
+			qla4xxx_cmd_wait(ha);
 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
 		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 		DEBUG2(ql4_printk(KERN_INFO, ha,
@@ -1471,24 +1478,10 @@
 	db_base = pci_resource_start(pdev, 4);  /* doorbell is on bar 4 */
 	db_len = pci_resource_len(pdev, 4);
 
-	/* mapping of doorbell write pointer */
-	ha->nx_db_wr_ptr = (unsigned long)ioremap(db_base +
-	    (ha->pdev->devfn << 12), 4);
-	if (!ha->nx_db_wr_ptr) {
-		printk(KERN_ERR
-		    "cannot remap MMIO doorbell-write (%s), aborting\n",
-		    pci_name(pdev));
-		goto iospace_error_exit;
-	}
-	/* mapping of doorbell read pointer */
-	ha->nx_db_rd_ptr = (uint8_t *) ha->nx_pcibase + (512 * 1024) +
-	    (ha->pdev->devfn * 8);
-	if (!ha->nx_db_rd_ptr)
-		printk(KERN_ERR
-		    "cannot remap MMIO doorbell-read (%s), aborting\n",
-		    pci_name(pdev));
-	return 0;
+	ha->nx_db_wr_ptr = (ha->pdev->devfn == 4 ? QLA82XX_CAM_RAM_DB1 :
+	    QLA82XX_CAM_RAM_DB2);
 
+	return 0;
 iospace_error_exit:
 	return -ENOMEM;
 }
@@ -1960,13 +1953,11 @@
 {
 	unsigned long wait_online;
 
-	wait_online = jiffies + (30 * HZ);
+	wait_online = jiffies + (HBA_ONLINE_TOV * HZ);
 	while (time_before(jiffies, wait_online)) {
 
 		if (adapter_up(ha))
 			return QLA_SUCCESS;
-		else if (ha->retry_reset_ha_cnt == 0)
-			return QLA_ERROR;
 
 		msleep(2000);
 	}
@@ -2021,6 +2012,7 @@
 	unsigned int id = cmd->device->id;
 	unsigned int lun = cmd->device->lun;
 	unsigned long serial = cmd->serial_number;
+	unsigned long flags;
 	struct srb *srb = NULL;
 	int ret = SUCCESS;
 	int wait = 0;
@@ -2029,12 +2021,14 @@
 	    "scsi%ld:%d:%d: Abort command issued cmd=%p, pid=%ld\n",
 	    ha->host_no, id, lun, cmd, serial);
 
+	spin_lock_irqsave(&ha->hardware_lock, flags);
 	srb = (struct srb *) CMD_SP(cmd);
-
-	if (!srb)
+	if (!srb) {
+		spin_unlock_irqrestore(&ha->hardware_lock, flags);
 		return SUCCESS;
-
+	}
 	kref_get(&srb->srb_ref);
+	spin_unlock_irqrestore(&ha->hardware_lock, flags);
 
 	if (qla4xxx_abort_task(ha, srb) != QLA_SUCCESS) {
 		DEBUG3(printk("scsi%ld:%d:%d: Abort_task mbx failed.\n",
@@ -2267,6 +2261,8 @@
 		qla4xxx_mailbox_premature_completion(ha);
 		qla4xxx_free_irqs(ha);
 		pci_disable_device(pdev);
+		/* Return back all IOs */
+		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 		return PCI_ERS_RESULT_NEED_RESET;
 	case pci_channel_io_perm_failure:
 		set_bit(AF_EEH_BUSY, &ha->flags);
@@ -2290,17 +2286,13 @@
 	if (!is_aer_supported(ha))
 		return PCI_ERS_RESULT_NONE;
 
-	if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
-		ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: firmware hang  -- "
-		    "mmio_enabled\n", ha->host_no, __func__);
-		return PCI_ERS_RESULT_NEED_RESET;
-	} else
-		return PCI_ERS_RESULT_RECOVERED;
+	return PCI_ERS_RESULT_RECOVERED;
 }
 
-uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
+static uint32_t qla4_8xxx_error_recovery(struct scsi_qla_host *ha)
 {
 	uint32_t rval = QLA_ERROR;
+	uint32_t ret = 0;
 	int fn;
 	struct pci_dev *other_pdev = NULL;
 
@@ -2312,7 +2304,6 @@
 		clear_bit(AF_ONLINE, &ha->flags);
 		qla4xxx_mark_all_devices_missing(ha);
 		qla4xxx_process_aen(ha, FLUSH_DDB_CHANGED_AENS);
-		qla4xxx_abort_active_cmds(ha, DID_RESET << 16);
 	}
 
 	fn = PCI_FUNC(ha->pdev->devfn);
@@ -2375,7 +2366,16 @@
 			/* Clear driver state register */
 			qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, 0);
 			qla4_8xxx_set_drv_active(ha);
-			ha->isp_ops->enable_intrs(ha);
+			ret = qla4xxx_request_irqs(ha);
+			if (ret) {
+				ql4_printk(KERN_WARNING, ha, "Failed to "
+				    "reserve interrupt %d already in use.\n",
+				    ha->pdev->irq);
+				rval = QLA_ERROR;
+			} else {
+				ha->isp_ops->enable_intrs(ha);
+				rval = QLA_SUCCESS;
+			}
 		}
 		qla4_8xxx_idc_unlock(ha);
 	} else {
@@ -2387,8 +2387,18 @@
 			clear_bit(AF_FW_RECOVERY, &ha->flags);
 			rval = qla4xxx_initialize_adapter(ha,
 			    PRESERVE_DDB_LIST);
-			if (rval == QLA_SUCCESS)
-				ha->isp_ops->enable_intrs(ha);
+			if (rval == QLA_SUCCESS) {
+				ret = qla4xxx_request_irqs(ha);
+				if (ret) {
+					ql4_printk(KERN_WARNING, ha, "Failed to"
+					    " reserve interrupt %d already in"
+					    " use.\n", ha->pdev->irq);
+					rval = QLA_ERROR;
+				} else {
+					ha->isp_ops->enable_intrs(ha);
+					rval = QLA_SUCCESS;
+				}
+			}
 			qla4_8xxx_idc_lock(ha);
 			qla4_8xxx_set_drv_active(ha);
 			qla4_8xxx_idc_unlock(ha);
@@ -2430,12 +2440,7 @@
 		goto exit_slot_reset;
 	}
 
-	ret = qla4xxx_request_irqs(ha);
-	if (ret) {
-		ql4_printk(KERN_WARNING, ha, "Failed to reserve interrupt %d"
-		    " already in use.\n", pdev->irq);
-		goto exit_slot_reset;
-	}
+	ha->isp_ops->disable_intrs(ha);
 
 	if (is_qla8022(ha)) {
 		if (qla4_8xxx_error_recovery(ha) == QLA_SUCCESS) {
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index a77b973..9bfacf4 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
  * See LICENSE.qla4xxx for copyright and licensing details.
  */
 
-#define QLA4XXX_DRIVER_VERSION	"5.02.00-k3"
+#define QLA4XXX_DRIVER_VERSION	"5.02.00-k4"
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 8041fe1..eafeeda 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -2438,7 +2438,8 @@
 		sdev->sdev_state = SDEV_RUNNING;
 	else if (sdev->sdev_state == SDEV_CREATED_BLOCK)
 		sdev->sdev_state = SDEV_CREATED;
-	else
+	else if (sdev->sdev_state != SDEV_CANCEL &&
+		 sdev->sdev_state != SDEV_OFFLINE)
 		return -EINVAL;
 
 	spin_lock_irqsave(q->queue_lock, flags);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 20ad59d..76ee2e7 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -964,10 +964,11 @@
 	list_for_each_entry(sdev, &shost->__devices, siblings) {
 		if (sdev->channel != starget->channel ||
 		    sdev->id != starget->id ||
-		    sdev->sdev_state == SDEV_DEL)
+		    scsi_device_get(sdev))
 			continue;
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		scsi_remove_device(sdev);
+		scsi_device_put(sdev);
 		spin_lock_irqsave(shost->host_lock, flags);
 		goto restart;
 	}
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 57d1e3e..b9ab3a5 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -259,6 +259,28 @@
 }
 
 static ssize_t
+sd_show_protection_mode(struct device *dev, struct device_attribute *attr,
+			char *buf)
+{
+	struct scsi_disk *sdkp = to_scsi_disk(dev);
+	struct scsi_device *sdp = sdkp->device;
+	unsigned int dif, dix;
+
+	dif = scsi_host_dif_capable(sdp->host, sdkp->protection_type);
+	dix = scsi_host_dix_capable(sdp->host, sdkp->protection_type);
+
+	if (!dix && scsi_host_dix_capable(sdp->host, SD_DIF_TYPE0_PROTECTION)) {
+		dif = 0;
+		dix = 1;
+	}
+
+	if (!dif && !dix)
+		return snprintf(buf, 20, "none\n");
+
+	return snprintf(buf, 20, "%s%u\n", dix ? "dix" : "dif", dif);
+}
+
+static ssize_t
 sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
 		    char *buf)
 {
@@ -285,6 +307,7 @@
 	__ATTR(manage_start_stop, S_IRUGO|S_IWUSR, sd_show_manage_start_stop,
 	       sd_store_manage_start_stop),
 	__ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
+	__ATTR(protection_mode, S_IRUGO, sd_show_protection_mode, NULL),
 	__ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
 	__ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
 	__ATTR_NULL,
diff --git a/drivers/scsi/sr_ioctl.c b/drivers/scsi/sr_ioctl.c
index cbb38c5..3cd8ffb 100644
--- a/drivers/scsi/sr_ioctl.c
+++ b/drivers/scsi/sr_ioctl.c
@@ -325,6 +325,15 @@
 	}
 
 	/*
+	 * SK/ASC/ASCQ of 2/4/2 means "initialization required"
+	 * Returning CDS_TRAY_OPEN results in a START_STOP_UNIT to close
+	 * the tray, which resolves the initialization requirement.
+	 */
+	if (scsi_sense_valid(&sshdr) && sshdr.sense_key == NOT_READY
+			&& sshdr.asc == 0x04 && sshdr.ascq == 0x02)
+		return CDS_TRAY_OPEN;
+
+	/*
 	 * 0x04 is format in progress .. but there must be a disc present!
 	 */
 	if (sshdr.sense_key == NOT_READY && sshdr.asc == 0x04)
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index c856905..fa62578 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -1411,11 +1411,12 @@
 		       CONFIG_ETRAX_RS485_LTC1387_RXEN_PORT_G_BIT, 1);
 #endif
 
-	info->rs485.flags = r->flags;
-	if (r->delay_rts_before_send >= 1000)
+	info->rs485 = *r;
+
+	/* Maximum delay before RTS is 1000 ms */
+	if (info->rs485.delay_rts_before_send >= 1000)
 		info->rs485.delay_rts_before_send = 1000;
-	else
-		info->rs485.delay_rts_before_send = r->delay_rts_before_send;
+
 /*	printk("rts: on send = %i, after = %i, enabled = %i",
 		    info->rs485.rts_on_send,
 		    info->rs485.rts_after_sent,
@@ -3234,9 +3235,9 @@
 		e100_disable_rx(info);
 		e100_enable_rx_irq(info);
 #endif
-
-		if (info->rs485.delay_rts_before_send > 0)
-			msleep(info->rs485.delay_rts_before_send);
+		if ((info->rs485.flags & SER_RS485_RTS_BEFORE_SEND) &&
+			(info->rs485.delay_rts_before_send > 0))
+				msleep(info->rs485.delay_rts_before_send);
 	}
 #endif /* CONFIG_ETRAX_RS485 */
 
@@ -3694,6 +3695,11 @@
 
 		rs485data.delay_rts_before_send = rs485ctrl.delay_rts_before_send;
 		rs485data.flags = 0;
+		if (rs485data.delay_rts_before_send != 0)
+			rs485data.flags |= SER_RS485_RTS_BEFORE_SEND;
+		else
+			rs485data.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
+
 		if (rs485ctrl.enabled)
 			rs485data.flags |= SER_RS485_ENABLED;
 		else
@@ -3731,7 +3737,7 @@
 		/* This is the ioctl to get RS485 data from user-space */
 		if (copy_to_user((struct serial_rs485 *) arg,
 					rs485data,
-					sizeof(serial_rs485)))
+					sizeof(struct serial_rs485)))
 			return -EFAULT;
 		break;
 	}
@@ -4527,6 +4533,7 @@
 		/* Set sane defaults */
 		info->rs485.flags &= ~(SER_RS485_RTS_ON_SEND);
 		info->rs485.flags |= SER_RS485_RTS_AFTER_SEND;
+		info->rs485.flags &= ~(SER_RS485_RTS_BEFORE_SEND);
 		info->rs485.delay_rts_before_send = 0;
 		info->rs485.flags &= ~(SER_RS485_ENABLED);
 #endif
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index b5a78a1..709c836 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -29,11 +29,6 @@
 #include <linux/spi/spi.h>
 #include <linux/of_spi.h>
 
-
-/* SPI bustype and spi_master class are registered after board init code
- * provides the SPI device tables, ensuring that both are present by the
- * time controller driver registration causes spi_devices to "enumerate".
- */
 static void spidev_release(struct device *dev)
 {
 	struct spi_device	*spi = to_spi_device(dev);
@@ -202,11 +197,16 @@
 
 struct boardinfo {
 	struct list_head	list;
-	unsigned		n_board_info;
-	struct spi_board_info	board_info[0];
+	struct spi_board_info	board_info;
 };
 
 static LIST_HEAD(board_list);
+static LIST_HEAD(spi_master_list);
+
+/*
+ * Used to protect add/del operations on the board_info list and
+ * spi_master list, and their matching process
+ */
 static DEFINE_MUTEX(board_lock);
 
 /**
@@ -300,16 +300,16 @@
 	 */
 	status = spi_setup(spi);
 	if (status < 0) {
-		dev_err(dev, "can't %s %s, status %d\n",
-				"setup", dev_name(&spi->dev), status);
+		dev_err(dev, "can't setup %s, status %d\n",
+				dev_name(&spi->dev), status);
 		goto done;
 	}
 
 	/* Device may be bound to an active driver when this returns */
 	status = device_add(&spi->dev);
 	if (status < 0)
-		dev_err(dev, "can't %s %s, status %d\n",
-				"add", dev_name(&spi->dev), status);
+		dev_err(dev, "can't add %s, status %d\n",
+				dev_name(&spi->dev), status);
 	else
 		dev_dbg(dev, "registered child %s\n", dev_name(&spi->dev));
 
@@ -371,6 +371,20 @@
 }
 EXPORT_SYMBOL_GPL(spi_new_device);
 
+static void spi_match_master_to_boardinfo(struct spi_master *master,
+				struct spi_board_info *bi)
+{
+	struct spi_device *dev;
+
+	if (master->bus_num != bi->bus_num)
+		return;
+
+	dev = spi_new_device(master, bi);
+	if (!dev)
+		dev_err(master->dev.parent, "can't create new device for %s\n",
+			bi->modalias);
+}
+
 /**
  * spi_register_board_info - register SPI devices for a given board
  * @info: array of chip descriptors
@@ -393,43 +407,25 @@
 int __init
 spi_register_board_info(struct spi_board_info const *info, unsigned n)
 {
-	struct boardinfo	*bi;
+	struct boardinfo *bi;
+	int i;
 
-	bi = kmalloc(sizeof(*bi) + n * sizeof *info, GFP_KERNEL);
+	bi = kzalloc(n * sizeof(*bi), GFP_KERNEL);
 	if (!bi)
 		return -ENOMEM;
-	bi->n_board_info = n;
-	memcpy(bi->board_info, info, n * sizeof *info);
 
-	mutex_lock(&board_lock);
-	list_add_tail(&bi->list, &board_list);
-	mutex_unlock(&board_lock);
-	return 0;
-}
+	for (i = 0; i < n; i++, bi++, info++) {
+		struct spi_master *master;
 
-/* FIXME someone should add support for a __setup("spi", ...) that
- * creates board info from kernel command lines
- */
-
-static void scan_boardinfo(struct spi_master *master)
-{
-	struct boardinfo	*bi;
-
-	mutex_lock(&board_lock);
-	list_for_each_entry(bi, &board_list, list) {
-		struct spi_board_info	*chip = bi->board_info;
-		unsigned		n;
-
-		for (n = bi->n_board_info; n > 0; n--, chip++) {
-			if (chip->bus_num != master->bus_num)
-				continue;
-			/* NOTE: this relies on spi_new_device to
-			 * issue diagnostics when given bogus inputs
-			 */
-			(void) spi_new_device(master, chip);
-		}
+		memcpy(&bi->board_info, info, sizeof(*info));
+		mutex_lock(&board_lock);
+		list_add_tail(&bi->list, &board_list);
+		list_for_each_entry(master, &spi_master_list, list)
+			spi_match_master_to_boardinfo(master, &bi->board_info);
+		mutex_unlock(&board_lock);
 	}
-	mutex_unlock(&board_lock);
+
+	return 0;
 }
 
 /*-------------------------------------------------------------------------*/
@@ -512,6 +508,7 @@
 {
 	static atomic_t		dyn_bus_id = ATOMIC_INIT((1<<15) - 1);
 	struct device		*dev = master->dev.parent;
+	struct boardinfo	*bi;
 	int			status = -ENODEV;
 	int			dynamic = 0;
 
@@ -547,8 +544,12 @@
 	dev_dbg(dev, "registered master %s%s\n", dev_name(&master->dev),
 			dynamic ? " (dynamic)" : "");
 
-	/* populate children from any spi device tables */
-	scan_boardinfo(master);
+	mutex_lock(&board_lock);
+	list_add_tail(&master->list, &spi_master_list);
+	list_for_each_entry(bi, &board_list, list)
+		spi_match_master_to_boardinfo(master, &bi->board_info);
+	mutex_unlock(&board_lock);
+
 	status = 0;
 
 	/* Register devices from the device tree */
@@ -579,7 +580,12 @@
 {
 	int dummy;
 
-	dummy = device_for_each_child(&master->dev, NULL, __unregister);
+	mutex_lock(&board_lock);
+	list_del(&master->list);
+	mutex_unlock(&board_lock);
+
+	dummy = device_for_each_child(master->dev.parent, &master->dev,
+					__unregister);
 	device_unregister(&master->dev);
 }
 EXPORT_SYMBOL_GPL(spi_unregister_master);
@@ -652,7 +658,7 @@
 	 */
 	bad_bits = spi->mode & ~spi->master->mode_bits;
 	if (bad_bits) {
-		dev_dbg(&spi->dev, "setup: unsupported mode bits %x\n",
+		dev_err(&spi->dev, "setup: unsupported mode bits %x\n",
 			bad_bits);
 		return -EINVAL;
 	}
diff --git a/drivers/spi/spi_bfin5xx.c b/drivers/spi/spi_bfin5xx.c
index ab483a0..3f22351 100644
--- a/drivers/spi/spi_bfin5xx.c
+++ b/drivers/spi/spi_bfin5xx.c
@@ -504,6 +504,15 @@
 		"in dma_irq_handler dmastat:0x%x spistat:0x%x\n",
 		dmastat, spistat);
 
+	if (drv_data->rx != NULL) {
+		u16 cr = read_CTRL(drv_data);
+		/* discard old RX data and clear RXS */
+		bfin_spi_dummy_read(drv_data);
+		write_CTRL(drv_data, cr & ~BIT_CTL_ENABLE); /* Disable SPI */
+		write_CTRL(drv_data, cr & ~BIT_CTL_TIMOD); /* Restore State */
+		write_STAT(drv_data, BIT_STAT_CLR); /* Clear Status */
+	}
+
 	clear_dma_irqstat(drv_data->dma_channel);
 
 	/*
@@ -1099,12 +1108,15 @@
 	}
 
 	if (chip->chip_select_num >= MAX_CTRL_CS) {
-		ret = gpio_request(chip->cs_gpio, spi->modalias);
-		if (ret) {
-			dev_err(&spi->dev, "gpio_request() error\n");
-			goto pin_error;
+		/* Only request on first setup */
+		if (spi_get_ctldata(spi) == NULL) {
+			ret = gpio_request(chip->cs_gpio, spi->modalias);
+			if (ret) {
+				dev_err(&spi->dev, "gpio_request() error\n");
+				goto pin_error;
+			}
+			gpio_direction_output(chip->cs_gpio, 1);
 		}
-		gpio_direction_output(chip->cs_gpio, 1);
 	}
 
 	dev_dbg(&spi->dev, "setup spi chip %s, width is %d, dma is %d\n",
diff --git a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
index c196098..6b8eeea 100644
--- a/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
+++ b/drivers/staging/ath6kl/os/linux/ar6000_raw_if.c
@@ -198,8 +198,8 @@
         
     for (streamID = HTC_RAW_STREAM_0; streamID < HTC_RAW_STREAM_NUM_MAX; streamID++) {
         /* Initialize the data structures */
-        init_MUTEX(&arRaw->raw_htc_read_sem[streamID]);
-        init_MUTEX(&arRaw->raw_htc_write_sem[streamID]);
+	sema_init(&arRaw->raw_htc_read_sem[streamID], 1);
+	sema_init(&arRaw->raw_htc_write_sem[streamID], 1);
         init_waitqueue_head(&arRaw->raw_htc_read_queue[streamID]);
         init_waitqueue_head(&arRaw->raw_htc_write_queue[streamID]);
 
diff --git a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
index e535787..bbbe7c5 100644
--- a/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
+++ b/drivers/staging/brcm80211/brcmfmac/dhd_linux.c
@@ -1929,7 +1929,7 @@
 		goto fail;
 
 	net->netdev_ops = NULL;
-	init_MUTEX(&dhd->proto_sem);
+	sema_init(&dhd->proto_sem, 1);
 	/* Initialize other structure content */
 	init_waitqueue_head(&dhd->ioctl_resp_wait);
 	init_waitqueue_head(&dhd->ctrl_wait);
@@ -1977,7 +1977,7 @@
 	dhd->timer.function = dhd_watchdog;
 
 	/* Initialize thread based operation and lock */
-	init_MUTEX(&dhd->sdsem);
+	sema_init(&dhd->sdsem, 1);
 	if ((dhd_watchdog_prio >= 0) && (dhd_dpc_prio >= 0))
 		dhd->threads_only = true;
 	else
diff --git a/drivers/staging/brcm80211/sys/wl_mac80211.c b/drivers/staging/brcm80211/sys/wl_mac80211.c
index ad635ee..d060377 100644
--- a/drivers/staging/brcm80211/sys/wl_mac80211.c
+++ b/drivers/staging/brcm80211/sys/wl_mac80211.c
@@ -866,7 +866,7 @@
 	spin_lock_init(&wl->rpcq_lock);
 	spin_lock_init(&wl->txq_lock);
 
-	init_MUTEX(&wl->sem);
+	sema_init(&wl->sem, 1);
 #else
 	spin_lock_init(&wl->lock);
 	spin_lock_init(&wl->isr_lock);
diff --git a/drivers/staging/comedi/drivers/dt9812.c b/drivers/staging/comedi/drivers/dt9812.c
index 0560a74..0605985 100644
--- a/drivers/staging/comedi/drivers/dt9812.c
+++ b/drivers/staging/comedi/drivers/dt9812.c
@@ -262,7 +262,7 @@
 
 #define DT9812_NUM_SLOTS	16
 
-static DECLARE_MUTEX(dt9812_mutex);
+static DEFINE_SEMAPHORE(dt9812_mutex);
 
 static const struct usb_device_id dt9812_table[] = {
 	{USB_DEVICE(0x0867, 0x9812)},
diff --git a/drivers/staging/comedi/drivers/usbdux.c b/drivers/staging/comedi/drivers/usbdux.c
index 6131e2d..1f177a6 100644
--- a/drivers/staging/comedi/drivers/usbdux.c
+++ b/drivers/staging/comedi/drivers/usbdux.c
@@ -315,7 +315,7 @@
  */
 static struct usbduxsub usbduxsub[NUMUSBDUX];
 
-static DECLARE_MUTEX(start_stop_sem);
+static DEFINE_SEMAPHORE(start_stop_sem);
 
 /*
  * Stops the data acquision
@@ -2367,7 +2367,7 @@
 	dev_dbg(dev, "comedi_: usbdux: "
 		"usbduxsub[%d] is ready to connect to comedi.\n", index);
 
-	init_MUTEX(&(usbduxsub[index].sem));
+	sema_init(&(usbduxsub[index].sem), 1);
 	/* save a pointer to the usb device */
 	usbduxsub[index].usbdev = udev;
 
diff --git a/drivers/staging/comedi/drivers/usbduxfast.c b/drivers/staging/comedi/drivers/usbduxfast.c
index 0a164a9..5b15e6d 100644
--- a/drivers/staging/comedi/drivers/usbduxfast.c
+++ b/drivers/staging/comedi/drivers/usbduxfast.c
@@ -199,7 +199,7 @@
  */
 static struct usbduxfastsub_s usbduxfastsub[NUMUSBDUXFAST];
 
-static DECLARE_MUTEX(start_stop_sem);
+static DEFINE_SEMAPHORE(start_stop_sem);
 
 /*
  * bulk transfers to usbduxfast
@@ -1504,7 +1504,7 @@
 	       "connect to comedi.\n", index);
 #endif
 
-	init_MUTEX(&(usbduxfastsub[index].sem));
+	sema_init(&(usbduxfastsub[index].sem), 1);
 	/* save a pointer to the usb device */
 	usbduxfastsub[index].usbdev = udev;
 
diff --git a/drivers/staging/msm/msm_fb.c b/drivers/staging/msm/msm_fb.c
index ea268ed..23fa049 100644
--- a/drivers/staging/msm/msm_fb.c
+++ b/drivers/staging/msm/msm_fb.c
@@ -1158,7 +1158,7 @@
 	return ret;
 }
 
-DECLARE_MUTEX(msm_fb_pan_sem);
+DEFINE_SEMAPHORE(msm_fb_pan_sem);
 
 static int msm_fb_pan_display(struct fb_var_screeninfo *var,
 			      struct fb_info *info)
@@ -1962,7 +1962,7 @@
 
 #endif
 
-DECLARE_MUTEX(msm_fb_ioctl_ppp_sem);
+DEFINE_SEMAPHORE(msm_fb_ioctl_ppp_sem);
 DEFINE_MUTEX(msm_fb_ioctl_lut_sem);
 DEFINE_MUTEX(msm_fb_ioctl_hist_sem);
 
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 7fca42c..d1674cd 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -161,7 +161,7 @@
 
 static inline void _rtl_rwlock_init(struct semaphore *prwlock)
 {
-	init_MUTEX(prwlock);
+	sema_init(prwlock, 1);
 }
 
 static inline void _init_listhead(struct list_head *list)
diff --git a/drivers/staging/smbfs/inode.c b/drivers/staging/smbfs/inode.c
index f9c4935..540a984 100644
--- a/drivers/staging/smbfs/inode.c
+++ b/drivers/staging/smbfs/inode.c
@@ -537,7 +537,7 @@
 	server->mnt = NULL;
 	server->sock_file = NULL;
 	init_waitqueue_head(&server->conn_wq);
-	init_MUTEX(&server->sem);
+	sema_init(&server->sem, 1);
 	INIT_LIST_HEAD(&server->entry);
 	INIT_LIST_HEAD(&server->xmitq);
 	INIT_LIST_HEAD(&server->recvq);
diff --git a/drivers/staging/tm6000/tm6000-i2c.c b/drivers/staging/tm6000/tm6000-i2c.c
index 3e46866..93f625f 100644
--- a/drivers/staging/tm6000/tm6000-i2c.c
+++ b/drivers/staging/tm6000/tm6000-i2c.c
@@ -320,7 +320,6 @@
 
 static struct i2c_adapter tm6000_adap_template = {
 	.owner = THIS_MODULE,
-	.class = I2C_CLASS_TV_ANALOG | I2C_CLASS_TV_DIGITAL,
 	.name = "tm6000",
 	.algo = &tm6000_algo,
 };
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
index f428a7a..e1851f0 100644
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
+++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_block.c
@@ -157,7 +157,7 @@
 /* pointer to west bridge block data device superstructure */
 static struct cyasblkdev_blk_data *gl_bd;
 
-static DECLARE_MUTEX(open_lock);
+static DEFINE_SEMAPHORE(open_lock);
 
 /* local forwardd declarationss  */
 static cy_as_device_handle *cyas_dev_handle;
diff --git a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
index 24e959e..0bbb8a3 100644
--- a/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
+++ b/drivers/staging/westbridge/astoria/block/cyasblkdev_queue.c
@@ -334,7 +334,7 @@
 
 	init_completion(&bq->thread_complete);
 	init_waitqueue_head(&bq->thread_wq);
-	init_MUTEX(&bq->thread_sem);
+	sema_init(&bq->thread_sem, 1);
 
 	ret = kernel_thread(cyasblkdev_queue_thread, bq, CLONE_KERNEL);
 	if (ret >= 0) {
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index cb23355..fbe86ca 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -811,7 +811,6 @@
 		INFO(dev, "MAC %pM\n", net->dev_addr);
 		INFO(dev, "HOST MAC %pM\n", dev->host_mac);
 
-		netif_stop_queue(net);
 		the_dev = dev;
 	}
 
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 5aff46c..355abcd 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -81,7 +81,7 @@
 
 v) mount check for unmatched uids
 
-w) Add support for new vfs entry points for setlease and fallocate 
+w) Add support for new vfs entry point for fallocate
 
 x) Fix Samba 3 server to handle Linux kernel aio so dbench with lots of 
 processes can proceed better in parallel (on the server)
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 525ba59..e9a393c 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -15,7 +15,7 @@
  *   the GNU Lesser General Public License for more details.
  *
  */
-#include <linux/radix-tree.h>
+#include <linux/rbtree.h>
 
 #ifndef _CIFS_FS_SB_H
 #define _CIFS_FS_SB_H
@@ -42,9 +42,9 @@
 #define CIFS_MOUNT_MULTIUSER	0x20000 /* multiuser mount */
 
 struct cifs_sb_info {
-	struct radix_tree_root tlink_tree;
-#define CIFS_TLINK_MASTER_TAG		0	/* is "master" (mount) tcon */
+	struct rb_root tlink_tree;
 	spinlock_t tlink_tree_lock;
+	struct tcon_link *master_tlink;
 	struct nls_table *local_nls;
 	unsigned int rsize;
 	unsigned int wsize;
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 54745b6..9c37897 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -116,7 +116,7 @@
 		return -ENOMEM;
 
 	spin_lock_init(&cifs_sb->tlink_tree_lock);
-	INIT_RADIX_TREE(&cifs_sb->tlink_tree, GFP_KERNEL);
+	cifs_sb->tlink_tree = RB_ROOT;
 
 	rc = bdi_setup_and_register(&cifs_sb->bdi, "cifs", BDI_CAP_MAP_COPY);
 	if (rc) {
@@ -321,8 +321,7 @@
 	/* Until the file is open and we have gotten oplock
 	info back from the server, can not assume caching of
 	file data or metadata */
-	cifs_inode->clientCanCacheRead = false;
-	cifs_inode->clientCanCacheAll = false;
+	cifs_set_oplock_level(cifs_inode, 0);
 	cifs_inode->delete_pending = false;
 	cifs_inode->invalid_mapping = false;
 	cifs_inode->vfs_inode.i_blkbits = 14;  /* 2**14 = CIFS_MAX_MSGSIZE */
@@ -625,11 +624,8 @@
 		   knows that the file won't be changed on the server
 		   by anyone else */
 		return generic_setlease(file, arg, lease);
-	else {
-		if (arg != F_UNLCK)
-			locks_free_lock(*lease);
+	else
 		return -EAGAIN;
-	}
 }
 
 struct file_system_type cifs_fs_type = {
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index f259e4d..b577bf0 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -336,7 +336,8 @@
  * "get" on the container.
  */
 struct tcon_link {
-	unsigned long		tl_index;
+	struct rb_node		tl_rbnode;
+	uid_t			tl_uid;
 	unsigned long		tl_flags;
 #define TCON_LINK_MASTER	0
 #define TCON_LINK_PENDING	1
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index edb6d90..7ed69b6 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -104,6 +104,7 @@
 extern u64 cifs_UnixTimeToNT(struct timespec);
 extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
 				      int offset);
+extern void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock);
 
 extern struct cifsFileInfo *cifs_new_fileinfo(__u16 fileHandle,
 				struct file *file, struct tcon_link *tlink,
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 9eb327d..251a17c 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -116,6 +116,7 @@
 
 static int ipv4_connect(struct TCP_Server_Info *server);
 static int ipv6_connect(struct TCP_Server_Info *server);
+static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
 static void cifs_prune_tlinks(struct work_struct *work);
 
 /*
@@ -2900,24 +2901,16 @@
 		goto mount_fail_check;
 	}
 
-	tlink->tl_index = pSesInfo->linux_uid;
+	tlink->tl_uid = pSesInfo->linux_uid;
 	tlink->tl_tcon = tcon;
 	tlink->tl_time = jiffies;
 	set_bit(TCON_LINK_MASTER, &tlink->tl_flags);
 	set_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
 
-	rc = radix_tree_preload(GFP_KERNEL);
-	if (rc == -ENOMEM) {
-		kfree(tlink);
-		goto mount_fail_check;
-	}
-
+	cifs_sb->master_tlink = tlink;
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	radix_tree_insert(&cifs_sb->tlink_tree, pSesInfo->linux_uid, tlink);
-	radix_tree_tag_set(&cifs_sb->tlink_tree, pSesInfo->linux_uid,
-			   CIFS_TLINK_MASTER_TAG);
+	tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
-	radix_tree_preload_end();
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
@@ -3107,32 +3100,25 @@
 int
 cifs_umount(struct super_block *sb, struct cifs_sb_info *cifs_sb)
 {
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node;
+	struct tcon_link *tlink;
 	char *tmp;
-	struct tcon_link *tlink[8];
-	unsigned long index = 0;
 
 	cancel_delayed_work_sync(&cifs_sb->prune_tlinks);
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-							tlink[i]->tl_index);
-		}
-		spin_unlock(&cifs_sb->tlink_tree_lock);
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	while ((node = rb_first(root))) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(node, root);
 
-		for (i = 0; i < ret; i++)
-			cifs_put_tlink(tlink[i]);
-	} while (ret != 0);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	tmp = cifs_sb->prepath;
 	cifs_sb->prepathlen = 0;
@@ -3271,22 +3257,10 @@
 	return tcon;
 }
 
-static struct tcon_link *
+static inline struct tcon_link *
 cifs_sb_master_tlink(struct cifs_sb_info *cifs_sb)
 {
-	struct tcon_link *tlink;
-	unsigned int ret;
-
-	spin_lock(&cifs_sb->tlink_tree_lock);
-	ret = radix_tree_gang_lookup_tag(&cifs_sb->tlink_tree, (void **)&tlink,
-					0, 1, CIFS_TLINK_MASTER_TAG);
-	spin_unlock(&cifs_sb->tlink_tree_lock);
-
-	/* the master tcon should always be present */
-	if (ret == 0)
-		BUG();
-
-	return tlink;
+	return cifs_sb->master_tlink;
 }
 
 struct cifsTconInfo *
@@ -3302,6 +3276,47 @@
 	return signal_pending(current) ? -ERESTARTSYS : 0;
 }
 
+/* find and return a tlink with given uid */
+static struct tcon_link *
+tlink_rb_search(struct rb_root *root, uid_t uid)
+{
+	struct rb_node *node = root->rb_node;
+	struct tcon_link *tlink;
+
+	while (node) {
+		tlink = rb_entry(node, struct tcon_link, tl_rbnode);
+
+		if (tlink->tl_uid > uid)
+			node = node->rb_left;
+		else if (tlink->tl_uid < uid)
+			node = node->rb_right;
+		else
+			return tlink;
+	}
+	return NULL;
+}
+
+/* insert a tcon_link into the tree */
+static void
+tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink)
+{
+	struct rb_node **new = &(root->rb_node), *parent = NULL;
+	struct tcon_link *tlink;
+
+	while (*new) {
+		tlink = rb_entry(*new, struct tcon_link, tl_rbnode);
+		parent = *new;
+
+		if (tlink->tl_uid > new_tlink->tl_uid)
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&new_tlink->tl_rbnode, parent, new);
+	rb_insert_color(&new_tlink->tl_rbnode, root);
+}
+
 /*
  * Find or construct an appropriate tcon given a cifs_sb and the fsuid of the
  * current task.
@@ -3309,7 +3324,7 @@
  * If the superblock doesn't refer to a multiuser mount, then just return
  * the master tcon for the mount.
  *
- * First, search the radix tree for an existing tcon for this fsuid. If one
+ * First, search the rbtree for an existing tcon for this fsuid. If one
  * exists, then check to see if it's pending construction. If it is then wait
  * for construction to complete. Once it's no longer pending, check to see if
  * it failed and either return an error or retry construction, depending on
@@ -3322,14 +3337,14 @@
 cifs_sb_tlink(struct cifs_sb_info *cifs_sb)
 {
 	int ret;
-	unsigned long fsuid = (unsigned long) current_fsuid();
+	uid_t fsuid = current_fsuid();
 	struct tcon_link *tlink, *newtlink;
 
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		return cifs_get_tlink(cifs_sb_master_tlink(cifs_sb));
 
 	spin_lock(&cifs_sb->tlink_tree_lock);
-	tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+	tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 	if (tlink)
 		cifs_get_tlink(tlink);
 	spin_unlock(&cifs_sb->tlink_tree_lock);
@@ -3338,36 +3353,24 @@
 		newtlink = kzalloc(sizeof(*tlink), GFP_KERNEL);
 		if (newtlink == NULL)
 			return ERR_PTR(-ENOMEM);
-		newtlink->tl_index = fsuid;
+		newtlink->tl_uid = fsuid;
 		newtlink->tl_tcon = ERR_PTR(-EACCES);
 		set_bit(TCON_LINK_PENDING, &newtlink->tl_flags);
 		set_bit(TCON_LINK_IN_TREE, &newtlink->tl_flags);
 		cifs_get_tlink(newtlink);
 
-		ret = radix_tree_preload(GFP_KERNEL);
-		if (ret != 0) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
-
 		spin_lock(&cifs_sb->tlink_tree_lock);
 		/* was one inserted after previous search? */
-		tlink = radix_tree_lookup(&cifs_sb->tlink_tree, fsuid);
+		tlink = tlink_rb_search(&cifs_sb->tlink_tree, fsuid);
 		if (tlink) {
 			cifs_get_tlink(tlink);
 			spin_unlock(&cifs_sb->tlink_tree_lock);
-			radix_tree_preload_end();
 			kfree(newtlink);
 			goto wait_for_construction;
 		}
-		ret = radix_tree_insert(&cifs_sb->tlink_tree, fsuid, newtlink);
-		spin_unlock(&cifs_sb->tlink_tree_lock);
-		radix_tree_preload_end();
-		if (ret) {
-			kfree(newtlink);
-			return ERR_PTR(ret);
-		}
 		tlink = newtlink;
+		tlink_rb_insert(&cifs_sb->tlink_tree, tlink);
+		spin_unlock(&cifs_sb->tlink_tree_lock);
 	} else {
 wait_for_construction:
 		ret = wait_on_bit(&tlink->tl_flags, TCON_LINK_PENDING,
@@ -3413,39 +3416,39 @@
 {
 	struct cifs_sb_info *cifs_sb = container_of(work, struct cifs_sb_info,
 						    prune_tlinks.work);
-	struct tcon_link *tlink[8];
-	unsigned long now = jiffies;
-	unsigned long index = 0;
-	int i, ret;
+	struct rb_root *root = &cifs_sb->tlink_tree;
+	struct rb_node *node = rb_first(root);
+	struct rb_node *tmp;
+	struct tcon_link *tlink;
 
-	do {
-		spin_lock(&cifs_sb->tlink_tree_lock);
-		ret = radix_tree_gang_lookup(&cifs_sb->tlink_tree,
-					     (void **)tlink, index,
-					     ARRAY_SIZE(tlink));
-		/* increment index for next pass */
-		if (ret > 0)
-			index = tlink[ret - 1]->tl_index + 1;
-		for (i = 0; i < ret; i++) {
-			if (test_bit(TCON_LINK_MASTER, &tlink[i]->tl_flags) ||
-			    atomic_read(&tlink[i]->tl_count) != 0 ||
-			    time_after(tlink[i]->tl_time + TLINK_IDLE_EXPIRE,
-				       now)) {
-				tlink[i] = NULL;
-				continue;
-			}
-			cifs_get_tlink(tlink[i]);
-			clear_bit(TCON_LINK_IN_TREE, &tlink[i]->tl_flags);
-			radix_tree_delete(&cifs_sb->tlink_tree,
-					  tlink[i]->tl_index);
-		}
+	/*
+	 * Because we drop the spinlock in the loop in order to put the tlink,
+	 * it's not guarded against removal of links from the tree. The only
+	 * places that remove entries from the tree are this function and
+	 * umounts. Because this function is non-reentrant and is canceled
+	 * before umount can proceed, this is safe.
+	 */
+	spin_lock(&cifs_sb->tlink_tree_lock);
+	node = rb_first(root);
+	while (node != NULL) {
+		tmp = node;
+		node = rb_next(tmp);
+		tlink = rb_entry(tmp, struct tcon_link, tl_rbnode);
+
+		if (test_bit(TCON_LINK_MASTER, &tlink->tl_flags) ||
+		    atomic_read(&tlink->tl_count) != 0 ||
+		    time_after(tlink->tl_time + TLINK_IDLE_EXPIRE, jiffies))
+			continue;
+
+		cifs_get_tlink(tlink);
+		clear_bit(TCON_LINK_IN_TREE, &tlink->tl_flags);
+		rb_erase(tmp, root);
+
 		spin_unlock(&cifs_sb->tlink_tree_lock);
-
-		for (i = 0; i < ret; i++) {
-			if (tlink[i] != NULL)
-				cifs_put_tlink(tlink[i]);
-		}
-	} while (ret != 0);
+		cifs_put_tlink(tlink);
+		spin_lock(&cifs_sb->tlink_tree_lock);
+	}
+	spin_unlock(&cifs_sb->tlink_tree_lock);
 
 	queue_delayed_work(system_nrt_wq, &cifs_sb->prune_tlinks,
 				TLINK_IDLE_EXPIRE);
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index ae82159..06c3e83 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -146,12 +146,7 @@
 		rc = cifs_get_inode_info(&inode, full_path, buf, inode->i_sb,
 					 xid, NULL);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	return rc;
 }
@@ -253,12 +248,7 @@
 		list_add_tail(&pCifsFile->flist, &pCifsInode->openFileList);
 	spin_unlock(&cifs_file_list_lock);
 
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock inode %p", inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ)
-		pCifsInode->clientCanCacheRead = true;
+	cifs_set_oplock_level(pCifsInode, oplock);
 
 	file->private_data = pCifsFile;
 	return pCifsFile;
@@ -271,8 +261,9 @@
  */
 void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
 {
+	struct inode *inode = cifs_file->dentry->d_inode;
 	struct cifsTconInfo *tcon = tlink_tcon(cifs_file->tlink);
-	struct cifsInodeInfo *cifsi = CIFS_I(cifs_file->dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 	struct cifsLockInfo *li, *tmp;
 
 	spin_lock(&cifs_file_list_lock);
@@ -288,8 +279,7 @@
 	if (list_empty(&cifsi->openFileList)) {
 		cFYI(1, "closing last open instance for inode %p",
 			cifs_file->dentry->d_inode);
-		cifsi->clientCanCacheRead = false;
-		cifsi->clientCanCacheAll  = false;
+		cifs_set_oplock_level(cifsi, 0);
 	}
 	spin_unlock(&cifs_file_list_lock);
 
@@ -607,8 +597,6 @@
 		rc = filemap_write_and_wait(inode->i_mapping);
 		mapping_set_error(inode->i_mapping, rc);
 
-		pCifsInode->clientCanCacheAll = false;
-		pCifsInode->clientCanCacheRead = false;
 		if (tcon->unix_ext)
 			rc = cifs_get_inode_info_unix(&inode,
 				full_path, inode->i_sb, xid);
@@ -622,18 +610,9 @@
 	     invalidate the current end of file on the server
 	     we can not go to the server to get the new inod
 	     info */
-	if ((oplock & 0xF) == OPLOCK_EXCLUSIVE) {
-		pCifsInode->clientCanCacheAll = true;
-		pCifsInode->clientCanCacheRead = true;
-		cFYI(1, "Exclusive Oplock granted on inode %p",
-			 pCifsFile->dentry->d_inode);
-	} else if ((oplock & 0xF) == OPLOCK_READ) {
-		pCifsInode->clientCanCacheRead = true;
-		pCifsInode->clientCanCacheAll = false;
-	} else {
-		pCifsInode->clientCanCacheRead = false;
-		pCifsInode->clientCanCacheAll = false;
-	}
+
+	cifs_set_oplock_level(pCifsInode, oplock);
+
 	cifs_relock_file(pCifsFile);
 
 reopen_error_exit:
@@ -775,12 +754,6 @@
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 	tcon = tlink_tcon(((struct cifsFileInfo *)file->private_data)->tlink);
-
-	if (file->private_data == NULL) {
-		rc = -EBADF;
-		FreeXid(xid);
-		return rc;
-	}
 	netfid = ((struct cifsFileInfo *)file->private_data)->netfid;
 
 	if ((tcon->ses->capabilities & CAP_UNIX) &&
@@ -956,6 +929,7 @@
 ssize_t cifs_user_write(struct file *file, const char __user *write_data,
 	size_t write_size, loff_t *poffset)
 {
+	struct inode *inode = file->f_path.dentry->d_inode;
 	int rc = 0;
 	unsigned int bytes_written = 0;
 	unsigned int total_written;
@@ -963,7 +937,7 @@
 	struct cifsTconInfo *pTcon;
 	int xid, long_op;
 	struct cifsFileInfo *open_file;
-	struct cifsInodeInfo *cifsi = CIFS_I(file->f_path.dentry->d_inode);
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
 
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
 
@@ -1029,21 +1003,17 @@
 
 	cifs_stats_bytes_written(pTcon, total_written);
 
-	/* since the write may have blocked check these pointers again */
-	if ((file->f_path.dentry) && (file->f_path.dentry->d_inode)) {
-		struct inode *inode = file->f_path.dentry->d_inode;
 /* Do not update local mtime - server will set its actual value on write
- *		inode->i_ctime = inode->i_mtime =
- * 			current_fs_time(inode->i_sb);*/
-		if (total_written > 0) {
-			spin_lock(&inode->i_lock);
-			if (*poffset > file->f_path.dentry->d_inode->i_size)
-				i_size_write(file->f_path.dentry->d_inode,
-					*poffset);
-			spin_unlock(&inode->i_lock);
-		}
-		mark_inode_dirty_sync(file->f_path.dentry->d_inode);
+ *	inode->i_ctime = inode->i_mtime =
+ * 		current_fs_time(inode->i_sb);*/
+	if (total_written > 0) {
+		spin_lock(&inode->i_lock);
+		if (*poffset > inode->i_size)
+			i_size_write(inode, *poffset);
+		spin_unlock(&inode->i_lock);
 	}
+	mark_inode_dirty_sync(inode);
+
 	FreeXid(xid);
 	return total_written;
 }
@@ -1178,7 +1148,7 @@
 					bool fsuid_only)
 {
 	struct cifsFileInfo *open_file;
-	struct cifs_sb_info *cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+	struct cifs_sb_info *cifs_sb;
 	bool any_available = false;
 	int rc;
 
@@ -1192,6 +1162,8 @@
 		return NULL;
 	}
 
+	cifs_sb = CIFS_SB(cifs_inode->vfs_inode.i_sb);
+
 	/* only filter by fsuid on multiuser mounts */
 	if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MULTIUSER))
 		fsuid_only = false;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 077bf75..2fa22f2 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -63,8 +63,6 @@
 #ifdef CONFIG_CIFS_POSIX
 		case FS_IOC_GETFLAGS:
 			if (CIFS_UNIX_EXTATTR_CAP & caps) {
-				if (pSMBFile == NULL)
-					break;
 				rc = CIFSGetExtAttr(xid, tcon, pSMBFile->netfid,
 					&ExtAttrBits, &ExtAttrMask);
 				if (rc == 0)
@@ -80,8 +78,6 @@
 					rc = -EFAULT;
 					break;
 				}
-				if (pSMBFile == NULL)
-					break;
 				/* rc= CIFSGetExtAttr(xid,tcon,pSMBFile->netfid,
 					extAttrBits, &ExtAttrMask);*/
 			}
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c4e296f..43f1028 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -569,10 +569,9 @@
 
 				cFYI(1, "file id match, oplock break");
 				pCifsInode = CIFS_I(netfile->dentry->d_inode);
-				pCifsInode->clientCanCacheAll = false;
-				if (pSMB->OplockLevel == 0)
-					pCifsInode->clientCanCacheRead = false;
 
+				cifs_set_oplock_level(pCifsInode,
+						      pSMB->OplockLevel);
 				/*
 				 * cifs_oplock_break_put() can't be called
 				 * from here.  Get reference after queueing
@@ -722,3 +721,23 @@
 			   cifs_sb_master_tcon(cifs_sb)->treeName);
 	}
 }
+
+void cifs_set_oplock_level(struct cifsInodeInfo *cinode, __u32 oplock)
+{
+	oplock &= 0xF;
+
+	if (oplock == OPLOCK_EXCLUSIVE) {
+		cinode->clientCanCacheAll = true;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Exclusive Oplock granted on inode %p",
+		     &cinode->vfs_inode);
+	} else if (oplock == OPLOCK_READ) {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = true;
+		cFYI(1, "Level II Oplock granted on inode %p",
+		    &cinode->vfs_inode);
+	} else {
+		cinode->clientCanCacheAll = false;
+		cinode->clientCanCacheRead = false;
+	}
+}
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 1916164..4d78342 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -5410,9 +5410,7 @@
 	 * will return the blocks that include the delayed allocation
 	 * blocks for this file.
 	 */
-	spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
 	delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks;
-	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
 	return 0;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index ac943c1..aa99647 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -629,8 +629,6 @@
 
 static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
 {
-	if (arg != F_UNLCK)
-		locks_free_lock(*fl);
 	return -EINVAL;
 }
 
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index eac5f96..793cb9d 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -14,7 +14,7 @@
 #ifdef DEBUG_LOCKS
 	printk("lock creation\n");
 #endif
-	down(&hpfs_sb(s)->hpfs_creation_de);
+	mutex_lock(&hpfs_sb(s)->hpfs_creation_de);
 }
 
 void hpfs_unlock_creation(struct super_block *s)
@@ -22,7 +22,7 @@
 #ifdef DEBUG_LOCKS
 	printk("unlock creation\n");
 #endif
-	up(&hpfs_sb(s)->hpfs_creation_de);
+	mutex_unlock(&hpfs_sb(s)->hpfs_creation_de);
 }
 
 /* Map a sector into a buffer and return pointers to it and to the buffer. */
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index b59eac0..2fee17d 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -87,7 +87,7 @@
 	unsigned *sb_bmp_dir;		/* main bitmap directory */
 	unsigned sb_c_bitmap;		/* current bitmap */
 	unsigned sb_max_fwd_alloc;	/* max forward allocation */
-	struct semaphore hpfs_creation_de; /* when creating dirents, nobody else
+	struct mutex hpfs_creation_de;	/* when creating dirents, nobody else
 					   can alloc blocks */
 	/*unsigned sb_mounting : 1;*/
 	int sb_timeshift;
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index bb69389..6c5f015 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -491,7 +491,7 @@
 	sbi->sb_bmp_dir = NULL;
 	sbi->sb_cp_table = NULL;
 
-	init_MUTEX(&sbi->hpfs_creation_de);
+	mutex_init(&sbi->hpfs_creation_de);
 
 	uid = current_uid();
 	gid = current_gid();
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 538417c..c590d15 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1838,7 +1838,6 @@
  */
 #define JBD2_MAX_SLABS 8
 static struct kmem_cache *jbd2_slab[JBD2_MAX_SLABS];
-static DECLARE_MUTEX(jbd2_slab_create_sem);
 
 static const char *jbd2_slab_names[JBD2_MAX_SLABS] = {
 	"jbd2_1k", "jbd2_2k", "jbd2_4k", "jbd2_8k",
@@ -1859,6 +1858,7 @@
 
 static int jbd2_journal_create_slab(size_t size)
 {
+	static DEFINE_MUTEX(jbd2_slab_create_mutex);
 	int i = order_base_2(size) - 10;
 	size_t slab_size;
 
@@ -1870,16 +1870,16 @@
 
 	if (unlikely(i < 0))
 		i = 0;
-	down(&jbd2_slab_create_sem);
+	mutex_lock(&jbd2_slab_create_mutex);
 	if (jbd2_slab[i]) {
-		up(&jbd2_slab_create_sem);
+		mutex_unlock(&jbd2_slab_create_mutex);
 		return 0;	/* Already created */
 	}
 
 	slab_size = 1 << (i+10);
 	jbd2_slab[i] = kmem_cache_create(jbd2_slab_names[i], slab_size,
 					 slab_size, 0, NULL);
-	up(&jbd2_slab_create_sem);
+	mutex_unlock(&jbd2_slab_create_mutex);
 	if (!jbd2_slab[i]) {
 		printk(KERN_EMERG "JBD2: no memory for jbd2_slab cache\n");
 		return -ENOMEM;
diff --git a/fs/locks.c b/fs/locks.c
index 5b526a9..65765cb 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -235,11 +235,8 @@
 			fl->fl_ops->fl_copy_lock(new, fl);
 		new->fl_ops = fl->fl_ops;
 	}
-	if (fl->fl_lmops) {
-		if (fl->fl_lmops->fl_copy_lock)
-			fl->fl_lmops->fl_copy_lock(new, fl);
+	if (fl->fl_lmops)
 		new->fl_lmops = fl->fl_lmops;
-	}
 }
 
 /*
@@ -1428,8 +1425,9 @@
 		goto out;
 
 	if (my_before != NULL) {
-		*flp = *my_before;
 		error = lease->fl_lmops->fl_change(my_before, arg);
+		if (!error)
+			*flp = *my_before;
 		goto out;
 	}
 
@@ -1444,8 +1442,6 @@
 	return 0;
 
 out:
-	if (arg != F_UNLCK)
-		locks_free_lock(lease);
 	return error;
 }
 EXPORT_SYMBOL(generic_setlease);
@@ -1524,8 +1520,11 @@
 	}
 	lock_flocks();
 	error = __vfs_setlease(filp, arg, &fl);
-	if (error)
-		goto out_unlock;
+	if (error) {
+		unlock_flocks();
+		locks_free_lock(fl);
+		goto out_free_fasync;
+	}
 
 	/*
 	 * fasync_insert_entry() returns the old entry if any.
@@ -1541,12 +1540,12 @@
 		fl->fl_type = F_UNLCK | F_INPROGRESS;
 		fl->fl_break_time = jiffies - 10;
 		time_out_leases(inode);
-		goto out_unlock;
+	} else {
+		error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
 	}
-
-	error = __f_setown(filp, task_pid(current), PIDTYPE_PID, 0);
-out_unlock:
 	unlock_flocks();
+
+out_free_fasync:
 	if (new)
 		fasync_free(new);
 	return error;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index cd51a36..57afd4a 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -486,7 +486,7 @@
 
 /* dev_mtd.c */
 #ifdef CONFIG_MTD
-int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
+int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr);
 #else
 static inline int logfs_get_sb_mtd(struct logfs_super *s, int mtdnr)
 {
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 1e524fb..60677f9 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -884,7 +884,5 @@
 	dprintk("NFS: setlease(%s/%s, arg=%ld)\n",
 			file->f_path.dentry->d_parent->d_name.name,
 			file->f_path.dentry->d_name.name, arg);
-	if (arg != F_UNLCK)
-		locks_free_lock(*fl);
 	return -EINVAL;
 }
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index b7f818b..f1e5ec6 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -2652,6 +2652,7 @@
 	if ((status = vfs_setlease(fl->fl_file, fl->fl_type, &fl))) {
 		dprintk("NFSD: setlease failed [%d], no delegation\n", status);
 		dp->dl_flock = NULL;
+		locks_free_lock(fl);
 		unhash_delegation(dp);
 		flag = NFS4_OPEN_DELEGATE_NONE;
 		goto out;
diff --git a/include/asm-generic/stat.h b/include/asm-generic/stat.h
index 47e6417..bd8cad2 100644
--- a/include/asm-generic/stat.h
+++ b/include/asm-generic/stat.h
@@ -33,18 +33,18 @@
 	int		st_blksize;	/* Optimal block size for I/O.  */
 	int		__pad2;
 	long		st_blocks;	/* Number 512-byte blocks allocated. */
-	int		st_atime;	/* Time of last access.  */
-	unsigned int	st_atime_nsec;
-	int		st_mtime;	/* Time of last modification.  */
-	unsigned int	st_mtime_nsec;
-	int		st_ctime;	/* Time of last status change.  */
-	unsigned int	st_ctime_nsec;
+	long		st_atime;	/* Time of last access.  */
+	unsigned long	st_atime_nsec;
+	long		st_mtime;	/* Time of last modification.  */
+	unsigned long	st_mtime_nsec;
+	long		st_ctime;	/* Time of last status change.  */
+	unsigned long	st_ctime_nsec;
 	unsigned int	__unused4;
 	unsigned int	__unused5;
 };
 
-#if __BITS_PER_LONG != 64
 /* This matches struct stat64 in glibc2.1. Only used for 32 bit. */
+#if __BITS_PER_LONG != 64 || defined(__ARCH_WANT_STAT64)
 struct stat64 {
 	unsigned long long st_dev;	/* Device.  */
 	unsigned long long st_ino;	/* File serial number.  */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 1eb2939..334d68a 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1056,7 +1056,6 @@
 	int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
 	void (*fl_notify)(struct file_lock *);	/* unblock callback */
 	int (*fl_grant)(struct file_lock *, struct file_lock *, int);
-	void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
 	void (*fl_release_private)(struct file_lock *);
 	void (*fl_break)(struct file_lock *);
 	int (*fl_mylease)(struct file_lock *, struct file_lock *);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8a389b6..41cb31f 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -96,11 +96,15 @@
  */
 #define in_nmi()	(preempt_count() & NMI_MASK)
 
-#if defined(CONFIG_PREEMPT)
+#if defined(CONFIG_PREEMPT) && defined(CONFIG_BKL)
 # define PREEMPT_INATOMIC_BASE kernel_locked()
-# define PREEMPT_CHECK_OFFSET 1
 #else
 # define PREEMPT_INATOMIC_BASE 0
+#endif
+
+#if defined(CONFIG_PREEMPT)
+# define PREEMPT_CHECK_OFFSET 1
+#else
 # define PREEMPT_CHECK_OFFSET 0
 #endif
 
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 1f66fa0..889b35a 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -407,8 +407,6 @@
 
 /* i2c adapter classes (bitmask) */
 #define I2C_CLASS_HWMON		(1<<0)	/* lm_sensors, ... */
-#define I2C_CLASS_TV_ANALOG	(1<<1)	/* bttv + friends */
-#define I2C_CLASS_TV_DIGITAL	(1<<2)	/* dvb cards */
 #define I2C_CLASS_DDC		(1<<3)	/* DDC bus on graphics adapters */
 #define I2C_CLASS_SPD		(1<<7)	/* SPD EEPROMs and similar */
 
diff --git a/include/linux/irq.h b/include/linux/irq.h
index e963911..abde252 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -412,6 +412,11 @@
 	irq_free_descs(irq, 1);
 }
 
+static inline int irq_reserve_irq(unsigned int irq)
+{
+	return irq_reserve_irqs(irq, 1);
+}
+
 #endif /* CONFIG_GENERIC_HARDIRQS */
 
 #endif /* !CONFIG_S390 */
diff --git a/include/linux/irqnr.h b/include/linux/irqnr.h
index 05aa8c23..3bc4dca 100644
--- a/include/linux/irqnr.h
+++ b/include/linux/irqnr.h
@@ -43,7 +43,7 @@
 		else
 
 #ifdef CONFIG_SMP
-#define irq_node(irq)	(irq_to_desc(irq)->node)
+#define irq_node(irq)	(irq_get_irq_data(irq)->node)
 #else
 #define irq_node(irq)	0
 #endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 87e2c2e..c6bcfe9 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2465,6 +2465,7 @@
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_SMBUS	0x1c22
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MIN	0x1c41
 #define PCI_DEVICE_ID_INTEL_COUGARPOINT_LPC_MAX	0x1c5f
+#define PCI_DEVICE_ID_INTEL_PATSBURG_SMBUS	0x1d22
 #define PCI_DEVICE_ID_INTEL_PATSBURG_LPC	0x1d40
 #define PCI_DEVICE_ID_INTEL_82801AA_0	0x2410
 #define PCI_DEVICE_ID_INTEL_82801AA_1	0x2411
diff --git a/include/linux/semaphore.h b/include/linux/semaphore.h
index 5310d27..39fa049 100644
--- a/include/linux/semaphore.h
+++ b/include/linux/semaphore.h
@@ -29,9 +29,6 @@
 #define DEFINE_SEMAPHORE(name)	\
 	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
 
-#define DECLARE_MUTEX(name)	\
-	struct semaphore name = __SEMAPHORE_INITIALIZER(name, 1)
-
 static inline void sema_init(struct semaphore *sem, int val)
 {
 	static struct lock_class_key __key;
@@ -39,9 +36,6 @@
 	lockdep_init_map(&sem->lock.dep_map, "semaphore->lock", &__key, 0);
 }
 
-#define init_MUTEX(sem)		sema_init(sem, 1)
-#define init_MUTEX_LOCKED(sem)	sema_init(sem, 0)
-
 extern void down(struct semaphore *sem);
 extern int __must_check down_interruptible(struct semaphore *sem);
 extern int __must_check down_killable(struct semaphore *sem);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 92e52a1..b4d7710 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -204,6 +204,7 @@
 /**
  * struct spi_master - interface to SPI master controller
  * @dev: device interface to this driver
+ * @list: link with the global spi_master list
  * @bus_num: board-specific (and often SOC-specific) identifier for a
  *	given SPI controller.
  * @num_chipselect: chipselects are used to distinguish individual
@@ -238,6 +239,8 @@
 struct spi_master {
 	struct device	dev;
 
+	struct list_head list;
+
 	/* other than negative (== assign one dynamically), bus_num is fully
 	 * board-specific.  usually that simplifies to being SOC-specific.
 	 * example:  one SOC has three SPI controllers, numbered 0..2,
diff --git a/include/net/caif/caif_dev.h b/include/net/caif/caif_dev.h
index 6da573c..8eff83b 100644
--- a/include/net/caif/caif_dev.h
+++ b/include/net/caif/caif_dev.h
@@ -28,7 +28,7 @@
  * @sockaddr:		Socket address to connect.
  * @priority:		Priority of the connection.
  * @link_selector:	Link selector (high bandwidth or low latency)
- * @link_name:		Name of the CAIF Link Layer to use.
+ * @ifindex:		kernel index of the interface.
  * @param:		Connect Request parameters (CAIF_SO_REQ_PARAM).
  *
  * This struct is used when connecting a CAIF channel.
@@ -39,7 +39,7 @@
 	struct sockaddr_caif sockaddr;
 	enum caif_channel_priority priority;
 	enum caif_link_selector link_selector;
-	char link_name[16];
+	int ifindex;
 	struct caif_param param;
 };
 
diff --git a/include/net/caif/caif_spi.h b/include/net/caif/caif_spi.h
index ce4570d..87c3d11 100644
--- a/include/net/caif/caif_spi.h
+++ b/include/net/caif/caif_spi.h
@@ -121,6 +121,8 @@
 	wait_queue_head_t wait;
 	spinlock_t lock;
 	bool flow_stop;
+	bool slave;
+	bool slave_talked;
 #ifdef CONFIG_DEBUG_FS
 	enum cfspi_state dbg_state;
 	u16 pcmd;
diff --git a/include/net/caif/cfcnfg.h b/include/net/caif/cfcnfg.h
index bd646fa..f688478 100644
--- a/include/net/caif/cfcnfg.h
+++ b/include/net/caif/cfcnfg.h
@@ -139,10 +139,10 @@
 		     enum cfcnfg_phy_preference phy_pref);
 
 /**
- * cfcnfg_get_named() - Get the Physical Identifier of CAIF Link Layer
+ * cfcnfg_get_id_from_ifi() - Get the Physical Identifier for an ifindex,
+ * 			matching the CAIF physical id with the kernel interface id.
  * @cnfg:	Configuration object
- * @name:	Name of the Physical Layer (Caif Link Layer)
+ * @ifi:	ifindex obtained from socket.c bindtodevice.
  */
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name);
-
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi);
 #endif				/* CFCNFG_H_ */
diff --git a/include/net/netlink.h b/include/net/netlink.h
index f3b201d..9801c55 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -384,7 +384,7 @@
  *
  * Returns the first attribute which matches the specified type.
  */
-static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
 					     int hdrlen, int attrtype)
 {
 	return nla_find(nlmsg_attrdata(nlh, hdrlen),
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index 14be49b..f986ab7 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -721,7 +721,7 @@
  * struct fc_disc - Discovery context
  * @retry_count:   Number of retries
  * @pending:       1 if discovery is pending, 0 if not
- * @requesting:    1 if discovery has been requested, 0 if not
+ * @requested:     1 if discovery has been requested, 0 if not
  * @seq_count:     Number of sequences used for discovery
  * @buf_len:       Length of the discovery buffer
  * @disc_id:       Discovery ID
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index a8f3701..53a9e88 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -137,7 +137,7 @@
 		void *buff;
 		unsigned alloc_size; /* 0 here means: don't call kfree */
 		unsigned total_bytes;
-	} set_attr, enc_get_attr, get_attr;
+	} cdb_cont, set_attr, enc_get_attr, get_attr;
 
 	struct _osd_io_info {
 		struct bio *bio;
@@ -448,6 +448,20 @@
 int osd_req_read_kern(struct osd_request *or,
 	const struct osd_obj_id *obj, u64 offset, void *buff, u64 len);
 
+/* Scatter/Gather write/read commands */
+int osd_req_write_sg(struct osd_request *or,
+	const struct osd_obj_id *obj, struct bio *bio,
+	const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_read_sg(struct osd_request *or,
+	const struct osd_obj_id *obj, struct bio *bio,
+	const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_write_sg_kern(struct osd_request *or,
+	const struct osd_obj_id *obj, void **buff,
+	const struct osd_sg_entry *sglist, unsigned numentries);
+int osd_req_read_sg_kern(struct osd_request *or,
+	const struct osd_obj_id *obj, void **buff,
+	const struct osd_sg_entry *sglist, unsigned numentries);
+
 /*
  * Root/Partition/Collection/Object Attributes commands
  */
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index 6856612..a6026da 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -631,4 +631,46 @@
 	put_unaligned_le16(bit_mask, &cap->permissions_bit_mask);
 }
 
+/* osd2r05a sec 5.3: CDB continuation segment formats */
+enum osd_continuation_segment_format {
+	CDB_CONTINUATION_FORMAT_V2 = 0x01,
+};
+
+struct osd_continuation_segment_header {
+	u8	format;
+	u8	reserved1;
+	__be16	service_action;
+	__be32	reserved2;
+	u8	integrity_check[OSDv2_CRYPTO_KEYID_SIZE];
+} __packed;
+
+/* osd2r05a sec 5.4.1: CDB continuation descriptors */
+enum osd_continuation_descriptor_type {
+	NO_MORE_DESCRIPTORS = 0x0000,
+	SCATTER_GATHER_LIST = 0x0001,
+	QUERY_LIST = 0x0002,
+	USER_OBJECT = 0x0003,
+	COPY_USER_OBJECT_SOURCE = 0x0101,
+	EXTENSION_CAPABILITIES = 0xFFEE
+};
+
+struct osd_continuation_descriptor_header {
+	__be16	type;
+	u8	reserved;
+	u8	pad_length;
+	__be32	length;
+} __packed;
+
+
+/* osd2r05a sec 5.4.2: Scatter/gather list */
+struct osd_sg_list_entry {
+	__be64 offset;
+	__be64 len;
+};
+
+struct osd_sg_continuation_descriptor {
+	struct osd_continuation_descriptor_header hdr;
+	struct osd_sg_list_entry entries[];
+};
+
 #endif /* ndef __OSD_PROTOCOL_H__ */
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
index 3f5e88c..bd0be7e 100644
--- a/include/scsi/osd_types.h
+++ b/include/scsi/osd_types.h
@@ -37,4 +37,9 @@
 	void *val_ptr;		/* in network order */
 };
 
+struct osd_sg_entry {
+	u64 offset;
+	u64 len;
+};
+
 #endif /* ndef __OSD_TYPES_H__ */
diff --git a/kernel/exit.c b/kernel/exit.c
index b194feb..21aa7b3 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -96,6 +96,14 @@
 		sig->tty = NULL;
 	} else {
 		/*
+		 * This can only happen if the caller is de_thread().
+		 * FIXME: this is a temporary hack; we should teach
+		 * posix-cpu-timers to handle this case correctly.
+		 */
+		if (unlikely(has_group_leader_pid(tsk)))
+			posix_cpu_timers_exit_group(tsk);
+
+		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 644e8d5..5f92acc5 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -324,6 +324,10 @@
 	if (!desc)
 		return;
 
+	if (WARN(!desc->irq_data.chip || !desc->irq_data.chip->irq_enable,
+	    KERN_ERR "enable_irq before setup/request_irq: irq %u\n", irq))
+		return;
+
 	chip_bus_lock(desc);
 	raw_spin_lock_irqsave(&desc->lock, flags);
 	__enable_irq(desc, irq, false);
diff --git a/kernel/relay.c b/kernel/relay.c
index c7cf397..859ea5a 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -70,17 +70,10 @@
  */
 static struct page **relay_alloc_page_array(unsigned int n_pages)
 {
-	struct page **array;
-	size_t pa_size = n_pages * sizeof(struct page *);
-
-	if (pa_size > PAGE_SIZE) {
-		array = vmalloc(pa_size);
-		if (array)
-			memset(array, 0, pa_size);
-	} else {
-		array = kzalloc(pa_size, GFP_KERNEL);
-	}
-	return array;
+	const size_t pa_size = n_pages * sizeof(struct page *);
+	if (pa_size > PAGE_SIZE)
+		return vzalloc(pa_size);
+	return kzalloc(pa_size, GFP_KERNEL);
 }
 
 /*
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index bafba68..6e3c41a 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -43,7 +43,7 @@
 static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
 #endif
 
-static int __initdata no_watchdog;
+static int no_watchdog;
 
 
 /* boot commands */
diff --git a/mm/filemap.c b/mm/filemap.c
index 75572b5..61ba5e4 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1563,8 +1563,10 @@
 			goto no_cached_page;
 	}
 
-	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags))
+	if (!lock_page_or_retry(page, vma->vm_mm, vmf->flags)) {
+		page_cache_release(page);
 		return ret | VM_FAULT_RETRY;
+	}
 
 	/* Did it get truncated? */
 	if (unlikely(page->mapping != mapping)) {
diff --git a/mm/vmstat.c b/mm/vmstat.c
index cd2e42b..42eac4d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -949,7 +949,7 @@
 	v[PGPGIN] /= 2;		/* sectors -> kbytes */
 	v[PGPGOUT] /= 2;
 #endif
-	return m->private + *pos;
+	return (unsigned long *)m->private + *pos;
 }
 
 static void *vmstat_next(struct seq_file *m, void *arg, loff_t *pos)
diff --git a/net/caif/caif_config_util.c b/net/caif/caif_config_util.c
index 76ae683..d522d8c 100644
--- a/net/caif/caif_config_util.c
+++ b/net/caif/caif_config_util.c
@@ -16,11 +16,18 @@
 {
 	struct dev_info *dev_info;
 	enum cfcnfg_phy_preference pref;
-	memset(l, 0, sizeof(*l));
-	l->priority = s->priority;
+	int res;
 
-	if (s->link_name[0] != '\0')
-		l->phyid = cfcnfg_get_named(cnfg, s->link_name);
+	memset(l, 0, sizeof(*l));
+	/* In the CAIF protocol, a low value means high priority */
+	l->priority = CAIF_PRIO_MAX - s->priority + 1;
+
+	if (s->ifindex != 0) {
+		res = cfcnfg_get_id_from_ifi(cnfg, s->ifindex);
+		if (res < 0)
+			return res;
+		l->phyid = res;
+	}
 	else {
 		switch (s->link_selector) {
 		case CAIF_LINK_HIGH_BANDW:
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index b99369a..a42a408 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -307,6 +307,8 @@
 
 	case NETDEV_UNREGISTER:
 		caifd = caif_get(dev);
+		if (caifd == NULL)
+			break;
 		netdev_info(dev, "unregister\n");
 		atomic_set(&caifd->state, what);
 		caif_device_destroy(dev);
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 2eca2dd..1bf0cf5 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -716,8 +716,7 @@
 {
 	struct sock *sk = sock->sk;
 	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
-	int prio, linksel;
-	struct ifreq ifreq;
+	int linksel;
 
 	if (cf_sk->sk.sk_socket->state != SS_UNCONNECTED)
 		return -ENOPROTOOPT;
@@ -735,33 +734,6 @@
 		release_sock(&cf_sk->sk);
 		return 0;
 
-	case SO_PRIORITY:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(int))
-			return -EINVAL;
-		if (copy_from_user(&prio, ov, sizeof(int)))
-			return -EINVAL;
-		lock_sock(&(cf_sk->sk));
-		cf_sk->conn_req.priority = prio;
-		release_sock(&cf_sk->sk);
-		return 0;
-
-	case SO_BINDTODEVICE:
-		if (lvl != SOL_SOCKET)
-			goto bad_sol;
-		if (ol < sizeof(struct ifreq))
-			return -EINVAL;
-		if (copy_from_user(&ifreq, ov, sizeof(ifreq)))
-			return -EFAULT;
-		lock_sock(&(cf_sk->sk));
-		strncpy(cf_sk->conn_req.link_name, ifreq.ifr_name,
-			sizeof(cf_sk->conn_req.link_name));
-		cf_sk->conn_req.link_name
-			[sizeof(cf_sk->conn_req.link_name)-1] = 0;
-		release_sock(&cf_sk->sk);
-		return 0;
-
 	case CAIFSO_REQ_PARAM:
 		if (lvl != SOL_CAIF)
 			goto bad_sol;
@@ -880,6 +852,18 @@
 	sock->state = SS_CONNECTING;
 	sk->sk_state = CAIF_CONNECTING;
 
+	/* Check priority value coming from socket */
+	/* if priority value is out of range it will be adjusted */
+	if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX)
+		cf_sk->conn_req.priority = CAIF_PRIO_MAX;
+	else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN)
+		cf_sk->conn_req.priority = CAIF_PRIO_MIN;
+	else
+		cf_sk->conn_req.priority = cf_sk->sk.sk_priority;
+
+	/* ifindex = id of the interface. */
+	cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if;
+
 	dbfs_atomic_inc(&cnt.num_connect_req);
 	cf_sk->layer.receive = caif_sktrecv_cb;
 	err = caif_connect_client(&cf_sk->conn_req,
@@ -905,6 +889,7 @@
 	cf_sk->maxframe = mtu - (headroom + tailroom);
 	if (cf_sk->maxframe < 1) {
 		pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu);
+		err = -ENODEV;
 		goto out;
 	}
 
@@ -1142,7 +1127,7 @@
 	set_rx_flow_on(cf_sk);
 
 	/* Set default options on configuration */
-	cf_sk->conn_req.priority = CAIF_PRIO_NORMAL;
+	cf_sk->sk.sk_priority = CAIF_PRIO_NORMAL;
 	cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY;
 	cf_sk->conn_req.protocol = protocol;
 	/* Increase the number of sockets created. */
diff --git a/net/caif/cfcnfg.c b/net/caif/cfcnfg.c
index 41adafd1..21ede14 100644
--- a/net/caif/cfcnfg.c
+++ b/net/caif/cfcnfg.c
@@ -173,18 +173,15 @@
 	return NULL;
 }
 
-int cfcnfg_get_named(struct cfcnfg *cnfg, char *name)
+
+int cfcnfg_get_id_from_ifi(struct cfcnfg *cnfg, int ifi)
 {
 	int i;
-
-	/* Try to match with specified name */
-	for (i = 0; i < MAX_PHY_LAYERS; i++) {
-		if (cnfg->phy_layers[i].frm_layer != NULL
-		    && strcmp(cnfg->phy_layers[i].phy_layer->name,
-			      name) == 0)
-			return cnfg->phy_layers[i].frm_layer->id;
-	}
-	return 0;
+	for (i = 0; i < MAX_PHY_LAYERS; i++)
+		if (cnfg->phy_layers[i].frm_layer != NULL &&
+				cnfg->phy_layers[i].ifindex == ifi)
+			return i;
+	return -ENODEV;
 }
 
 int cfcnfg_disconn_adapt_layer(struct cfcnfg *cnfg, struct cflayer *adap_layer)
diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
index 08f267a..3cd8f97 100644
--- a/net/caif/cfctrl.c
+++ b/net/caif/cfctrl.c
@@ -361,11 +361,10 @@
 	struct cfctrl_request_info *p, *tmp;
 	struct cfctrl *ctrl = container_obj(layr);
 	spin_lock(&ctrl->info_list_lock);
-	pr_warn("enter\n");
 
 	list_for_each_entry_safe(p, tmp, &ctrl->list, list) {
 		if (p->client_layer == adap_layer) {
-			pr_warn("cancel req :%d\n", p->sequence_no);
+			pr_debug("cancel req :%d\n", p->sequence_no);
 			list_del(&p->list);
 			kfree(p);
 		}
diff --git a/net/caif/cfdbgl.c b/net/caif/cfdbgl.c
index 496fda9..11a2af4 100644
--- a/net/caif/cfdbgl.c
+++ b/net/caif/cfdbgl.c
@@ -12,6 +12,8 @@
 #include <net/caif/cfsrvl.h>
 #include <net/caif/cfpkt.h>
 
+#define container_obj(layr) ((struct cfsrvl *) layr)
+
 static int cfdbgl_receive(struct cflayer *layr, struct cfpkt *pkt);
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt);
 
@@ -38,5 +40,17 @@
 
 static int cfdbgl_transmit(struct cflayer *layr, struct cfpkt *pkt)
 {
+	struct cfsrvl *service = container_obj(layr);
+	struct caif_payload_info *info;
+	int ret;
+
+	if (!cfsrvl_ready(service, &ret))
+		return ret;
+
+	/* Add info for MUX-layer to route the packet out */
+	info = cfpkt_info(pkt);
+	info->channel_id = service->layer.id;
+	info->dev_info = &service->dev_info;
+
 	return layr->dn->transmit(layr->dn, pkt);
 }
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index bde8481..e2fb5fa 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -193,7 +193,7 @@
 
 static int cfrfml_transmit_segment(struct cfrfml *rfml, struct cfpkt *pkt)
 {
-	caif_assert(cfpkt_getlen(pkt) >= rfml->fragment_size);
+	caif_assert(cfpkt_getlen(pkt) < rfml->fragment_size);
 
 	/* Add info for MUX-layer to route the packet out. */
 	cfpkt_info(pkt)->channel_id = rfml->serv.layer.id;
diff --git a/net/core/dev.c b/net/core/dev.c
index 35dfb83..0dd54a6 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2131,7 +2131,7 @@
 	} else {
 		struct sock *sk = skb->sk;
 		queue_index = sk_tx_queue_get(sk);
-		if (queue_index < 0) {
+		if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
 			queue_index = 0;
 			if (dev->real_num_tx_queues > 1)
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index a29edf2..c079cc0 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -47,11 +47,8 @@
 static inline void fib_result_assign(struct fib_result *res,
 				     struct fib_info *fi)
 {
-	if (res->fi != NULL)
-		fib_info_put(res->fi);
+	/* we used to play games with refcounts, but we now use RCU */
 	res->fi = fi;
-	if (fi != NULL)
-		atomic_inc(&fi->fib_clntref);
 }
 
 #endif /* _FIB_LOOKUP_H */
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index ba80426..2ada171 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -490,9 +490,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 		struct inet_sock *inet = inet_sk(sk);
 
 		entry.family = sk->sk_family;
@@ -512,7 +514,7 @@
 		entry.dport = ntohs(inet->inet_dport);
 		entry.userlocks = sk->sk_userlocks;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -527,9 +529,11 @@
 {
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
 		struct inet_diag_entry entry;
-		struct rtattr *bc = (struct rtattr *)(r + 1);
+		const struct nlattr *bc = nlmsg_find_attr(cb->nlh,
+							  sizeof(*r),
+							  INET_DIAG_REQ_BYTECODE);
 
 		entry.family = tw->tw_family;
 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -548,7 +552,7 @@
 		entry.dport = ntohs(tw->tw_dport);
 		entry.userlocks = 0;
 
-		if (!inet_diag_bc_run(RTA_DATA(bc), RTA_PAYLOAD(bc), &entry))
+		if (!inet_diag_bc_run(nla_data(bc), nla_len(bc), &entry))
 			return 0;
 	}
 
@@ -618,7 +622,7 @@
 	struct inet_diag_req *r = NLMSG_DATA(cb->nlh);
 	struct inet_connection_sock *icsk = inet_csk(sk);
 	struct listen_sock *lopt;
-	struct rtattr *bc = NULL;
+	const struct nlattr *bc = NULL;
 	struct inet_sock *inet = inet_sk(sk);
 	int j, s_j;
 	int reqnum, s_reqnum;
@@ -638,8 +642,9 @@
 	if (!lopt || !lopt->qlen)
 		goto out;
 
-	if (cb->nlh->nlmsg_len > 4 + NLMSG_SPACE(sizeof(*r))) {
-		bc = (struct rtattr *)(r + 1);
+	if (nlmsg_attrlen(cb->nlh, sizeof(*r))) {
+		bc = nlmsg_find_attr(cb->nlh, sizeof(*r),
+				     INET_DIAG_REQ_BYTECODE);
 		entry.sport = inet->inet_num;
 		entry.userlocks = sk->sk_userlocks;
 	}
@@ -672,8 +677,8 @@
 					&ireq->rmt_addr;
 				entry.dport = ntohs(ireq->rmt_port);
 
-				if (!inet_diag_bc_run(RTA_DATA(bc),
-						    RTA_PAYLOAD(bc), &entry))
+				if (!inet_diag_bc_run(nla_data(bc),
+						      nla_len(bc), &entry))
 					continue;
 			}
 
diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
index 3cad259..3fac340 100644
--- a/net/ipv4/netfilter/arp_tables.c
+++ b/net/ipv4/netfilter/arp_tables.c
@@ -927,6 +927,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
index d31b007..a846d63 100644
--- a/net/ipv4/netfilter/ip_tables.c
+++ b/net/ipv4/netfilter/ip_tables.c
@@ -1124,6 +1124,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
diff --git a/net/ipv4/netfilter/nf_nat_core.c b/net/ipv4/netfilter/nf_nat_core.c
index 295c974..c04787c 100644
--- a/net/ipv4/netfilter/nf_nat_core.c
+++ b/net/ipv4/netfilter/nf_nat_core.c
@@ -47,26 +47,6 @@
 	return rcu_dereference(nf_nat_protos[protonum]);
 }
 
-static const struct nf_nat_protocol *
-nf_nat_proto_find_get(u_int8_t protonum)
-{
-	const struct nf_nat_protocol *p;
-
-	rcu_read_lock();
-	p = __nf_nat_proto_find(protonum);
-	if (!try_module_get(p->me))
-		p = &nf_nat_unknown_protocol;
-	rcu_read_unlock();
-
-	return p;
-}
-
-static void
-nf_nat_proto_put(const struct nf_nat_protocol *p)
-{
-	module_put(p->me);
-}
-
 /* We keep an extra hash for each conntrack, for fast searching. */
 static inline unsigned int
 hash_by_src(const struct net *net, u16 zone,
@@ -588,6 +568,26 @@
 #include <linux/netfilter/nfnetlink.h>
 #include <linux/netfilter/nfnetlink_conntrack.h>
 
+static const struct nf_nat_protocol *
+nf_nat_proto_find_get(u_int8_t protonum)
+{
+	const struct nf_nat_protocol *p;
+
+	rcu_read_lock();
+	p = __nf_nat_proto_find(protonum);
+	if (!try_module_get(p->me))
+		p = &nf_nat_unknown_protocol;
+	rcu_read_unlock();
+
+	return p;
+}
+
+static void
+nf_nat_proto_put(const struct nf_nat_protocol *p)
+{
+	module_put(p->me);
+}
+
 static const struct nla_policy protonat_nla_policy[CTA_PROTONAT_MAX+1] = {
 	[CTA_PROTONAT_PORT_MIN]	= { .type = NLA_U16 },
 	[CTA_PROTONAT_PORT_MAX]	= { .type = NLA_U16 },
diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
index 51df035..4555823 100644
--- a/net/ipv6/netfilter/ip6_tables.c
+++ b/net/ipv6/netfilter/ip6_tables.c
@@ -1137,6 +1137,7 @@
 			private = &tmp;
 		}
 #endif
+		memset(&info, 0, sizeof(info));
 		info.valid_hooks = t->valid_hooks;
 		memcpy(info.hook_entry, private->hook_entry,
 		       sizeof(info.hook_entry));
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 25661f9..fc32833 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -2741,6 +2741,7 @@
 	kfree(net->ipv6.ip6_prohibit_entry);
 	kfree(net->ipv6.ip6_blk_hole_entry);
 #endif
+	dst_entries_destroy(&net->ipv6.ip6_dst_ops);
 }
 
 static struct pernet_operations ip6_route_net_ops = {
@@ -2832,5 +2833,6 @@
 	xfrm6_fini();
 	fib6_gc_cleanup();
 	unregister_pernet_subsys(&ip6_route_net_ops);
+	dst_entries_destroy(&ip6_dst_blackhole_ops);
 	kmem_cache_destroy(ip6_dst_ops_template.kmem_cachep);
 }
diff --git a/net/l2tp/l2tp_debugfs.c b/net/l2tp/l2tp_debugfs.c
index 104ec3b..b8dbae8 100644
--- a/net/l2tp/l2tp_debugfs.c
+++ b/net/l2tp/l2tp_debugfs.c
@@ -249,7 +249,7 @@
 	struct seq_file *seq;
 	int rc = -ENOMEM;
 
-	pd = kzalloc(GFP_KERNEL, sizeof(*pd));
+	pd = kzalloc(sizeof(*pd), GFP_KERNEL);
 	if (pd == NULL)
 		goto out;
 
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 1eacf8d..27a5ea6 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1312,7 +1312,8 @@
 	if (!hash) {
 		*vmalloced = 1;
 		printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
-		hash = __vmalloc(sz, GFP_KERNEL | __GFP_ZERO, PAGE_KERNEL);
+		hash = __vmalloc(sz, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
+				 PAGE_KERNEL);
 	}
 
 	if (hash && nulls)
diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
index ed6d929..dc7bb74 100644
--- a/net/netfilter/nf_conntrack_proto.c
+++ b/net/netfilter/nf_conntrack_proto.c
@@ -292,6 +292,12 @@
 
 		for (i = 0; i < MAX_NF_CT_PROTO; i++)
 			proto_array[i] = &nf_conntrack_l4proto_generic;
+
+		/* Before making proto_array visible to lockless readers,
+		 * we must make sure its content is committed to memory.
+		 */
+		smp_wmb();
+
 		nf_ct_protos[l4proto->l3proto] = proto_array;
 	} else if (nf_ct_protos[l4proto->l3proto][l4proto->l4proto] !=
 					&nf_conntrack_l4proto_generic) {
diff --git a/net/rds/loop.c b/net/rds/loop.c
index c390156..aeec1d4 100644
--- a/net/rds/loop.c
+++ b/net/rds/loop.c
@@ -134,8 +134,12 @@
 static void rds_loop_conn_free(void *arg)
 {
 	struct rds_loop_connection *lc = arg;
+	unsigned long flags;
+
 	rdsdebug("lc %p\n", lc);
+	spin_lock_irqsave(&loop_conns_lock, flags);
 	list_del(&lc->loop_node);
+	spin_unlock_irqrestore(&loop_conns_lock, flags);
 	kfree(lc);
 }
 
diff --git a/net/rds/tcp.c b/net/rds/tcp.c
index 08a8c6c..8e0a320 100644
--- a/net/rds/tcp.c
+++ b/net/rds/tcp.c
@@ -221,7 +221,13 @@
 static void rds_tcp_conn_free(void *arg)
 {
 	struct rds_tcp_connection *tc = arg;
+	unsigned long flags;
 	rdsdebug("freeing tc %p\n", tc);
+
+	spin_lock_irqsave(&rds_tcp_conn_lock, flags);
+	list_del(&tc->t_tcp_node);
+	spin_unlock_irqrestore(&rds_tcp_conn_lock, flags);
+
 	kmem_cache_free(rds_tcp_conn_slab, tc);
 }
 
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index 37dff78..d49c40f 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -34,8 +34,6 @@
 	.populate	= cgrp_populate,
 #ifdef CONFIG_NET_CLS_CGROUP
 	.subsys_id	= net_cls_subsys_id,
-#else
-#define net_cls_subsys_id net_cls_subsys.subsys_id
 #endif
 	.module		= THIS_MODULE,
 };
diff --git a/net/sched/em_text.c b/net/sched/em_text.c
index 7632532..ea8f566 100644
--- a/net/sched/em_text.c
+++ b/net/sched/em_text.c
@@ -103,7 +103,8 @@
 
 static void em_text_destroy(struct tcf_proto *tp, struct tcf_ematch *m)
 {
-	textsearch_destroy(EM_TEXT_PRIV(m)->config);
+	if (EM_TEXT_PRIV(m) && EM_TEXT_PRIV(m)->config)
+		textsearch_destroy(EM_TEXT_PRIV(m)->config);
 }
 
 static int em_text_dump(struct sk_buff *skb, struct tcf_ematch *m)
diff --git a/net/x25/x25_facilities.c b/net/x25/x25_facilities.c
index 771bab0..3a8c4c4 100644
--- a/net/x25/x25_facilities.c
+++ b/net/x25/x25_facilities.c
@@ -134,15 +134,15 @@
 		case X25_FAC_CLASS_D:
 			switch (*p) {
 			case X25_FAC_CALLING_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->calling_len = p[2];
 				memcpy(dte_facs->calling_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLING_AE;
 				break;
 			case X25_FAC_CALLED_AE:
-				if (p[1] > X25_MAX_DTE_FACIL_LEN)
-					break;
+				if (p[1] > X25_MAX_DTE_FACIL_LEN || p[1] <= 1)
+					return 0;
 				dte_facs->called_len = p[2];
 				memcpy(dte_facs->called_ae, &p[3], p[1] - 1);
 				*vc_fac_mask |= X25_MASK_CALLED_AE;
diff --git a/net/x25/x25_in.c b/net/x25/x25_in.c
index 6317896..f729f02 100644
--- a/net/x25/x25_in.c
+++ b/net/x25/x25_in.c
@@ -119,6 +119,8 @@
 						&x25->vc_facil_mask);
 			if (len > 0)
 				skb_pull(skb, len);
+			else
+				return -1;
 			/*
 			 *	Copy any Call User Data.
 			 */
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 90b54d4..e3c7fc0 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2794,12 +2794,8 @@
 			WARN("__func__ should be used instead of gcc specific __FUNCTION__\n"  . $herecurr);
 		}
 
-# check for semaphores used as mutexes
-		if ($line =~ /^.\s*(DECLARE_MUTEX|init_MUTEX)\s*\(/) {
-			WARN("mutexes are preferred for single holder semaphores\n" . $herecurr);
-		}
-# check for semaphores used as mutexes
-		if ($line =~ /^.\s*init_MUTEX_LOCKED\s*\(/) {
+# check for semaphores initialized locked
+		if ($line =~ /^.\s*sema_init.+,\W?0\W?\)/) {
 			WARN("consider using a completion\n" . $herecurr);
 
 		}
diff --git a/scripts/kconfig/symbol.c b/scripts/kconfig/symbol.c
index c0efe10..af6e9f3 100644
--- a/scripts/kconfig/symbol.c
+++ b/scripts/kconfig/symbol.c
@@ -875,7 +875,7 @@
 			symval = sym_get_string_value(sym);
 		}
 
-		newlen = strlen(res) + strlen(symval) + strlen(src);
+		newlen = strlen(res) + strlen(symval) + strlen(src) + 1;
 		if (newlen > reslen) {
 			reslen = newlen;
 			res = realloc(res, reslen);
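
[Note: the symbol.c hunk above adds one byte to the computed length. strlen() does not count the terminating NUL, so a buffer sized purely from strlen() sums is one byte short for the string written into it. A standalone sketch of the corrected sizing; join is a hypothetical helper, not the kconfig code.]

#include <stdlib.h>
#include <string.h>

/* Hypothetical helper: the buffer must hold both strings plus the
 * terminating NUL, which strlen() does not count. */
char *join(const char *a, const char *b)
{
	size_t len = strlen(a) + strlen(b) + 1;	/* +1 for '\0' */
	char *res = malloc(len);

	if (!res)
		return NULL;
	strcpy(res, a);
	strcat(res, b);
	return res;
}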
diff --git a/sound/pci/asihpi/hpi6000.c b/sound/pci/asihpi/hpi6000.c
index f7e374e..1b9bf93 100644
--- a/sound/pci/asihpi/hpi6000.c
+++ b/sound/pci/asihpi/hpi6000.c
@@ -625,6 +625,8 @@
 			control_cache_size, (struct hpi_control_cache_info *)
 			&phw->control_cache[0]
 			);
+		if (!phw->p_cache)
+			pao->has_control_cache = 0;
 	} else
 		pao->has_control_cache = 0;
 
diff --git a/sound/pci/asihpi/hpi6205.c b/sound/pci/asihpi/hpi6205.c
index 22c5fc6..2672f65 100644
--- a/sound/pci/asihpi/hpi6205.c
+++ b/sound/pci/asihpi/hpi6205.c
@@ -644,6 +644,8 @@
 				interface->control_cache.size_in_bytes,
 				(struct hpi_control_cache_info *)
 				p_control_cache_virtual);
+			if (!phw->p_cache)
+				err = HPI_ERROR_MEMORY_ALLOC;
 		}
 		if (!err) {
 			err = hpios_locked_mem_get_phys_addr(&phw->
diff --git a/sound/pci/asihpi/hpicmn.c b/sound/pci/asihpi/hpicmn.c
index dda4f1c..d67f4d3 100644
--- a/sound/pci/asihpi/hpicmn.c
+++ b/sound/pci/asihpi/hpicmn.c
@@ -571,14 +571,20 @@
 {
 	struct hpi_control_cache *p_cache =
 		kmalloc(sizeof(*p_cache), GFP_KERNEL);
+	if (!p_cache)
+		return NULL;
+	p_cache->p_info =
+		kmalloc(sizeof(*p_cache->p_info) * number_of_controls,
+			GFP_KERNEL);
+	if (!p_cache->p_info) {
+		kfree(p_cache);
+		return NULL;
+	}
 	p_cache->cache_size_in_bytes = size_in_bytes;
 	p_cache->control_count = number_of_controls;
 	p_cache->p_cache =
 		(struct hpi_control_cache_single *)pDSP_control_buffer;
 	p_cache->init = 0;
-	p_cache->p_info =
-		kmalloc(sizeof(*p_cache->p_info) * p_cache->control_count,
-		GFP_KERNEL);
 	return p_cache;
 }
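
[Note: the hpicmn.c hunk above moves the second allocation to the top and adds the missing checks, so a failed kmalloc() never leads to writes through an unallocated structure, and a partial failure unwinds the first allocation. A minimal sketch of that two-allocation pattern; cache and cache_alloc are hypothetical names, not the hpi code.]

#include <linux/slab.h>

struct cache {
	unsigned int count;
	int *info;
};

/* Hypothetical constructor: check each allocation before it is used,
 * and free the first one if the second fails. */
static struct cache *cache_alloc(unsigned int count)
{
	struct cache *c = kmalloc(sizeof(*c), GFP_KERNEL);

	if (!c)
		return NULL;

	c->info = kmalloc(sizeof(*c->info) * count, GFP_KERNEL);
	if (!c->info) {
		kfree(c);
		return NULL;
	}

	c->count = count;
	return c;
}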
 
diff --git a/sound/pci/cs46xx/dsp_spos.c b/sound/pci/cs46xx/dsp_spos.c
index 3e5ca8f..e377287 100644
--- a/sound/pci/cs46xx/dsp_spos.c
+++ b/sound/pci/cs46xx/dsp_spos.c
@@ -225,39 +225,25 @@
 {
 	struct dsp_spos_instance * ins = kzalloc(sizeof(struct dsp_spos_instance), GFP_KERNEL);
 
-	if (ins == NULL) 
+	if (ins == NULL)
 		return NULL;
 
 	/* better to use vmalloc for this big table */
-	ins->symbol_table.nsymbols = 0;
 	ins->symbol_table.symbols = vmalloc(sizeof(struct dsp_symbol_entry) *
 					    DSP_MAX_SYMBOLS);
-	ins->symbol_table.highest_frag_index = 0;
-
-	if (ins->symbol_table.symbols == NULL) {
+	ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
+	ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
+	if (!ins->symbol_table.symbols || !ins->code.data || !ins->modules) {
 		cs46xx_dsp_spos_destroy(chip);
 		goto error;
 	}
-
+	ins->symbol_table.nsymbols = 0;
+	ins->symbol_table.highest_frag_index = 0;
 	ins->code.offset = 0;
 	ins->code.size = 0;
-	ins->code.data = kmalloc(DSP_CODE_BYTE_SIZE, GFP_KERNEL);
-
-	if (ins->code.data == NULL) {
-		cs46xx_dsp_spos_destroy(chip);
-		goto error;
-	}
-
 	ins->nscb = 0;
 	ins->ntask = 0;
-
 	ins->nmodules = 0;
-	ins->modules = kmalloc(sizeof(struct dsp_module_desc) * DSP_MAX_MODULES, GFP_KERNEL);
-
-	if (ins->modules == NULL) {
-		cs46xx_dsp_spos_destroy(chip);
-		goto error;
-	}
 
 	/* default SPDIF input sample rate
 	   to 48000 khz */
@@ -271,8 +257,8 @@
 
 	/* set left and right validity bits and
 	   default channel status */
-	ins->spdif_csuv_default = 
-		ins->spdif_csuv_stream =  
+	ins->spdif_csuv_default =
+		ins->spdif_csuv_stream =
 	 /* byte 0 */  ((unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF        & 0xff)) << 24) |
 	 /* byte 1 */  ((unsigned int)_wrap_all_bits( ((SNDRV_PCM_DEFAULT_CON_SPDIF >> 8) & 0xff)) << 16) |
 	 /* byte 3 */   (unsigned int)_wrap_all_bits(  (SNDRV_PCM_DEFAULT_CON_SPDIF >> 24) & 0xff) |
@@ -281,6 +267,9 @@
 	return ins;
 
 error:
+	kfree(ins->modules);
+	kfree(ins->code.data);
+	vfree(ins->symbol_table.symbols);
 	kfree(ins);
 	return NULL;
 }
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 460fb2e..18af38e 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -1166,6 +1166,7 @@
 
 static struct snd_pci_quirk cs420x_cfg_tbl[] = {
 	SND_PCI_QUIRK(0x10de, 0x0ac0, "MacBookPro 5,3", CS420X_MBP53),
+	SND_PCI_QUIRK(0x10de, 0x0d94, "MacBookAir 3,1(2)", CS420X_MBP55),
 	SND_PCI_QUIRK(0x10de, 0xcb79, "MacBookPro 5,5", CS420X_MBP55),
 	SND_PCI_QUIRK(0x10de, 0xcb89, "MacBookPro 7,1", CS420X_MBP55),
 	SND_PCI_QUIRK(0x8086, 0x7270, "IMac 27 Inch", CS420X_IMAC27),
diff --git a/sound/pci/lx6464es/lx6464es.c b/sound/pci/lx6464es/lx6464es.c
index ef9af3f..1bd7a54 100644
--- a/sound/pci/lx6464es/lx6464es.c
+++ b/sound/pci/lx6464es/lx6464es.c
@@ -425,7 +425,7 @@
 static void lx_trigger_start(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
 	struct snd_pcm_substream *substream = lx_stream->stream;
-	const int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 
 	int err;
 
@@ -473,7 +473,7 @@
 
 static void lx_trigger_stop(struct lx6464es *chip, struct lx_stream *lx_stream)
 {
-	const int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 	int err;
 
 	snd_printd(LXP "stopping: stopping stream\n");
diff --git a/sound/pci/lx6464es/lx6464es.h b/sound/pci/lx6464es/lx6464es.h
index 51afc04..aea621e 100644
--- a/sound/pci/lx6464es/lx6464es.h
+++ b/sound/pci/lx6464es/lx6464es.h
@@ -60,7 +60,7 @@
 	snd_pcm_uframes_t          frame_pos;
 	enum lx_stream_status      status; /* free, open, running, draining
 					    * pause */
-	int                        is_capture:1;
+	unsigned int               is_capture:1;
 };
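
[Note: the lx6464es hunks above change is_capture from a signed to an unsigned bitfield. A signed 1-bit bitfield can typically represent only 0 and -1, so storing 1 and later comparing against 1 misbehaves; the unsigned field restores plain 0/1 semantics. A small standalone demonstration with hypothetical struct names:]

#include <stdio.h>

struct flags_signed   { int          is_capture:1; };
struct flags_unsigned { unsigned int is_capture:1; };

int main(void)
{
	struct flags_signed   s;
	struct flags_unsigned u;
	int one = 1;

	s.is_capture = one;
	u.is_capture = one;

	/* On common ABIs this prints "signed: -1, unsigned: 1":
	 * the signed 1-bit field cannot represent the value 1. */
	printf("signed: %d, unsigned: %d\n", s.is_capture, u.is_capture);
	return 0;
}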
 
 
diff --git a/sound/pci/lx6464es/lx_core.c b/sound/pci/lx6464es/lx_core.c
index 3086b75..617f98b 100644
--- a/sound/pci/lx6464es/lx_core.c
+++ b/sound/pci/lx6464es/lx_core.c
@@ -1152,7 +1152,7 @@
 					   struct lx_stream *lx_stream)
 {
 	struct snd_pcm_substream *substream = lx_stream->stream;
-	int is_capture = lx_stream->is_capture;
+	const unsigned int is_capture = lx_stream->is_capture;
 	int err;
 	unsigned long flags;
 
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 94a9d06..3b5690d 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -25,8 +25,9 @@
 	select SND_SOC_CQ0093VC if MFD_DAVINCI_VOICECODEC
 	select SND_SOC_CS42L51 if I2C
 	select SND_SOC_CS4270 if I2C
+	select SND_SOC_CX20442
 	select SND_SOC_DA7210 if I2C
-	select SND_SOC_JZ4740 if SOC_JZ4740
+	select SND_SOC_JZ4740_CODEC if SOC_JZ4740
 	select SND_SOC_MAX98088 if I2C
 	select SND_SOC_MAX9877 if I2C
 	select SND_SOC_PCM3008
diff --git a/sound/soc/codecs/tlv320dac33.c b/sound/soc/codecs/tlv320dac33.c
index d251ff5..c5ab8c8 100644
--- a/sound/soc/codecs/tlv320dac33.c
+++ b/sound/soc/codecs/tlv320dac33.c
@@ -58,7 +58,7 @@
 	(1000000000 / ((rate * 1000) / samples))
 
 #define US_TO_SAMPLES(rate, us) \
-	(rate / (1000000 / us))
+	(rate / (1000000 / (us < 1000000 ? us : 1000000)))
 
 #define UTHR_FROM_PERIOD_SIZE(samples, playrate, burstrate) \
 	((samples * 5000) / ((burstrate * 5000) / (burstrate - playrate)))
@@ -200,7 +200,7 @@
 		      u8 *value)
 {
 	struct tlv320dac33_priv *dac33 = snd_soc_codec_get_drvdata(codec);
-	int val;
+	int val, ret = 0;
 
 	*value = reg & 0xff;
 
@@ -210,6 +210,7 @@
 		if (val < 0) {
 			dev_err(codec->dev, "Read failed (%d)\n", val);
 			value[0] = dac33_read_reg_cache(codec, reg);
+			ret = val;
 		} else {
 			value[0] = val;
 			dac33_write_reg_cache(codec, reg, val);
@@ -218,7 +219,7 @@
 		value[0] = dac33_read_reg_cache(codec, reg);
 	}
 
-	return 0;
+	return ret;
 }
 
 static int dac33_write(struct snd_soc_codec *codec, unsigned int reg,
@@ -329,13 +330,18 @@
 		    dac33_read_reg_cache(codec, DAC33_LINER_TO_RLO_VOL));
 }
 
-static inline void dac33_read_id(struct snd_soc_codec *codec)
+static inline int dac33_read_id(struct snd_soc_codec *codec)
 {
+	int i, ret = 0;
 	u8 reg;
 
-	dac33_read(codec, DAC33_DEVICE_ID_MSB, &reg);
-	dac33_read(codec, DAC33_DEVICE_ID_LSB, &reg);
-	dac33_read(codec, DAC33_DEVICE_REV_ID, &reg);
+	for (i = 0; i < 3; i++) {
+		ret = dac33_read(codec, DAC33_DEVICE_ID_MSB + i, &reg);
+		if (ret < 0)
+			break;
+	}
+
+	return ret;
 }
 
 static inline void dac33_soft_power(struct snd_soc_codec *codec, int power)
@@ -1076,6 +1082,9 @@
 		/* Number of samples under i2c latency */
 		dac33->alarm_threshold = US_TO_SAMPLES(rate,
 						dac33->mode1_latency);
+		nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
+				dac33->alarm_threshold;
+
 		if (dac33->auto_fifo_config) {
 			if (period_size <= dac33->alarm_threshold)
 				/*
@@ -1086,6 +1095,8 @@
 				       ((dac33->alarm_threshold / period_size) +
 				       (dac33->alarm_threshold % period_size ?
 				       1 : 0));
+			else if (period_size > nsample_limit)
+				dac33->nsample = nsample_limit;
 			else
 				dac33->nsample = period_size;
 		} else {
@@ -1097,8 +1108,7 @@
 			 */
 			dac33->nsample_max = substream->runtime->buffer_size -
 						period_size;
-			nsample_limit = DAC33_BUFFER_SIZE_SAMPLES -
-					dac33->alarm_threshold;
+
 			if (dac33->nsample_max > nsample_limit)
 				dac33->nsample_max = nsample_limit;
 
@@ -1414,9 +1424,15 @@
 		dev_err(codec->dev, "Failed to power up codec: %d\n", ret);
 		goto err_power;
 	}
-	dac33_read_id(codec);
+	ret = dac33_read_id(codec);
 	dac33_hard_power(codec, 0);
 
+	if (ret < 0) {
+		dev_err(codec->dev, "Failed to read chip ID: %d\n", ret);
+		ret = -ENODEV;
+		goto err_power;
+	}
+
 	/* Check if the IRQ number is valid and request it */
 	if (dac33->irq >= 0) {
 		ret = request_irq(dac33->irq, dac33_interrupt_handler,
diff --git a/sound/soc/codecs/tpa6130a2.c b/sound/soc/codecs/tpa6130a2.c
index 329acc1..ee4fb20 100644
--- a/sound/soc/codecs/tpa6130a2.c
+++ b/sound/soc/codecs/tpa6130a2.c
@@ -119,13 +119,13 @@
 {
 	struct	tpa6130a2_data *data;
 	u8	val;
-	int	ret;
+	int	ret = 0;
 
 	BUG_ON(tpa6130a2_client == NULL);
 	data = i2c_get_clientdata(tpa6130a2_client);
 
 	mutex_lock(&data->mutex);
-	if (power) {
+	if (power && !data->power_state) {
 		/* Power on */
 		if (data->power_gpio >= 0)
 			gpio_set_value(data->power_gpio, 1);
@@ -153,7 +153,7 @@
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val &= ~TPA6130A2_SWS;
 		tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val);
-	} else {
+	} else if (!power && data->power_state) {
 		/* set SWS */
 		val = tpa6130a2_read(TPA6130A2_REG_CONTROL);
 		val |= TPA6130A2_SWS;
diff --git a/sound/soc/codecs/wm8900.c b/sound/soc/codecs/wm8900.c
index b4f1172..aca4b1e 100644
--- a/sound/soc/codecs/wm8900.c
+++ b/sound/soc/codecs/wm8900.c
@@ -186,7 +186,6 @@
 {
 	switch (reg) {
 	case WM8900_REG_ID:
-	case WM8900_REG_POWER1:
 		return 1;
 	default:
 		return 0;
@@ -1200,11 +1199,6 @@
 		return -ENODEV;
 	}
 
-	/* Read back from the chip */
-	reg = snd_soc_read(codec, WM8900_REG_POWER1);
-	reg = (reg >> 12) & 0xf;
-	dev_info(codec->dev, "WM8900 revision %d\n", reg);
-
 	wm8900_reset(codec);
 
 	/* Turn the chip on */
diff --git a/sound/soc/codecs/wm_hubs.c b/sound/soc/codecs/wm_hubs.c
index 2cb8153..19ca782 100644
--- a/sound/soc/codecs/wm_hubs.c
+++ b/sound/soc/codecs/wm_hubs.c
@@ -123,7 +123,7 @@
 			reg_r = reg & WM8993_DCS_DAC_WR_VAL_0_MASK;
 			break;
 		default:
-			WARN(1, "Unknown DCS readback method");
+			WARN(1, "Unknown DCS readback method\n");
 			break;
 		}
 
diff --git a/sound/soc/pxa/tosa.c b/sound/soc/pxa/tosa.c
index a3bfb2e..73d0edd 100644
--- a/sound/soc/pxa/tosa.c
+++ b/sound/soc/pxa/tosa.c
@@ -79,7 +79,7 @@
 static int tosa_startup(struct snd_pcm_substream *substream)
 {
 	struct snd_soc_pcm_runtime *rtd = substream->private_data;
-	struct snd_soc_codec *codec = rtd->card->codec;
+	struct snd_soc_codec *codec = rtd->codec;
 
 	/* check the jack status at stream startup */
 	tosa_ext_control(codec);
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 1c8f3f5..614a8b3 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -165,8 +165,11 @@
 {
 	struct snd_soc_pcm_runtime *rtd =
 			container_of(dev, struct snd_soc_pcm_runtime, dev);
+	int ret;
 
-	strict_strtol(buf, 10, &rtd->pmdown_time);
+	ret = strict_strtol(buf, 10, &rtd->pmdown_time);
+	if (ret)
+		return ret;
 
 	return count;
 }
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index 7dae05d..782f741 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -60,7 +60,7 @@
 	{ USB_ID(0x041e, 0x3000), 0, 1, 2, 1,  18, 0x0013 }, /* Extigy       */
 	{ USB_ID(0x041e, 0x3020), 2, 1, 6, 6,  18, 0x0013 }, /* Audigy 2 NX  */
 	{ USB_ID(0x041e, 0x3040), 2, 2, 6, 6,  2,  0x6e91 }, /* Live! 24-bit */
-	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi */
+	{ USB_ID(0x041e, 0x3042), 0, 1, 1, 1,  1,  0x000d }, /* Usb X-Fi S51 */
 	{ USB_ID(0x041e, 0x3048), 2, 2, 6, 6,  2,  0x6e91 }, /* Toshiba SB0500 */
 };
 
@@ -183,7 +183,13 @@
 	if (value > 1)
 		return -EINVAL;
 	changed = value != mixer->audigy2nx_leds[index];
-	err = snd_usb_ctl_msg(mixer->chip->dev,
+	if (mixer->chip->usb_id == USB_ID(0x041e, 0x3042))
+		err = snd_usb_ctl_msg(mixer->chip->dev,
+			      usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
+			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+			      !value, 0, NULL, 0, 100);
+	else
+		err = snd_usb_ctl_msg(mixer->chip->dev,
 			      usb_sndctrlpipe(mixer->chip->dev, 0), 0x24,
 			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
 			      value, index + 2, NULL, 0, 100);
@@ -225,8 +231,12 @@
 	int i, err;
 
 	for (i = 0; i < ARRAY_SIZE(snd_audigy2nx_controls); ++i) {
+		/* USB X-Fi S51 doesn't have a CMSS LED */
+		if ((mixer->chip->usb_id == USB_ID(0x041e, 0x3042)) && i == 0)
+			continue;
 		if (i > 1 && /* Live24ext has 2 LEDs only */
 			(mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+			 mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
 			 mixer->chip->usb_id == USB_ID(0x041e, 0x3048)))
 			break; 
 		err = snd_ctl_add(mixer->chip->card,
@@ -365,6 +375,7 @@
 
 	if (mixer->chip->usb_id == USB_ID(0x041e, 0x3020) ||
 	    mixer->chip->usb_id == USB_ID(0x041e, 0x3040) ||
+	    mixer->chip->usb_id == USB_ID(0x041e, 0x3042) ||
 	    mixer->chip->usb_id == USB_ID(0x041e, 0x3048)) {
 		if ((err = snd_audigy2nx_controls_create(mixer)) < 0)
 			return err;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index cff3a3c..4132522 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -676,8 +676,10 @@
 	if (!needs_knot)
 		return 0;
 
-	subs->rate_list.count = count;
 	subs->rate_list.list = kmalloc(sizeof(int) * count, GFP_KERNEL);
+	if (!subs->rate_list.list)
+		return -ENOMEM;
+	subs->rate_list.count = count;
 	subs->rate_list.mask = 0;
 	count = 0;
 	list_for_each_entry(fp, &subs->fmt_list, list) {
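
[Note: the pcm.c hunk above checks the kmalloc() result before recording the count, so an allocation failure returns -ENOMEM instead of leaving a non-zero count alongside a NULL list. A minimal kernel-style sketch of that allocate-then-publish order; rate_list and rate_list_alloc are hypothetical names, not the usb-audio code.]

#include <linux/errno.h>
#include <linux/slab.h>

struct rate_list {
	unsigned int count;
	int *list;
};

/* Hypothetical setup: publish the count only after the allocation
 * has succeeded, so an error return leaves the structure untouched. */
static int rate_list_alloc(struct rate_list *rl, unsigned int count)
{
	rl->list = kmalloc(sizeof(int) * count, GFP_KERNEL);
	if (!rl->list)
		return -ENOMEM;

	rl->count = count;
	return 0;
}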
diff --git a/usr/initramfs_data.S b/usr/initramfs_data.S
index b9efed5..792a750 100644
--- a/usr/initramfs_data.S
+++ b/usr/initramfs_data.S
@@ -30,8 +30,8 @@
 .section .init.ramfs.info,"a"
 .globl __initramfs_size
 __initramfs_size:
-#ifdef CONFIG_32BIT
-	.long __irf_end - __irf_start
-#else
+#ifdef CONFIG_64BIT
 	.quad __irf_end - __irf_start
+#else
+	.long __irf_end - __irf_start
 #endif