Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6

Conflicts:
	drivers/net/bonding/bond_main.c
	drivers/net/via-velocity.c
	drivers/net/wireless/iwlwifi/iwl-agn.c
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 2ff2b6ab..cbe6f39 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -998,6 +998,7 @@
 	}
 
 	if (acpi_video_backlight_support()) {
+		struct backlight_properties props;
 		int result;
 		static int count = 0;
 		char *name;
@@ -1010,12 +1011,14 @@
 			return;
 
 		sprintf(name, "acpi_video%d", count++);
-		device->backlight = backlight_device_register(name,
-			NULL, device, &acpi_backlight_ops);
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = device->brightness->count - 3;
+		device->backlight = backlight_device_register(name, NULL, device,
+							      &acpi_backlight_ops,
+							      &props);
 		kfree(name);
 		if (IS_ERR(device->backlight))
 			return;
-		device->backlight->props.max_brightness = device->brightness->count-3;
 
 		result = sysfs_create_link(&device->backlight->dev.kobj,
 					   &device->dev->dev.kobj, "device");
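
The hunk above reflects an API change repeated throughout this merge: backlight_device_register() now takes a struct backlight_properties argument, so max_brightness is fixed at registration instead of being poked into bd->props afterwards. A minimal sketch of the new calling convention (the device name, parent and brightness range below are illustrative, not from the patch):

	#include <linux/backlight.h>
	#include <linux/string.h>

	static struct backlight_device *
	example_bl_register(struct device *parent, void *drvdata,
			    const struct backlight_ops *ops)
	{
		struct backlight_properties props;

		/* Zero the struct so fields added by later kernels default safely. */
		memset(&props, 0, sizeof(props));
		props.max_brightness = 100;	/* illustrative 0..100 range */

		/* The properties pointer is the new fifth argument. */
		return backlight_device_register("example_bl", parent, drvdata,
						 ops, &props);
	}
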
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 6bd930b..fdc9bcb 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -641,6 +641,21 @@
 	{ PCI_VDEVICE(NVIDIA, 0x055a), board_ahci_yesncq },	/* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x055b), board_ahci_yesncq },	/* MCP67 */
 	{ PCI_VDEVICE(NVIDIA, 0x0580), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0581), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0582), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0583), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0584), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0585), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0586), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0587), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0588), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x0589), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058a), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058b), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058c), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058d), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058e), board_ahci_yesncq },	/* Linux ID */
+	{ PCI_VDEVICE(NVIDIA, 0x058f), board_ahci_yesncq },	/* Linux ID */
 	{ PCI_VDEVICE(NVIDIA, 0x07f0), board_ahci_yesncq },	/* MCP73 */
 	{ PCI_VDEVICE(NVIDIA, 0x07f1), board_ahci_yesncq },	/* MCP73 */
 	{ PCI_VDEVICE(NVIDIA, 0x07f2), board_ahci_yesncq },	/* MCP73 */
@@ -2263,7 +2278,7 @@
 	struct ahci_port_priv *pp = ap->private_data;
 	struct ahci_host_priv *hpriv = ap->host->private_data;
 	int resetting = !!(ap->pflags & ATA_PFLAG_RESETTING);
-	u32 status, qc_active;
+	u32 status, qc_active = 0;
 	int rc;
 
 	status = readl(port_mmio + PORT_IRQ_STAT);
@@ -2321,11 +2336,22 @@
 		}
 	}
 
-	/* pp->active_link is valid iff any command is in flight */
-	if (ap->qc_active && pp->active_link->sactive)
-		qc_active = readl(port_mmio + PORT_SCR_ACT);
-	else
-		qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+	/* pp->active_link is not reliable once FBS is enabled, both
+	 * PORT_SCR_ACT and PORT_CMD_ISSUE should be checked because
+	 * NCQ and non-NCQ commands may be in flight at the same time.
+	 */
+	if (pp->fbs_enabled) {
+		if (ap->qc_active) {
+			qc_active = readl(port_mmio + PORT_SCR_ACT);
+			qc_active |= readl(port_mmio + PORT_CMD_ISSUE);
+		}
+	} else {
+		/* pp->active_link is valid iff any command is in flight */
+		if (ap->qc_active && pp->active_link->sactive)
+			qc_active = readl(port_mmio + PORT_SCR_ACT);
+		else
+			qc_active = readl(port_mmio + PORT_CMD_ISSUE);
+	}
 
 	rc = ata_qc_complete_multiple(ap, qc_active);
 
@@ -3022,6 +3048,14 @@
 		 * On HP dv[4-6] and HDX18 with earlier BIOSen, link
 		 * to the harddisk doesn't become online after
 		 * resuming from STR.  Warn and fail suspend.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=12276
+		 *
+		 * Use dates instead of versions to match as HP is
+		 * apparently recycling both product and version
+		 * strings.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=15462
 		 */
 		{
 			.ident = "dv4",
@@ -3030,7 +3064,7 @@
 				DMI_MATCH(DMI_PRODUCT_NAME,
 					  "HP Pavilion dv4 Notebook PC"),
 			},
-			.driver_data = "F.30", /* cutoff BIOS version */
+			.driver_data = "20090105",	/* F.30 */
 		},
 		{
 			.ident = "dv5",
@@ -3039,7 +3073,7 @@
 				DMI_MATCH(DMI_PRODUCT_NAME,
 					  "HP Pavilion dv5 Notebook PC"),
 			},
-			.driver_data = "F.16", /* cutoff BIOS version */
+			.driver_data = "20090506",	/* F.16 */
 		},
 		{
 			.ident = "dv6",
@@ -3048,7 +3082,7 @@
 				DMI_MATCH(DMI_PRODUCT_NAME,
 					  "HP Pavilion dv6 Notebook PC"),
 			},
-			.driver_data = "F.21",	/* cutoff BIOS version */
+			.driver_data = "20090423",	/* F.21 */
 		},
 		{
 			.ident = "HDX18",
@@ -3057,7 +3091,7 @@
 				DMI_MATCH(DMI_PRODUCT_NAME,
 					  "HP HDX18 Notebook PC"),
 			},
-			.driver_data = "F.23",	/* cutoff BIOS version */
+			.driver_data = "20090430",	/* F.23 */
 		},
 		/*
 		 * Acer eMachines G725 has the same problem.  BIOS
@@ -3065,6 +3099,8 @@
 		 * work.  In between, there are V1.06, V2.06 and V3.03
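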
 		 * that we don't have much idea about.  For now,
 		 * blacklist anything older than V3.04.
+		 *
+		 * http://bugzilla.kernel.org/show_bug.cgi?id=15104
 		 */
 		{
 			.ident = "G725",
@@ -3072,19 +3108,21 @@
 				DMI_MATCH(DMI_SYS_VENDOR, "eMachines"),
 				DMI_MATCH(DMI_PRODUCT_NAME, "eMachines G725"),
 			},
-			.driver_data = "V3.04",	/* cutoff BIOS version */
+			.driver_data = "20091216",	/* V3.04 */
 		},
 		{ }	/* terminate list */
 	};
 	const struct dmi_system_id *dmi = dmi_first_match(sysids);
-	const char *ver;
+	int year, month, date;
+	char buf[9];
 
 	if (!dmi || pdev->bus->number || pdev->devfn != PCI_DEVFN(0x1f, 2))
 		return false;
 
-	ver = dmi_get_system_info(DMI_BIOS_VERSION);
+	dmi_get_date(DMI_BIOS_DATE, &year, &month, &date);
+	snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, date);
 
-	return !ver || strcmp(ver, dmi->driver_data) < 0;
+	return strcmp(buf, dmi->driver_data) < 0;
 }
 
 static bool ahci_broken_online(struct pci_dev *pdev)
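
The suspend blacklist above now keys off the BIOS date rather than the version string, since HP reuses version strings across models; a zero-padded "YYYYMMDD" string compares chronologically under plain strcmp(). A standalone sketch of that comparison (inputs illustrative):

	#include <stdio.h>
	#include <string.h>

	/* Zero-padded "YYYYMMDD" strings sort chronologically, so a single
	 * strcmp() implements "is this BIOS older than the cutoff?".
	 */
	static int bios_older_than(int year, int month, int day,
				   const char *cutoff)
	{
		char buf[9];

		snprintf(buf, sizeof(buf), "%04d%02d%02d", year, month, day);
		return strcmp(buf, cutoff) < 0;
	}

	int main(void)
	{
		/* "20090105" is the dv4 cutoff (BIOS F.30) from the hunk above. */
		printf("%d\n", bios_older_than(2008, 12, 1, "20090105")); /* 1 */
		printf("%d\n", bios_older_than(2009, 6, 1, "20090105"));  /* 0 */
		return 0;
	}
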
diff --git a/drivers/base/class.c b/drivers/base/class.c
index 0147f47..9c6a0d6 100644
--- a/drivers/base/class.c
+++ b/drivers/base/class.c
@@ -219,6 +219,8 @@
  * This is used to create a struct class pointer that can then be used
  * in calls to device_create().
  *
+ * Returns &struct class pointer on success, or ERR_PTR() on error.
+ *
  * Note, the pointer created here is to be destroyed when finished by
  * making a call to class_destroy().
  */
diff --git a/drivers/base/core.c b/drivers/base/core.c
index ef55df3..b56a0ba 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -1345,6 +1345,8 @@
  * 'module' symlink which points to the @owner directory
  * in sysfs.
  *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
  * Note: You probably want to use root_device_register().
  */
 struct device *__root_device_register(const char *name, struct module *owner)
@@ -1432,6 +1434,8 @@
  * Any further sysfs files that might be required can be created using this
  * pointer.
  *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
  * Note: the struct class passed to this function must have previously
  * been created with a call to class_create().
  */
@@ -1492,6 +1496,8 @@
  * Any further sysfs files that might be required can be created using this
  * pointer.
  *
+ * Returns &struct device pointer on success, or ERR_PTR() on error.
+ *
  * Note: the struct class passed to this function must have previously
  * been created with a call to class_create().
  */
diff --git a/drivers/base/cpu.c b/drivers/base/cpu.c
index 7036e8e..b5242e1 100644
--- a/drivers/base/cpu.c
+++ b/drivers/base/cpu.c
@@ -79,24 +79,24 @@
 }
 
 #ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
-static ssize_t cpu_probe_store(struct sys_device *dev,
-				struct sysdev_attribute *attr,
-				const char *buf,
+static ssize_t cpu_probe_store(struct sysdev_class *class,
+			       struct sysdev_class_attribute *attr,
+			       const char *buf,
 			       size_t count)
 {
 	return arch_cpu_probe(buf, count);
 }
 
-static ssize_t cpu_release_store(struct sys_device *dev,
-				struct sysdev_attribute *attr,
-				const char *buf,
+static ssize_t cpu_release_store(struct sysdev_class *class,
+				 struct sysdev_class_attribute *attr,
+				 const char *buf,
 				 size_t count)
 {
 	return arch_cpu_release(buf, count);
 }
 
-static SYSDEV_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
-static SYSDEV_ATTR(release, S_IWUSR, NULL, cpu_release_store);
+static SYSDEV_CLASS_ATTR(probe, S_IWUSR, NULL, cpu_probe_store);
+static SYSDEV_CLASS_ATTR(release, S_IWUSR, NULL, cpu_release_store);
 #endif /* CONFIG_ARCH_CPU_PROBE_RELEASE */
 
 #else /* ... !CONFIG_HOTPLUG_CPU */
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index d0dc26a..18518ba 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -78,6 +78,7 @@
 /**
  * firmware_timeout_store - set number of seconds to wait for firmware
  * @class: device class pointer
+ * @attr: device attribute pointer
  * @buf: buffer to scan for timeout value
  * @count: number of bytes in @buf
  *
@@ -442,6 +443,7 @@
 	fw_priv = dev_get_drvdata(f_dev);
 
 	fw_priv->fw = fw;
+	sysfs_bin_attr_init(&fw_priv->attr_data);
 	retval = sysfs_create_bin_file(&f_dev->kobj, &fw_priv->attr_data);
 	if (retval) {
 		dev_err(device, "%s: sysfs_create_bin_file failed\n", __func__);
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 2f86915..db0848e 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -429,12 +429,16 @@
  * differentiation between which *physical* devices each
  * section belongs to...
  */
+int __weak arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	return 0;
+}
 
 static int add_memory_block(int nid, struct mem_section *section,
-			unsigned long state, int phys_device,
-			enum mem_add_context context)
+			unsigned long state, enum mem_add_context context)
 {
 	struct memory_block *mem = kzalloc(sizeof(*mem), GFP_KERNEL);
+	unsigned long start_pfn;
 	int ret = 0;
 
 	if (!mem)
@@ -443,7 +447,8 @@
 	mem->phys_index = __section_nr(section);
 	mem->state = state;
 	mutex_init(&mem->state_mutex);
-	mem->phys_device = phys_device;
+	start_pfn = section_nr_to_pfn(mem->phys_index);
+	mem->phys_device = arch_get_memory_phys_device(start_pfn);
 
 	ret = register_memory(mem, section);
 	if (!ret)
@@ -515,7 +520,7 @@
  */
 int register_new_memory(int nid, struct mem_section *section)
 {
-	return add_memory_block(nid, section, MEM_OFFLINE, 0, HOTPLUG);
+	return add_memory_block(nid, section, MEM_OFFLINE, HOTPLUG);
 }
 
 int unregister_memory_section(struct mem_section *section)
@@ -548,7 +553,7 @@
 		if (!present_section_nr(i))
 			continue;
 		err = add_memory_block(0, __nr_to_section(i), MEM_ONLINE,
-					0, BOOT);
+				       BOOT);
 		if (!ret)
 			ret = err;
 	}
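
arch_get_memory_phys_device() is declared __weak above, so the generic stub returns 0 unless an architecture links in a strong definition of the same symbol. A userspace sketch of that linkage rule (function name illustrative):

	#include <stdio.h>

	/* Weak default: used only if no other object file in the link
	 * provides a strong definition of the same symbol, which is how
	 * arch code overrides the generic fallback above.
	 */
	__attribute__((weak)) int get_phys_device(unsigned long start_pfn)
	{
		return 0;
	}

	int main(void)
	{
		printf("%d\n", get_phys_device(1UL << 21));	/* 0 here */
		return 0;
	}
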
diff --git a/drivers/base/node.c b/drivers/base/node.c
index ad43185..93b3ac6 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -165,8 +165,11 @@
 	int len = 0;
 	int i;
 
-	/* buf currently PAGE_SIZE, need ~4 chars per node */
-	BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2);
+	/*
+	 * buf is currently PAGE_SIZE in length and each node needs 4 chars
+	 * at the most (distance + space or newline).
+	 */
+	BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE);
 
 	for_each_online_node(i)
 		len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i));
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 1ba9d617..4b4b565 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -362,6 +362,8 @@
  * enumeration tasks, they don't fully conform to the Linux driver model.
  * In particular, when such drivers are built as modules, they can't be
  * "hotplugged".
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
 struct platform_device *platform_device_register_simple(const char *name,
 							int id,
@@ -408,6 +410,8 @@
  * allocated for the device allows drivers using such devices to be
  * unloaded without waiting for the last reference to the device to be
  * dropped.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
 struct platform_device *platform_device_register_data(
 		struct device *parent,
@@ -559,6 +563,8 @@
  *
  * Use this in legacy-style modules that probe hardware directly and
  * register a single platform device and corresponding platform driver.
+ *
+ * Returns &struct platform_device pointer on success, or ERR_PTR() on error.
  */
 struct platform_device * __init_or_module platform_create_bundle(
 			struct platform_driver *driver,
@@ -1052,9 +1058,11 @@
 static __initdata LIST_HEAD(early_platform_device_list);
 
 /**
- * early_platform_driver_register
+ * early_platform_driver_register - register early platform driver
  * @epdrv: early_platform driver structure
  * @buf: string passed from early_param()
+ *
+ * Helper function for early_platform_init() / early_platform_init_buffer()
  */
 int __init early_platform_driver_register(struct early_platform_driver *epdrv,
 					  char *buf)
@@ -1106,9 +1114,12 @@
 }
 
 /**
- * early_platform_add_devices - add a numbers of early platform devices
+ * early_platform_add_devices - adds a number of early platform devices
  * @devs: array of early platform devices to add
  * @num: number of early platform devices in array
+ *
+ * Used by early architecture code to register early platform devices and
+ * their platform data.
  */
 void __init early_platform_add_devices(struct platform_device **devs, int num)
 {
@@ -1128,8 +1139,12 @@
 }
 
 /**
- * early_platform_driver_register_all
+ * early_platform_driver_register_all - register early platform drivers
  * @class_str: string to identify early platform driver class
+ *
+ * Used by architecture code to register all early platform drivers
+ * for a certain class. If omitted then only early platform drivers
+ * with matching kernel command line class parameters will be registered.
  */
 void __init early_platform_driver_register_all(char *class_str)
 {
@@ -1151,7 +1166,7 @@
 }
 
 /**
- * early_platform_match
+ * early_platform_match - find early platform device matching driver
  * @epdrv: early platform driver structure
  * @id: id to match against
  */
@@ -1169,7 +1184,7 @@
 }
 
 /**
- * early_platform_left
+ * early_platform_left - check if early platform driver has matching devices
  * @epdrv: early platform driver structure
  * @id: return true if id or above exists
  */
@@ -1187,7 +1202,7 @@
 }
 
 /**
- * early_platform_driver_probe_id
+ * early_platform_driver_probe_id - probe drivers matching class_str and id
  * @class_str: string to identify early platform driver class
  * @id: id to match against
  * @nr_probe: number of platform devices to successfully probe before exiting
@@ -1257,10 +1272,14 @@
 }
 
 /**
- * early_platform_driver_probe
+ * early_platform_driver_probe - probe a class of registered drivers
  * @class_str: string to identify early platform driver class
  * @nr_probe: number of platform devices to successfully probe before exiting
  * @user_only: only probe user specified early platform devices
+ *
+ * Used by architecture code to probe registered early platform drivers
+ * within a certain class. For probe to happen a registered early platform
+ * device matching a registered early platform driver is needed.
  */
 int __init early_platform_driver_probe(char *class_str,
 				       int nr_probe,
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index e481c59..9c5eea3 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -215,9 +215,7 @@
 	else
 		v &= ~0xffff;
 
-	for (irq = find_first_bit(&v, HPET_MAX_IRQ); irq < HPET_MAX_IRQ;
-		irq = find_next_bit(&v, HPET_MAX_IRQ, 1 + irq)) {
-
+	for_each_set_bit(irq, &v, HPET_MAX_IRQ) {
 		if (irq >= nr_irqs) {
 			irq = HPET_MAX_IRQ;
 			break;
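
for_each_set_bit() is a drop-in replacement for the open-coded find_first_bit()/find_next_bit() loop it deletes; the macro expands to the same pair of calls. A short usage sketch (mask and printing illustrative):

	#include <linux/bitops.h>
	#include <linux/kernel.h>

	static void walk_set_bits(unsigned long mask)
	{
		unsigned int bit;

		/*
		 * Expands to the find_first_bit()/find_next_bit() pair the
		 * old hpet loop spelled out by hand.
		 */
		for_each_set_bit(bit, &mask, BITS_PER_LONG)
			pr_info("bit %u is set\n", bit);
	}
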
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 465185f..ba55bba 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -312,6 +312,7 @@
 	spin_lock_irqsave(&hp->lock, flags);
 	/* Check and then increment for fast path open. */
 	if (hp->count++ > 0) {
+		tty_kref_get(tty);
 		spin_unlock_irqrestore(&hp->lock, flags);
 		hvc_kick();
 		return 0;
@@ -319,7 +320,7 @@
 
 	tty->driver_data = hp;
 
-	hp->tty = tty;
+	hp->tty = tty_kref_get(tty);
 
 	spin_unlock_irqrestore(&hp->lock, flags);
 
@@ -336,6 +337,7 @@
 		spin_lock_irqsave(&hp->lock, flags);
 		hp->tty = NULL;
 		spin_unlock_irqrestore(&hp->lock, flags);
+		tty_kref_put(tty);
 		tty->driver_data = NULL;
 		kref_put(&hp->kref, destroy_hvc_struct);
 		printk(KERN_ERR "hvc_open: request_irq failed with rc %d.\n", rc);
@@ -363,13 +365,18 @@
 		return;
 
 	hp = tty->driver_data;
+
 	spin_lock_irqsave(&hp->lock, flags);
+	tty_kref_get(tty);
 
 	if (--hp->count == 0) {
 		/* We are done with the tty pointer now. */
 		hp->tty = NULL;
 		spin_unlock_irqrestore(&hp->lock, flags);
 
+		/* Put the ref obtained in hvc_open() */
+		tty_kref_put(tty);
+
 		if (hp->ops->notifier_del)
 			hp->ops->notifier_del(hp, hp->data);
 
@@ -389,6 +396,7 @@
 		spin_unlock_irqrestore(&hp->lock, flags);
 	}
 
+	tty_kref_put(tty);
 	kref_put(&hp->kref, destroy_hvc_struct);
 }
 
@@ -424,10 +432,11 @@
 	spin_unlock_irqrestore(&hp->lock, flags);
 
 	if (hp->ops->notifier_hangup)
-			hp->ops->notifier_hangup(hp, hp->data);
+		hp->ops->notifier_hangup(hp, hp->data);
 
 	while(temp_open_count) {
 		--temp_open_count;
+		tty_kref_put(tty);
 		kref_put(&hp->kref, destroy_hvc_struct);
 	}
 }
@@ -592,7 +601,7 @@
 	}
 
 	/* No tty attached, just skip */
-	tty = hp->tty;
+	tty = tty_kref_get(hp->tty);
 	if (tty == NULL)
 		goto bail;
 
@@ -672,6 +681,8 @@
 
 		tty_flip_buffer_push(tty);
 	}
+	if (tty)
+		tty_kref_put(tty);
 
 	return poll_mask;
 }
@@ -807,7 +818,7 @@
 	struct tty_struct *tty;
 
 	spin_lock_irqsave(&hp->lock, flags);
-	tty = hp->tty;
+	tty = tty_kref_get(hp->tty);
 
 	if (hp->index < MAX_NR_HVC_CONSOLES)
 		vtermnos[hp->index] = -1;
@@ -819,18 +830,18 @@
 	/*
 	 * We 'put' the instance that was grabbed when the kref instance
 	 * was initialized using kref_init().  Let the last holder of this
-	 * kref cause it to be removed, which will probably be the tty_hangup
+	 * kref cause it to be removed, which will probably be the tty_vhangup
 	 * below.
 	 */
 	kref_put(&hp->kref, destroy_hvc_struct);
 
 	/*
-	 * This function call will auto chain call hvc_hangup.  The tty should
-	 * always be valid at this time unless a simultaneous tty close already
-	 * cleaned up the hvc_struct.
+	 * This function call will auto chain call hvc_hangup.
 	 */
-	if (tty)
-		tty_hangup(tty);
+	if (tty) {
+		tty_vhangup(tty);
+		tty_kref_put(tty);
+	}
 	return 0;
 }
 EXPORT_SYMBOL_GPL(hvc_remove);
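
The hvc changes wrap every cached tty pointer in tty_kref_get()/tty_kref_put() so the tty cannot be freed while the driver still dereferences it. tty_kref_get() passes NULL through unchanged, which is why the poll path can take the reference under the lock before checking for NULL. A condensed sketch of the pattern (the port structure is illustrative):

	#include <linux/spinlock.h>
	#include <linux/tty.h>

	struct example_port {		/* stand-in for struct hvc_struct */
		spinlock_t lock;
		struct tty_struct *tty;
	};

	static void example_poll(struct example_port *hp)
	{
		struct tty_struct *tty;
		unsigned long flags;

		spin_lock_irqsave(&hp->lock, flags);
		/* NULL-safe: returns NULL if hp->tty is NULL. */
		tty = tty_kref_get(hp->tty);
		spin_unlock_irqrestore(&hp->lock, flags);

		if (!tty)
			return;

		/* ... tty stays valid here even during a concurrent close ... */

		tty_kref_put(tty);
	}
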
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index ec5e3f8..c6ad423 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -2272,42 +2272,52 @@
 	bmc->device_id_attr.attr.name = "device_id";
 	bmc->device_id_attr.attr.mode = S_IRUGO;
 	bmc->device_id_attr.show = device_id_show;
+	sysfs_attr_init(&bmc->device_id_attr.attr);
 
 	bmc->provides_dev_sdrs_attr.attr.name = "provides_device_sdrs";
 	bmc->provides_dev_sdrs_attr.attr.mode = S_IRUGO;
 	bmc->provides_dev_sdrs_attr.show = provides_dev_sdrs_show;
+	sysfs_attr_init(&bmc->provides_dev_sdrs_attr.attr);
 
 	bmc->revision_attr.attr.name = "revision";
 	bmc->revision_attr.attr.mode = S_IRUGO;
 	bmc->revision_attr.show = revision_show;
+	sysfs_attr_init(&bmc->revision_attr.attr);
 
 	bmc->firmware_rev_attr.attr.name = "firmware_revision";
 	bmc->firmware_rev_attr.attr.mode = S_IRUGO;
 	bmc->firmware_rev_attr.show = firmware_rev_show;
+	sysfs_attr_init(&bmc->firmware_rev_attr.attr);
 
 	bmc->version_attr.attr.name = "ipmi_version";
 	bmc->version_attr.attr.mode = S_IRUGO;
 	bmc->version_attr.show = ipmi_version_show;
+	sysfs_attr_init(&bmc->version_attr.attr);
 
 	bmc->add_dev_support_attr.attr.name = "additional_device_support";
 	bmc->add_dev_support_attr.attr.mode = S_IRUGO;
 	bmc->add_dev_support_attr.show = add_dev_support_show;
+	sysfs_attr_init(&bmc->add_dev_support_attr.attr);
 
 	bmc->manufacturer_id_attr.attr.name = "manufacturer_id";
 	bmc->manufacturer_id_attr.attr.mode = S_IRUGO;
 	bmc->manufacturer_id_attr.show = manufacturer_id_show;
+	sysfs_attr_init(&bmc->manufacturer_id_attr.attr);
 
 	bmc->product_id_attr.attr.name = "product_id";
 	bmc->product_id_attr.attr.mode = S_IRUGO;
 	bmc->product_id_attr.show = product_id_show;
+	sysfs_attr_init(&bmc->product_id_attr.attr);
 
 	bmc->guid_attr.attr.name = "guid";
 	bmc->guid_attr.attr.mode = S_IRUGO;
 	bmc->guid_attr.show = guid_show;
+	sysfs_attr_init(&bmc->guid_attr.attr);
 
 	bmc->aux_firmware_rev_attr.attr.name = "aux_firmware_revision";
 	bmc->aux_firmware_rev_attr.attr.mode = S_IRUGO;
 	bmc->aux_firmware_rev_attr.show = aux_firmware_rev_show;
+	sysfs_attr_init(&bmc->aux_firmware_rev_attr.attr);
 
 	err = device_create_file(&bmc->dev->dev,
 			   &bmc->device_id_attr);
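
sysfs_attr_init() initializes the lockdep class key that statically declared attributes get from their declaration macro; attributes assembled at runtime, like the BMC attributes above, need the explicit call before registration or lockdep complains. A minimal sketch of the pattern for one runtime-built attribute:

	#include <linux/device.h>
	#include <linux/sysfs.h>

	static int add_runtime_attr(struct device *dev,
				    struct device_attribute *attr,
				    const char *name,
				    ssize_t (*show)(struct device *,
						    struct device_attribute *,
						    char *))
	{
		attr->attr.name = name;
		attr->attr.mode = S_IRUGO;
		attr->show = show;
		/* Mandatory for dynamically allocated attributes (lockdep). */
		sysfs_attr_init(&attr->attr);

		return device_create_file(dev, attr);
	}
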
diff --git a/drivers/char/tty_buffer.c b/drivers/char/tty_buffer.c
index af8d977..7ee5216 100644
--- a/drivers/char/tty_buffer.c
+++ b/drivers/char/tty_buffer.c
@@ -248,7 +248,7 @@
 {
 	int copied = 0;
 	do {
-		int goal = min(size - copied, TTY_BUFFER_PAGE);
+		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
 		int space = tty_buffer_request_room(tty, goal);
 		struct tty_buffer *tb = tty->buf.tail;
 		/* If there is no space then tb may be NULL */
@@ -285,7 +285,7 @@
 {
 	int copied = 0;
 	do {
-		int goal = min(size - copied, TTY_BUFFER_PAGE);
+		int goal = min_t(size_t, size - copied, TTY_BUFFER_PAGE);
 		int space = tty_buffer_request_room(tty, goal);
 		struct tty_buffer *tb = tty->buf.tail;
 		/* If there is no space then tb may be NULL */
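
min_t() is needed above because size is a size_t while TTY_BUFFER_PAGE is a plain int; the kernel's min() rejects mismatched types at compile time, and min_t() names the common type explicitly. A userspace sketch of the idiom (min_t() re-created as a simplified stand-in):

	#include <stdio.h>

	/* Simplified stand-in for the kernel's min_t(): force both operands
	 * to one explicit type before comparing.
	 */
	#define min_t(type, x, y) ((type)(x) < (type)(y) ? (type)(x) : (type)(y))

	int main(void)
	{
		size_t size = 100, copied = 0;
		int page = 256;
		int goal = min_t(size_t, size - copied, page);

		printf("goal = %d\n", goal);	/* 100 */
		return 0;
	}
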
diff --git a/drivers/char/tty_port.c b/drivers/char/tty_port.c
index be492dd..a3bd1d0 100644
--- a/drivers/char/tty_port.c
+++ b/drivers/char/tty_port.c
@@ -119,7 +119,7 @@
 static void tty_port_shutdown(struct tty_port *port)
 {
 	mutex_lock(&port->mutex);
-	if (port->ops->shutdown &&
+	if (port->ops->shutdown && !port->console &&
 		test_and_clear_bit(ASYNCB_INITIALIZED, &port->flags))
 			port->ops->shutdown(port);
 	mutex_unlock(&port->mutex);
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index f404ccf..44288ce 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -681,6 +681,10 @@
 	struct virtio_device *vdev;
 	struct winsize ws;
 
+	/* The port could have been hot-unplugged */
+	if (!port)
+		return;
+
 	vdev = port->portdev->vdev;
 	if (virtio_has_feature(vdev, VIRTIO_CONSOLE_F_SIZE)) {
 		vdev->config->get(vdev,
@@ -947,11 +951,18 @@
 		 */
 		err = sysfs_create_group(&port->dev->kobj,
 					 &port_attribute_group);
-		if (err)
+		if (err) {
 			dev_err(port->dev,
 				"Error %d creating sysfs device attributes\n",
 				err);
-
+		} else {
+			/*
+			 * Generate a udev event so that appropriate
+			 * symlinks can be created based on udev
+			 * rules.
+			 */
+			kobject_uevent(&port->dev->kobj, KOBJ_CHANGE);
+		}
 		break;
 	case VIRTIO_CONSOLE_PORT_REMOVE:
 		/*
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 87778dc..6aa1028 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -888,7 +888,7 @@
 			ret = -EFAULT;
 			goto out;
 		}
-		if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS && tmp.mode != VT_PROCESS_AUTO) {
+		if (tmp.mode != VT_AUTO && tmp.mode != VT_PROCESS) {
 			ret = -EINVAL;
 			goto out;
 		}
@@ -1622,7 +1622,7 @@
 	 * telling it that it has acquired. Also check if it has died and
 	 * clean up (similar to logic employed in change_console())
 	 */
-	if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
+	if (vc->vt_mode.mode == VT_PROCESS) {
 		/*
 		 * Send the signal as privileged - kill_pid() will
 		 * tell us if the process has gone or something else
@@ -1682,7 +1682,7 @@
 	 * vt to auto control.
 	 */
 	vc = vc_cons[fg_console].d;
-	if (vc->vt_mode.mode == VT_PROCESS || vc->vt_mode.mode == VT_PROCESS_AUTO) {
+	if (vc->vt_mode.mode == VT_PROCESS) {
 		/*
 		 * Send the signal as privileged - kill_pid() will
 		 * tell us if the process has gone or something else
@@ -1693,28 +1693,27 @@
 		 */
 		vc->vt_newvt = new_vc->vc_num;
 		if (kill_pid(vc->vt_pid, vc->vt_mode.relsig, 1) == 0) {
-			if(vc->vt_mode.mode == VT_PROCESS)
-				/*
-				 * It worked. Mark the vt to switch to and
-				 * return. The process needs to send us a
-				 * VT_RELDISP ioctl to complete the switch.
-				 */
-				return;
-		} else {
 			/*
-			 * The controlling process has died, so we revert back to
-			 * normal operation. In this case, we'll also change back
-			 * to KD_TEXT mode. I'm not sure if this is strictly correct
-			 * but it saves the agony when the X server dies and the screen
-			 * remains blanked due to KD_GRAPHICS! It would be nice to do
-			 * this outside of VT_PROCESS but there is no single process
-			 * to account for and tracking tty count may be undesirable.
+			 * It worked. Mark the vt to switch to and
+			 * return. The process needs to send us a
+			 * VT_RELDISP ioctl to complete the switch.
 			 */
-			reset_vc(vc);
+			return;
 		}
 
 		/*
-		 * Fall through to normal (VT_AUTO and VT_PROCESS_AUTO) handling of the switch...
+		 * The controlling process has died, so we revert back to
+		 * normal operation. In this case, we'll also change back
+		 * to KD_TEXT mode. I'm not sure if this is strictly correct
+		 * but it saves the agony when the X server dies and the screen
+		 * remains blanked due to KD_GRAPHICS! It would be nice to do
+		 * this outside of VT_PROCESS but there is no single process
+		 * to account for and tracking tty count may be undesirable.
+		 */
+		reset_vc(vc);
+
+		/*
+		 * Fall through to normal (VT_AUTO) handling of the switch...
 		 */
 	}
 
diff --git a/drivers/edac/edac_mce_amd.c b/drivers/edac/edac_mce_amd.c
index 8fc91a0..f5b6d9f 100644
--- a/drivers/edac/edac_mce_amd.c
+++ b/drivers/edac/edac_mce_amd.c
@@ -316,7 +316,12 @@
 		if (regs->nbsh & K8_NBSH_ERR_CPU_VAL)
 			pr_cont(", core: %u\n", (u8)(regs->nbsh & 0xf));
 	} else {
-		pr_cont(", core: %d\n", fls((regs->nbsh & 0xf) - 1));
+		u8 assoc_cpus = regs->nbsh & 0xf;
+
+		if (assoc_cpus > 0)
+			pr_cont(", core: %d", fls(assoc_cpus) - 1);
+
+		pr_cont("\n");
 	}
 
 	pr_emerg("%s.\n", EXT_ERR_MSG(xec));
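
The guard matters because fls() returns the 1-based index of the highest set bit and 0 for an empty mask: the old fls((nbsh & 0xf) - 1) evaluated fls(-1) when no core was flagged and mapped multi-bit masks inconsistently, while fls(assoc_cpus) - 1 reports the highest flagged core. A standalone sketch (fls() emulated for userspace):

	#include <stdio.h>

	/* Userspace stand-in for the kernel's fls(): 1-based index of the
	 * highest set bit, 0 when no bit is set.
	 */
	static int fls_emul(unsigned int x)
	{
		int r = 0;

		while (x) {
			x >>= 1;
			r++;
		}
		return r;
	}

	int main(void)
	{
		unsigned int assoc_cpus;

		for (assoc_cpus = 1; assoc_cpus <= 8; assoc_cpus++)
			printf("mask %#x -> core %d\n",
			       assoc_cpus, fls_emul(assoc_cpus) - 1);
		return 0;
	}
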
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index 20564f8..406228f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -89,19 +89,21 @@
 
 static int nouveau_nv40_backlight_init(struct drm_device *dev)
 {
+	struct backlight_properties props;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct backlight_device *bd;
 
 	if (!(nv_rd32(dev, NV40_PMC_BACKLIGHT) & NV40_PMC_BACKLIGHT_MASK))
 		return 0;
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 31;
 	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
-				       &nv40_bl_ops);
+				       &nv40_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
 	dev_priv->backlight = bd;
-	bd->props.max_brightness = 31;
 	bd->props.brightness = nv40_get_intensity(bd);
 	backlight_update_status(bd);
 
@@ -110,19 +112,21 @@
 
 static int nouveau_nv50_backlight_init(struct drm_device *dev)
 {
+	struct backlight_properties props;
 	struct drm_nouveau_private *dev_priv = dev->dev_private;
 	struct backlight_device *bd;
 
 	if (!nv_rd32(dev, NV50_PDISPLAY_SOR_BACKLIGHT))
 		return 0;
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 1025;
 	bd = backlight_device_register("nv_backlight", &dev->pdev->dev, dev,
-				       &nv50_bl_ops);
+				       &nv50_bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
 	dev_priv->backlight = bd;
-	bd->props.max_brightness = 1025;
 	bd->props.brightness = nv50_get_intensity(bd);
 	backlight_update_status(bd);
 	return 0;
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 368fbb0..2e2aa75 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1357,6 +1357,7 @@
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb323) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb324) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651) },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TOPSEED, USB_DEVICE_ID_TOPSEED_CYBERLINK) },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_TWINHAN, USB_DEVICE_ID_TWINHAN_IR_REMOTE) },
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index cd4ece6..0c4e755 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -564,10 +564,10 @@
 	struct hid_debug_list *list;
 
 	list_for_each_entry(list, &hdev->debug_list, node) {
-		for (i = 0; i <= strlen(buf); i++)
-			list->hid_debug_buf[(list->tail + i) % (HID_DEBUG_BUFSIZE - 1)] =
+		for (i = 0; i < strlen(buf); i++)
+			list->hid_debug_buf[(list->tail + i) % HID_DEBUG_BUFSIZE] =
 				buf[i];
-		list->tail = (list->tail + i) % (HID_DEBUG_BUFSIZE - 1);
+		list->tail = (list->tail + i) % HID_DEBUG_BUFSIZE;
         }
 }
 EXPORT_SYMBOL_GPL(hid_debug_event);
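
Two off-by-ones are fixed above: the old loop bound i <= strlen(buf) copied the trailing NUL into the log, and wrapping with modulo HID_DEBUG_BUFSIZE - 1 skipped the last slot and let the tail index drift. A standalone sketch of the corrected ring arithmetic (buffer size illustrative):

	#include <stdio.h>
	#include <string.h>

	#define BUFSIZE 8	/* the driver uses HID_DEBUG_BUFSIZE */

	static char ring[BUFSIZE];
	static size_t tail;

	/* Wrap modulo the full buffer size and stop before the NUL. */
	static void ring_write(const char *buf)
	{
		size_t i, len = strlen(buf);

		for (i = 0; i < len; i++)
			ring[(tail + i) % BUFSIZE] = buf[i];
		tail = (tail + i) % BUFSIZE;
	}

	int main(void)
	{
		ring_write("abcdefghij");
		printf("tail = %zu\n", tail);	/* 10 % 8 == 2 */
		return 0;
	}
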
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 72c05f9..797e064 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -445,6 +445,7 @@
 
 #define USB_VENDOR_ID_UCLOGIC		0x5543
 #define USB_DEVICE_ID_UCLOGIC_TABLET_PF1209	0x0042
+#define USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U	0x0003
 
 #define USB_VENDOR_ID_VERNIER		0x08f7
 #define USB_DEVICE_ID_VERNIER_LABPRO	0x0001
diff --git a/drivers/hid/hid-magicmouse.c b/drivers/hid/hid-magicmouse.c
index 4a3a94f..c174b64 100644
--- a/drivers/hid/hid-magicmouse.c
+++ b/drivers/hid/hid-magicmouse.c
@@ -353,7 +353,7 @@
 		goto err_free;
 	}
 
-	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT);
+	ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT & ~HID_CONNECT_HIDINPUT);
 	if (ret) {
 		dev_err(&hdev->dev, "magicmouse hw start failed\n");
 		goto err_free;
@@ -409,8 +409,11 @@
 
 static void magicmouse_remove(struct hid_device *hdev)
 {
+	struct magicmouse_sc *msc = hid_get_drvdata(hdev);
+
 	hid_hw_stop(hdev);
-	kfree(hid_get_drvdata(hdev));
+	input_unregister_device(msc->input);
+	kfree(msc);
 }
 
 static const struct hid_device_id magic_mice[] = {
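
Because the driver now registers its own input device, it masks HID_CONNECT_HIDINPUT out of the connect flags so hid-core does not create a second one, and remove() must unregister that device before freeing the private data. A condensed sketch of the paired teardown (structure name illustrative):

	#include <linux/hid.h>
	#include <linux/input.h>
	#include <linux/slab.h>

	struct example_sc {			/* stand-in for magicmouse_sc */
		struct input_dev *input;
	};

	static void example_remove(struct hid_device *hdev)
	{
		struct example_sc *sc = hid_get_drvdata(hdev);

		hid_hw_stop(hdev);
		/* Undo probe()'s input_register_device(); freeing sc alone
		 * would leak the still-registered input device.
		 */
		input_unregister_device(sc->input);
		kfree(sc);
	}
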
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 3234c72..edcc0c4 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -140,6 +140,9 @@
 			nd->reading_mt = 1;
 			nd->first_contact_confidence = 0;
 			break;
+		case HID_DG_TIPSWITCH:
+			/* Prevent emission of touch until validated */
+			return 1;
 		case HID_DG_CONFIDENCE:
 			nd->confidence = value;
 			break;
@@ -259,6 +262,7 @@
 						BTN_TOOL_TRIPLETAP, 0);
 				input_report_key(input,
 						BTN_TOOL_QUADTAP, 0);
+				input_report_key(input, BTN_TOUCH, 0);
 			}
 			break;
 
@@ -308,13 +312,20 @@
 
 
 	list_for_each_entry(hidinput, &hdev->inputs, list) {
+		if (hidinput->report->maxfield < 1)
+			continue;
+
 		input = hidinput->input;
 		switch (hidinput->report->field[0]->application) {
 		case HID_DG_PEN:
 			input->name = "N-Trig Pen";
 			break;
 		case HID_DG_TOUCHSCREEN:
+			/* These keys are redundant for fingers; clear them
+			 * to prevent incorrect identification. */
 			__clear_bit(BTN_TOOL_PEN, input->keybit);
+			__clear_bit(BTN_TOOL_FINGER, input->keybit);
+			__clear_bit(BTN_0, input->keybit);
 			/*
 			 * A little something special to enable
 			 * two and three finger taps.
diff --git a/drivers/hid/hid-tmff.c b/drivers/hid/hid-tmff.c
index 167ea74..c32f32c 100644
--- a/drivers/hid/hid-tmff.c
+++ b/drivers/hid/hid-tmff.c
@@ -251,6 +251,8 @@
 		.driver_data = (unsigned long)ff_rumble },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb651),	/* FGT Rumble Force Wheel */
 		.driver_data = (unsigned long)ff_rumble },
+	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb653),	/* RGT Force Feedback CLUTCH Raging Wheel */
+		.driver_data = (unsigned long)ff_joystick },
 	{ HID_USB_DEVICE(USB_VENDOR_ID_THRUSTMASTER, 0xb654),	/* FGT Force Feedback Wheel */
 		.driver_data = (unsigned long)ff_joystick },
 	{ }
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 7844280..928943c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -63,6 +63,7 @@
 	{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
 	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
+	{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_WP4030U, HID_QUIRK_MULTI_INPUT },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_DUAL_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
 	{ USB_VENDOR_ID_WISEGROUP, USB_DEVICE_ID_QUAD_USB_JOYPAD, HID_QUIRK_NOGET | HID_QUIRK_MULTI_INPUT },
 
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 1558bb7..f901957a 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -461,6 +461,7 @@
 		element->attr.attr.mode  = S_IRUGO;
 		element->attr.show       = show;
 		element->index		 = i;
+		sysfs_attr_init(&element->attr.attr);
 
 		tab_attr[i] = &element->attr.attr;
 	}
diff --git a/drivers/infiniband/ulp/iser/iscsi_iser.c b/drivers/infiniband/ulp/iser/iscsi_iser.c
index 71237f8f..e78af36d 100644
--- a/drivers/infiniband/ulp/iser/iscsi_iser.c
+++ b/drivers/infiniband/ulp/iser/iscsi_iser.c
@@ -613,7 +613,7 @@
 	.cmd_per_lun            = ISER_DEF_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
-	.eh_target_reset_handler= iscsi_eh_target_reset,
+	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc		= iscsi_target_alloc,
 	.use_clustering         = DISABLE_CLUSTERING,
 	.proc_name              = "iscsi_iser",
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index e5deb15..8d1d63a 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -50,7 +50,7 @@
    handler.
 */
 
-static int avma1cs_config(struct pcmcia_device *link);
+static int avma1cs_config(struct pcmcia_device *link) __devinit;
 static void avma1cs_release(struct pcmcia_device *link);
 
 /*
@@ -59,7 +59,7 @@
    needed to manage one actual PCMCIA card.
 */
 
-static void avma1cs_detach(struct pcmcia_device *p_dev);
+static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit;
 
 
 /*
@@ -99,7 +99,7 @@
     
 ======================================================================*/
 
-static int avma1cs_probe(struct pcmcia_device *p_dev)
+static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
 {
     local_info_t *local;
 
@@ -140,7 +140,7 @@
 
 ======================================================================*/
 
-static void avma1cs_detach(struct pcmcia_device *link)
+static void __devexit avma1cs_detach(struct pcmcia_device *link)
 {
 	dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
 	avma1cs_release(link);
@@ -174,7 +174,7 @@
 }
 
 
-static int avma1cs_config(struct pcmcia_device *link)
+static int __devinit avma1cs_config(struct pcmcia_device *link)
 {
     local_info_t *dev;
     int i;
@@ -282,7 +282,7 @@
 		.name	= "avma1_cs",
 	},
 	.probe		= avma1cs_probe,
-	.remove		= avma1cs_detach,
+	.remove		= __devexit_p(avma1cs_detach),
 	.id_table	= avma1cs_ids,
 };
 
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9a30b1..c9f2279 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -76,7 +76,7 @@
    handler.
 */
 
-static int elsa_cs_config(struct pcmcia_device *link);
+static int elsa_cs_config(struct pcmcia_device *link) __devinit;
 static void elsa_cs_release(struct pcmcia_device *link);
 
 /*
@@ -85,7 +85,7 @@
    needed to manage one actual PCMCIA card.
 */
 
-static void elsa_cs_detach(struct pcmcia_device *p_dev);
+static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
 
 /*
    A driver needs to provide a dev_node_t structure for each device
@@ -121,7 +121,7 @@
 
 ======================================================================*/
 
-static int elsa_cs_probe(struct pcmcia_device *link)
+static int __devinit elsa_cs_probe(struct pcmcia_device *link)
 {
     local_info_t *local;
 
@@ -166,7 +166,7 @@
 
 ======================================================================*/
 
-static void elsa_cs_detach(struct pcmcia_device *link)
+static void __devexit elsa_cs_detach(struct pcmcia_device *link)
 {
 	local_info_t *info = link->priv;
 
@@ -210,7 +210,7 @@
 	return -ENODEV;
 }
 
-static int elsa_cs_config(struct pcmcia_device *link)
+static int __devinit elsa_cs_config(struct pcmcia_device *link)
 {
     local_info_t *dev;
     int i;
@@ -327,7 +327,7 @@
 		.name	= "elsa_cs",
 	},
 	.probe		= elsa_cs_probe,
-	.remove		= elsa_cs_detach,
+	.remove		= __devexit_p(elsa_cs_detach),
 	.id_table	= elsa_ids,
 	.suspend	= elsa_suspend,
 	.resume		= elsa_resume,
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 7836ec3..71b3dde 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -76,7 +76,7 @@
    event handler. 
 */
 
-static int sedlbauer_config(struct pcmcia_device *link);
+static int sedlbauer_config(struct pcmcia_device *link) __devinit;
 static void sedlbauer_release(struct pcmcia_device *link);
 
 /*
@@ -85,7 +85,7 @@
    needed to manage one actual PCMCIA card.
 */
 
-static void sedlbauer_detach(struct pcmcia_device *p_dev);
+static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
 
 /*
    You'll also need to prototype all the functions that will actually
@@ -129,7 +129,7 @@
     
 ======================================================================*/
 
-static int sedlbauer_probe(struct pcmcia_device *link)
+static int __devinit sedlbauer_probe(struct pcmcia_device *link)
 {
     local_info_t *local;
 
@@ -177,7 +177,7 @@
 
 ======================================================================*/
 
-static void sedlbauer_detach(struct pcmcia_device *link)
+static void __devexit sedlbauer_detach(struct pcmcia_device *link)
 {
 	dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
 
@@ -283,7 +283,7 @@
 
 
 
-static int sedlbauer_config(struct pcmcia_device *link)
+static int __devinit sedlbauer_config(struct pcmcia_device *link)
 {
     local_info_t *dev = link->priv;
     win_req_t *req;
@@ -441,7 +441,7 @@
 		.name	= "sedlbauer_cs",
 	},
 	.probe		= sedlbauer_probe,
-	.remove		= sedlbauer_detach,
+	.remove		= __devexit_p(sedlbauer_detach),
 	.id_table	= sedlbauer_ids,
 	.suspend	= sedlbauer_suspend,
 	.resume		= sedlbauer_resume,
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index b0c5976..d010a0d 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -57,7 +57,7 @@
    handler.
 */
 
-static int teles_cs_config(struct pcmcia_device *link);
+static int teles_cs_config(struct pcmcia_device *link) __devinit;
 static void teles_cs_release(struct pcmcia_device *link);
 
 /*
@@ -66,7 +66,7 @@
    needed to manage one actual PCMCIA card.
 */
 
-static void teles_detach(struct pcmcia_device *p_dev);
+static void teles_detach(struct pcmcia_device *p_dev) __devexit;
 
 /*
    A linked list of "instances" of the teles_cs device.  Each actual
@@ -112,7 +112,7 @@
 
 ======================================================================*/
 
-static int teles_probe(struct pcmcia_device *link)
+static int __devinit teles_probe(struct pcmcia_device *link)
 {
     local_info_t *local;
 
@@ -156,7 +156,7 @@
 
 ======================================================================*/
 
-static void teles_detach(struct pcmcia_device *link)
+static void __devexit teles_detach(struct pcmcia_device *link)
 {
 	local_info_t *info = link->priv;
 
@@ -200,7 +200,7 @@
 	return -ENODEV;
 }
 
-static int teles_cs_config(struct pcmcia_device *link)
+static int __devinit teles_cs_config(struct pcmcia_device *link)
 {
     local_info_t *dev;
     int i;
@@ -319,7 +319,7 @@
 		.name	= "teles_cs",
 	},
 	.probe		= teles_probe,
-	.remove		= teles_detach,
+	.remove		= __devexit_p(teles_detach),
 	.id_table       = teles_ids,
 	.suspend	= teles_suspend,
 	.resume		= teles_resume,
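
All four hisax PCMCIA drivers get the same treatment: probe and config become __devinit, detach becomes __devexit, and the .remove pointer is wrapped in __devexit_p(), which evaluates to NULL when hotplug support is compiled out so the discarded __devexit section is never referenced. A condensed sketch of the combination (driver and function names illustrative):

	#include <linux/init.h>
	#include <linux/module.h>
	#include <pcmcia/ds.h>

	static int __devinit example_probe(struct pcmcia_device *link)
	{
		return 0;
	}

	static void __devexit example_detach(struct pcmcia_device *link)
	{
	}

	static struct pcmcia_driver example_cs_driver = {
		.owner	= THIS_MODULE,
		.drv	= {
			.name	= "example_cs",
		},
		.probe	= example_probe,
		/* NULL when CONFIG_HOTPLUG is off, so the linker may drop
		 * the __devexit section without dangling references. */
		.remove	= __devexit_p(example_detach),
	};
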
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index e0b6431..505eb64 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -15,6 +15,8 @@
 	  This option enables the led sysfs class in /sys/class/leds.  You'll
 	  need this to do anything useful with LEDs.  If unsure, say N.
 
+if LEDS_CLASS
+
 comment "LED drivers"
 
 config LEDS_88PM860X
@@ -26,73 +28,73 @@
 
 config LEDS_ATMEL_PWM
 	tristate "LED Support using Atmel PWM outputs"
-	depends on LEDS_CLASS && ATMEL_PWM
+	depends on ATMEL_PWM
 	help
 	  This option enables support for LEDs driven using outputs
 	  of the dedicated PWM controller found on newer Atmel SOCs.
 
 config LEDS_LOCOMO
 	tristate "LED Support for Locomo device"
-	depends on LEDS_CLASS && SHARP_LOCOMO
+	depends on SHARP_LOCOMO
 	help
 	  This option enables support for the LEDs on Sharp Locomo.
 	  Zaurus models SL-5500 and SL-5600.
 
 config LEDS_MIKROTIK_RB532
 	tristate "LED Support for Mikrotik Routerboard 532"
-	depends on LEDS_CLASS && MIKROTIK_RB532
+	depends on MIKROTIK_RB532
 	help
 	  This option enables support for the so called "User LED" of
 	  Mikrotik's Routerboard 532.
 
 config LEDS_S3C24XX
 	tristate "LED Support for Samsung S3C24XX GPIO LEDs"
-	depends on LEDS_CLASS && ARCH_S3C2410
+	depends on ARCH_S3C2410
 	help
 	  This option enables support for LEDs connected to GPIO lines
 	  on Samsung S3C24XX series CPUs, such as the S3C2410 and S3C2440.
 
 config LEDS_AMS_DELTA
 	tristate "LED Support for the Amstrad Delta (E3)"
-	depends on LEDS_CLASS && MACH_AMS_DELTA
+	depends on MACH_AMS_DELTA
 	help
 	  This option enables support for the LEDs on Amstrad Delta (E3).
 
 config LEDS_NET48XX
 	tristate "LED Support for Soekris net48xx series Error LED"
-	depends on LEDS_CLASS && SCx200_GPIO
+	depends on SCx200_GPIO
 	help
 	  This option enables support for the Soekris net4801 and net4826 error
 	  LED.
 
 config LEDS_FSG
 	tristate "LED Support for the Freecom FSG-3"
-	depends on LEDS_CLASS && MACH_FSG
+	depends on MACH_FSG
 	help
 	  This option enables support for the LEDs on the Freecom FSG-3.
 
 config LEDS_WRAP
 	tristate "LED Support for the WRAP series LEDs"
-	depends on LEDS_CLASS && SCx200_GPIO
+	depends on SCx200_GPIO
 	help
 	  This option enables support for the PCEngines WRAP programmable LEDs.
 
 config LEDS_ALIX2
 	tristate "LED Support for ALIX.2 and ALIX.3 series"
-	depends on LEDS_CLASS && X86 && EXPERIMENTAL
+	depends on X86 && !GPIO_CS5535 && !CS5535_GPIO
 	help
 	  This option enables support for the PCEngines ALIX.2 and ALIX.3 LEDs.
 	  You have to set leds-alix2.force=1 for boards with Award BIOS.
 
 config LEDS_H1940
 	tristate "LED Support for iPAQ H1940 device"
-	depends on LEDS_CLASS && ARCH_H1940
+	depends on ARCH_H1940
 	help
 	  This option enables support for the LEDs on the h1940.
 
 config LEDS_COBALT_QUBE
 	tristate "LED Support for the Cobalt Qube series front LED"
-	depends on LEDS_CLASS && MIPS_COBALT
+	depends on MIPS_COBALT
 	help
 	  This option enables support for the front LED on Cobalt Qube series
 
@@ -105,7 +107,7 @@
 
 config LEDS_SUNFIRE
 	tristate "LED support for SunFire servers."
-	depends on LEDS_CLASS && SPARC64
+	depends on SPARC64
 	select LEDS_TRIGGERS
 	help
 	  This option enables support for the Left, Middle, and Right
@@ -113,14 +115,14 @@
 
 config LEDS_HP6XX
 	tristate "LED Support for the HP Jornada 6xx"
-	depends on LEDS_CLASS && SH_HP6XX
+	depends on SH_HP6XX
 	help
 	  This option enables LED support for the handheld
 	  HP Jornada 620/660/680/690.
 
 config LEDS_PCA9532
 	tristate "LED driver for PCA9532 dimmer"
-	depends on LEDS_CLASS && I2C && INPUT && EXPERIMENTAL
+	depends on I2C && INPUT && EXPERIMENTAL
 	help
 	  This option enables support for NXP pca9532
 	  LED controller. It is generally only useful
@@ -128,7 +130,7 @@
 
 config LEDS_GPIO
 	tristate "LED Support for GPIO connected LEDs"
-	depends on LEDS_CLASS && GENERIC_GPIO
+	depends on GENERIC_GPIO
 	help
 	  This option enables support for the LEDs connected to GPIO
 	  outputs. To be useful the particular board must have LEDs
@@ -155,7 +157,7 @@
 
 config LEDS_LP3944
 	tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
-	depends on LEDS_CLASS && I2C
+	depends on I2C
 	help
 	  This option enables support for LEDs connected to the National
 	  Semiconductor LP3944 Lighting Management Unit (LMU) also known as
@@ -166,7 +168,7 @@
 
 config LEDS_CLEVO_MAIL
 	tristate "Mail LED on Clevo notebook"
-	depends on LEDS_CLASS && X86 && SERIO_I8042 && DMI
+	depends on X86 && SERIO_I8042 && DMI
 	help
 	  This driver makes the mail LED accessible from userspace
 	  programs through the leds subsystem. This LED has three
@@ -196,7 +198,7 @@
 
 config LEDS_PCA955X
 	tristate "LED Support for PCA955x I2C chips"
-	depends on LEDS_CLASS && I2C
+	depends on I2C
 	help
 	  This option enables support for LEDs connected to PCA955x
 	  LED driver chips accessed via the I2C bus.  Supported
@@ -204,54 +206,54 @@
 
 config LEDS_WM831X_STATUS
 	tristate "LED support for status LEDs on WM831x PMICs"
-	depends on LEDS_CLASS && MFD_WM831X
+	depends on MFD_WM831X
 	help
 	  This option enables support for the status LEDs of the WM831x
           series of PMICs.
 
 config LEDS_WM8350
 	tristate "LED Support for WM8350 AudioPlus PMIC"
-	depends on LEDS_CLASS && MFD_WM8350
+	depends on MFD_WM8350
 	help
 	  This option enables support for LEDs driven by the Wolfson
 	  Microelectronics WM8350 AudioPlus PMIC.
 
 config LEDS_DA903X
 	tristate "LED Support for DA9030/DA9034 PMIC"
-	depends on LEDS_CLASS && PMIC_DA903X
+	depends on PMIC_DA903X
 	help
 	  This option enables support for on-chip LED drivers found
 	  on Dialog Semiconductor DA9030/DA9034 PMICs.
 
 config LEDS_DAC124S085
 	tristate "LED Support for DAC124S085 SPI DAC"
-	depends on LEDS_CLASS && SPI
+	depends on SPI
 	help
 	  This option enables support for DAC124S085 SPI DAC from NatSemi,
 	  which can be used to control up to four LEDs.
 
 config LEDS_PWM
 	tristate "PWM driven LED Support"
-	depends on LEDS_CLASS && HAVE_PWM
+	depends on HAVE_PWM
 	help
 	  This option enables support for pwm driven LEDs
 
 config LEDS_REGULATOR
 	tristate "REGULATOR driven LED support"
-	depends on LEDS_CLASS && REGULATOR
+	depends on REGULATOR
 	help
 	  This option enables support for regulator driven LEDs.
 
 config LEDS_BD2802
 	tristate "LED driver for BD2802 RGB LED"
-	depends on LEDS_CLASS && I2C
+	depends on I2C
 	help
 	  This option enables support for BD2802GU RGB LED driver chips
 	  accessed via the I2C bus.
 
 config LEDS_INTEL_SS4200
 	tristate "LED driver for Intel NAS SS4200 series"
-	depends on LEDS_CLASS && PCI && DMI
+	depends on PCI && DMI
 	help
 	  This option enables support for the Intel SS4200 series of
 	  Network Attached Storage servers.  You may control the hard
@@ -260,7 +262,7 @@
 
 config LEDS_LT3593
 	tristate "LED driver for LT3593 controllers"
-	depends on LEDS_CLASS && GENERIC_GPIO
+	depends on GENERIC_GPIO
 	help
 	  This option enables support for LEDs driven by a Linear Technology
 	  LT3593 controller. This controller uses a special one-wire pulse
@@ -268,7 +270,7 @@
 
 config LEDS_ADP5520
 	tristate "LED Support for ADP5520/ADP5501 PMIC"
-	depends on LEDS_CLASS && PMIC_ADP5520
+	depends on PMIC_ADP5520
 	help
 	  This option enables support for on-chip LED drivers found
 	  on Analog Devices ADP5520/ADP5501 PMICs.
@@ -276,7 +278,12 @@
 	  To compile this driver as a module, choose M here: the module will
 	  be called leds-adp5520.
 
-comment "LED Triggers"
+config LEDS_DELL_NETBOOKS
+	tristate "External LED on Dell Business Netbooks"
+	depends on X86 && ACPI_WMI
+	help
+	  This adds support for the Latitude 2100 and similar
+	  notebooks that have an external LED.
 
 config LEDS_TRIGGERS
 	bool "LED Trigger support"
@@ -285,9 +292,12 @@
 	  These triggers allow kernel events to drive the LEDs and can
 	  be configured via sysfs. If unsure, say Y.
 
+if LEDS_TRIGGERS
+
+comment "LED Triggers"
+
 config LEDS_TRIGGER_TIMER
 	tristate "LED Timer Trigger"
-	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled by a programmable timer
 	  via sysfs. Some LED hardware can be programmed to start
@@ -298,14 +308,13 @@
 
 config LEDS_TRIGGER_IDE_DISK
 	bool "LED IDE Disk Trigger"
-	depends on LEDS_TRIGGERS && IDE_GD_ATA
+	depends on IDE_GD_ATA
 	help
 	  This allows LEDs to be controlled by IDE disk activity.
 	  If unsure, say Y.
 
 config LEDS_TRIGGER_HEARTBEAT
 	tristate "LED Heartbeat Trigger"
-	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled by a CPU load average.
 	  The flash frequency is a hyperbolic function of the 1-minute
@@ -314,7 +323,6 @@
 
 config LEDS_TRIGGER_BACKLIGHT
 	tristate "LED backlight Trigger"
-	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be controlled as a backlight device: they
 	  turn off and on when the display is blanked and unblanked.
@@ -323,7 +331,6 @@
 
 config LEDS_TRIGGER_GPIO
 	tristate "LED GPIO Trigger"
-	depends on LEDS_TRIGGERS
 	depends on GPIOLIB
 	help
 	  This allows LEDs to be controlled by gpio events. It's good
@@ -336,7 +343,6 @@
 
 config LEDS_TRIGGER_DEFAULT_ON
 	tristate "LED Default ON Trigger"
-	depends on LEDS_TRIGGERS
 	help
 	  This allows LEDs to be initialised in the ON state.
 	  If unsure, say Y.
@@ -344,4 +350,8 @@
 comment "iptables trigger is under Netfilter config (LED target)"
 	depends on LEDS_TRIGGERS
 
+endif # LEDS_TRIGGERS
+
+endif # LEDS_CLASS
+
 endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index d76fb32..0cd8b99 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -34,6 +34,7 @@
 obj-$(CONFIG_LEDS_INTEL_SS4200)		+= leds-ss4200.o
 obj-$(CONFIG_LEDS_LT3593)		+= leds-lt3593.o
 obj-$(CONFIG_LEDS_ADP5520)		+= leds-adp5520.o
+obj-$(CONFIG_LEDS_DELL_NETBOOKS)	+= dell-led.o
 
 # LED SPI Drivers
 obj-$(CONFIG_LEDS_DAC124S085)		+= leds-dac124s085.o
diff --git a/drivers/leds/dell-led.c b/drivers/leds/dell-led.c
new file mode 100644
index 0000000..ee31089
--- /dev/null
+++ b/drivers/leds/dell-led.c
@@ -0,0 +1,200 @@
+/*
+ * dell_led.c - Dell LED Driver
+ *
+ * Copyright (C) 2010 Dell Inc.
+ * Louis Davis <louis_davis@dell.com>
+ * Jim Dailey <jim_dailey@dell.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/acpi.h>
+#include <linux/leds.h>
+
+MODULE_AUTHOR("Louis Davis/Jim Dailey");
+MODULE_DESCRIPTION("Dell LED Control Driver");
+MODULE_LICENSE("GPL");
+
+#define DELL_LED_BIOS_GUID "F6E4FE6E-909D-47cb-8BAB-C9F6F2F8D396"
+MODULE_ALIAS("wmi:" DELL_LED_BIOS_GUID);
+
+/* Error Result Codes: */
+#define INVALID_DEVICE_ID	250
+#define INVALID_PARAMETER	251
+#define INVALID_BUFFER		252
+#define INTERFACE_ERROR		253
+#define UNSUPPORTED_COMMAND	254
+#define UNSPECIFIED_ERROR	255
+
+/* Device ID */
+#define DEVICE_ID_PANEL_BACK	1
+
+/* LED Commands */
+#define CMD_LED_ON	16
+#define CMD_LED_OFF	17
+#define CMD_LED_BLINK	18
+
+struct bios_args {
+	unsigned char length;
+	unsigned char result_code;
+	unsigned char device_id;
+	unsigned char command;
+	unsigned char on_time;
+	unsigned char off_time;
+};
+
+static int dell_led_perform_fn(u8 length,
+		u8 result_code,
+		u8 device_id,
+		u8 command,
+		u8 on_time,
+		u8 off_time)
+{
+	struct bios_args *bios_return;
+	u8 return_code;
+	union acpi_object *obj;
+	struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_buffer input;
+	acpi_status status;
+
+	struct bios_args args;
+	args.length = length;
+	args.result_code = result_code;
+	args.device_id = device_id;
+	args.command = command;
+	args.on_time = on_time;
+	args.off_time = off_time;
+
+	input.length = sizeof(struct bios_args);
+	input.pointer = &args;
+
+	status = wmi_evaluate_method(DELL_LED_BIOS_GUID,
+		1,
+		1,
+		&input,
+		&output);
+
+	if (ACPI_FAILURE(status))
+		return status;
+
+	obj = output.pointer;
+
+	if (!obj)
+		return -EINVAL;
+	else if (obj->type != ACPI_TYPE_BUFFER) {
+		kfree(obj);
+		return -EINVAL;
+	}
+
+	bios_return = ((struct bios_args *)obj->buffer.pointer);
+	return_code = bios_return->result_code;
+
+	kfree(obj);
+
+	return return_code;
+}
+
+static int led_on(void)
+{
+	return dell_led_perform_fn(3,	/* Length of command */
+		INTERFACE_ERROR,	/* Init to INTERFACE_ERROR */
+		DEVICE_ID_PANEL_BACK,	/* Device ID */
+		CMD_LED_ON,		/* Command */
+		0,			/* not used */
+		0);			/* not used */
+}
+
+static int led_off(void)
+{
+	return dell_led_perform_fn(3,	/* Length of command */
+		INTERFACE_ERROR,	/* Init to INTERFACE_ERROR */
+		DEVICE_ID_PANEL_BACK,	/* Device ID */
+		CMD_LED_OFF,		/* Command */
+		0,			/* not used */
+		0);			/* not used */
+}
+
+static int led_blink(unsigned char on_eighths,
+		unsigned char off_eighths)
+{
+	return dell_led_perform_fn(5,	/* Length of command */
+		INTERFACE_ERROR,	/* Init to INTERFACE_ERROR */
+		DEVICE_ID_PANEL_BACK,	/* Device ID */
+		CMD_LED_BLINK,		/* Command */
+		on_eighths,		/* blink on in eighths of a second */
+		off_eighths);		/* blink off in eighths of a second */
+}
+
+static void dell_led_set(struct led_classdev *led_cdev,
+		enum led_brightness value)
+{
+	if (value == LED_OFF)
+		led_off();
+	else
+		led_on();
+}
+
+static int dell_led_blink(struct led_classdev *led_cdev,
+		unsigned long *delay_on,
+		unsigned long *delay_off)
+{
+	unsigned long on_eighths;
+	unsigned long off_eighths;
+
+	/* The Dell LED delay is based on 125 ms intervals;
+	 * round up to the next interval. */
+
+	on_eighths = (*delay_on + 124) / 125;
+	if (0 == on_eighths)
+		on_eighths = 1;
+	if (on_eighths > 255)
+		on_eighths = 255;
+	*delay_on = on_eighths * 125;
+
+	off_eighths = (*delay_off + 124) / 125;
+	if (0 == off_eighths)
+		off_eighths = 1;
+	if (off_eighths > 255)
+		off_eighths = 255;
+	*delay_off = off_eighths * 125;
+
+	led_blink(on_eighths, off_eighths);
+
+	return 0;
+}
+
+static struct led_classdev dell_led = {
+	.name		= "dell::lid",
+	.brightness	= LED_OFF,
+	.max_brightness = 1,
+	.brightness_set = dell_led_set,
+	.blink_set	= dell_led_blink,
+	.flags		= LED_CORE_SUSPENDRESUME,
+};
+
+static int __init dell_led_init(void)
+{
+	int error = 0;
+
+	if (!wmi_has_guid(DELL_LED_BIOS_GUID))
+		return -ENODEV;
+
+	error = led_off();
+	if (error != 0)
+		return -ENODEV;
+
+	return led_classdev_register(NULL, &dell_led);
+}
+
+static void __exit dell_led_exit(void)
+{
+	led_classdev_unregister(&dell_led);
+
+	led_off();
+}
+
+module_init(dell_led_init);
+module_exit(dell_led_exit);
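
dell_led_blink() rounds the requested millisecond delays up to the interface's 125 ms granularity and clamps them to one unsigned byte; (x + 124) / 125 is ordinary integer ceiling division, the same thing the kernel's DIV_ROUND_UP() macro computes. A standalone sketch of the clamping:

	#include <stdio.h>

	#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

	/* Map a millisecond delay onto the 1..255 range of 125 ms units
	 * the BIOS interface accepts, rounding up like dell_led_blink().
	 */
	static unsigned long ms_to_units(unsigned long ms)
	{
		unsigned long units = DIV_ROUND_UP(ms, 125);

		if (units == 0)
			units = 1;
		if (units > 255)
			units = 255;
		return units;
	}

	int main(void)
	{
		printf("%lu %lu %lu\n", ms_to_units(0), ms_to_units(126),
		       ms_to_units(1000000));	/* 1 2 255 */
		return 0;
	}
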
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 782f958..69e7d86 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -72,11 +72,14 @@
 	return sprintf(buf, "%u\n", led_cdev->max_brightness);
 }
 
-static DEVICE_ATTR(brightness, 0644, led_brightness_show, led_brightness_store);
-static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL);
+static struct device_attribute led_class_attrs[] = {
+	__ATTR(brightness, 0644, led_brightness_show, led_brightness_store),
+	__ATTR(max_brightness, 0444, led_max_brightness_show, NULL),
 #ifdef CONFIG_LEDS_TRIGGERS
-static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
+	__ATTR(trigger, 0644, led_trigger_show, led_trigger_store),
 #endif
+	__ATTR_NULL,
+};
 
 /**
  * led_classdev_suspend - suspend an led_classdev.
@@ -127,18 +130,11 @@
  */
 int led_classdev_register(struct device *parent, struct led_classdev *led_cdev)
 {
-	int rc;
-
 	led_cdev->dev = device_create(leds_class, parent, 0, led_cdev,
 				      "%s", led_cdev->name);
 	if (IS_ERR(led_cdev->dev))
 		return PTR_ERR(led_cdev->dev);
 
-	/* register the attributes */
-	rc = device_create_file(led_cdev->dev, &dev_attr_brightness);
-	if (rc)
-		goto err_out;
-
 #ifdef CONFIG_LEDS_TRIGGERS
 	init_rwsem(&led_cdev->trigger_lock);
 #endif
@@ -150,36 +146,18 @@
 	if (!led_cdev->max_brightness)
 		led_cdev->max_brightness = LED_FULL;
 
-	rc = device_create_file(led_cdev->dev, &dev_attr_max_brightness);
-	if (rc)
-		goto err_out_attr_max;
-
 	led_update_brightness(led_cdev);
 
 #ifdef CONFIG_LEDS_TRIGGERS
-	rc = device_create_file(led_cdev->dev, &dev_attr_trigger);
-	if (rc)
-		goto err_out_led_list;
-
 	led_trigger_set_default(led_cdev);
 #endif
 
-	printk(KERN_INFO "Registered led device: %s\n",
+	printk(KERN_DEBUG "Registered led device: %s\n",
 			led_cdev->name);
 
 	return 0;
-
-#ifdef CONFIG_LEDS_TRIGGERS
-err_out_led_list:
-	device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
-#endif
-err_out_attr_max:
-	device_remove_file(led_cdev->dev, &dev_attr_brightness);
-	list_del(&led_cdev->node);
-err_out:
-	device_unregister(led_cdev->dev);
-	return rc;
 }
+
 EXPORT_SYMBOL_GPL(led_classdev_register);
 
 /**
@@ -190,10 +168,7 @@
  */
 void led_classdev_unregister(struct led_classdev *led_cdev)
 {
-	device_remove_file(led_cdev->dev, &dev_attr_max_brightness);
-	device_remove_file(led_cdev->dev, &dev_attr_brightness);
 #ifdef CONFIG_LEDS_TRIGGERS
-	device_remove_file(led_cdev->dev, &dev_attr_trigger);
 	down_write(&led_cdev->trigger_lock);
 	if (led_cdev->trigger)
 		led_trigger_set(led_cdev, NULL);
@@ -215,6 +190,7 @@
 		return PTR_ERR(leds_class);
 	leds_class->suspend = led_suspend;
 	leds_class->resume = led_resume;
+	leds_class->dev_attrs = led_class_attrs;
 	return 0;
 }
 
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index e5225d2..0823e26 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -211,7 +211,6 @@
 					const struct of_device_id *match)
 {
 	struct device_node *np = ofdev->node, *child;
-	struct gpio_led led;
 	struct gpio_led_of_platform_data *pdata;
 	int count = 0, ret;
 
@@ -226,8 +225,8 @@
 	if (!pdata)
 		return -ENOMEM;
 
-	memset(&led, 0, sizeof(led));
 	for_each_child_of_node(np, child) {
+		struct gpio_led led = {};
 		enum of_gpio_flags flags;
 		const char *state;
 
diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
index 97f0498..51477ec 100644
--- a/drivers/leds/leds-ss4200.c
+++ b/drivers/leds/leds-ss4200.c
@@ -63,7 +63,7 @@
 /*
  * PCI ID of the Intel ICH7 LPC Device within which the GPIO block lives.
  */
-static struct pci_device_id ich7_lpc_pci_id[] =
+static const struct pci_device_id ich7_lpc_pci_id[] =
 {
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_0) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH7_1) },
diff --git a/drivers/macintosh/via-pmu-backlight.c b/drivers/macintosh/via-pmu-backlight.c
index 4f3c447..1cec02f 100644
--- a/drivers/macintosh/via-pmu-backlight.c
+++ b/drivers/macintosh/via-pmu-backlight.c
@@ -144,6 +144,7 @@
 
 void __init pmu_backlight_init()
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 	char name[10];
 	int level, autosave;
@@ -161,13 +162,15 @@
 
 	snprintf(name, sizeof(name), "pmubl");
 
-	bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, NULL, NULL, &pmu_backlight_data,
+				       &props);
 	if (IS_ERR(bd)) {
 		printk(KERN_ERR "PMU Backlight registration failed\n");
 		return;
 	}
 	uses_pmu_bl = 1;
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	pmu_backlight_init_curve(0x7F, 0x46, 0x0E);
 
 	level = bd->props.max_brightness;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index af2d39d..bb2a231 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -172,12 +172,14 @@
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit max_segments to 1 lying within
+		 * a single page.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 
 		conf->array_sectors += rdev->sectors;
 		cnt++;
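
The same conversion repeats in multipath.c, raid0.c, raid1.c and raid10.c
below.  The reasoning, spelled out once: a request limited to a single
segment that may not cross a page boundary is at most one page long, which
is the same guarantee the old PAGE_SIZE>>9 cap on the queue's sector limit
provided.  A minimal sketch of the idiom, with member_q as a hypothetical
stand-in for the member device's queue:

	if (member_q->merge_bvec_fn) {
		/* at most one segment per request, confined to one page */
		blk_queue_max_segments(mddev->queue, 1);
		blk_queue_segment_boundary(mddev->queue, PAGE_CACHE_SIZE - 1);
	}
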
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 4b323f4..5558ebc 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -301,14 +301,16 @@
 					  rdev->data_offset << 9);
 
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to one, lying
+		 * within a single page.
 		 * (Note: it is very unlikely that a device with
 		 * merge_bvec_fn will be involved in multipath.)
 		 */
-			if (q->merge_bvec_fn &&
-			    queue_max_sectors(q) > (PAGE_SIZE>>9))
-				blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+			if (q->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}
 
 			conf->working_disks++;
 			mddev->degraded--;
@@ -476,9 +478,11 @@
 		/* as we don't honour merge_bvec_fn, we must never risk
 		 * violating it, not that we ever expect a device with
 		 * a merge_bvec_fn to be involved in multipath */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 
 		if (!test_bit(Faulty, &rdev->flags))
 			conf->working_disks++;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index a1f7147..377cf2a 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -176,14 +176,15 @@
 		disk_stack_limits(mddev->gendisk, rdev1->bdev,
 				  rdev1->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to 1, lying within
+		 * a single page.
 		 */
 
-		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
-
+		if (rdev1->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 		if (!smallest || (rdev1->sectors < smallest->sectors))
 			smallest = rdev1;
 		cnt++;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 5a06122..f741f77 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1152,13 +1152,17 @@
 
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
-			/* as we don't honour merge_bvec_fn, we must never risk
-			 * violating it, so limit ->max_sector to one PAGE, as
-			 * a one page request is never in violation.
+			/* as we don't honour merge_bvec_fn, we must
+			 * never risk violating it, so limit
+			 * ->max_segments to one lying within a single
+			 * page, as a one page request is never in
+			 * violation.
 			 */
-			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-				blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2098,12 +2102,14 @@
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit ->max_segments to 1 lying within
+		 * a single page, as a one page request is never in violation.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 	}
 
 	mddev->degraded = 0;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7584f9a..b4ba41e 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1155,13 +1155,17 @@
 
 			disk_stack_limits(mddev->gendisk, rdev->bdev,
 					  rdev->data_offset << 9);
-			/* as we don't honour merge_bvec_fn, we must never risk
-			 * violating it, so limit ->max_sector to one PAGE, as
-			 * a one page request is never in violation.
+			/* as we don't honour merge_bvec_fn, we must
+			 * never risk violating it, so limit
+			 * ->max_segments to one lying within a single
+			 * page, as a one page request is never in
+			 * violation.
 			 */
-			if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-			    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-				blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+			if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+				blk_queue_max_segments(mddev->queue, 1);
+				blk_queue_segment_boundary(mddev->queue,
+							   PAGE_CACHE_SIZE - 1);
+			}
 
 			p->head_position = 0;
 			rdev->raid_disk = mirror;
@@ -2255,12 +2259,14 @@
 		disk_stack_limits(mddev->gendisk, rdev->bdev,
 				  rdev->data_offset << 9);
 		/* as we don't honour merge_bvec_fn, we must never risk
-		 * violating it, so limit ->max_sector to one PAGE, as
-		 * a one page request is never in violation.
+		 * violating it, so limit max_segments to 1 lying
+		 * within a single page.
 		 */
-		if (rdev->bdev->bd_disk->queue->merge_bvec_fn &&
-		    queue_max_sectors(mddev->queue) > (PAGE_SIZE>>9))
-			blk_queue_max_hw_sectors(mddev->queue, PAGE_SIZE>>9);
+		if (rdev->bdev->bd_disk->queue->merge_bvec_fn) {
+			blk_queue_max_segments(mddev->queue, 1);
+			blk_queue_segment_boundary(mddev->queue,
+						   PAGE_CACHE_SIZE - 1);
+		}
 
 		disk->head_position = 0;
 	}
diff --git a/drivers/mtd/maps/omap_nor.c b/drivers/mtd/maps/omap_nor.c
deleted file mode 100644
index e69de29..0000000
--- a/drivers/mtd/maps/omap_nor.c
+++ /dev/null
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 1157d56..42e5ea49 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -457,7 +457,7 @@
 
 config MTD_NAND_SH_FLCTL
 	tristate "Support for NAND on Renesas SuperH FLCTL"
-	depends on MTD_NAND && SUPERH
+	depends on MTD_NAND && (SUPERH || ARCH_SHMOBILE)
 	help
 	  Several Renesas SuperH CPUs have FLCTL. This option enables support
 	  for NAND Flash using FLCTL.
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 20e2dec..65db201 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -2583,6 +2583,31 @@
 	  To compile this driver as a module, choose M here: the module
 	  will be called cxgb3.
 
+config CHELSIO_T4_DEPENDS
+	tristate
+	depends on PCI && INET
+	default y
+
+config CHELSIO_T4
+	tristate "Chelsio Communications T4 Ethernet support"
+	depends on CHELSIO_T4_DEPENDS
+	select FW_LOADER
+	select MDIO
+	help
+	  This driver supports Chelsio T4-based gigabit and 10Gb Ethernet
+	  adapters.
+
+	  For general information about Chelsio and our products, visit
+	  our website at <http://www.chelsio.com>.
+
+	  For customer support, please visit our customer support page at
+	  <http://www.chelsio.com/support.htm>.
+
+	  Please send feedback to <linux-bugs@chelsio.com>.
+
+	  To compile this driver as a module, choose M here: the module
+	  will be called cxgb4.
+
 config EHEA
 	tristate "eHEA Ethernet support"
 	depends on IBMEBUS && INET && SPARSEMEM
diff --git a/drivers/net/Makefile b/drivers/net/Makefile
index f8444f4..ebf80b9 100644
--- a/drivers/net/Makefile
+++ b/drivers/net/Makefile
@@ -19,6 +19,7 @@
 obj-$(CONFIG_IP1000) += ipg.o
 obj-$(CONFIG_CHELSIO_T1) += chelsio/
 obj-$(CONFIG_CHELSIO_T3) += cxgb3/
+obj-$(CONFIG_CHELSIO_T4) += cxgb4/
 obj-$(CONFIG_EHEA) += ehea/
 obj-$(CONFIG_CAN) += can/
 obj-$(CONFIG_BONDING) += bonding/
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba5470..0ebd820 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
 
 #define ATLX_DRIVER_VERSION "2.1.3"
 MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
-	Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(ATLX_DRIVER_VERSION);
 
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 61a9afd..da87930 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -1466,8 +1466,8 @@
 
 	req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
 	req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
-	req->params.offset = offset;
-	req->params.data_buf_size = 0x4;
+	req->params.offset = cpu_to_le32(offset);
+	req->params.data_buf_size = cpu_to_le32(0x4);
 
 	status = be_mcc_notify_wait(adapter);
 	if (!status)
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index d7390da..d488d52 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@
 {
 	int ret, i;
 	struct be_dma_mem ddrdma_cmd;
-	u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+	u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
 
 	ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
 	ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index cb0a4a6..49d5196 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -879,7 +879,7 @@
 			return;
 		}
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = be16_to_cpu(vid);
+		vid = swab16(vid);
 		vlan_hwaccel_receive_skb(skb, adapter->vlan_grp, vid);
 	} else {
 		netif_receive_skb(skb);
@@ -956,7 +956,7 @@
 		napi_gro_frags(&eq_obj->napi);
 	} else {
 		vid = AMAP_GET_BITS(struct amap_eth_rx_compl, vlan_tag, rxcp);
-		vid = be16_to_cpu(vid);
+		vid = swab16(vid);
 
 		if (!adapter->vlan_grp || adapter->vlans_added == 0)
 			return;
@@ -1977,7 +1977,7 @@
 	p += crc_offset;
 
 	status = be_cmd_get_flash_crc(adapter, flashed_crc,
-			(img_start + image_size - 4));
+			(image_size - 4));
 	if (status) {
 		dev_err(&adapter->pdev->dev,
 		"could not get crc from flash, not flashing redboot\n");
@@ -2113,7 +2113,7 @@
 	struct flash_file_hdr_g3 *fhdr3;
 	struct image_hdr *img_hdr_ptr = NULL;
 	struct be_dma_mem flash_cmd;
-	int status, i = 0;
+	int status, i = 0, num_imgs = 0;
 	const u8 *p;
 
 	strcpy(fw_file, func);
@@ -2139,15 +2139,14 @@
 	if ((adapter->generation == BE_GEN3) &&
 			(get_ufigen_type(fhdr) == BE_GEN3)) {
 		fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
-		for (i = 0; i < fhdr3->num_imgs; i++) {
+		num_imgs = le32_to_cpu(fhdr3->num_imgs);
+		for (i = 0; i < num_imgs; i++) {
 			img_hdr_ptr = (struct image_hdr *) (fw->data +
 					(sizeof(struct flash_file_hdr_g3) +
-					i * sizeof(struct image_hdr)));
-			if (img_hdr_ptr->imageid == 1) {
-				status = be_flash_data(adapter, fw,
-						&flash_cmd, fhdr3->num_imgs);
-			}
-
+					 i * sizeof(struct image_hdr)));
+			if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
+				status = be_flash_data(adapter, fw, &flash_cmd,
+							num_imgs);
 		}
 	} else if ((adapter->generation == BE_GEN2) &&
 			(get_ufigen_type(fhdr) == BE_GEN2)) {
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 0b69ffb..802b538 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -246,6 +246,8 @@
 
 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
 
+static void bnx2_init_napi(struct bnx2 *bp);
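+/*
+ * bnx2_init_napi() is now called from the open path (after
+ * bnx2_setup_int_mode() has settled the interrupt mode) rather than
+ * once at probe time; see the hunks below.
+ */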
+
 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
 {
 	u32 diff;
@@ -6196,6 +6198,7 @@
 	bnx2_disable_int(bp);
 
 	bnx2_setup_int_mode(bp, disable_msi);
+	bnx2_init_napi(bp);
 	bnx2_napi_enable(bp);
 	rc = bnx2_alloc_mem(bp);
 	if (rc)
@@ -7642,9 +7645,11 @@
 	int i;
 
 	for (i = 0; i < bp->irq_nvecs; i++) {
-		disable_irq(bp->irq_tbl[i].vector);
-		bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
-		enable_irq(bp->irq_tbl[i].vector);
+		struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+		disable_irq(irq->vector);
+		irq->handler(irq->vector, &bp->bnx2_napi[i]);
+		enable_irq(irq->vector);
 	}
 }
 #endif
@@ -8206,7 +8211,7 @@
 {
 	int i;
 
-	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+	for (i = 0; i < bp->irq_nvecs; i++) {
 		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
 		int (*poll)(struct napi_struct *, int);
 
@@ -8275,7 +8280,6 @@
 	dev->ethtool_ops = &bnx2_ethtool_ops;
 
 	bp = netdev_priv(dev);
-	bnx2_init_napi(bp);
 
 	pci_set_drvdata(pdev, dev);
 
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 22682f1..85e813c 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1162,6 +1162,11 @@
 			write_lock_bh(&bond->curr_slave_lock);
 		}
 	}
+
+	/* resend IGMP joins since all were sent on curr_active_slave */
+	if (bond->params.mode == BOND_MODE_ROUNDROBIN)
+		bond_resend_igmp_join_requests(bond);
 }
 
 /**
@@ -4096,22 +4101,41 @@
 	struct bonding *bond = netdev_priv(bond_dev);
 	struct slave *slave, *start_at;
 	int i, slave_no, res = 1;
+	struct iphdr *iph = ip_hdr(skb);
 
 	read_lock(&bond->lock);
 
 	if (!BOND_IS_OK(bond))
 		goto out;
-
 	/*
-	 * Concurrent TX may collide on rr_tx_counter; we accept that
-	 * as being rare enough not to justify using an atomic op here
+	 * Start with the curr_active_slave that joined the bond as the
+	 * default for sending IGMP traffic.  For failover purposes one
+	 * needs to maintain some consistency for the interface that will
+	 * send the join/membership reports.  The curr_active_slave found
+	 * will send all of this type of traffic.
 	 */
-	slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+	if ((skb->protocol == htons(ETH_P_IP)) &&
+	    (iph->protocol == IPPROTO_IGMP)) {
 
-	bond_for_each_slave(bond, slave, i) {
-		slave_no--;
-		if (slave_no < 0)
-			break;
+		read_lock(&bond->curr_slave_lock);
+		slave = bond->curr_active_slave;
+		read_unlock(&bond->curr_slave_lock);
+
+		if (!slave)
+			goto out;
+	} else {
+		/*
+		 * Concurrent TX may collide on rr_tx_counter; we accept
+		 * that as being rare enough not to justify using an
+		 * atomic op here.
+		 */
+		slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+		bond_for_each_slave(bond, slave, i) {
+			slave_no--;
+			if (slave_no < 0)
+				break;
+		}
 	}
 
 	start_at = slave;
@@ -4384,6 +4408,14 @@
 	.ndo_vlan_rx_kill_vid	= bond_vlan_rx_kill_vid,
 };
 
+static void bond_destructor(struct net_device *bond_dev)
+{
+	struct bonding *bond = netdev_priv(bond_dev);
+	if (bond->wq)
+		destroy_workqueue(bond->wq);
+	free_netdev(bond_dev);
+}
+
 static void bond_setup(struct net_device *bond_dev)
 {
 	struct bonding *bond = netdev_priv(bond_dev);
@@ -4404,7 +4436,7 @@
 	bond_dev->ethtool_ops = &bond_ethtool_ops;
 	bond_set_mode_ops(bond, bond->params.mode);
 
-	bond_dev->destructor = free_netdev;
+	bond_dev->destructor = bond_destructor;
 
 	/* Initialize the device options */
 	bond_dev->tx_queue_len = 0;
@@ -4476,9 +4508,6 @@
 
 	bond_remove_proc_entry(bond);
 
-	if (bond->wq)
-		destroy_workqueue(bond->wq);
-
 	__hw_addr_flush(&bond->mc_list);
 }
 
@@ -4890,8 +4919,8 @@
 				bond_setup);
 	if (!bond_dev) {
 		pr_err("%s: eek! can't alloc netdev!\n", name);
-		res = -ENOMEM;
-		goto out;
+		rtnl_unlock();
+		return -ENOMEM;
 	}
 
 	dev_net_set(bond_dev, net);
@@ -4900,19 +4929,16 @@
 	if (!name) {
 		res = dev_alloc_name(bond_dev, "bond%d");
 		if (res < 0)
-			goto out_netdev;
+			goto out;
 	}
 
 	res = register_netdevice(bond_dev);
-	if (res < 0)
-		goto out_netdev;
 
 out:
 	rtnl_unlock();
+	if (res < 0)
+		bond_destructor(bond_dev);
 	return res;
-out_netdev:
-	free_netdev(bond_dev);
-	goto out;
 }
 
 static int __net_init bond_net_init(struct net *net)
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 866905f..0348986 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -22,6 +22,7 @@
 #include <linux/can/dev.h>
 #include <linux/can/error.h>
 
+#include <asm/bfin_can.h>
 #include <asm/portmux.h>
 
 #define DRV_NAME "bfin_can"
@@ -29,90 +30,6 @@
 #define TX_ECHO_SKB_MAX  1
 
 /*
- * transmit and receive channels
- */
-#define TRANSMIT_CHL            24
-#define RECEIVE_STD_CHL         0
-#define RECEIVE_EXT_CHL         4
-#define RECEIVE_RTR_CHL         8
-#define RECEIVE_EXT_RTR_CHL     12
-#define MAX_CHL_NUMBER          32
-
-/*
- * bfin can registers layout
- */
-struct bfin_can_mask_regs {
-	u16 aml;
-	u16 dummy1;
-	u16 amh;
-	u16 dummy2;
-};
-
-struct bfin_can_channel_regs {
-	u16 data[8];
-	u16 dlc;
-	u16 dummy1;
-	u16 tsv;
-	u16 dummy2;
-	u16 id0;
-	u16 dummy3;
-	u16 id1;
-	u16 dummy4;
-};
-
-struct bfin_can_regs {
-	/*
-	 * global control and status registers
-	 */
-	u16 mc1;           /* offset 0 */
-	u16 dummy1;
-	u16 md1;           /* offset 4 */
-	u16 rsv1[13];
-	u16 mbtif1;        /* offset 0x20 */
-	u16 dummy2;
-	u16 mbrif1;        /* offset 0x24 */
-	u16 dummy3;
-	u16 mbim1;         /* offset 0x28 */
-	u16 rsv2[11];
-	u16 mc2;           /* offset 0x40 */
-	u16 dummy4;
-	u16 md2;           /* offset 0x44 */
-	u16 dummy5;
-	u16 trs2;          /* offset 0x48 */
-	u16 rsv3[11];
-	u16 mbtif2;        /* offset 0x60 */
-	u16 dummy6;
-	u16 mbrif2;        /* offset 0x64 */
-	u16 dummy7;
-	u16 mbim2;         /* offset 0x68 */
-	u16 rsv4[11];
-	u16 clk;           /* offset 0x80 */
-	u16 dummy8;
-	u16 timing;        /* offset 0x84 */
-	u16 rsv5[3];
-	u16 status;        /* offset 0x8c */
-	u16 dummy9;
-	u16 cec;           /* offset 0x90 */
-	u16 dummy10;
-	u16 gis;           /* offset 0x94 */
-	u16 dummy11;
-	u16 gim;           /* offset 0x98 */
-	u16 rsv6[3];
-	u16 ctrl;          /* offset 0xa0 */
-	u16 dummy12;
-	u16 intr;          /* offset 0xa4 */
-	u16 rsv7[7];
-	u16 esr;           /* offset 0xb4 */
-	u16 rsv8[37];
-
-	/*
-	 * channel(mailbox) mask and message registers
-	 */
-	struct bfin_can_mask_regs msk[MAX_CHL_NUMBER];    /* offset 0x100 */
-	struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
-};
-
-/*
  * bfin can private data
  */
 struct bfin_can_priv {
@@ -163,7 +80,7 @@
 	if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
 		timing |= SAM;
 
-	bfin_write16(&reg->clk, clk);
+	bfin_write16(&reg->clock, clk);
 	bfin_write16(&reg->timing, timing);
 
 	dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
@@ -185,11 +102,11 @@
 	bfin_write16(&reg->gim, 0);
 
 	/* reset can and enter configuration mode */
-	bfin_write16(&reg->ctrl, SRS | CCR);
+	bfin_write16(&reg->control, SRS | CCR);
 	SSYNC();
-	bfin_write16(&reg->ctrl, CCR);
+	bfin_write16(&reg->control, CCR);
 	SSYNC();
-	while (!(bfin_read16(&reg->ctrl) & CCA)) {
+	while (!(bfin_read16(&reg->control) & CCA)) {
 		udelay(10);
 		if (--timeout == 0) {
 			dev_err(dev->dev.parent,
@@ -244,7 +161,7 @@
 	/*
 	 * leave configuration mode
 	 */
-	bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR);
+	bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
 
 	while (bfin_read16(&reg->status) & CCA) {
 		udelay(10);
@@ -726,7 +643,7 @@
 
 	if (netif_running(dev)) {
 		/* enter sleep mode */
-		bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR);
+		bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
 		SSYNC();
 		while (!(bfin_read16(&reg->intr) & SMACK)) {
 			udelay(10);
diff --git a/drivers/net/cxgb4/Makefile b/drivers/net/cxgb4/Makefile
new file mode 100644
index 0000000..4986674
--- /dev/null
+++ b/drivers/net/cxgb4/Makefile
@@ -0,0 +1,7 @@
+#
+# Chelsio T4 driver
+#
+
+obj-$(CONFIG_CHELSIO_T4) += cxgb4.o
+
+cxgb4-objs := cxgb4_main.o l2t.o t4_hw.o sge.o
diff --git a/drivers/net/cxgb4/cxgb4.h b/drivers/net/cxgb4/cxgb4.h
new file mode 100644
index 0000000..3d8ff48
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4.h
@@ -0,0 +1,741 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_H__
+#define __CXGB4_H__
+
+#include <linux/bitops.h>
+#include <linux/cache.h>
+#include <linux/interrupt.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/timer.h>
+#include <asm/io.h>
+#include "cxgb4_uld.h"
+#include "t4_hw.h"
+
+#define FW_VERSION_MAJOR 1
+#define FW_VERSION_MINOR 1
+#define FW_VERSION_MICRO 0
+
+enum {
+	MAX_NPORTS = 4,     /* max # of ports */
+	SERNUM_LEN = 16,    /* Serial # length */
+	EC_LEN     = 16,    /* E/C length */
+	ID_LEN     = 16,    /* ID length */
+};
+
+enum {
+	MEM_EDC0,
+	MEM_EDC1,
+	MEM_MC
+};
+
+enum dev_master {
+	MASTER_CANT,
+	MASTER_MAY,
+	MASTER_MUST
+};
+
+enum dev_state {
+	DEV_STATE_UNINIT,
+	DEV_STATE_INIT,
+	DEV_STATE_ERR
+};
+
+enum {
+	PAUSE_RX      = 1 << 0,
+	PAUSE_TX      = 1 << 1,
+	PAUSE_AUTONEG = 1 << 2
+};
+
+struct port_stats {
+	u64 tx_octets;            /* total # of octets in good frames */
+	u64 tx_frames;            /* all good frames */
+	u64 tx_bcast_frames;      /* all broadcast frames */
+	u64 tx_mcast_frames;      /* all multicast frames */
+	u64 tx_ucast_frames;      /* all unicast frames */
+	u64 tx_error_frames;      /* all error frames */
+
+	u64 tx_frames_64;         /* # of Tx frames in a particular range */
+	u64 tx_frames_65_127;
+	u64 tx_frames_128_255;
+	u64 tx_frames_256_511;
+	u64 tx_frames_512_1023;
+	u64 tx_frames_1024_1518;
+	u64 tx_frames_1519_max;
+
+	u64 tx_drop;              /* # of dropped Tx frames */
+	u64 tx_pause;             /* # of transmitted pause frames */
+	u64 tx_ppp0;              /* # of transmitted PPP prio 0 frames */
+	u64 tx_ppp1;              /* # of transmitted PPP prio 1 frames */
+	u64 tx_ppp2;              /* # of transmitted PPP prio 2 frames */
+	u64 tx_ppp3;              /* # of transmitted PPP prio 3 frames */
+	u64 tx_ppp4;              /* # of transmitted PPP prio 4 frames */
+	u64 tx_ppp5;              /* # of transmitted PPP prio 5 frames */
+	u64 tx_ppp6;              /* # of transmitted PPP prio 6 frames */
+	u64 tx_ppp7;              /* # of transmitted PPP prio 7 frames */
+
+	u64 rx_octets;            /* total # of octets in good frames */
+	u64 rx_frames;            /* all good frames */
+	u64 rx_bcast_frames;      /* all broadcast frames */
+	u64 rx_mcast_frames;      /* all multicast frames */
+	u64 rx_ucast_frames;      /* all unicast frames */
+	u64 rx_too_long;          /* # of frames exceeding MTU */
+	u64 rx_jabber;            /* # of jabber frames */
+	u64 rx_fcs_err;           /* # of received frames with bad FCS */
+	u64 rx_len_err;           /* # of received frames with length error */
+	u64 rx_symbol_err;        /* symbol errors */
+	u64 rx_runt;              /* # of short frames */
+
+	u64 rx_frames_64;         /* # of Rx frames in a particular range */
+	u64 rx_frames_65_127;
+	u64 rx_frames_128_255;
+	u64 rx_frames_256_511;
+	u64 rx_frames_512_1023;
+	u64 rx_frames_1024_1518;
+	u64 rx_frames_1519_max;
+
+	u64 rx_pause;             /* # of received pause frames */
+	u64 rx_ppp0;              /* # of received PPP prio 0 frames */
+	u64 rx_ppp1;              /* # of received PPP prio 1 frames */
+	u64 rx_ppp2;              /* # of received PPP prio 2 frames */
+	u64 rx_ppp3;              /* # of received PPP prio 3 frames */
+	u64 rx_ppp4;              /* # of received PPP prio 4 frames */
+	u64 rx_ppp5;              /* # of received PPP prio 5 frames */
+	u64 rx_ppp6;              /* # of received PPP prio 6 frames */
+	u64 rx_ppp7;              /* # of received PPP prio 7 frames */
+
+	u64 rx_ovflow0;           /* drops due to buffer-group 0 overflows */
+	u64 rx_ovflow1;           /* drops due to buffer-group 1 overflows */
+	u64 rx_ovflow2;           /* drops due to buffer-group 2 overflows */
+	u64 rx_ovflow3;           /* drops due to buffer-group 3 overflows */
+	u64 rx_trunc0;            /* buffer-group 0 truncated packets */
+	u64 rx_trunc1;            /* buffer-group 1 truncated packets */
+	u64 rx_trunc2;            /* buffer-group 2 truncated packets */
+	u64 rx_trunc3;            /* buffer-group 3 truncated packets */
+};
+
+struct lb_port_stats {
+	u64 octets;
+	u64 frames;
+	u64 bcast_frames;
+	u64 mcast_frames;
+	u64 ucast_frames;
+	u64 error_frames;
+
+	u64 frames_64;
+	u64 frames_65_127;
+	u64 frames_128_255;
+	u64 frames_256_511;
+	u64 frames_512_1023;
+	u64 frames_1024_1518;
+	u64 frames_1519_max;
+
+	u64 drop;
+
+	u64 ovflow0;
+	u64 ovflow1;
+	u64 ovflow2;
+	u64 ovflow3;
+	u64 trunc0;
+	u64 trunc1;
+	u64 trunc2;
+	u64 trunc3;
+};
+
+struct tp_tcp_stats {
+	u32 tcpOutRsts;
+	u64 tcpInSegs;
+	u64 tcpOutSegs;
+	u64 tcpRetransSegs;
+};
+
+struct tp_err_stats {
+	u32 macInErrs[4];
+	u32 hdrInErrs[4];
+	u32 tcpInErrs[4];
+	u32 tnlCongDrops[4];
+	u32 ofldChanDrops[4];
+	u32 tnlTxDrops[4];
+	u32 ofldVlanDrops[4];
+	u32 tcp6InErrs[4];
+	u32 ofldNoNeigh;
+	u32 ofldCongDefer;
+};
+
+struct tp_params {
+	unsigned int ntxchan;        /* # of Tx channels */
+	unsigned int tre;            /* log2 of core clocks per TP tick */
+};
+
+struct vpd_params {
+	unsigned int cclk;
+	u8 ec[EC_LEN + 1];
+	u8 sn[SERNUM_LEN + 1];
+	u8 id[ID_LEN + 1];
+};
+
+struct pci_params {
+	unsigned char speed;
+	unsigned char width;
+};
+
+struct adapter_params {
+	struct tp_params  tp;
+	struct vpd_params vpd;
+	struct pci_params pci;
+
+	unsigned int fw_vers;
+	unsigned int tp_vers;
+	u8 api_vers[7];
+
+	unsigned short mtus[NMTUS];
+	unsigned short a_wnd[NCCTRL_WIN];
+	unsigned short b_wnd[NCCTRL_WIN];
+
+	unsigned char nports;             /* # of ethernet ports */
+	unsigned char portvec;
+	unsigned char rev;                /* chip revision */
+	unsigned char offload;
+
+	unsigned int ofldq_wr_cred;
+};
+
+struct trace_params {
+	u32 data[TRACE_LEN / 4];
+	u32 mask[TRACE_LEN / 4];
+	unsigned short snap_len;
+	unsigned short min_len;
+	unsigned char skip_ofst;
+	unsigned char skip_len;
+	unsigned char invert;
+	unsigned char port;
+};
+
+struct link_config {
+	unsigned short supported;        /* link capabilities */
+	unsigned short advertising;      /* advertised capabilities */
+	unsigned short requested_speed;  /* speed user has requested */
+	unsigned short speed;            /* actual link speed */
+	unsigned char  requested_fc;     /* flow control user has requested */
+	unsigned char  fc;               /* actual link flow control */
+	unsigned char  autoneg;          /* autonegotiating? */
+	unsigned char  link_ok;          /* link up? */
+};
+
+#define FW_LEN16(fw_struct) FW_CMD_LEN16(sizeof(fw_struct) / 16)
+
+enum {
+	MAX_ETH_QSETS = 32,           /* # of Ethernet Tx/Rx queue sets */
+	MAX_OFLD_QSETS = 16,          /* # of offload Tx/Rx queue sets */
+	MAX_CTRL_QUEUES = NCHAN,      /* # of control Tx queues */
+	MAX_RDMA_QUEUES = NCHAN,      /* # of streaming RDMA Rx queues */
+};
+
+enum {
+	MAX_EGRQ = 128,         /* max # of egress queues, including FLs */
+	MAX_INGQ = 64           /* max # of interrupt-capable ingress queues */
+};
+
+struct adapter;
+struct vlan_group;
+struct sge_rspq;
+
+struct port_info {
+	struct adapter *adapter;
+	struct vlan_group *vlan_grp;
+	u16    viid;
+	s16    xact_addr_filt;        /* index of exact MAC address filter */
+	u16    rss_size;              /* size of VI's RSS table slice */
+	s8     mdio_addr;
+	u8     port_type;
+	u8     mod_type;
+	u8     port_id;
+	u8     tx_chan;
+	u8     lport;                 /* associated offload logical port */
+	u8     rx_offload;            /* CSO, etc */
+	u8     nqsets;                /* # of qsets */
+	u8     first_qset;            /* index of first qset */
+	struct link_config link_cfg;
+};
+
+/* port_info.rx_offload flags */
+enum {
+	RX_CSO = 1 << 0,
+};
+
+struct dentry;
+struct work_struct;
+
+enum {                                 /* adapter flags */
+	FULL_INIT_DONE     = (1 << 0),
+	USING_MSI          = (1 << 1),
+	USING_MSIX         = (1 << 2),
+	QUEUES_BOUND       = (1 << 3),
+	FW_OK              = (1 << 4),
+};
+
+struct rx_sw_desc;
+
+struct sge_fl {                     /* SGE free-buffer queue state */
+	unsigned int avail;         /* # of available Rx buffers */
+	unsigned int pend_cred;     /* new buffers since last FL DB ring */
+	unsigned int cidx;          /* consumer index */
+	unsigned int pidx;          /* producer index */
+	unsigned long alloc_failed; /* # of times buffer allocation failed */
+	unsigned long large_alloc_failed;
+	unsigned long starving;
+	/* RO fields */
+	unsigned int cntxt_id;      /* SGE context id for the free list */
+	unsigned int size;          /* capacity of free list */
+	struct rx_sw_desc *sdesc;   /* address of SW Rx descriptor ring */
+	__be64 *desc;               /* address of HW Rx descriptor ring */
+	dma_addr_t addr;            /* bus address of HW ring start */
+};
+
+/* A packet gather list */
+struct pkt_gl {
+	skb_frag_t frags[MAX_SKB_FRAGS];
+	void *va;                         /* virtual address of first byte */
+	unsigned int nfrags;              /* # of fragments */
+	unsigned int tot_len;             /* total length of fragments */
+};
+
+typedef int (*rspq_handler_t)(struct sge_rspq *q, const __be64 *rsp,
+			      const struct pkt_gl *gl);
+
+struct sge_rspq {                   /* state for an SGE response queue */
+	struct napi_struct napi;
+	const __be64 *cur_desc;     /* current descriptor in queue */
+	unsigned int cidx;          /* consumer index */
+	u8 gen;                     /* current generation bit */
+	u8 intr_params;             /* interrupt holdoff parameters */
+	u8 next_intr_params;        /* holdoff params for next interrupt */
+	u8 pktcnt_idx;              /* interrupt packet threshold */
+	u8 uld;                     /* ULD handling this queue */
+	u8 idx;                     /* queue index within its group */
+	int offset;                 /* offset into current Rx buffer */
+	u16 cntxt_id;               /* SGE context id for the response q */
+	u16 abs_id;                 /* absolute SGE id for the response q */
+	__be64 *desc;               /* address of HW response ring */
+	dma_addr_t phys_addr;       /* physical address of the ring */
+	unsigned int iqe_len;       /* entry size */
+	unsigned int size;          /* capacity of response queue */
+	struct adapter *adap;
+	struct net_device *netdev;  /* associated net device */
+	rspq_handler_t handler;
+};
+
+struct sge_eth_stats {              /* Ethernet queue statistics */
+	unsigned long pkts;         /* # of ethernet packets */
+	unsigned long lro_pkts;     /* # of LRO super packets */
+	unsigned long lro_merged;   /* # of wire packets merged by LRO */
+	unsigned long rx_cso;       /* # of Rx checksum offloads */
+	unsigned long vlan_ex;      /* # of Rx VLAN extractions */
+	unsigned long rx_drops;     /* # of packets dropped due to no mem */
+};
+
+struct sge_eth_rxq {                /* SW Ethernet Rx queue */
+	struct sge_rspq rspq;
+	struct sge_fl fl;
+	struct sge_eth_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_stats {             /* offload queue statistics */
+	unsigned long pkts;         /* # of packets */
+	unsigned long imm;          /* # of immediate-data packets */
+	unsigned long an;           /* # of asynchronous notifications */
+	unsigned long nomem;        /* # of responses deferred due to no mem */
+};
+
+struct sge_ofld_rxq {               /* SW offload Rx queue */
+	struct sge_rspq rspq;
+	struct sge_fl fl;
+	struct sge_ofld_stats stats;
+} ____cacheline_aligned_in_smp;
+
+struct tx_desc {
+	__be64 flit[8];
+};
+
+struct tx_sw_desc;
+
+struct sge_txq {
+	unsigned int  in_use;       /* # of in-use Tx descriptors */
+	unsigned int  size;         /* # of descriptors */
+	unsigned int  cidx;         /* SW consumer index */
+	unsigned int  pidx;         /* producer index */
+	unsigned long stops;        /* # of times q has been stopped */
+	unsigned long restarts;     /* # of queue restarts */
+	unsigned int  cntxt_id;     /* SGE context id for the Tx q */
+	struct tx_desc *desc;       /* address of HW Tx descriptor ring */
+	struct tx_sw_desc *sdesc;   /* address of SW Tx descriptor ring */
+	struct sge_qstat *stat;     /* queue status entry */
+	dma_addr_t    phys_addr;    /* physical address of the ring */
+};
+
+struct sge_eth_txq {                /* state for an SGE Ethernet Tx queue */
+	struct sge_txq q;
+	struct netdev_queue *txq;   /* associated netdev TX queue */
+	unsigned long tso;          /* # of TSO requests */
+	unsigned long tx_cso;       /* # of Tx checksum offloads */
+	unsigned long vlan_ins;     /* # of Tx VLAN insertions */
+	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ofld_txq {               /* state for an SGE offload Tx queue */
+	struct sge_txq q;
+	struct adapter *adap;
+	struct sk_buff_head sendq;  /* list of backpressured packets */
+	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	u8 full;                    /* the Tx ring is full */
+	unsigned long mapping_err;  /* # of I/O MMU packet mapping errors */
+} ____cacheline_aligned_in_smp;
+
+struct sge_ctrl_txq {               /* state for an SGE control Tx queue */
+	struct sge_txq q;
+	struct adapter *adap;
+	struct sk_buff_head sendq;  /* list of backpressured packets */
+	struct tasklet_struct qresume_tsk; /* restarts the queue */
+	u8 full;                    /* the Tx ring is full */
+} ____cacheline_aligned_in_smp;
+
+struct sge {
+	struct sge_eth_txq ethtxq[MAX_ETH_QSETS];
+	struct sge_ofld_txq ofldtxq[MAX_OFLD_QSETS];
+	struct sge_ctrl_txq ctrlq[MAX_CTRL_QUEUES];
+
+	struct sge_eth_rxq ethrxq[MAX_ETH_QSETS];
+	struct sge_ofld_rxq ofldrxq[MAX_OFLD_QSETS];
+	struct sge_ofld_rxq rdmarxq[MAX_RDMA_QUEUES];
+	struct sge_rspq fw_evtq ____cacheline_aligned_in_smp;
+
+	struct sge_rspq intrq ____cacheline_aligned_in_smp;
+	spinlock_t intrq_lock;
+
+	u16 max_ethqsets;           /* # of available Ethernet queue sets */
+	u16 ethqsets;               /* # of active Ethernet queue sets */
+	u16 ethtxq_rover;           /* Tx queue to clean up next */
+	u16 ofldqsets;              /* # of active offload queue sets */
+	u16 rdmaqs;                 /* # of available RDMA Rx queues */
+	u16 ofld_rxq[MAX_OFLD_QSETS];
+	u16 rdma_rxq[NCHAN];
+	u16 timer_val[SGE_NTIMERS];
+	u8 counter_val[SGE_NCOUNTERS];
+	unsigned int starve_thres;
+	u8 idma_state[2];
+	void *egr_map[MAX_EGRQ];    /* qid->queue egress queue map */
+	struct sge_rspq *ingr_map[MAX_INGQ]; /* qid->queue ingress queue map */
+	DECLARE_BITMAP(starving_fl, MAX_EGRQ);
+	DECLARE_BITMAP(txq_maperr, MAX_EGRQ);
+	struct timer_list rx_timer; /* refills starving FLs */
+	struct timer_list tx_timer; /* checks Tx queues */
+};
+
+#define for_each_ethrxq(sge, i) for (i = 0; i < (sge)->ethqsets; i++)
+#define for_each_ofldrxq(sge, i) for (i = 0; i < (sge)->ofldqsets; i++)
+#define for_each_rdmarxq(sge, i) for (i = 0; i < (sge)->rdmaqs; i++)
+
+struct l2t_data;
+
+struct adapter {
+	void __iomem *regs;
+	struct pci_dev *pdev;
+	struct device *pdev_dev;
+	unsigned long registered_device_map;
+	unsigned long open_device_map;
+	unsigned long flags;
+
+	const char *name;
+	int msg_enable;
+
+	struct adapter_params params;
+	struct cxgb4_virt_res vres;
+	unsigned int swintr;
+
+	unsigned int wol;
+
+	struct {
+		unsigned short vec;
+		char desc[14];
+	} msix_info[MAX_INGQ + 1];
+
+	struct sge sge;
+
+	struct net_device *port[MAX_NPORTS];
+	u8 chan_map[NCHAN];                   /* channel -> port map */
+
+	struct l2t_data *l2t;
+	void *uld_handle[CXGB4_ULD_MAX];
+	struct list_head list_node;
+
+	struct tid_info tids;
+	void **tid_release_head;
+	spinlock_t tid_release_lock;
+	struct work_struct tid_release_task;
+	bool tid_release_task_busy;
+
+	struct dentry *debugfs_root;
+
+	spinlock_t stats_lock;
+};
+
+static inline u32 t4_read_reg(struct adapter *adap, u32 reg_addr)
+{
+	return readl(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg(struct adapter *adap, u32 reg_addr, u32 val)
+{
+	writel(val, adap->regs + reg_addr);
+}
+
+#ifndef readq
+static inline u64 readq(const volatile void __iomem *addr)
+{
+	return readl(addr) + ((u64)readl(addr + 4) << 32);
+}
+
+static inline void writeq(u64 val, volatile void __iomem *addr)
+{
+	writel(val, addr);
+	writel(val >> 32, addr + 4);
+}
+#endif
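+
+/*
+ * Note: the readq/writeq fallbacks above split each access into two
+ * 32-bit cycles (low word first) and are therefore not atomic; this is
+ * assumed to be acceptable for the registers reached through
+ * t4_read_reg64() and t4_write_reg64().
+ */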
+
+static inline u64 t4_read_reg64(struct adapter *adap, u32 reg_addr)
+{
+	return readq(adap->regs + reg_addr);
+}
+
+static inline void t4_write_reg64(struct adapter *adap, u32 reg_addr, u64 val)
+{
+	writeq(val, adap->regs + reg_addr);
+}
+
+/**
+ * netdev2pinfo - return the port_info structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct port_info associated with a net_device
+ */
+static inline struct port_info *netdev2pinfo(const struct net_device *dev)
+{
+	return netdev_priv(dev);
+}
+
+/**
+ * adap2pinfo - return the port_info of a port
+ * @adap: the adapter
+ * @idx: the port index
+ *
+ * Return the port_info structure for the port of the given index.
+ */
+static inline struct port_info *adap2pinfo(struct adapter *adap, int idx)
+{
+	return netdev_priv(adap->port[idx]);
+}
+
+/**
+ * netdev2adap - return the adapter structure associated with a net_device
+ * @dev: the netdev
+ *
+ * Return the struct adapter associated with a net_device
+ */
+static inline struct adapter *netdev2adap(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->adapter;
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id);
+void t4_os_link_changed(struct adapter *adap, int port_id, int link_stat);
+
+void *t4_alloc_mem(size_t size);
+void t4_free_mem(void *addr);
+
+void t4_free_sge_resources(struct adapter *adap);
+irq_handler_t t4_intr_handler(struct adapter *adap);
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev);
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+		     const struct pkt_gl *gl);
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb);
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb);
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+		     struct net_device *dev, int intr_idx,
+		     struct sge_fl *fl, rspq_handler_t hnd);
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+			 struct net_device *dev, struct netdev_queue *netdevq,
+			 unsigned int iqid);
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct net_device *dev, unsigned int iqid,
+			  unsigned int cmplqid);
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+			  struct net_device *dev, unsigned int iqid);
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie);
+void t4_sge_init(struct adapter *adap);
+void t4_sge_start(struct adapter *adap);
+void t4_sge_stop(struct adapter *adap);
+
+#define for_each_port(adapter, iter) \
+	for (iter = 0; iter < (adapter)->params.nports; ++iter)
+
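+/*
+ * params.vpd.cclk is taken to hold the core clock frequency in kHz,
+ * hence the divide-by-1000 below to obtain ticks per microsecond.
+ */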
+static inline unsigned int core_ticks_per_usec(const struct adapter *adap)
+{
+	return adap->params.vpd.cclk / 1000;
+}
+
+static inline unsigned int us_to_core_ticks(const struct adapter *adap,
+					    unsigned int us)
+{
+	return (us * adap->params.vpd.cclk) / 1000;
+}
+
+void t4_set_reg_field(struct adapter *adap, unsigned int addr, u32 mask,
+		      u32 val);
+
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok);
+
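+/*
+ * Convenience wrappers around t4_wr_mbox_meat(): t4_wr_mbox() may sleep
+ * while waiting for the mailbox, while t4_wr_mbox_ns() ("ns" presumably
+ * for no-sleep) busy-waits and is usable where sleeping is not allowed.
+ */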
+static inline int t4_wr_mbox(struct adapter *adap, int mbox, const void *cmd,
+			     int size, void *rpl)
+{
+	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, true);
+}
+
+static inline int t4_wr_mbox_ns(struct adapter *adap, int mbox, const void *cmd,
+				int size, void *rpl)
+{
+	return t4_wr_mbox_meat(adap, mbox, cmd, size, rpl, false);
+}
+
+void t4_intr_enable(struct adapter *adapter);
+void t4_intr_disable(struct adapter *adapter);
+void t4_intr_clear(struct adapter *adapter);
+int t4_slow_intr_handler(struct adapter *adapter);
+
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc);
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port);
+int t4_seeprom_wp(struct adapter *adapter, bool enable);
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented);
+int t4_load_fw(struct adapter *adapter, const u8 *fw_data, unsigned int size);
+int t4_check_fw_version(struct adapter *adapter);
+int t4_prep_adapter(struct adapter *adapter);
+int t4_port_init(struct adapter *adap, int mbox, int pf, int vf);
+void t4_fatal_err(struct adapter *adapter);
+void t4_set_vlan_accel(struct adapter *adapter, unsigned int ports, int on);
+int t4_set_trace_filter(struct adapter *adapter, const struct trace_params *tp,
+			int filter_index, int enable);
+void t4_get_trace_filter(struct adapter *adapter, struct trace_params *tp,
+			 int filter_index, int *enabled);
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+			int start, int n, const u16 *rspq, unsigned int nrspq);
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+		       unsigned int flags);
+int t4_read_rss(struct adapter *adapter, u16 *entries);
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *parity);
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data,
+		u64 *parity);
+
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p);
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p);
+
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log);
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st);
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6);
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+		  const unsigned short *alpha, const unsigned short *beta);
+
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+			 const u8 *addr);
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+		      u64 mask0, u64 mask1, unsigned int crc, bool enable);
+
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state);
+int t4_fw_bye(struct adapter *adap, unsigned int mbox);
+int t4_early_init(struct adapter *adap, unsigned int mbox);
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset);
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val);
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		  unsigned int vf, unsigned int nparams, const u32 *params,
+		  const u32 *val);
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+		unsigned int rxqi, unsigned int rxq, unsigned int tc,
+		unsigned int vi, unsigned int cmask, unsigned int pmask,
+		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps);
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size);
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid);
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		int mtu, int promisc, int all_multi, int bcast, bool sleep_ok);
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+		      unsigned int viid, bool free, unsigned int naddr,
+		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok);
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int idx, const u8 *addr, bool persist, bool add_smt);
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     bool ucast, u64 vec, bool sleep_ok);
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		 bool rx_en, bool tx_en);
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     unsigned int nblinks);
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 *valp);
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val);
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+		     unsigned int pf, unsigned int vf, unsigned int iqid,
+		     unsigned int fl0id, unsigned int fl1id);
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id);
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		   unsigned int vf, unsigned int eqid);
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid);
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid);
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl);
+#endif /* __CXGB4_H__ */
diff --git a/drivers/net/cxgb4/cxgb4_main.c b/drivers/net/cxgb4/cxgb4_main.c
new file mode 100644
index 0000000..5f582db
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_main.c
@@ -0,0 +1,3388 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/bitmap.h>
+#include <linux/crc32.h>
+#include <linux/ctype.h>
+#include <linux/debugfs.h>
+#include <linux/err.h>
+#include <linux/etherdevice.h>
+#include <linux/firmware.h>
+#include <linux/if_vlan.h>
+#include <linux/init.h>
+#include <linux/log2.h>
+#include <linux/mdio.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/mutex.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/aer.h>
+#include <linux/rtnetlink.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/sockios.h>
+#include <linux/vmalloc.h>
+#include <linux/workqueue.h>
+#include <net/neighbour.h>
+#include <net/netevent.h>
+#include <asm/uaccess.h>
+
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+#include "l2t.h"
+
+#define DRV_VERSION "1.0.0-ko"
+#define DRV_DESC "Chelsio T4 Network Driver"
+
+/*
+ * Max interrupt hold-off timer value in us.  Queues fall back to this value
+ * under extreme memory pressure so it's largish to give the system time to
+ * recover.
+ */
+#define MAX_SGE_TIMERVAL 200U
+
+enum {
+	MEMWIN0_APERTURE = 65536,
+	MEMWIN0_BASE     = 0x30000,
+	MEMWIN1_APERTURE = 32768,
+	MEMWIN1_BASE     = 0x28000,
+	MEMWIN2_APERTURE = 2048,
+	MEMWIN2_BASE     = 0x1b800,
+};
+
+enum {
+	MAX_TXQ_ENTRIES      = 16384,
+	MAX_CTRL_TXQ_ENTRIES = 1024,
+	MAX_RSPQ_ENTRIES     = 16384,
+	MAX_RX_BUFFERS       = 16384,
+	MIN_TXQ_ENTRIES      = 32,
+	MIN_CTRL_TXQ_ENTRIES = 32,
+	MIN_RSPQ_ENTRIES     = 128,
+	MIN_FL_ENTRIES       = 16
+};
+
+#define DFLT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK | \
+			 NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP |\
+			 NETIF_MSG_RX_ERR | NETIF_MSG_TX_ERR)
+
+#define CH_DEVICE(devid) { PCI_VDEVICE(CHELSIO, devid), 0 }
+
+static DEFINE_PCI_DEVICE_TABLE(cxgb4_pci_tbl) = {
+	CH_DEVICE(0xa000),  /* PE10K */
+	{ 0, }
+};
+
+#define FW_FNAME "cxgb4/t4fw.bin"
+
+MODULE_DESCRIPTION(DRV_DESC);
+MODULE_AUTHOR("Chelsio Communications");
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_VERSION(DRV_VERSION);
+MODULE_DEVICE_TABLE(pci, cxgb4_pci_tbl);
+MODULE_FIRMWARE(FW_FNAME);
+
+static int dflt_msg_enable = DFLT_MSG_ENABLE;
+
+module_param(dflt_msg_enable, int, 0644);
+MODULE_PARM_DESC(dflt_msg_enable, "Chelsio T4 default message enable bitmap");
+
+/*
+ * The driver uses the best interrupt scheme available on a platform in the
+ * order MSI-X, MSI, legacy INTx interrupts.  This parameter determines which
+ * of these schemes the driver may consider as follows:
+ *
+ * msi = 2: choose from among all three options
+ * msi = 1: only consider MSI and INTx interrupts
+ * msi = 0: force INTx interrupts
+ */
+static int msi = 2;
+
+module_param(msi, int, 0644);
+MODULE_PARM_DESC(msi, "whether to use INTx (0), MSI (1) or MSI-X (2)");
+
+/*
+ * Queue interrupt hold-off timer values.  Queues default to the first of these
+ * upon creation.
+ */
+static unsigned int intr_holdoff[SGE_NTIMERS - 1] = { 5, 10, 20, 50, 100 };
+
+module_param_array(intr_holdoff, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_holdoff, "values for queue interrupt hold-off timers "
+		 "0..4 in microseconds");
+
+static unsigned int intr_cnt[SGE_NCOUNTERS - 1] = { 4, 8, 16 };
+
+module_param_array(intr_cnt, uint, NULL, 0644);
+MODULE_PARM_DESC(intr_cnt,
+		 "thresholds 1..3 for queue interrupt packet counters");
+
+static int vf_acls;
+
+#ifdef CONFIG_PCI_IOV
+module_param(vf_acls, bool, 0644);
+MODULE_PARM_DESC(vf_acls, "if set enable virtualization L2 ACL enforcement");
+
+static unsigned int num_vf[4];
+
+module_param_array(num_vf, uint, NULL, 0644);
+MODULE_PARM_DESC(num_vf, "number of VFs for each of PFs 0-3");
+#endif
+
+static struct dentry *cxgb4_debugfs_root;
+
+static LIST_HEAD(adapter_list);
+static DEFINE_MUTEX(uld_mutex);
+static struct cxgb4_uld_info ulds[CXGB4_ULD_MAX];
+static const char *uld_str[] = { "RDMA", "iSCSI" };
+
+static void link_report(struct net_device *dev)
+{
+	if (!netif_carrier_ok(dev)) {
+		netdev_info(dev, "link down\n");
+	} else {
+		static const char *fc[] = { "no", "Rx", "Tx", "Tx/Rx" };
+
+		const char *s = "10Mbps";
+		const struct port_info *p = netdev_priv(dev);
+
+		switch (p->link_cfg.speed) {
+		case SPEED_10000:
+			s = "10Gbps";
+			break;
+		case SPEED_1000:
+			s = "1000Mbps";
+			break;
+		case SPEED_100:
+			s = "100Mbps";
+			break;
+		}
+
+		netdev_info(dev, "link up, %s, full-duplex, %s PAUSE\n", s,
+			    fc[p->link_cfg.fc]);
+	}
+}
+
+void t4_os_link_changed(struct adapter *adapter, int port_id, int link_stat)
+{
+	struct net_device *dev = adapter->port[port_id];
+
+	/* Skip changes from disabled ports. */
+	if (netif_running(dev) && link_stat != netif_carrier_ok(dev)) {
+		if (link_stat)
+			netif_carrier_on(dev);
+		else
+			netif_carrier_off(dev);
+
+		link_report(dev);
+	}
+}
+
+void t4_os_portmod_changed(const struct adapter *adap, int port_id)
+{
+	static const char *mod_str[] = {
+		NULL, "LR", "SR", "ER", "passive DA", "active DA"
+	};
+
+	const struct net_device *dev = adap->port[port_id];
+	const struct port_info *pi = netdev_priv(dev);
+
+	if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
+		netdev_info(dev, "port module unplugged\n");
+	else
+		netdev_info(dev, "%s module inserted\n", mod_str[pi->mod_type]);
+}
+
+/*
+ * Configure the exact and hash address filters to handle a port's multicast
+ * and secondary unicast MAC addresses.
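+ * Addresses are handed to t4_alloc_mac_filt() in batches of up to 7; any
+ * that cannot get an exact filter fall back to the hash filter, whose bits
+ * are accumulated in uhash/mhash and programmed via t4_set_addr_hash().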
+ */
+static int set_addr_filters(const struct net_device *dev, bool sleep)
+{
+	u64 mhash = 0;
+	u64 uhash = 0;
+	bool free = true;
+	u16 filt_idx[7];
+	const u8 *addr[7];
+	int ret, naddr = 0;
+	const struct netdev_hw_addr *ha;
+	int uc_cnt = netdev_uc_count(dev);
+	int mc_cnt = netdev_mc_count(dev);
+	const struct port_info *pi = netdev_priv(dev);
+
+	/* first do the secondary unicast addresses */
+	netdev_for_each_uc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (--uc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+					naddr, addr, filt_idx, &uhash, sleep);
+			if (ret < 0)
+				return ret;
+
+			free = false;
+			naddr = 0;
+		}
+	}
+
+	/* next set up the multicast addresses */
+	netdev_for_each_mc_addr(ha, dev) {
+		addr[naddr++] = ha->addr;
+		if (--mc_cnt == 0 || naddr >= ARRAY_SIZE(addr)) {
+			ret = t4_alloc_mac_filt(pi->adapter, 0, pi->viid, free,
+					naddr, addr, filt_idx, &mhash, sleep);
+			if (ret < 0)
+				return ret;
+
+			free = false;
+			naddr = 0;
+		}
+	}
+
+	return t4_set_addr_hash(pi->adapter, 0, pi->viid, uhash != 0,
+				uhash | mhash, sleep);
+}
+
+/*
+ * Set Rx properties of a port, such as promiscuity, address filters, and MTU.
+ * If @mtu is -1 it is left unchanged.
+ */
+static int set_rxmode(struct net_device *dev, int mtu, bool sleep_ok)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	ret = set_addr_filters(dev, sleep_ok);
+	if (ret == 0)
+		ret = t4_set_rxmode(pi->adapter, 0, pi->viid, mtu,
+				    (dev->flags & IFF_PROMISC) ? 1 : 0,
+				    (dev->flags & IFF_ALLMULTI) ? 1 : 0, 1,
+				    sleep_ok);
+	return ret;
+}
+
+/**
+ *	link_start - enable a port
+ *	@dev: the port to enable
+ *
+ *	Performs the MAC and PHY actions needed to enable a port.
+ */
+static int link_start(struct net_device *dev)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	/*
+	 * We do not set address filters and promiscuity here, the stack does
+	 * that step explicitly.
+	 */
+	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, dev->mtu, -1, -1, -1,
+			    true);
+	if (ret == 0) {
+		ret = t4_change_mac(pi->adapter, 0, pi->viid,
+				    pi->xact_addr_filt, dev->dev_addr, true,
+				    false);
+		if (ret >= 0) {
+			pi->xact_addr_filt = ret;
+			ret = 0;
+		}
+	}
+	if (ret == 0)
+		ret = t4_link_start(pi->adapter, 0, pi->tx_chan, &pi->link_cfg);
+	if (ret == 0)
+		ret = t4_enable_vi(pi->adapter, 0, pi->viid, true, true);
+	return ret;
+}
+
+/*
+ * Response queue handler for the FW event queue.
+ */
+static int fwevtq_handler(struct sge_rspq *q, const __be64 *rsp,
+			  const struct pkt_gl *gl)
+{
+	u8 opcode = ((const struct rss_header *)rsp)->opcode;
+
+	rsp++;                                          /* skip RSS header */
+	if (likely(opcode == CPL_SGE_EGR_UPDATE)) {
+		const struct cpl_sge_egr_update *p = (void *)rsp;
+		unsigned int qid = EGR_QID(ntohl(p->opcode_qid));
+		struct sge_txq *txq = q->adap->sge.egr_map[qid];
+
+		txq->restarts++;
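+		/*
+		 * Tell Ethernet Tx queues apart from offload Tx queues by
+		 * their position within struct sge.
+		 */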
+		if ((u8 *)txq < (u8 *)q->adap->sge.ethrxq) {
+			struct sge_eth_txq *eq;
+
+			eq = container_of(txq, struct sge_eth_txq, q);
+			netif_tx_wake_queue(eq->txq);
+		} else {
+			struct sge_ofld_txq *oq;
+
+			oq = container_of(txq, struct sge_ofld_txq, q);
+			tasklet_schedule(&oq->qresume_tsk);
+		}
+	} else if (opcode == CPL_FW6_MSG || opcode == CPL_FW4_MSG) {
+		const struct cpl_fw6_msg *p = (void *)rsp;
+
+		if (p->type == 0)
+			t4_handle_fw_rpl(q->adap, p->data);
+	} else if (opcode == CPL_L2T_WRITE_RPL) {
+		const struct cpl_l2t_write_rpl *p = (void *)rsp;
+
+		do_l2t_write_rpl(q->adap, p);
+	} else {
+		dev_err(q->adap->pdev_dev,
+			"unexpected CPL %#x on FW event queue\n", opcode);
+	}
+	return 0;
+}
+
+/**
+ *	uldrx_handler - response queue handler for ULD queues
+ *	@q: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the offload message
+ *	@gl: the gather list of packet fragments
+ *
+ *	Deliver an ingress offload packet to a ULD.  All processing is done by
+ *	the ULD, we just maintain statistics.
+ */
+static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
+			 const struct pkt_gl *gl)
+{
+	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
+
+	if (ulds[q->uld].rx_handler(q->adap->uld_handle[q->uld], rsp, gl)) {
+		rxq->stats.nomem++;
+		return -1;
+	}
+	if (gl == NULL)
+		rxq->stats.imm++;
+	else if (gl == CXGB4_MSG_AN)
+		rxq->stats.an++;
+	else
+		rxq->stats.pkts++;
+	return 0;
+}
+
+static void disable_msi(struct adapter *adapter)
+{
+	if (adapter->flags & USING_MSIX) {
+		pci_disable_msix(adapter->pdev);
+		adapter->flags &= ~USING_MSIX;
+	} else if (adapter->flags & USING_MSI) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~USING_MSI;
+	}
+}
+
+/*
+ * Interrupt handler for non-data events used with MSI-X.
+ */
+static irqreturn_t t4_nondata_intr(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+	u32 v = t4_read_reg(adap, MYPF_REG(PL_PF_INT_CAUSE));
+
+	if (v & PFSW) {
+		adap->swintr = 1;
+		t4_write_reg(adap, MYPF_REG(PL_PF_INT_CAUSE), v);
+	}
+	t4_slow_intr_handler(adap);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Name the MSI-X interrupts.
+ */
+static void name_msix_vecs(struct adapter *adap)
+{
+	int i, j, msi_idx = 2, n = sizeof(adap->msix_info[0].desc) - 1;
+
+	/* non-data interrupts */
+	snprintf(adap->msix_info[0].desc, n, "%s", adap->name);
+	adap->msix_info[0].desc[n] = 0;
+
+	/* FW events */
+	snprintf(adap->msix_info[1].desc, n, "%s-FWeventq", adap->name);
+	adap->msix_info[1].desc[n] = 0;
+
+	/* Ethernet queues */
+	for_each_port(adap, j) {
+		struct net_device *d = adap->port[j];
+		const struct port_info *pi = netdev_priv(d);
+
+		for (i = 0; i < pi->nqsets; i++, msi_idx++) {
+			snprintf(adap->msix_info[msi_idx].desc, n, "%s-Rx%d",
+				 d->name, i);
+			adap->msix_info[msi_idx].desc[n] = 0;
+		}
+	}
+
+	/* offload queues */
+	for_each_ofldrxq(&adap->sge, i) {
+		snprintf(adap->msix_info[msi_idx].desc, n, "%s-ofld%d",
+			 adap->name, i);
+		adap->msix_info[msi_idx++].desc[n] = 0;
+	}
+	for_each_rdmarxq(&adap->sge, i) {
+		snprintf(adap->msix_info[msi_idx].desc, n, "%s-rdma%d",
+			 adap->name, i);
+		adap->msix_info[msi_idx++].desc[n] = 0;
+	}
+}
+
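+/*
+ * Request the MSI-X queue interrupts.  Vector 0 serves non-data interrupts
+ * and vector 1 the FW event queue, so the per-queue vectors start at
+ * index 2 (hence msi = 2 below).
+ */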
+static int request_msix_queue_irqs(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	int err, ethqidx, ofldqidx = 0, rdmaqidx = 0, msi = 2;
+
+	err = request_irq(adap->msix_info[1].vec, t4_sge_intr_msix, 0,
+			  adap->msix_info[1].desc, &s->fw_evtq);
+	if (err)
+		return err;
+
+	for_each_ethrxq(s, ethqidx) {
+		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+				  adap->msix_info[msi].desc,
+				  &s->ethrxq[ethqidx].rspq);
+		if (err)
+			goto unwind;
+		msi++;
+	}
+	for_each_ofldrxq(s, ofldqidx) {
+		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+				  adap->msix_info[msi].desc,
+				  &s->ofldrxq[ofldqidx].rspq);
+		if (err)
+			goto unwind;
+		msi++;
+	}
+	for_each_rdmarxq(s, rdmaqidx) {
+		err = request_irq(adap->msix_info[msi].vec, t4_sge_intr_msix, 0,
+				  adap->msix_info[msi].desc,
+				  &s->rdmarxq[rdmaqidx].rspq);
+		if (err)
+			goto unwind;
+		msi++;
+	}
+	return 0;
+
+unwind:
+	while (--rdmaqidx >= 0)
+		free_irq(adap->msix_info[--msi].vec,
+			 &s->rdmarxq[rdmaqidx].rspq);
+	while (--ofldqidx >= 0)
+		free_irq(adap->msix_info[--msi].vec,
+			 &s->ofldrxq[ofldqidx].rspq);
+	while (--ethqidx >= 0)
+		free_irq(adap->msix_info[--msi].vec, &s->ethrxq[ethqidx].rspq);
+	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+	return err;
+}
+
+static void free_msix_queue_irqs(struct adapter *adap)
+{
+	int i, msi = 2;
+	struct sge *s = &adap->sge;
+
+	free_irq(adap->msix_info[1].vec, &s->fw_evtq);
+	for_each_ethrxq(s, i)
+		free_irq(adap->msix_info[msi++].vec, &s->ethrxq[i].rspq);
+	for_each_ofldrxq(s, i)
+		free_irq(adap->msix_info[msi++].vec, &s->ofldrxq[i].rspq);
+	for_each_rdmarxq(s, i)
+		free_irq(adap->msix_info[msi++].vec, &s->rdmarxq[i].rspq);
+}
+
+/**
+ *	setup_rss - configure RSS
+ *	@adap: the adapter
+ *
+ *	Sets up RSS to distribute packets to multiple receive queues.  We
+ *	configure the RSS CPU lookup table to distribute to the number of HW
+ *	receive queues, and the response queue lookup table to narrow that
+ *	down to the response queues actually configured for each port.
+ *	We always configure the RSS mapping for all ports since the mapping
+ *	table has plenty of entries.
+ */
+static int setup_rss(struct adapter *adap)
+{
+	int i, j, err;
+	u16 rss[MAX_ETH_QSETS];
+
+	for_each_port(adap, i) {
+		const struct port_info *pi = adap2pinfo(adap, i);
+		const struct sge_eth_rxq *q = &adap->sge.ethrxq[pi->first_qset];
+
+		for (j = 0; j < pi->nqsets; j++)
+			rss[j] = q[j].rspq.abs_id;
+
+		err = t4_config_rss_range(adap, 0, pi->viid, 0, pi->rss_size,
+					  rss, pi->nqsets);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+/*
+ * Wait until all NAPI handlers are descheduled.
+ */
+static void quiesce_rx(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+		struct sge_rspq *q = adap->sge.ingr_map[i];
+
+		if (q && q->handler)
+			napi_disable(&q->napi);
+	}
+}
+
+/*
+ * Enable NAPI scheduling and interrupt generation for all Rx queues.
+ */
+static void enable_rx(struct adapter *adap)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ingr_map); i++) {
+		struct sge_rspq *q = adap->sge.ingr_map[i];
+
+		if (!q)
+			continue;
+		if (q->handler)
+			napi_enable(&q->napi);
+		/* 0-increment GTS to start the timer and enable interrupts */
+		t4_write_reg(adap, MYPF_REG(SGE_PF_GTS),
+			     SEINTARM(q->intr_params) |
+			     INGRESSQID(q->cntxt_id));
+	}
+}
+
+/**
+ *	setup_sge_queues - configure SGE Tx/Rx/response queues
+ *	@adap: the adapter
+ *
+ *	Determines how many sets of SGE queues to use and initializes them.
+ *	We support multiple queue sets per port if we have MSI-X, otherwise
+ *	just one queue set per port.
+ */
+static int setup_sge_queues(struct adapter *adap)
+{
+	int err, msi_idx, i, j;
+	struct sge *s = &adap->sge;
+
+	bitmap_zero(s->starving_fl, MAX_EGRQ);
+	bitmap_zero(s->txq_maperr, MAX_EGRQ);
+
+	if (adap->flags & USING_MSIX) {
+		msi_idx = 1;         /* vector 0 is for non-queue interrupts */
+	} else {
+		err = t4_sge_alloc_rxq(adap, &s->intrq, false, adap->port[0], 0,
+				       NULL, NULL);
+		if (err)
+			return err;
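+		/*
+		 * For INTx/MSI, a negative msi_idx encodes the absolute id of
+		 * the common interrupt queue that the other response queues
+		 * forward their interrupts to.
+		 */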
+		msi_idx = -((int)s->intrq.abs_id + 1);
+	}
+
+	err = t4_sge_alloc_rxq(adap, &s->fw_evtq, true, adap->port[0],
+			       msi_idx, NULL, fwevtq_handler);
+	if (err) {
+freeout:	t4_free_sge_resources(adap);
+		return err;
+	}
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		struct port_info *pi = netdev_priv(dev);
+		struct sge_eth_rxq *q = &s->ethrxq[pi->first_qset];
+		struct sge_eth_txq *t = &s->ethtxq[pi->first_qset];
+
+		for (j = 0; j < pi->nqsets; j++, q++) {
+			if (msi_idx > 0)
+				msi_idx++;
+			err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev,
+					       msi_idx, &q->fl,
+					       t4_ethrx_handler);
+			if (err)
+				goto freeout;
+			q->rspq.idx = j;
+			memset(&q->stats, 0, sizeof(q->stats));
+		}
+		for (j = 0; j < pi->nqsets; j++, t++) {
+			err = t4_sge_alloc_eth_txq(adap, t, dev,
+					netdev_get_tx_queue(dev, j),
+					s->fw_evtq.cntxt_id);
+			if (err)
+				goto freeout;
+		}
+	}
+
+	j = s->ofldqsets / adap->params.nports; /* ofld queues per channel */
+	for_each_ofldrxq(s, i) {
+		struct sge_ofld_rxq *q = &s->ofldrxq[i];
+		struct net_device *dev = adap->port[i / j];
+
+		if (msi_idx > 0)
+			msi_idx++;
+		err = t4_sge_alloc_rxq(adap, &q->rspq, false, dev, msi_idx,
+				       &q->fl, uldrx_handler);
+		if (err)
+			goto freeout;
+		memset(&q->stats, 0, sizeof(q->stats));
+		s->ofld_rxq[i] = q->rspq.abs_id;
+		err = t4_sge_alloc_ofld_txq(adap, &s->ofldtxq[i], dev,
+					    s->fw_evtq.cntxt_id);
+		if (err)
+			goto freeout;
+	}
+
+	for_each_rdmarxq(s, i) {
+		struct sge_ofld_rxq *q = &s->rdmarxq[i];
+
+		if (msi_idx > 0)
+			msi_idx++;
+		err = t4_sge_alloc_rxq(adap, &q->rspq, false, adap->port[i],
+				       msi_idx, &q->fl, uldrx_handler);
+		if (err)
+			goto freeout;
+		memset(&q->stats, 0, sizeof(q->stats));
+		s->rdma_rxq[i] = q->rspq.abs_id;
+	}
+
+	for_each_port(adap, i) {
+		/*
+		 * Note that ->rdmarxq[i].rspq.cntxt_id below is 0 if we don't
+		 * have RDMA queues, and that's the right value.
+		 */
+		err = t4_sge_alloc_ctrl_txq(adap, &s->ctrlq[i], adap->port[i],
+					    s->fw_evtq.cntxt_id,
+					    s->rdmarxq[i].rspq.cntxt_id);
+		if (err)
+			goto freeout;
+	}
+
+	t4_write_reg(adap, MPS_TRC_RSS_CONTROL,
+		     RSSCONTROL(netdev2pinfo(adap->port[0])->tx_chan) |
+		     QUEUENUMBER(s->ethrxq[0].rspq.abs_id));
+	return 0;
+}
+
+/*
+ * Returns 0 if new FW was successfully loaded, a positive errno if a load was
+ * started but failed, and a negative errno if flash load couldn't start.
+ */
+static int upgrade_fw(struct adapter *adap)
+{
+	int ret;
+	u32 vers;
+	const struct fw_hdr *hdr;
+	const struct firmware *fw;
+	struct device *dev = adap->pdev_dev;
+
+	ret = request_firmware(&fw, FW_FNAME, dev);
+	if (ret < 0) {
+		dev_err(dev, "unable to load firmware image " FW_FNAME
+			", error %d\n", ret);
+		return ret;
+	}
+
+	hdr = (const struct fw_hdr *)fw->data;
+	vers = ntohl(hdr->fw_ver);
+	if (FW_HDR_FW_VER_MAJOR_GET(vers) != FW_VERSION_MAJOR) {
+		ret = -EINVAL;              /* wrong major version, won't do */
+		goto out;
+	}
+
+	/*
+	 * If the flash FW is unusable or we found something newer, load it.
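+	 * (The 32-bit firmware version is logged as a dotted quad via %pI4.)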
+	 */
+	if (FW_HDR_FW_VER_MAJOR_GET(adap->params.fw_vers) != FW_VERSION_MAJOR ||
+	    vers > adap->params.fw_vers) {
+		ret = -t4_load_fw(adap, fw->data, fw->size);
+		if (!ret)
+			dev_info(dev, "firmware upgraded to version %pI4 from "
+				 FW_FNAME "\n", &hdr->fw_ver);
+	}
+out:	release_firmware(fw);
+	return ret;
+}
+
+/*
+ * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
+ * The allocated memory is cleared.
+ */
+void *t4_alloc_mem(size_t size)
+{
+	void *p = kmalloc(size, GFP_KERNEL);
+
+	if (!p)
+		p = vmalloc(size);
+	if (p)
+		memset(p, 0, size);
+	return p;
+}
+
+/*
+ * Free memory allocated through alloc_mem().
+ */
+void t4_free_mem(void *addr)
+{
+	if (is_vmalloc_addr(addr))
+		vfree(addr);
+	else
+		kfree(addr);
+}
+
+static inline int is_offload(const struct adapter *adap)
+{
+	return adap->params.offload;
+}
+
+/*
+ * Implementation of ethtool operations.
+ */
+
+static u32 get_msglevel(struct net_device *dev)
+{
+	return netdev2adap(dev)->msg_enable;
+}
+
+static void set_msglevel(struct net_device *dev, u32 val)
+{
+	netdev2adap(dev)->msg_enable = val;
+}
+
+static const char stats_strings[][ETH_GSTRING_LEN] = {
+	"TxOctetsOK         ",
+	"TxFramesOK         ",
+	"TxBroadcastFrames  ",
+	"TxMulticastFrames  ",
+	"TxUnicastFrames    ",
+	"TxErrorFrames      ",
+
+	"TxFrames64         ",
+	"TxFrames65To127    ",
+	"TxFrames128To255   ",
+	"TxFrames256To511   ",
+	"TxFrames512To1023  ",
+	"TxFrames1024To1518 ",
+	"TxFrames1519ToMax  ",
+
+	"TxFramesDropped    ",
+	"TxPauseFrames      ",
+	"TxPPP0Frames       ",
+	"TxPPP1Frames       ",
+	"TxPPP2Frames       ",
+	"TxPPP3Frames       ",
+	"TxPPP4Frames       ",
+	"TxPPP5Frames       ",
+	"TxPPP6Frames       ",
+	"TxPPP7Frames       ",
+
+	"RxOctetsOK         ",
+	"RxFramesOK         ",
+	"RxBroadcastFrames  ",
+	"RxMulticastFrames  ",
+	"RxUnicastFrames    ",
+
+	"RxFramesTooLong    ",
+	"RxJabberErrors     ",
+	"RxFCSErrors        ",
+	"RxLengthErrors     ",
+	"RxSymbolErrors     ",
+	"RxRuntFrames       ",
+
+	"RxFrames64         ",
+	"RxFrames65To127    ",
+	"RxFrames128To255   ",
+	"RxFrames256To511   ",
+	"RxFrames512To1023  ",
+	"RxFrames1024To1518 ",
+	"RxFrames1519ToMax  ",
+
+	"RxPauseFrames      ",
+	"RxPPP0Frames       ",
+	"RxPPP1Frames       ",
+	"RxPPP2Frames       ",
+	"RxPPP3Frames       ",
+	"RxPPP4Frames       ",
+	"RxPPP5Frames       ",
+	"RxPPP6Frames       ",
+	"RxPPP7Frames       ",
+
+	"RxBG0FramesDropped ",
+	"RxBG1FramesDropped ",
+	"RxBG2FramesDropped ",
+	"RxBG3FramesDropped ",
+	"RxBG0FramesTrunc   ",
+	"RxBG1FramesTrunc   ",
+	"RxBG2FramesTrunc   ",
+	"RxBG3FramesTrunc   ",
+
+	"TSO                ",
+	"TxCsumOffload      ",
+	"RxCsumGood         ",
+	"VLANextractions    ",
+	"VLANinsertions     ",
+};
+
+static int get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(stats_strings);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#define T4_REGMAP_SIZE (160 * 1024)
+
+static int get_regs_len(struct net_device *dev)
+{
+	return T4_REGMAP_SIZE;
+}
+
+static int get_eeprom_len(struct net_device *dev)
+{
+	return EEPROMSIZE;
+}
+
+static void get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
+{
+	struct adapter *adapter = netdev2adap(dev);
+
+	strcpy(info->driver, KBUILD_MODNAME);
+	strcpy(info->version, DRV_VERSION);
+	strcpy(info->bus_info, pci_name(adapter->pdev));
+
+	if (!adapter->params.fw_vers)
+		strcpy(info->fw_version, "N/A");
+	else
+		snprintf(info->fw_version, sizeof(info->fw_version),
+			"%u.%u.%u.%u, TP %u.%u.%u.%u",
+			FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers),
+			FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers),
+			FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers),
+			FW_HDR_FW_VER_BUILD_GET(adapter->params.fw_vers),
+			FW_HDR_FW_VER_MAJOR_GET(adapter->params.tp_vers),
+			FW_HDR_FW_VER_MINOR_GET(adapter->params.tp_vers),
+			FW_HDR_FW_VER_MICRO_GET(adapter->params.tp_vers),
+			FW_HDR_FW_VER_BUILD_GET(adapter->params.tp_vers));
+}
+
+static void get_strings(struct net_device *dev, u32 stringset, u8 *data)
+{
+	if (stringset == ETH_SS_STATS)
+		memcpy(data, stats_strings, sizeof(stats_strings));
+}
+
+/*
+ * port stats maintained per queue of the port.  They should be in the same
+ * order as in stats_strings above.
+ */
+struct queue_port_stats {
+	u64 tso;
+	u64 tx_csum;
+	u64 rx_csum;
+	u64 vlan_ex;
+	u64 vlan_ins;
+};
+
+static void collect_sge_port_stats(const struct adapter *adap,
+		const struct port_info *p, struct queue_port_stats *s)
+{
+	int i;
+	const struct sge_eth_txq *tx = &adap->sge.ethtxq[p->first_qset];
+	const struct sge_eth_rxq *rx = &adap->sge.ethrxq[p->first_qset];
+
+	memset(s, 0, sizeof(*s));
+	for (i = 0; i < p->nqsets; i++, rx++, tx++) {
+		s->tso += tx->tso;
+		s->tx_csum += tx->tx_cso;
+		s->rx_csum += rx->stats.rx_cso;
+		s->vlan_ex += rx->stats.vlan_ex;
+		s->vlan_ins += tx->vlan_ins;
+	}
+}
+
+static void get_stats(struct net_device *dev, struct ethtool_stats *stats,
+		      u64 *data)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	t4_get_port_stats(adapter, pi->tx_chan, (struct port_stats *)data);
+
+	data += sizeof(struct port_stats) / sizeof(u64);
+	collect_sge_port_stats(adapter, pi, (struct queue_port_stats *)data);
+}
+
+/*
+ * Return a version number to identify the type of adapter.  The scheme is:
+ * - bits 0..9: chip version
+ * - bits 10..15: chip revision
+ */
+static inline unsigned int mk_adap_vers(const struct adapter *ap)
+{
+	return 4 | (ap->params.rev << 10);
+}
+
+static void reg_block_dump(struct adapter *ap, void *buf, unsigned int start,
+			   unsigned int end)
+{
+	u32 *p = buf + start;
+
+	for ( ; start <= end; start += sizeof(u32))
+		*p++ = t4_read_reg(ap, start);
+}
+
+static void get_regs(struct net_device *dev, struct ethtool_regs *regs,
+		     void *buf)
+{
+	static const unsigned int reg_ranges[] = {
+		0x1008, 0x1108,
+		0x1180, 0x11b4,
+		0x11fc, 0x123c,
+		0x1300, 0x173c,
+		0x1800, 0x18fc,
+		0x3000, 0x30d8,
+		0x30e0, 0x5924,
+		0x5960, 0x59d4,
+		0x5a00, 0x5af8,
+		0x6000, 0x6098,
+		0x6100, 0x6150,
+		0x6200, 0x6208,
+		0x6240, 0x6248,
+		0x6280, 0x6338,
+		0x6370, 0x638c,
+		0x6400, 0x643c,
+		0x6500, 0x6524,
+		0x6a00, 0x6a38,
+		0x6a60, 0x6a78,
+		0x6b00, 0x6b84,
+		0x6bf0, 0x6c84,
+		0x6cf0, 0x6d84,
+		0x6df0, 0x6e84,
+		0x6ef0, 0x6f84,
+		0x6ff0, 0x7084,
+		0x70f0, 0x7184,
+		0x71f0, 0x7284,
+		0x72f0, 0x7384,
+		0x73f0, 0x7450,
+		0x7500, 0x7530,
+		0x7600, 0x761c,
+		0x7680, 0x76cc,
+		0x7700, 0x7798,
+		0x77c0, 0x77fc,
+		0x7900, 0x79fc,
+		0x7b00, 0x7c38,
+		0x7d00, 0x7efc,
+		0x8dc0, 0x8e1c,
+		0x8e30, 0x8e78,
+		0x8ea0, 0x8f6c,
+		0x8fc0, 0x9074,
+		0x90fc, 0x90fc,
+		0x9400, 0x9458,
+		0x9600, 0x96bc,
+		0x9800, 0x9808,
+		0x9820, 0x983c,
+		0x9850, 0x9864,
+		0x9c00, 0x9c6c,
+		0x9c80, 0x9cec,
+		0x9d00, 0x9d6c,
+		0x9d80, 0x9dec,
+		0x9e00, 0x9e6c,
+		0x9e80, 0x9eec,
+		0x9f00, 0x9f6c,
+		0x9f80, 0x9fec,
+		0xd004, 0xd03c,
+		0xdfc0, 0xdfe0,
+		0xe000, 0xea7c,
+		0xf000, 0x11190,
+		0x19040, 0x19124,
+		0x19150, 0x191b0,
+		0x191d0, 0x191e8,
+		0x19238, 0x1924c,
+		0x193f8, 0x19474,
+		0x19490, 0x194f8,
+		0x19800, 0x19f30,
+		0x1a000, 0x1a06c,
+		0x1a0b0, 0x1a120,
+		0x1a128, 0x1a138,
+		0x1a190, 0x1a1c4,
+		0x1a1fc, 0x1a1fc,
+		0x1e040, 0x1e04c,
+		0x1e240, 0x1e28c,
+		0x1e2c0, 0x1e2c0,
+		0x1e2e0, 0x1e2e0,
+		0x1e300, 0x1e384,
+		0x1e3c0, 0x1e3c8,
+		0x1e440, 0x1e44c,
+		0x1e640, 0x1e68c,
+		0x1e6c0, 0x1e6c0,
+		0x1e6e0, 0x1e6e0,
+		0x1e700, 0x1e784,
+		0x1e7c0, 0x1e7c8,
+		0x1e840, 0x1e84c,
+		0x1ea40, 0x1ea8c,
+		0x1eac0, 0x1eac0,
+		0x1eae0, 0x1eae0,
+		0x1eb00, 0x1eb84,
+		0x1ebc0, 0x1ebc8,
+		0x1ec40, 0x1ec4c,
+		0x1ee40, 0x1ee8c,
+		0x1eec0, 0x1eec0,
+		0x1eee0, 0x1eee0,
+		0x1ef00, 0x1ef84,
+		0x1efc0, 0x1efc8,
+		0x1f040, 0x1f04c,
+		0x1f240, 0x1f28c,
+		0x1f2c0, 0x1f2c0,
+		0x1f2e0, 0x1f2e0,
+		0x1f300, 0x1f384,
+		0x1f3c0, 0x1f3c8,
+		0x1f440, 0x1f44c,
+		0x1f640, 0x1f68c,
+		0x1f6c0, 0x1f6c0,
+		0x1f6e0, 0x1f6e0,
+		0x1f700, 0x1f784,
+		0x1f7c0, 0x1f7c8,
+		0x1f840, 0x1f84c,
+		0x1fa40, 0x1fa8c,
+		0x1fac0, 0x1fac0,
+		0x1fae0, 0x1fae0,
+		0x1fb00, 0x1fb84,
+		0x1fbc0, 0x1fbc8,
+		0x1fc40, 0x1fc4c,
+		0x1fe40, 0x1fe8c,
+		0x1fec0, 0x1fec0,
+		0x1fee0, 0x1fee0,
+		0x1ff00, 0x1ff84,
+		0x1ffc0, 0x1ffc8,
+		0x20000, 0x2002c,
+		0x20100, 0x2013c,
+		0x20190, 0x201c8,
+		0x20200, 0x20318,
+		0x20400, 0x20528,
+		0x20540, 0x20614,
+		0x21000, 0x21040,
+		0x2104c, 0x21060,
+		0x210c0, 0x210ec,
+		0x21200, 0x21268,
+		0x21270, 0x21284,
+		0x212fc, 0x21388,
+		0x21400, 0x21404,
+		0x21500, 0x21518,
+		0x2152c, 0x2153c,
+		0x21550, 0x21554,
+		0x21600, 0x21600,
+		0x21608, 0x21628,
+		0x21630, 0x2163c,
+		0x21700, 0x2171c,
+		0x21780, 0x2178c,
+		0x21800, 0x21c38,
+		0x21c80, 0x21d7c,
+		0x21e00, 0x21e04,
+		0x22000, 0x2202c,
+		0x22100, 0x2213c,
+		0x22190, 0x221c8,
+		0x22200, 0x22318,
+		0x22400, 0x22528,
+		0x22540, 0x22614,
+		0x23000, 0x23040,
+		0x2304c, 0x23060,
+		0x230c0, 0x230ec,
+		0x23200, 0x23268,
+		0x23270, 0x23284,
+		0x232fc, 0x23388,
+		0x23400, 0x23404,
+		0x23500, 0x23518,
+		0x2352c, 0x2353c,
+		0x23550, 0x23554,
+		0x23600, 0x23600,
+		0x23608, 0x23628,
+		0x23630, 0x2363c,
+		0x23700, 0x2371c,
+		0x23780, 0x2378c,
+		0x23800, 0x23c38,
+		0x23c80, 0x23d7c,
+		0x23e00, 0x23e04,
+		0x24000, 0x2402c,
+		0x24100, 0x2413c,
+		0x24190, 0x241c8,
+		0x24200, 0x24318,
+		0x24400, 0x24528,
+		0x24540, 0x24614,
+		0x25000, 0x25040,
+		0x2504c, 0x25060,
+		0x250c0, 0x250ec,
+		0x25200, 0x25268,
+		0x25270, 0x25284,
+		0x252fc, 0x25388,
+		0x25400, 0x25404,
+		0x25500, 0x25518,
+		0x2552c, 0x2553c,
+		0x25550, 0x25554,
+		0x25600, 0x25600,
+		0x25608, 0x25628,
+		0x25630, 0x2563c,
+		0x25700, 0x2571c,
+		0x25780, 0x2578c,
+		0x25800, 0x25c38,
+		0x25c80, 0x25d7c,
+		0x25e00, 0x25e04,
+		0x26000, 0x2602c,
+		0x26100, 0x2613c,
+		0x26190, 0x261c8,
+		0x26200, 0x26318,
+		0x26400, 0x26528,
+		0x26540, 0x26614,
+		0x27000, 0x27040,
+		0x2704c, 0x27060,
+		0x270c0, 0x270ec,
+		0x27200, 0x27268,
+		0x27270, 0x27284,
+		0x272fc, 0x27388,
+		0x27400, 0x27404,
+		0x27500, 0x27518,
+		0x2752c, 0x2753c,
+		0x27550, 0x27554,
+		0x27600, 0x27600,
+		0x27608, 0x27628,
+		0x27630, 0x2763c,
+		0x27700, 0x2771c,
+		0x27780, 0x2778c,
+		0x27800, 0x27c38,
+		0x27c80, 0x27d7c,
+		0x27e00, 0x27e04
+	};
+
+	int i;
+	struct adapter *ap = netdev2adap(dev);
+
+	regs->version = mk_adap_vers(ap);
+
+	memset(buf, 0, T4_REGMAP_SIZE);
+	for (i = 0; i < ARRAY_SIZE(reg_ranges); i += 2)
+		reg_block_dump(ap, buf, reg_ranges[i], reg_ranges[i + 1]);
+}
+
+static int restart_autoneg(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	if (!netif_running(dev))
+		return -EAGAIN;
+	if (p->link_cfg.autoneg != AUTONEG_ENABLE)
+		return -EINVAL;
+	t4_restart_aneg(p->adapter, 0, p->tx_chan);
+	return 0;
+}
+
+static int identify_port(struct net_device *dev, u32 data)
+{
+	if (data == 0)
+		data = 2;     /* default to 2 seconds */
+
+	return t4_identify_port(netdev2adap(dev), 0, netdev2pinfo(dev)->viid,
+				data * 5);
+}
+
+static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
+{
+	unsigned int v = 0;
+
+	if (type == FW_PORT_TYPE_BT_SGMII || type == FW_PORT_TYPE_BT_XAUI) {
+		v |= SUPPORTED_TP;
+		if (caps & FW_PORT_CAP_SPEED_100M)
+			v |= SUPPORTED_100baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseT_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseT_Full;
+	} else if (type == FW_PORT_TYPE_KX4 || type == FW_PORT_TYPE_KX) {
+		v |= SUPPORTED_Backplane;
+		if (caps & FW_PORT_CAP_SPEED_1G)
+			v |= SUPPORTED_1000baseKX_Full;
+		if (caps & FW_PORT_CAP_SPEED_10G)
+			v |= SUPPORTED_10000baseKX4_Full;
+	} else if (type == FW_PORT_TYPE_KR) {
+		v |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full;
+	} else if (type == FW_PORT_TYPE_FIBER) {
+		v |= SUPPORTED_FIBRE;
+	}
+
+	if (caps & FW_PORT_CAP_ANEG)
+		v |= SUPPORTED_Autoneg;
+	return v;
+}
+
+static unsigned int to_fw_linkcaps(unsigned int caps)
+{
+	unsigned int v = 0;
+
+	if (caps & ADVERTISED_100baseT_Full)
+		v |= FW_PORT_CAP_SPEED_100M;
+	if (caps & ADVERTISED_1000baseT_Full)
+		v |= FW_PORT_CAP_SPEED_1G;
+	if (caps & ADVERTISED_10000baseT_Full)
+		v |= FW_PORT_CAP_SPEED_10G;
+	return v;
+}
+
+static int get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	const struct port_info *p = netdev_priv(dev);
+
+	if (p->port_type == FW_PORT_TYPE_BT_SGMII ||
+	    p->port_type == FW_PORT_TYPE_BT_XAUI)
+		cmd->port = PORT_TP;
+	else if (p->port_type == FW_PORT_TYPE_FIBER)
+		cmd->port = PORT_FIBRE;
+	else if (p->port_type == FW_PORT_TYPE_TWINAX)
+		cmd->port = PORT_DA;
+	else
+		cmd->port = PORT_OTHER;
+
+	if (p->mdio_addr >= 0) {
+		cmd->phy_address = p->mdio_addr;
+		cmd->transceiver = XCVR_EXTERNAL;
+		cmd->mdio_support = p->port_type == FW_PORT_TYPE_BT_SGMII ?
+			MDIO_SUPPORTS_C22 : MDIO_SUPPORTS_C45;
+	} else {
+		cmd->phy_address = 0;  /* not really, but no better option */
+		cmd->transceiver = XCVR_INTERNAL;
+		cmd->mdio_support = 0;
+	}
+
+	cmd->supported = from_fw_linkcaps(p->port_type, p->link_cfg.supported);
+	cmd->advertising = from_fw_linkcaps(p->port_type,
+					    p->link_cfg.advertising);
+	cmd->speed = netif_carrier_ok(dev) ? p->link_cfg.speed : 0;
+	cmd->duplex = DUPLEX_FULL;
+	cmd->autoneg = p->link_cfg.autoneg;
+	cmd->maxtxpkt = 0;
+	cmd->maxrxpkt = 0;
+	return 0;
+}
+
+static unsigned int speed_to_caps(int speed)
+{
+	if (speed == SPEED_100)
+		return FW_PORT_CAP_SPEED_100M;
+	if (speed == SPEED_1000)
+		return FW_PORT_CAP_SPEED_1G;
+	if (speed == SPEED_10000)
+		return FW_PORT_CAP_SPEED_10G;
+	return 0;
+}
+
+static int set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
+{
+	unsigned int cap;
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_cfg;
+
+	if (cmd->duplex != DUPLEX_FULL)     /* only full-duplex supported */
+		return -EINVAL;
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		/*
+		 * PHY offers a single speed.  See if that's what's
+		 * being requested.
+		 */
+		if (cmd->autoneg == AUTONEG_DISABLE &&
+		    (lc->supported & speed_to_caps(cmd->speed)))
+			return 0;
+		return -EINVAL;
+	}
+
+	if (cmd->autoneg == AUTONEG_DISABLE) {
+		cap = speed_to_caps(cmd->speed);
+
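+		/* speeds of 1Gb/s and higher cannot be forced */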
+		if (!(lc->supported & cap) || cmd->speed == SPEED_1000 ||
+		    cmd->speed == SPEED_10000)
+			return -EINVAL;
+		lc->requested_speed = cap;
+		lc->advertising = 0;
+	} else {
+		cap = to_fw_linkcaps(cmd->advertising);
+		if (!(lc->supported & cap))
+			return -EINVAL;
+		lc->requested_speed = 0;
+		lc->advertising = cap | FW_PORT_CAP_ANEG;
+	}
+	lc->autoneg = cmd->autoneg;
+
+	if (netif_running(dev))
+		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+	return 0;
+}
+
+static void get_pauseparam(struct net_device *dev,
+			   struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	epause->autoneg = (p->link_cfg.requested_fc & PAUSE_AUTONEG) != 0;
+	epause->rx_pause = (p->link_cfg.fc & PAUSE_RX) != 0;
+	epause->tx_pause = (p->link_cfg.fc & PAUSE_TX) != 0;
+}
+
+static int set_pauseparam(struct net_device *dev,
+			  struct ethtool_pauseparam *epause)
+{
+	struct port_info *p = netdev_priv(dev);
+	struct link_config *lc = &p->link_cfg;
+
+	if (epause->autoneg == AUTONEG_DISABLE)
+		lc->requested_fc = 0;
+	else if (lc->supported & FW_PORT_CAP_ANEG)
+		lc->requested_fc = PAUSE_AUTONEG;
+	else
+		return -EINVAL;
+
+	if (epause->rx_pause)
+		lc->requested_fc |= PAUSE_RX;
+	if (epause->tx_pause)
+		lc->requested_fc |= PAUSE_TX;
+	if (netif_running(dev))
+		return t4_link_start(p->adapter, 0, p->tx_chan, lc);
+	return 0;
+}
+
+static u32 get_rx_csum(struct net_device *dev)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	return p->rx_offload & RX_CSO;
+}
+
+static int set_rx_csum(struct net_device *dev, u32 data)
+{
+	struct port_info *p = netdev_priv(dev);
+
+	if (data)
+		p->rx_offload |= RX_CSO;
+	else
+		p->rx_offload &= ~RX_CSO;
+	return 0;
+}
+
+static void get_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct sge *s = &pi->adapter->sge;
+
+	e->rx_max_pending = MAX_RX_BUFFERS;
+	e->rx_mini_max_pending = MAX_RSPQ_ENTRIES;
+	e->rx_jumbo_max_pending = 0;
+	e->tx_max_pending = MAX_TXQ_ENTRIES;
+
+	e->rx_pending = s->ethrxq[pi->first_qset].fl.size - 8;
+	e->rx_mini_pending = s->ethrxq[pi->first_qset].rspq.size;
+	e->rx_jumbo_pending = 0;
+	e->tx_pending = s->ethtxq[pi->first_qset].q.size;
+}
+
+static int set_sge_param(struct net_device *dev, struct ethtool_ringparam *e)
+{
+	int i;
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+	struct sge *s = &adapter->sge;
+
+	if (e->rx_pending > MAX_RX_BUFFERS || e->rx_jumbo_pending ||
+	    e->tx_pending > MAX_TXQ_ENTRIES ||
+	    e->rx_mini_pending > MAX_RSPQ_ENTRIES ||
+	    e->rx_mini_pending < MIN_RSPQ_ENTRIES ||
+	    e->rx_pending < MIN_FL_ENTRIES || e->tx_pending < MIN_TXQ_ENTRIES)
+		return -EINVAL;
+
+	if (adapter->flags & FULL_INIT_DONE)
+		return -EBUSY;
+
+	for (i = 0; i < pi->nqsets; ++i) {
+		s->ethtxq[pi->first_qset + i].q.size = e->tx_pending;
+		s->ethrxq[pi->first_qset + i].fl.size = e->rx_pending + 8;
+		s->ethrxq[pi->first_qset + i].rspq.size = e->rx_mini_pending;
+	}
+	return 0;
+}
+
+static int closest_timer(const struct sge *s, int time)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->timer_val); i++) {
+		delta = time - s->timer_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
+
+static int closest_thres(const struct sge *s, int thres)
+{
+	int i, delta, match = 0, min_delta = INT_MAX;
+
+	for (i = 0; i < ARRAY_SIZE(s->counter_val); i++) {
+		delta = thres - s->counter_val[i];
+		if (delta < 0)
+			delta = -delta;
+		if (delta < min_delta) {
+			min_delta = delta;
+			match = i;
+		}
+	}
+	return match;
+}
+
+/*
+ * Return a queue's interrupt hold-off time in us.  0 means no timer.
+ */
+static unsigned int qtimer_val(const struct adapter *adap,
+			       const struct sge_rspq *q)
+{
+	unsigned int idx = q->intr_params >> 1;
+
+	return idx < SGE_NTIMERS ? adap->sge.timer_val[idx] : 0;
+}
+
+/**
+ *	set_rxq_intr_params - set a queue's interrupt holdoff parameters
+ *	@adap: the adapter
+ *	@q: the Rx queue
+ *	@us: the hold-off time in us, or 0 to disable timer
+ *	@cnt: the hold-off packet count, or 0 to disable counter
+ *
+ *	Sets an Rx queue's interrupt hold-off time and packet count.  At least
+ *	one of the two needs to be enabled for the queue to generate interrupts.
+ */
+static int set_rxq_intr_params(struct adapter *adap, struct sge_rspq *q,
+			       unsigned int us, unsigned int cnt)
+{
+	if ((us | cnt) == 0)
+		cnt = 1;
+
+	if (cnt) {
+		int err;
+		u32 v, new_idx;
+
+		new_idx = closest_thres(&adap->sge, cnt);
+		if (q->desc && q->pktcnt_idx != new_idx) {
+			/* the queue has already been created, update it */
+			v = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DMAQ) |
+			    FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH) |
+			    FW_PARAMS_PARAM_YZ(q->cntxt_id);
+			err = t4_set_params(adap, 0, 0, 0, 1, &v, &new_idx);
+			if (err)
+				return err;
+		}
+		q->pktcnt_idx = new_idx;
+	}
+
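+	/* timer index 6 (SGE_NTIMERS) selects the "no timer" setting */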
+	us = us == 0 ? 6 : closest_timer(&adap->sge, us);
+	q->intr_params = QINTR_TIMER_IDX(us) | (cnt > 0 ? QINTR_CNT_EN : 0);
+	return 0;
+}
+
+static int set_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+
+	return set_rxq_intr_params(adap, &adap->sge.ethrxq[pi->first_qset].rspq,
+			c->rx_coalesce_usecs, c->rx_max_coalesced_frames);
+}
+
+static int get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
+{
+	const struct port_info *pi = netdev_priv(dev);
+	const struct adapter *adap = pi->adapter;
+	const struct sge_rspq *rq = &adap->sge.ethrxq[pi->first_qset].rspq;
+
+	c->rx_coalesce_usecs = qtimer_val(adap, rq);
+	c->rx_max_coalesced_frames = (rq->intr_params & QINTR_CNT_EN) ?
+		adap->sge.counter_val[rq->pktcnt_idx] : 0;
+	return 0;
+}
+
+/*
+ * Translate a physical EEPROM address to virtual.  The first 1K is accessed
+ * through virtual addresses starting at 31K, the rest is accessed through
+ * virtual addresses starting at 0.  This mapping is correct only for PF0.
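+ * For example, physical address 0 maps to virtual 0x7c00 (31K) while
+ * physical address 1024 maps to virtual 0.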
+ */
+static int eeprom_ptov(unsigned int phys_addr)
+{
+	if (phys_addr < 1024)
+		return phys_addr + (31 << 10);
+	if (phys_addr < EEPROMSIZE)
+		return phys_addr - 1024;
+	return -EINVAL;
+}
+
+/*
+ * The next two routines implement eeprom read/write from physical addresses.
+ * The physical->virtual translation is correct only for PF0.
+ */
+static int eeprom_rd_phys(struct adapter *adap, unsigned int phys_addr, u32 *v)
+{
+	int vaddr = eeprom_ptov(phys_addr);
+
+	if (vaddr >= 0)
+		vaddr = pci_read_vpd(adap->pdev, vaddr, sizeof(u32), v);
+	return vaddr < 0 ? vaddr : 0;
+}
+
+static int eeprom_wr_phys(struct adapter *adap, unsigned int phys_addr, u32 v)
+{
+	int vaddr = eeprom_ptov(phys_addr);
+
+	if (vaddr >= 0)
+		vaddr = pci_write_vpd(adap->pdev, vaddr, sizeof(u32), &v);
+	return vaddr < 0 ? vaddr : 0;
+}
+
+#define EEPROM_MAGIC 0x38E2F10C
+
+static int get_eeprom(struct net_device *dev, struct ethtool_eeprom *e,
+		      u8 *data)
+{
+	int i, err = 0;
+	struct adapter *adapter = netdev2adap(dev);
+	u8 *buf = kmalloc(EEPROMSIZE, GFP_KERNEL);
+
+	if (!buf)
+		return -ENOMEM;
+
+	e->magic = EEPROM_MAGIC;
+	for (i = e->offset & ~3; !err && i < e->offset + e->len; i += 4)
+		err = eeprom_rd_phys(adapter, i, (u32 *)&buf[i]);
+
+	if (!err)
+		memcpy(data, buf + e->offset, e->len);
+	kfree(buf);
+	return err;
+}
+
+static int set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
+		      u8 *data)
+{
+	u8 *buf;
+	int err = 0;
+	u32 aligned_offset, aligned_len, *p;
+	struct adapter *adapter = netdev2adap(dev);
+
+	if (eeprom->magic != EEPROM_MAGIC)
+		return -EINVAL;
+
+	aligned_offset = eeprom->offset & ~3;
+	aligned_len = (eeprom->len + (eeprom->offset & 3) + 3) & ~3;
+
+	if (aligned_offset != eeprom->offset || aligned_len != eeprom->len) {
+		/*
+		 * RMW possibly needed for first or last words.
+		 */
+		buf = kmalloc(aligned_len, GFP_KERNEL);
+		if (!buf)
+			return -ENOMEM;
+		err = eeprom_rd_phys(adapter, aligned_offset, (u32 *)buf);
+		if (!err && aligned_len > 4)
+			err = eeprom_rd_phys(adapter,
+					     aligned_offset + aligned_len - 4,
+					     (u32 *)&buf[aligned_len - 4]);
+		if (err)
+			goto out;
+		memcpy(buf + (eeprom->offset & 3), data, eeprom->len);
+	} else {
+		buf = data;
+	}
+
+	err = t4_seeprom_wp(adapter, false);
+	if (err)
+		goto out;
+
+	for (p = (u32 *)buf; !err && aligned_len; aligned_len -= 4, p++) {
+		err = eeprom_wr_phys(adapter, aligned_offset, *p);
+		aligned_offset += 4;
+	}
+
+	if (!err)
+		err = t4_seeprom_wp(adapter, true);
+out:
+	if (buf != data)
+		kfree(buf);
+	return err;
+}
+
+static int set_flash(struct net_device *netdev, struct ethtool_flash *ef)
+{
+	int ret;
+	const struct firmware *fw;
+	struct adapter *adap = netdev2adap(netdev);
+
+	ef->data[sizeof(ef->data) - 1] = '\0';
+	ret = request_firmware(&fw, ef->data, adap->pdev_dev);
+	if (ret < 0)
+		return ret;
+
+	ret = t4_load_fw(adap, fw->data, fw->size);
+	release_firmware(fw);
+	if (!ret)
+		dev_info(adap->pdev_dev, "loaded firmware %s\n", ef->data);
+	return ret;
+}
+
+#define WOL_SUPPORTED (WAKE_BCAST | WAKE_MAGIC)
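+/* CRC the pattern matcher uses to recognize broadcast frames */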
+#define BCAST_CRC 0xa0ccc1a6
+
+static void get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	wol->supported = WOL_SUPPORTED;
+	wol->wolopts = netdev2adap(dev)->wol;
+	memset(&wol->sopass, 0, sizeof(wol->sopass));
+}
+
+static int set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
+{
+	int err = 0;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (wol->wolopts & ~WOL_SUPPORTED)
+		return -EINVAL;
+	t4_wol_magic_enable(pi->adapter, pi->tx_chan,
+			    (wol->wolopts & WAKE_MAGIC) ? dev->dev_addr : NULL);
+	if (wol->wolopts & WAKE_BCAST) {
+		err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0xfe, ~0ULL,
+					~0ULL, 0, false);
+		if (!err)
+			err = t4_wol_pat_enable(pi->adapter, pi->tx_chan, 1,
+						~6ULL, ~0ULL, BCAST_CRC, true);
+	} else {
+		t4_wol_pat_enable(pi->adapter, pi->tx_chan, 0, 0, 0, 0, false);
+	}
+	return err;
+}
+
+static int set_tso(struct net_device *dev, u32 value)
+{
+	if (value)
+		dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
+	else
+		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6);
+	return 0;
+}
+
+static const struct ethtool_ops cxgb_ethtool_ops = {
+	.get_settings      = get_settings,
+	.set_settings      = set_settings,
+	.get_drvinfo       = get_drvinfo,
+	.get_msglevel      = get_msglevel,
+	.set_msglevel      = set_msglevel,
+	.get_ringparam     = get_sge_param,
+	.set_ringparam     = set_sge_param,
+	.get_coalesce      = get_coalesce,
+	.set_coalesce      = set_coalesce,
+	.get_eeprom_len    = get_eeprom_len,
+	.get_eeprom        = get_eeprom,
+	.set_eeprom        = set_eeprom,
+	.get_pauseparam    = get_pauseparam,
+	.set_pauseparam    = set_pauseparam,
+	.get_rx_csum       = get_rx_csum,
+	.set_rx_csum       = set_rx_csum,
+	.set_tx_csum       = ethtool_op_set_tx_ipv6_csum,
+	.set_sg            = ethtool_op_set_sg,
+	.get_link          = ethtool_op_get_link,
+	.get_strings       = get_strings,
+	.phys_id           = identify_port,
+	.nway_reset        = restart_autoneg,
+	.get_sset_count    = get_sset_count,
+	.get_ethtool_stats = get_stats,
+	.get_regs_len      = get_regs_len,
+	.get_regs          = get_regs,
+	.get_wol           = get_wol,
+	.set_wol           = set_wol,
+	.set_tso           = set_tso,
+	.flash_device      = set_flash,
+};
+
+/*
+ * debugfs support
+ */
+
+static int mem_open(struct inode *inode, struct file *file)
+{
+	file->private_data = inode->i_private;
+	return 0;
+}
+
+static ssize_t mem_read(struct file *file, char __user *buf, size_t count,
+			loff_t *ppos)
+{
+	loff_t pos = *ppos;
+	loff_t avail = file->f_path.dentry->d_inode->i_size;
+	unsigned int mem = (uintptr_t)file->private_data & 3;
+	struct adapter *adap = file->private_data - mem;
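+	/*
+	 * private_data is the adapter pointer plus the memory type (EDC0,
+	 * EDC1, or MC) encoded in its low 2 bits; see add_debugfs_mem().
+	 */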
+
+	if (pos < 0)
+		return -EINVAL;
+	if (pos >= avail)
+		return 0;
+	if (count > avail - pos)
+		count = avail - pos;
+
+	while (count) {
+		size_t len;
+		int ret, ofst;
+		__be32 data[16];
+
+		if (mem == MEM_MC)
+			ret = t4_mc_read(adap, pos, data, NULL);
+		else
+			ret = t4_edc_read(adap, mem, pos, data, NULL);
+		if (ret)
+			return ret;
+
+		ofst = pos % sizeof(data);
+		len = min(count, sizeof(data) - ofst);
+		if (copy_to_user(buf, (u8 *)data + ofst, len))
+			return -EFAULT;
+
+		buf += len;
+		pos += len;
+		count -= len;
+	}
+	count = pos - *ppos;
+	*ppos = pos;
+	return count;
+}
+
+static const struct file_operations mem_debugfs_fops = {
+	.owner   = THIS_MODULE,
+	.open    = mem_open,
+	.read    = mem_read,
+};
+
+static void __devinit add_debugfs_mem(struct adapter *adap, const char *name,
+				      unsigned int idx, unsigned int size_mb)
+{
+	struct dentry *de;
+
+	de = debugfs_create_file(name, S_IRUSR, adap->debugfs_root,
+				 (void *)adap + idx, &mem_debugfs_fops);
+	if (de && de->d_inode)
+		de->d_inode->i_size = size_mb << 20;
+}
+
+static int __devinit setup_debugfs(struct adapter *adap)
+{
+	int i;
+
+	if (IS_ERR_OR_NULL(adap->debugfs_root))
+		return -1;
+
+	i = t4_read_reg(adap, MA_TARGET_MEM_ENABLE);
+	if (i & EDRAM0_ENABLE)
+		add_debugfs_mem(adap, "edc0", MEM_EDC0, 5);
+	if (i & EDRAM1_ENABLE)
+		add_debugfs_mem(adap, "edc1", MEM_EDC1, 5);
+	if (i & EXT_MEM_ENABLE)
+		add_debugfs_mem(adap, "mc", MEM_MC,
+			EXT_MEM_SIZE_GET(t4_read_reg(adap, MA_EXT_MEMORY_BAR)));
+	if (adap->l2t)
+		debugfs_create_file("l2t", S_IRUSR, adap->debugfs_root, adap,
+				    &t4_l2t_fops);
+	return 0;
+}
+
+/*
+ * upper-layer driver support
+ */
+
+/*
+ * Allocate an active-open TID and set it to the supplied value.
+ */
+int cxgb4_alloc_atid(struct tid_info *t, void *data)
+{
+	int atid = -1;
+
+	spin_lock_bh(&t->atid_lock);
+	if (t->afree) {
+		union aopen_entry *p = t->afree;
+
+		atid = p - t->atid_tab;
+		t->afree = p->next;
+		p->data = data;
+		t->atids_in_use++;
+	}
+	spin_unlock_bh(&t->atid_lock);
+	return atid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_atid);
+
+/*
+ * Release an active-open TID.
+ */
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid)
+{
+	union aopen_entry *p = &t->atid_tab[atid];
+
+	spin_lock_bh(&t->atid_lock);
+	p->next = t->afree;
+	t->afree = p;
+	t->atids_in_use--;
+	spin_unlock_bh(&t->atid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_atid);
+
+/*
+ * Allocate a server TID and set it to the supplied value.
+ */
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data)
+{
+	int stid;
+
+	spin_lock_bh(&t->stid_lock);
+	if (family == PF_INET) {
+		stid = find_first_zero_bit(t->stid_bmap, t->nstids);
+		if (stid < t->nstids)
+			__set_bit(stid, t->stid_bmap);
+		else
+			stid = -1;
+	} else {
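+		/* an IPv6 server needs a contiguous block of 4 stids */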
+		stid = bitmap_find_free_region(t->stid_bmap, t->nstids, 2);
+		if (stid < 0)
+			stid = -1;
+	}
+	if (stid >= 0) {
+		t->stid_tab[stid].data = data;
+		stid += t->stid_base;
+		t->stids_in_use++;
+	}
+	spin_unlock_bh(&t->stid_lock);
+	return stid;
+}
+EXPORT_SYMBOL(cxgb4_alloc_stid);
+
+/*
+ * Release a server TID.
+ */
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family)
+{
+	stid -= t->stid_base;
+	spin_lock_bh(&t->stid_lock);
+	if (family == PF_INET)
+		__clear_bit(stid, t->stid_bmap);
+	else
+		bitmap_release_region(t->stid_bmap, stid, 2);
+	t->stid_tab[stid].data = NULL;
+	t->stids_in_use--;
+	spin_unlock_bh(&t->stid_lock);
+}
+EXPORT_SYMBOL(cxgb4_free_stid);
+
+/*
+ * Populate a TID_RELEASE WR.  Caller must properly size the skb.
+ */
+static void mk_tid_release(struct sk_buff *skb, unsigned int chan,
+			   unsigned int tid)
+{
+	struct cpl_tid_release *req;
+
+	set_wr_txq(skb, CPL_PRIORITY_SETUP, chan);
+	req = (struct cpl_tid_release *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, tid);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
+}
+
+/*
+ * Queue a TID release request and if necessary schedule a work queue to
+ * process it.
+ */
+void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+			     unsigned int tid)
+{
+	void **p = &t->tid_tab[tid];
+	struct adapter *adap = container_of(t, struct adapter, tids);
+
+	spin_lock_bh(&adap->tid_release_lock);
+	*p = adap->tid_release_head;
+	/* Low 2 bits encode the Tx channel number */
+	adap->tid_release_head = (void **)((uintptr_t)p | chan);
+	if (!adap->tid_release_task_busy) {
+		adap->tid_release_task_busy = true;
+		schedule_work(&adap->tid_release_task);
+	}
+	spin_unlock_bh(&adap->tid_release_lock);
+}
+EXPORT_SYMBOL(cxgb4_queue_tid_release);
+
+/*
+ * Process the list of pending TID release requests.
+ */
+static void process_tid_release_list(struct work_struct *work)
+{
+	struct sk_buff *skb;
+	struct adapter *adap;
+
+	adap = container_of(work, struct adapter, tid_release_task);
+
+	spin_lock_bh(&adap->tid_release_lock);
+	while (adap->tid_release_head) {
+		void **p = adap->tid_release_head;
+		unsigned int chan = (uintptr_t)p & 3;
+		p = (void *)p - chan;
+
+		adap->tid_release_head = *p;
+		*p = NULL;
+		spin_unlock_bh(&adap->tid_release_lock);
+
+		while (!(skb = alloc_skb(sizeof(struct cpl_tid_release),
+					 GFP_KERNEL)))
+			schedule_timeout_uninterruptible(1);
+
+		mk_tid_release(skb, chan, p - adap->tids.tid_tab);
+		t4_ofld_send(adap, skb);
+		spin_lock_bh(&adap->tid_release_lock);
+	}
+	adap->tid_release_task_busy = false;
+	spin_unlock_bh(&adap->tid_release_lock);
+}
+
+/*
+ * Release a TID and inform HW.  If we are unable to allocate the release
+ * message we defer to a work queue.
+ */
+void cxgb4_remove_tid(struct tid_info *t, unsigned int chan, unsigned int tid)
+{
+	void *old;
+	struct sk_buff *skb;
+	struct adapter *adap = container_of(t, struct adapter, tids);
+
+	old = t->tid_tab[tid];
+	skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
+	if (likely(skb)) {
+		t->tid_tab[tid] = NULL;
+		mk_tid_release(skb, chan, tid);
+		t4_ofld_send(adap, skb);
+	} else {
+		cxgb4_queue_tid_release(t, chan, tid);
+	}
+	if (old)
+		atomic_dec(&t->tids_in_use);
+}
+EXPORT_SYMBOL(cxgb4_remove_tid);
+
+/*
+ * Allocate and initialize the TID tables.  Returns 0 on success.
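+ * The TID, ATID, and STID tables and the STID bitmap share a single
+ * allocation, laid out in that order.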
+ */
+static int tid_init(struct tid_info *t)
+{
+	size_t size;
+	unsigned int natids = t->natids;
+
+	size = t->ntids * sizeof(*t->tid_tab) + natids * sizeof(*t->atid_tab) +
+	       t->nstids * sizeof(*t->stid_tab) +
+	       BITS_TO_LONGS(t->nstids) * sizeof(long);
+	t->tid_tab = t4_alloc_mem(size);
+	if (!t->tid_tab)
+		return -ENOMEM;
+
+	t->atid_tab = (union aopen_entry *)&t->tid_tab[t->ntids];
+	t->stid_tab = (struct serv_entry *)&t->atid_tab[natids];
+	t->stid_bmap = (unsigned long *)&t->stid_tab[t->nstids];
+	spin_lock_init(&t->stid_lock);
+	spin_lock_init(&t->atid_lock);
+
+	t->stids_in_use = 0;
+	t->afree = NULL;
+	t->atids_in_use = 0;
+	atomic_set(&t->tids_in_use, 0);
+
+	/* Setup the free list for atid_tab and clear the stid bitmap. */
+	if (natids) {
+		while (--natids)
+			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
+		t->afree = t->atid_tab;
+	}
+	bitmap_zero(t->stid_bmap, t->nstids);
+	return 0;
+}
+
+/**
+ *	cxgb4_create_server - create an IP server
+ *	@dev: the device
+ *	@stid: the server TID
+ *	@sip: local IP address to bind server to
+ *	@sport: the server's TCP port
+ *	@queue: queue to direct messages from this server to
+ *
+ *	Create an IP server for the given port and address.
+ *	Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+			__be32 sip, __be16 sport, unsigned int queue)
+{
+	unsigned int chan;
+	struct sk_buff *skb;
+	struct adapter *adap;
+	struct cpl_pass_open_req *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	adap = netdev2adap(dev);
+	req = (struct cpl_pass_open_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ, stid));
+	req->local_port = sport;
+	req->peer_port = htons(0);
+	req->local_ip = sip;
+	req->peer_ip = htonl(0);
+	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+	req->opt0 = cpu_to_be64(TX_CHAN(chan));
+	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server);
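+/*
+ * A ULD typically allocates the server TID first; a sketch, with
+ * hypothetical ctx and queue values:
+ *
+ *	stid = cxgb4_alloc_stid(lli->tids, PF_INET, ctx);
+ *	if (stid >= 0)
+ *		err = cxgb4_create_server(dev, stid, sip, sport, queue);
+ */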
+
+/**
+ *	cxgb4_create_server6 - create an IPv6 server
+ *	@dev: the device
+ *	@stid: the server TID
+ *	@sip: local IPv6 address to bind server to
+ *	@sport: the server's TCP port
+ *	@queue: queue to direct messages from this server to
+ *
+ *	Create an IPv6 server for the given port and address.
+ *	Returns <0 on error and one of the %NET_XMIT_* values on success.
+ */
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+			 const struct in6_addr *sip, __be16 sport,
+			 unsigned int queue)
+{
+	unsigned int chan;
+	struct sk_buff *skb;
+	struct adapter *adap;
+	struct cpl_pass_open_req6 *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_KERNEL);
+	if (!skb)
+		return -ENOMEM;
+
+	adap = netdev2adap(dev);
+	req = (struct cpl_pass_open_req6 *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_PASS_OPEN_REQ6, stid));
+	req->local_port = sport;
+	req->peer_port = htons(0);
+	req->local_ip_hi = *(__be64 *)(sip->s6_addr);
+	req->local_ip_lo = *(__be64 *)(sip->s6_addr + 8);
+	req->peer_ip_hi = cpu_to_be64(0);
+	req->peer_ip_lo = cpu_to_be64(0);
+	chan = netdev2pinfo(adap->sge.ingr_map[queue]->netdev)->tx_chan;
+	req->opt0 = cpu_to_be64(TX_CHAN(chan));
+	req->opt1 = cpu_to_be64(CONN_POLICY_ASK |
+				SYN_RSS_ENABLE | SYN_RSS_QUEUE(queue));
+	return t4_mgmt_tx(adap, skb);
+}
+EXPORT_SYMBOL(cxgb4_create_server6);
+
+/**
+ *	cxgb4_best_mtu - find the entry in the MTU table closest to an MTU
+ *	@mtus: the HW MTU table
+ *	@mtu: the target MTU
+ *	@idx: index of selected entry in the MTU table
+ *
+ *	Returns the index and the value in the HW MTU table that is closest to
+ *	but does not exceed @mtu, unless @mtu is smaller than any value in the
+ *	table, in which case that smallest available value is selected.
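+ *
+ *	For example, given a (hypothetical) MTU table { 576, 1500, 9000 }
+ *	and @mtu = 4000, the function returns 1500 and sets *@idx to 1.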
+ */
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+			    unsigned int *idx)
+{
+	unsigned int i = 0;
+
+	while (i < NMTUS - 1 && mtus[i + 1] <= mtu)
+		++i;
+	if (idx)
+		*idx = i;
+	return mtus[i];
+}
+EXPORT_SYMBOL(cxgb4_best_mtu);
+
+/**
+ *	cxgb4_port_chan - get the HW channel of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the HW Tx channel of the given port.
+ */
+unsigned int cxgb4_port_chan(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->tx_chan;
+}
+EXPORT_SYMBOL(cxgb4_port_chan);
+
+/**
+ *	cxgb4_port_viid - get the VI id of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the VI id of the given port.
+ */
+unsigned int cxgb4_port_viid(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->viid;
+}
+EXPORT_SYMBOL(cxgb4_port_viid);
+
+/**
+ *	cxgb4_port_idx - get the index of a port
+ *	@dev: the net device for the port
+ *
+ *	Return the index of the given port.
+ */
+unsigned int cxgb4_port_idx(const struct net_device *dev)
+{
+	return netdev2pinfo(dev)->port_id;
+}
+EXPORT_SYMBOL(cxgb4_port_idx);
+
+/**
+ *	cxgb4_netdev_by_hwid - return the net device of a HW port
+ *	@pdev: identifies the adapter
+ *	@id: the HW port id
+ *
+ *	Return the net device associated with the interface with the given HW
+ *	id.
+ */
+struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id)
+{
+	const struct adapter *adap = pci_get_drvdata(pdev);
+
+	if (!adap || id >= NCHAN)
+		return NULL;
+	id = adap->chan_map[id];
+	return id < MAX_NPORTS ? adap->port[id] : NULL;
+}
+EXPORT_SYMBOL(cxgb4_netdev_by_hwid);
+
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6)
+{
+	struct adapter *adap = pci_get_drvdata(pdev);
+
+	spin_lock(&adap->stats_lock);
+	t4_tp_get_tcp_stats(adap, v4, v6);
+	spin_unlock(&adap->stats_lock);
+}
+EXPORT_SYMBOL(cxgb4_get_tcp_stats);
+
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+		      const unsigned int *pgsz_order)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+	t4_write_reg(adap, ULP_RX_ISCSI_TAGMASK, tag_mask);
+	t4_write_reg(adap, ULP_RX_ISCSI_PSZ, HPZ0(pgsz_order[0]) |
+		     HPZ1(pgsz_order[1]) | HPZ2(pgsz_order[2]) |
+		     HPZ3(pgsz_order[3]));
+}
+EXPORT_SYMBOL(cxgb4_iscsi_init);
+
+static struct pci_driver cxgb4_driver;
+
+static void check_neigh_update(struct neighbour *neigh)
+{
+	const struct device *parent;
+	const struct net_device *netdev = neigh->dev;
+
+	if (netdev->priv_flags & IFF_802_1Q_VLAN)
+		netdev = vlan_dev_real_dev(netdev);
+	parent = netdev->dev.parent;
+	if (parent && parent->driver == &cxgb4_driver.driver)
+		t4_l2t_update(dev_get_drvdata(parent), neigh);
+}
+
+static int netevent_cb(struct notifier_block *nb, unsigned long event,
+		       void *data)
+{
+	switch (event) {
+	case NETEVENT_NEIGH_UPDATE:
+		check_neigh_update(data);
+		break;
+	case NETEVENT_PMTU_UPDATE:
+	case NETEVENT_REDIRECT:
+	default:
+		break;
+	}
+	return 0;
+}
+
+static bool netevent_registered;
+static struct notifier_block cxgb4_netevent_nb = {
+	.notifier_call = netevent_cb
+};
+
+static void uld_attach(struct adapter *adap, unsigned int uld)
+{
+	void *handle;
+	struct cxgb4_lld_info lli;
+
+	lli.pdev = adap->pdev;
+	lli.l2t = adap->l2t;
+	lli.tids = &adap->tids;
+	lli.ports = adap->port;
+	lli.vr = &adap->vres;
+	lli.mtus = adap->params.mtus;
+	if (uld == CXGB4_ULD_RDMA) {
+		lli.rxq_ids = adap->sge.rdma_rxq;
+		lli.nrxq = adap->sge.rdmaqs;
+	} else if (uld == CXGB4_ULD_ISCSI) {
+		lli.rxq_ids = adap->sge.ofld_rxq;
+		lli.nrxq = adap->sge.ofldqsets;
+	}
+	lli.ntxq = adap->sge.ofldqsets;
+	lli.nchan = adap->params.nports;
+	lli.nports = adap->params.nports;
+	lli.wr_cred = adap->params.ofldq_wr_cred;
+	lli.adapter_type = adap->params.rev;
+	lli.iscsi_iolen = MAXRXDATA_GET(t4_read_reg(adap, TP_PARA_REG2));
+	lli.udb_density = 1 << QUEUESPERPAGEPF0_GET(
+			t4_read_reg(adap, SGE_EGRESS_QUEUES_PER_PAGE_PF));
+	lli.ucq_density = 1 << QUEUESPERPAGEPF0_GET(
+			t4_read_reg(adap, SGE_INGRESS_QUEUES_PER_PAGE_PF));
+	lli.gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS);
+	lli.db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL);
+	lli.fw_vers = adap->params.fw_vers;
+
+	handle = ulds[uld].add(&lli);
+	if (IS_ERR(handle)) {
+		dev_warn(adap->pdev_dev,
+			 "could not attach to the %s driver, error %ld\n",
+			 uld_str[uld], PTR_ERR(handle));
+		return;
+	}
+
+	adap->uld_handle[uld] = handle;
+
+	if (!netevent_registered) {
+		register_netevent_notifier(&cxgb4_netevent_nb);
+		netevent_registered = true;
+	}
+}
+
+static void attach_ulds(struct adapter *adap)
+{
+	unsigned int i;
+
+	mutex_lock(&uld_mutex);
+	list_add_tail(&adap->list_node, &adapter_list);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (ulds[i].add)
+			uld_attach(adap, i);
+	mutex_unlock(&uld_mutex);
+}
+
+static void detach_ulds(struct adapter *adap)
+{
+	unsigned int i;
+
+	mutex_lock(&uld_mutex);
+	list_del(&adap->list_node);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (adap->uld_handle[i]) {
+			ulds[i].state_change(adap->uld_handle[i],
+					     CXGB4_STATE_DETACH);
+			adap->uld_handle[i] = NULL;
+		}
+	if (netevent_registered && list_empty(&adapter_list)) {
+		unregister_netevent_notifier(&cxgb4_netevent_nb);
+		netevent_registered = false;
+	}
+	mutex_unlock(&uld_mutex);
+}
+
+static void notify_ulds(struct adapter *adap, enum cxgb4_state new_state)
+{
+	unsigned int i;
+
+	mutex_lock(&uld_mutex);
+	for (i = 0; i < CXGB4_ULD_MAX; i++)
+		if (adap->uld_handle[i])
+			ulds[i].state_change(adap->uld_handle[i], new_state);
+	mutex_unlock(&uld_mutex);
+}
+
+/**
+ *	cxgb4_register_uld - register an upper-layer driver
+ *	@type: the ULD type
+ *	@p: the ULD methods
+ *
+ *	Registers an upper-layer driver with this driver and notifies the ULD
+ *	about any presently available devices that support its type.  Returns
+ *	%-EBUSY if a ULD of the same type is already registered.
+ */
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p)
+{
+	int ret = 0;
+	struct adapter *adap;
+
+	if (type >= CXGB4_ULD_MAX)
+		return -EINVAL;
+	mutex_lock(&uld_mutex);
+	if (ulds[type].add) {
+		ret = -EBUSY;
+		goto out;
+	}
+	ulds[type] = *p;
+	list_for_each_entry(adap, &adapter_list, list_node)
+		uld_attach(adap, type);
+out:	mutex_unlock(&uld_mutex);
+	return ret;
+}
+EXPORT_SYMBOL(cxgb4_register_uld);
+
+/**
+ *	cxgb4_unregister_uld - unregister an upper-layer driver
+ *	@type: the ULD type
+ *
+ *	Unregisters an existing upper-layer driver.
+ */
+int cxgb4_unregister_uld(enum cxgb4_uld type)
+{
+	struct adapter *adap;
+
+	if (type >= CXGB4_ULD_MAX)
+		return -EINVAL;
+	mutex_lock(&uld_mutex);
+	list_for_each_entry(adap, &adapter_list, list_node)
+		adap->uld_handle[type] = NULL;
+	ulds[type].add = NULL;
+	mutex_unlock(&uld_mutex);
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_unregister_uld);
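+
+/*
+ * A minimal usage sketch of the registration interface above; not part of
+ * this driver.  Only cxgb4_register_uld(), cxgb4_unregister_uld() and
+ * struct cxgb4_uld_info are real, the my_uld_* names are hypothetical.
+ * The pointer returned by .add becomes the ULD handle, or an ERR_PTR on
+ * failure (see uld_attach above):
+ *
+ *	static void *my_uld_add(const struct cxgb4_lld_info *lld)
+ *	{
+ *		struct my_uld_dev *ud = kzalloc(sizeof(*ud), GFP_KERNEL);
+ *
+ *		if (!ud)
+ *			return ERR_PTR(-ENOMEM);
+ *		ud->nports = lld->nports;
+ *		return ud;
+ *	}
+ *
+ *	static const struct cxgb4_uld_info my_uld_info = {
+ *		.name         = "my_uld",
+ *		.add          = my_uld_add,
+ *		.rx_handler   = my_uld_rx_handler,
+ *		.state_change = my_uld_state_change,
+ *	};
+ *
+ *	err = cxgb4_register_uld(CXGB4_ULD_ISCSI, &my_uld_info);
+ *	...
+ *	cxgb4_unregister_uld(CXGB4_ULD_ISCSI);
+ */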
+
+/**
+ *	cxgb_up - enable the adapter
+ *	@adap: adapter being enabled
+ *
+ *	Called when the first port is enabled, this function performs the
+ *	actions necessary to make an adapter operational, such as completing
+ *	the initialization of HW modules, and enabling interrupts.
+ *
+ *	Must be called with the rtnl lock held.
+ */
+static int cxgb_up(struct adapter *adap)
+{
+	int err = 0;
+
+	if (!(adap->flags & FULL_INIT_DONE)) {
+		err = setup_sge_queues(adap);
+		if (err)
+			goto out;
+		err = setup_rss(adap);
+		if (err) {
+			t4_free_sge_resources(adap);
+			goto out;
+		}
+		if (adap->flags & USING_MSIX)
+			name_msix_vecs(adap);
+		adap->flags |= FULL_INIT_DONE;
+	}
+
+	if (adap->flags & USING_MSIX) {
+		err = request_irq(adap->msix_info[0].vec, t4_nondata_intr, 0,
+				  adap->msix_info[0].desc, adap);
+		if (err)
+			goto irq_err;
+
+		err = request_msix_queue_irqs(adap);
+		if (err) {
+			free_irq(adap->msix_info[0].vec, adap);
+			goto irq_err;
+		}
+	} else {
+		err = request_irq(adap->pdev->irq, t4_intr_handler(adap),
+				  (adap->flags & USING_MSI) ? 0 : IRQF_SHARED,
+				  adap->name, adap);
+		if (err)
+			goto irq_err;
+	}
+	enable_rx(adap);
+	t4_sge_start(adap);
+	t4_intr_enable(adap);
+	notify_ulds(adap, CXGB4_STATE_UP);
+ out:
+	return err;
+ irq_err:
+	dev_err(adap->pdev_dev, "request_irq failed, err %d\n", err);
+	goto out;
+}
+
+static void cxgb_down(struct adapter *adapter)
+{
+	t4_intr_disable(adapter);
+	cancel_work_sync(&adapter->tid_release_task);
+	adapter->tid_release_task_busy = false;
+
+	if (adapter->flags & USING_MSIX) {
+		free_msix_queue_irqs(adapter);
+		free_irq(adapter->msix_info[0].vec, adapter);
+	} else
+		free_irq(adapter->pdev->irq, adapter);
+	quiesce_rx(adapter);
+}
+
+/*
+ * net_device operations
+ */
+static int cxgb_open(struct net_device *dev)
+{
+	int err;
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	if (!adapter->open_device_map) {
+		err = cxgb_up(adapter);
+		if (err < 0)
+			return err;
+	}
+
+	dev->real_num_tx_queues = pi->nqsets;
+	set_bit(pi->tx_chan, &adapter->open_device_map);
+	link_start(dev);
+	netif_tx_start_all_queues(dev);
+	return 0;
+}
+
+static int cxgb_close(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adapter = pi->adapter;
+
+	netif_tx_stop_all_queues(dev);
+	netif_carrier_off(dev);
+	t4_enable_vi(adapter, 0, pi->viid, false, false);
+
+	clear_bit(pi->tx_chan, &adapter->open_device_map);
+
+	if (!adapter->open_device_map)
+		cxgb_down(adapter);
+	return 0;
+}
+
+static struct net_device_stats *cxgb_get_stats(struct net_device *dev)
+{
+	struct port_stats stats;
+	struct port_info *p = netdev_priv(dev);
+	struct adapter *adapter = p->adapter;
+	struct net_device_stats *ns = &dev->stats;
+
+	spin_lock(&adapter->stats_lock);
+	t4_get_port_stats(adapter, p->tx_chan, &stats);
+	spin_unlock(&adapter->stats_lock);
+
+	ns->tx_bytes   = stats.tx_octets;
+	ns->tx_packets = stats.tx_frames;
+	ns->rx_bytes   = stats.rx_octets;
+	ns->rx_packets = stats.rx_frames;
+	ns->multicast  = stats.rx_mcast_frames;
+
+	/* detailed rx_errors */
+	ns->rx_length_errors = stats.rx_jabber + stats.rx_too_long +
+			       stats.rx_runt;
+	ns->rx_over_errors   = 0;
+	ns->rx_crc_errors    = stats.rx_fcs_err;
+	ns->rx_frame_errors  = stats.rx_symbol_err;
+	ns->rx_fifo_errors   = stats.rx_ovflow0 + stats.rx_ovflow1 +
+			       stats.rx_ovflow2 + stats.rx_ovflow3 +
+			       stats.rx_trunc0 + stats.rx_trunc1 +
+			       stats.rx_trunc2 + stats.rx_trunc3;
+	ns->rx_missed_errors = 0;
+
+	/* detailed tx_errors */
+	ns->tx_aborted_errors   = 0;
+	ns->tx_carrier_errors   = 0;
+	ns->tx_fifo_errors      = 0;
+	ns->tx_heartbeat_errors = 0;
+	ns->tx_window_errors    = 0;
+
+	ns->tx_errors = stats.tx_error_frames;
+	ns->rx_errors = stats.rx_symbol_err + stats.rx_fcs_err +
+		ns->rx_length_errors + stats.rx_len_err + ns->rx_fifo_errors;
+	return ns;
+}
+
+static int cxgb_ioctl(struct net_device *dev, struct ifreq *req, int cmd)
+{
+	int ret = 0, prtad, devad;
+	struct port_info *pi = netdev_priv(dev);
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&req->ifr_data;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		if (pi->mdio_addr < 0)
+			return -EOPNOTSUPP;
+		data->phy_id = pi->mdio_addr;
+		break;
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		if (mdio_phy_id_is_c45(data->phy_id)) {
+			prtad = mdio_phy_id_prtad(data->phy_id);
+			devad = mdio_phy_id_devad(data->phy_id);
+		} else if (data->phy_id < 32) {
+			prtad = data->phy_id;
+			devad = 0;
+			data->reg_num &= 0x1f;
+		} else
+			return -EINVAL;
+
+		if (cmd == SIOCGMIIREG)
+			ret = t4_mdio_rd(pi->adapter, 0, prtad, devad,
+					 data->reg_num, &data->val_out);
+		else
+			ret = t4_mdio_wr(pi->adapter, 0, prtad, devad,
+					 data->reg_num, data->val_in);
+		break;
+	default:
+		return -EOPNOTSUPP;
+	}
+	return ret;
+}
+
+static void cxgb_set_rxmode(struct net_device *dev)
+{
+	/* unfortunately we can't return errors to the stack */
+	set_rxmode(dev, -1, false);
+}
+
+static int cxgb_change_mtu(struct net_device *dev, int new_mtu)
+{
+	int ret;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (new_mtu < 81 || new_mtu > MAX_MTU)         /* accommodate SACK */
+		return -EINVAL;
+	ret = t4_set_rxmode(pi->adapter, 0, pi->viid, new_mtu, -1, -1, -1,
+			    true);
+	if (!ret)
+		dev->mtu = new_mtu;
+	return ret;
+}
+
+static int cxgb_set_mac_addr(struct net_device *dev, void *p)
+{
+	int ret;
+	struct sockaddr *addr = p;
+	struct port_info *pi = netdev_priv(dev);
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EINVAL;
+
+	ret = t4_change_mac(pi->adapter, 0, pi->viid, pi->xact_addr_filt,
+			    addr->sa_data, true, true);
+	if (ret < 0)
+		return ret;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+	pi->xact_addr_filt = ret;
+	return 0;
+}
+
+static void vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
+{
+	struct port_info *pi = netdev_priv(dev);
+
+	pi->vlan_grp = grp;
+	t4_set_vlan_accel(pi->adapter, 1 << pi->tx_chan, grp != NULL);
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+static void cxgb_netpoll(struct net_device *dev)
+{
+	struct port_info *pi = netdev_priv(dev);
+	struct adapter *adap = pi->adapter;
+
+	if (adap->flags & USING_MSIX) {
+		int i;
+		struct sge_eth_rxq *rx = &adap->sge.ethrxq[pi->first_qset];
+
+		for (i = pi->nqsets; i; i--, rx++)
+			t4_sge_intr_msix(0, &rx->rspq);
+	} else
+		t4_intr_handler(adap)(0, adap);
+}
+#endif
+
+static const struct net_device_ops cxgb4_netdev_ops = {
+	.ndo_open             = cxgb_open,
+	.ndo_stop             = cxgb_close,
+	.ndo_start_xmit       = t4_eth_xmit,
+	.ndo_get_stats        = cxgb_get_stats,
+	.ndo_set_rx_mode      = cxgb_set_rxmode,
+	.ndo_set_mac_address  = cxgb_set_mac_addr,
+	.ndo_validate_addr    = eth_validate_addr,
+	.ndo_do_ioctl         = cxgb_ioctl,
+	.ndo_change_mtu       = cxgb_change_mtu,
+	.ndo_vlan_rx_register = vlan_rx_register,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller  = cxgb_netpoll,
+#endif
+};
+
+void t4_fatal_err(struct adapter *adap)
+{
+	t4_set_reg_field(adap, SGE_CONTROL, GLOBALENABLE, 0);
+	t4_intr_disable(adap);
+	dev_alert(adap->pdev_dev, "encountered fatal error, adapter stopped\n");
+}
+
+static void setup_memwin(struct adapter *adap)
+{
+	u32 bar0;
+
+	bar0 = pci_resource_start(adap->pdev, 0);  /* truncation intentional */
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 0),
+		     (bar0 + MEMWIN0_BASE) | BIR(0) |
+		     WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 1),
+		     (bar0 + MEMWIN1_BASE) | BIR(0) |
+		     WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
+	t4_write_reg(adap, PCIE_MEM_ACCESS_REG(PCIE_MEM_ACCESS_BASE_WIN, 2),
+		     (bar0 + MEMWIN2_BASE) | BIR(0) |
+		     WINDOW(ilog2(MEMWIN2_APERTURE) - 10));
+}
+
+/*
+ * Max # of ATIDs.  The absolute HW max is 16K but we keep it lower.
+ */
+#define MAX_ATIDS 8192U
+
+/*
+ * Phase 0 of initialization: contact FW, obtain config, perform basic init.
+ */
+static int adap_init0(struct adapter *adap)
+{
+	int ret;
+	u32 v, port_vec;
+	enum dev_state state;
+	u32 params[7], val[7];
+	struct fw_caps_config_cmd c;
+
+	ret = t4_check_fw_version(adap);
+	if (ret == -EINVAL || ret > 0) {
+		if (upgrade_fw(adap) >= 0)             /* recache FW version */
+			ret = t4_check_fw_version(adap);
+	}
+	if (ret < 0)
+		return ret;
+
+	/* contact FW, request master */
+	ret = t4_fw_hello(adap, 0, 0, MASTER_MUST, &state);
+	if (ret < 0) {
+		dev_err(adap->pdev_dev, "could not connect to FW, error %d\n",
+			ret);
+		return ret;
+	}
+
+	/* reset device */
+	ret = t4_fw_reset(adap, 0, PIORSTMODE | PIORST);
+	if (ret < 0)
+		goto bye;
+
+	/* get device capabilities */
+	memset(&c, 0, sizeof(c));
+	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+			      FW_CMD_REQUEST | FW_CMD_READ);
+	c.retval_len16 = htonl(FW_LEN16(c));
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	if (ret < 0)
+		goto bye;
+
+	/* select capabilities we'll be using */
+	if (c.niccaps & htons(FW_CAPS_CONFIG_NIC_VM)) {
+		if (!vf_acls)
+			c.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
+		else
+			c.niccaps = htons(FW_CAPS_CONFIG_NIC_VM);
+	} else if (vf_acls) {
+		dev_err(adap->pdev_dev, "virtualization ACLs not supported");
+		goto bye;
+	}
+	c.op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
+			      FW_CMD_REQUEST | FW_CMD_WRITE);
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), NULL);
+	if (ret < 0)
+		goto bye;
+
+	ret = t4_config_glbl_rss(adap, 0,
+				 FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL,
+				 FW_RSS_GLB_CONFIG_CMD_TNLMAPEN |
+				 FW_RSS_GLB_CONFIG_CMD_TNLALLLKP);
+	if (ret < 0)
+		goto bye;
+
+	ret = t4_cfg_pfvf(adap, 0, 0, 0, 64, 64, 64, 0, 0, 4, 0xf, 0xf, 16,
+			  FW_CMD_CAP_PF, FW_CMD_CAP_PF);
+	if (ret < 0)
+		goto bye;
+
+	for (v = 0; v < SGE_NTIMERS - 1; v++)
+		adap->sge.timer_val[v] = min(intr_holdoff[v], MAX_SGE_TIMERVAL);
+	adap->sge.timer_val[SGE_NTIMERS - 1] = MAX_SGE_TIMERVAL;
+	adap->sge.counter_val[0] = 1;
+	for (v = 1; v < SGE_NCOUNTERS; v++)
+		adap->sge.counter_val[v] = min(intr_cnt[v - 1],
+					       THRESHOLD_3_MASK);
+	t4_sge_init(adap);
+
+	/* get basic stuff going */
+	ret = t4_early_init(adap, 0);
+	if (ret < 0)
+		goto bye;
+
+#define FW_PARAM_DEV(param) \
+	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
+	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
+
+#define FW_PARAM_PFVF(param) \
+	(FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
+	 FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
+
+	params[0] = FW_PARAM_DEV(PORTVEC);
+	params[1] = FW_PARAM_PFVF(L2T_START);
+	params[2] = FW_PARAM_PFVF(L2T_END);
+	params[3] = FW_PARAM_PFVF(FILTER_START);
+	params[4] = FW_PARAM_PFVF(FILTER_END);
+	ret = t4_query_params(adap, 0, 0, 0, 5, params, val);
+	if (ret < 0)
+		goto bye;
+	port_vec = val[0];
+	adap->tids.ftid_base = val[3];
+	adap->tids.nftids = val[4] - val[3] + 1;
+
+	if (c.ofldcaps) {
+		/* query offload-related parameters */
+		params[0] = FW_PARAM_DEV(NTID);
+		params[1] = FW_PARAM_PFVF(SERVER_START);
+		params[2] = FW_PARAM_PFVF(SERVER_END);
+		params[3] = FW_PARAM_PFVF(TDDP_START);
+		params[4] = FW_PARAM_PFVF(TDDP_END);
+		params[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
+		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+		if (ret < 0)
+			goto bye;
+		adap->tids.ntids = val[0];
+		adap->tids.natids = min(adap->tids.ntids / 2, MAX_ATIDS);
+		adap->tids.stid_base = val[1];
+		adap->tids.nstids = val[2] - val[1] + 1;
+		adap->vres.ddp.start = val[3];
+		adap->vres.ddp.size = val[4] - val[3] + 1;
+		adap->params.ofldq_wr_cred = val[5];
+		adap->params.offload = 1;
+	}
+	if (c.rdmacaps) {
+		params[0] = FW_PARAM_PFVF(STAG_START);
+		params[1] = FW_PARAM_PFVF(STAG_END);
+		params[2] = FW_PARAM_PFVF(RQ_START);
+		params[3] = FW_PARAM_PFVF(RQ_END);
+		params[4] = FW_PARAM_PFVF(PBL_START);
+		params[5] = FW_PARAM_PFVF(PBL_END);
+		ret = t4_query_params(adap, 0, 0, 0, 6, params, val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.stag.start = val[0];
+		adap->vres.stag.size = val[1] - val[0] + 1;
+		adap->vres.rq.start = val[2];
+		adap->vres.rq.size = val[3] - val[2] + 1;
+		adap->vres.pbl.start = val[4];
+		adap->vres.pbl.size = val[5] - val[4] + 1;
+	}
+	if (c.iscsicaps) {
+		params[0] = FW_PARAM_PFVF(ISCSI_START);
+		params[1] = FW_PARAM_PFVF(ISCSI_END);
+		ret = t4_query_params(adap, 0, 0, 0, 2, params, val);
+		if (ret < 0)
+			goto bye;
+		adap->vres.iscsi.start = val[0];
+		adap->vres.iscsi.size = val[1] - val[0] + 1;
+	}
+#undef FW_PARAM_PFVF
+#undef FW_PARAM_DEV
+
+	adap->params.nports = hweight32(port_vec);
+	adap->params.portvec = port_vec;
+	adap->flags |= FW_OK;
+
+	/* These are finalized by FW initialization, load their values now */
+	v = t4_read_reg(adap, TP_TIMER_RESOLUTION);
+	adap->params.tp.tre = TIMERRESOLUTION_GET(v);
+	t4_read_mtu_tbl(adap, adap->params.mtus, NULL);
+	t4_load_mtus(adap, adap->params.mtus, adap->params.a_wnd,
+		     adap->params.b_wnd);
+
+	/* tweak some settings */
+	t4_write_reg(adap, TP_SHIFT_CNT, 0x64f8849);
+	t4_write_reg(adap, ULP_RX_TDDP_PSZ, HPZ0(PAGE_SHIFT - 12));
+	t4_write_reg(adap, TP_PIO_ADDR, TP_INGRESS_CONFIG);
+	v = t4_read_reg(adap, TP_PIO_DATA);
+	t4_write_reg(adap, TP_PIO_DATA, v & ~CSUM_HAS_PSEUDO_HDR);
+	setup_memwin(adap);
+	return 0;
+
+	/*
+	 * If a command timed out or failed with EIO, the FW is not operating
+	 * within its spec or something catastrophic happened to the HW/FW;
+	 * stop issuing commands.
+	 */
+bye:	if (ret != -ETIMEDOUT && ret != -EIO)
+		t4_fw_bye(adap, 0);
+	return ret;
+}
+
+static inline bool is_10g_port(const struct link_config *lc)
+{
+	return (lc->supported & FW_PORT_CAP_SPEED_10G) != 0;
+}
+
+static inline void init_rspq(struct sge_rspq *q, u8 timer_idx, u8 pkt_cnt_idx,
+			     unsigned int size, unsigned int iqe_size)
+{
+	q->intr_params = QINTR_TIMER_IDX(timer_idx) |
+			 (pkt_cnt_idx < SGE_NCOUNTERS ? QINTR_CNT_EN : 0);
+	q->pktcnt_idx = pkt_cnt_idx < SGE_NCOUNTERS ? pkt_cnt_idx : 0;
+	q->iqe_len = iqe_size;
+	q->size = size;
+}
+
+/*
+ * Perform default configuration of DMA queues depending on the number and type
+ * of ports we found and the number of available CPUs.  Most settings can be
+ * modified by the admin prior to actual use.
+ */
+static void __devinit cfg_queues(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	int i, q10g = 0, n10g = 0, qidx = 0;
+
+	for_each_port(adap, i)
+		n10g += is_10g_port(&adap2pinfo(adap, i)->link_cfg);
+
+	/*
+	 * We default to 1 queue per non-10G port and up to # of cores queues
+	 * per 10G port.
+	 */
+	if (n10g)
+		q10g = (MAX_ETH_QSETS - (adap->params.nports - n10g)) / n10g;
+	if (q10g > num_online_cpus())
+		q10g = num_online_cpus();
+
+	for_each_port(adap, i) {
+		struct port_info *pi = adap2pinfo(adap, i);
+
+		pi->first_qset = qidx;
+		pi->nqsets = is_10g_port(&pi->link_cfg) ? q10g : 1;
+		qidx += pi->nqsets;
+	}
+
+	s->ethqsets = qidx;
+	s->max_ethqsets = qidx;   /* MSI-X may lower it later */
+
+	if (is_offload(adap)) {
+		/*
+		 * For offload we use 1 queue/channel if all ports are up to 1G,
+		 * otherwise we divide all available queues amongst the channels
+		 * capped by the number of available cores.
+		 */
+		if (n10g) {
+			i = min_t(int, ARRAY_SIZE(s->ofldrxq),
+				  num_online_cpus());
+			s->ofldqsets = roundup(i, adap->params.nports);
+		} else
+			s->ofldqsets = adap->params.nports;
+		/* For RDMA one Rx queue per channel suffices */
+		s->rdmaqs = adap->params.nports;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->ethrxq); i++) {
+		struct sge_eth_rxq *r = &s->ethrxq[i];
+
+		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		r->fl.size = 72;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->ethtxq); i++)
+		s->ethtxq[i].q.size = 1024;
+
+	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++)
+		s->ctrlq[i].q.size = 512;
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++)
+		s->ofldtxq[i].q.size = 1024;
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldrxq); i++) {
+		struct sge_ofld_rxq *r = &s->ofldrxq[i];
+
+		init_rspq(&r->rspq, 0, 0, 1024, 64);
+		r->rspq.uld = CXGB4_ULD_ISCSI;
+		r->fl.size = 72;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(s->rdmarxq); i++) {
+		struct sge_ofld_rxq *r = &s->rdmarxq[i];
+
+		init_rspq(&r->rspq, 0, 0, 511, 64);
+		r->rspq.uld = CXGB4_ULD_RDMA;
+		r->fl.size = 72;
+	}
+
+	init_rspq(&s->fw_evtq, 6, 0, 512, 64);
+	init_rspq(&s->intrq, 6, 0, 2 * MAX_INGQ, 64);
+}
+
+/*
+ * Reduce the number of Ethernet queues across all ports to at most n.
+ * The caller guarantees that n provides at least one queue per port.
+ */
+static void __devinit reduce_ethqs(struct adapter *adap, int n)
+{
+	int i;
+	struct port_info *pi;
+
+	while (n < adap->sge.ethqsets)
+		for_each_port(adap, i) {
+			pi = adap2pinfo(adap, i);
+			if (pi->nqsets > 1) {
+				pi->nqsets--;
+				adap->sge.ethqsets--;
+				if (adap->sge.ethqsets <= n)
+					break;
+			}
+		}
+
+	n = 0;
+	for_each_port(adap, i) {
+		pi = adap2pinfo(adap, i);
+		pi->first_qset = n;
+		n += pi->nqsets;
+	}
+}
+
+/* 2 MSI-X vectors needed for the FW queue and non-data interrupts */
+#define EXTRA_VECS 2
+
+static int __devinit enable_msix(struct adapter *adap)
+{
+	int ofld_need = 0;
+	int i, err, want, need;
+	struct sge *s = &adap->sge;
+	unsigned int nchan = adap->params.nports;
+	struct msix_entry entries[MAX_INGQ + 1];
+
+	for (i = 0; i < ARRAY_SIZE(entries); ++i)
+		entries[i].entry = i;
+
+	want = s->max_ethqsets + EXTRA_VECS;
+	if (is_offload(adap)) {
+		want += s->rdmaqs + s->ofldqsets;
+		/* need nchan for each possible ULD */
+		ofld_need = 2 * nchan;
+	}
+	need = adap->params.nports + EXTRA_VECS + ofld_need;
+
+	while ((err = pci_enable_msix(adap->pdev, entries, want)) >= need)
+		want = err;
+
+	if (!err) {
+		/*
+		 * Distribute available vectors to the various queue groups.
+		 * Every group gets its minimum requirement and NIC gets top
+		 * priority for leftovers.
+		 */
+		i = want - EXTRA_VECS - ofld_need;
+		if (i < s->max_ethqsets) {
+			s->max_ethqsets = i;
+			if (i < s->ethqsets)
+				reduce_ethqs(adap, i);
+		}
+		if (is_offload(adap)) {
+			i = want - EXTRA_VECS - s->max_ethqsets;
+			i -= ofld_need - nchan;
+			s->ofldqsets = (i / nchan) * nchan;  /* round down */
+		}
+		for (i = 0; i < want; ++i)
+			adap->msix_info[i].vec = entries[i].vector;
+	} else if (err > 0)
+		dev_info(adap->pdev_dev,
+			 "only %d MSI-X vectors left, not using MSI-X\n", err);
+	return err;
+}
+
+#undef EXTRA_VECS
+
+static void __devinit print_port_info(struct adapter *adap)
+{
+	static const char *base[] = {
+		"R", "KX4", "T", "KX", "T", "KR", "CX4"
+	};
+
+	int i;
+	char buf[80];
+
+	for_each_port(adap, i) {
+		struct net_device *dev = adap->port[i];
+		const struct port_info *pi = netdev_priv(dev);
+		char *bufp = buf;
+
+		if (!test_bit(i, &adap->registered_device_map))
+			continue;
+
+		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_100M)
+			bufp += sprintf(bufp, "100/");
+		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_1G)
+			bufp += sprintf(bufp, "1000/");
+		if (pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G)
+			bufp += sprintf(bufp, "10G/");
+		if (bufp != buf)
+			--bufp;
+		sprintf(bufp, "BASE-%s", base[pi->port_type]);
+
+		netdev_info(dev, "Chelsio %s rev %d %s %sNIC PCIe x%d%s\n",
+			    adap->params.vpd.id, adap->params.rev,
+			    buf, is_offload(adap) ? "R" : "",
+			    adap->params.pci.width,
+			    (adap->flags & USING_MSIX) ? " MSI-X" :
+			    (adap->flags & USING_MSI) ? " MSI" : "");
+		if (adap->name == dev->name)
+			netdev_info(dev, "S/N: %s, E/C: %s\n",
+				    adap->params.vpd.sn, adap->params.vpd.ec);
+	}
+}
+
+#define VLAN_FEAT (NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO | NETIF_F_TSO6 |\
+		   NETIF_F_IPV6_CSUM | NETIF_F_HIGHDMA)
+
+static int __devinit init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	int func, i, err;
+	struct port_info *pi;
+	unsigned int highdma = 0;
+	struct adapter *adapter = NULL;
+
+	printk_once(KERN_INFO "%s - version %s\n", DRV_DESC, DRV_VERSION);
+
+	err = pci_request_regions(pdev, KBUILD_MODNAME);
+	if (err) {
+		/* Just info, some other driver may have claimed the device. */
+		dev_info(&pdev->dev, "cannot obtain PCI resources\n");
+		return err;
+	}
+
+	/* We control everything through PF 0 */
+	func = PCI_FUNC(pdev->devfn);
+	if (func > 0)
+		goto sriov;
+
+	err = pci_enable_device(pdev);
+	if (err) {
+		dev_err(&pdev->dev, "cannot enable PCI device\n");
+		goto out_release_regions;
+	}
+
+	if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
+		highdma = NETIF_F_HIGHDMA;
+		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (err) {
+			dev_err(&pdev->dev, "unable to obtain 64-bit DMA for "
+				"coherent allocations\n");
+			goto out_disable_device;
+		}
+	} else {
+		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev, "no usable DMA configuration\n");
+			goto out_disable_device;
+		}
+	}
+
+	pci_enable_pcie_error_reporting(pdev);
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
+	if (!adapter) {
+		err = -ENOMEM;
+		goto out_disable_device;
+	}
+
+	adapter->regs = pci_ioremap_bar(pdev, 0);
+	if (!adapter->regs) {
+		dev_err(&pdev->dev, "cannot map device registers\n");
+		err = -ENOMEM;
+		goto out_free_adapter;
+	}
+
+	adapter->pdev = pdev;
+	adapter->pdev_dev = &pdev->dev;
+	adapter->name = pci_name(pdev);
+	adapter->msg_enable = dflt_msg_enable;
+	memset(adapter->chan_map, 0xff, sizeof(adapter->chan_map));
+
+	spin_lock_init(&adapter->stats_lock);
+	spin_lock_init(&adapter->tid_release_lock);
+
+	INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
+
+	err = t4_prep_adapter(adapter);
+	if (err)
+		goto out_unmap_bar;
+	err = adap_init0(adapter);
+	if (err)
+		goto out_unmap_bar;
+
+	for_each_port(adapter, i) {
+		struct net_device *netdev;
+
+		netdev = alloc_etherdev_mq(sizeof(struct port_info),
+					   MAX_ETH_QSETS);
+		if (!netdev) {
+			err = -ENOMEM;
+			goto out_free_dev;
+		}
+
+		SET_NETDEV_DEV(netdev, &pdev->dev);
+
+		adapter->port[i] = netdev;
+		pi = netdev_priv(netdev);
+		pi->adapter = adapter;
+		pi->xact_addr_filt = -1;
+		pi->rx_offload = RX_CSO;
+		pi->port_id = i;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		netdev->irq = pdev->irq;
+
+		netdev->features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6;
+		netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+		netdev->features |= NETIF_F_GRO | highdma;
+		netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
+		netdev->vlan_features = netdev->features & VLAN_FEAT;
+
+		netdev->netdev_ops = &cxgb4_netdev_ops;
+		SET_ETHTOOL_OPS(netdev, &cxgb_ethtool_ops);
+	}
+
+	pci_set_drvdata(pdev, adapter);
+
+	if (adapter->flags & FW_OK) {
+		err = t4_port_init(adapter, 0, 0, 0);
+		if (err)
+			goto out_free_dev;
+	}
+
+	/*
+	 * Configure queues and allocate tables now; they can be needed as
+	 * soon as the first register_netdev completes.
+	 */
+	cfg_queues(adapter);
+
+	adapter->l2t = t4_init_l2t();
+	if (!adapter->l2t) {
+		/* We tolerate a lack of L2T, giving up some functionality */
+		dev_warn(&pdev->dev, "could not allocate L2T, continuing\n");
+		adapter->params.offload = 0;
+	}
+
+	if (is_offload(adapter) && tid_init(&adapter->tids) < 0) {
+		dev_warn(&pdev->dev, "could not allocate TID table, "
+			 "continuing\n");
+		adapter->params.offload = 0;
+	}
+
+	/*
+	 * The card is now ready to go.  If any errors occur during device
+	 * registration we do not fail the whole card but rather proceed only
+	 * with the ports we manage to register successfully.  However we must
+	 * register at least one net device.
+	 */
+	for_each_port(adapter, i) {
+		err = register_netdev(adapter->port[i]);
+		if (err)
+			dev_warn(&pdev->dev,
+				 "cannot register net device %s, skipping\n",
+				 adapter->port[i]->name);
+		else {
+			/*
+			 * Change the name we use for messages to the name of
+			 * the first successfully registered interface.
+			 */
+			if (!adapter->registered_device_map)
+				adapter->name = adapter->port[i]->name;
+
+			__set_bit(i, &adapter->registered_device_map);
+			adapter->chan_map[adap2pinfo(adapter, i)->tx_chan] = i;
+		}
+	}
+	if (!adapter->registered_device_map) {
+		dev_err(&pdev->dev, "could not register any net devices\n");
+		goto out_free_dev;
+	}
+
+	if (cxgb4_debugfs_root) {
+		adapter->debugfs_root = debugfs_create_dir(pci_name(pdev),
+							   cxgb4_debugfs_root);
+		setup_debugfs(adapter);
+	}
+
+	/* See what interrupts we'll be using */
+	if (msi > 1 && enable_msix(adapter) == 0)
+		adapter->flags |= USING_MSIX;
+	else if (msi > 0 && pci_enable_msi(pdev) == 0)
+		adapter->flags |= USING_MSI;
+
+	if (is_offload(adapter))
+		attach_ulds(adapter);
+
+	print_port_info(adapter);
+
+sriov:
+#ifdef CONFIG_PCI_IOV
+	if (func < ARRAY_SIZE(num_vf) && num_vf[func] > 0)
+		if (pci_enable_sriov(pdev, num_vf[func]) == 0)
+			dev_info(&pdev->dev,
+				 "instantiated %u virtual functions\n",
+				 num_vf[func]);
+#endif
+	return 0;
+
+ out_free_dev:
+	t4_free_mem(adapter->tids.tid_tab);
+	t4_free_mem(adapter->l2t);
+	for_each_port(adapter, i)
+		if (adapter->port[i])
+			free_netdev(adapter->port[i]);
+	if (adapter->flags & FW_OK)
+		t4_fw_bye(adapter, 0);
+ out_unmap_bar:
+	iounmap(adapter->regs);
+ out_free_adapter:
+	kfree(adapter);
+ out_disable_device:
+	pci_disable_pcie_error_reporting(pdev);
+	pci_disable_device(pdev);
+ out_release_regions:
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+	return err;
+}
+
+static void __devexit remove_one(struct pci_dev *pdev)
+{
+	struct adapter *adapter = pci_get_drvdata(pdev);
+
+	pci_disable_sriov(pdev);
+
+	if (adapter) {
+		int i;
+
+		if (is_offload(adapter))
+			detach_ulds(adapter);
+
+		for_each_port(adapter, i)
+			if (test_bit(i, &adapter->registered_device_map))
+				unregister_netdev(adapter->port[i]);
+
+		if (adapter->debugfs_root)
+			debugfs_remove_recursive(adapter->debugfs_root);
+
+		t4_sge_stop(adapter);
+		t4_free_sge_resources(adapter);
+		t4_free_mem(adapter->l2t);
+		t4_free_mem(adapter->tids.tid_tab);
+		disable_msi(adapter);
+
+		for_each_port(adapter, i)
+			if (adapter->port[i])
+				free_netdev(adapter->port[i]);
+
+		if (adapter->flags & FW_OK)
+			t4_fw_bye(adapter, 0);
+		iounmap(adapter->regs);
+		kfree(adapter);
+		pci_disable_pcie_error_reporting(pdev);
+		pci_disable_device(pdev);
+		pci_release_regions(pdev);
+		pci_set_drvdata(pdev, NULL);
+	} else if (PCI_FUNC(pdev->devfn) > 0)
+		pci_release_regions(pdev);
+}
+
+static struct pci_driver cxgb4_driver = {
+	.name     = KBUILD_MODNAME,
+	.id_table = cxgb4_pci_tbl,
+	.probe    = init_one,
+	.remove   = __devexit_p(remove_one),
+};
+
+static int __init cxgb4_init_module(void)
+{
+	int ret;
+
+	/* Debugfs support is optional, just warn if this fails */
+	cxgb4_debugfs_root = debugfs_create_dir(KBUILD_MODNAME, NULL);
+	if (!cxgb4_debugfs_root)
+		pr_warning("could not create debugfs entry, continuing\n");
+
+	ret = pci_register_driver(&cxgb4_driver);
+	if (ret < 0)
+		debugfs_remove(cxgb4_debugfs_root);
+	return ret;
+}
+
+static void __exit cxgb4_cleanup_module(void)
+{
+	pci_unregister_driver(&cxgb4_driver);
+	debugfs_remove(cxgb4_debugfs_root);  /* NULL ok */
+}
+
+module_init(cxgb4_init_module);
+module_exit(cxgb4_cleanup_module);
diff --git a/drivers/net/cxgb4/cxgb4_uld.h b/drivers/net/cxgb4/cxgb4_uld.h
new file mode 100644
index 0000000..5b98546
--- /dev/null
+++ b/drivers/net/cxgb4/cxgb4_uld.h
@@ -0,0 +1,239 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_OFLD_H
+#define __CXGB4_OFLD_H
+
+#include <linux/cache.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <asm/atomic.h>
+
+/* CPL message priority levels */
+enum {
+	CPL_PRIORITY_DATA     = 0,  /* data messages */
+	CPL_PRIORITY_SETUP    = 1,  /* connection setup messages */
+	CPL_PRIORITY_TEARDOWN = 0,  /* connection teardown messages */
+	CPL_PRIORITY_LISTEN   = 1,  /* listen start/stop messages */
+	CPL_PRIORITY_ACK      = 1,  /* RX ACK messages */
+	CPL_PRIORITY_CONTROL  = 1   /* control messages */
+};
+
+#define INIT_TP_WR(w, tid) do { \
+	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_TP_WR) | \
+			      FW_WR_IMMDLEN(sizeof(*w) - sizeof(w->wr))); \
+	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(sizeof(*w), 16)) | \
+			       FW_WR_FLOWID(tid)); \
+	(w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
+
+#define INIT_TP_WR_CPL(w, cpl, tid) do { \
+	INIT_TP_WR(w, tid); \
+	OPCODE_TID(w) = htonl(MK_OPCODE_TID(cpl, tid)); \
+} while (0)
+
+#define INIT_ULPTX_WR(w, wrlen, atomic, tid) do { \
+	(w)->wr.wr_hi = htonl(FW_WR_OP(FW_ULPTX_WR) | FW_WR_ATOMIC(atomic)); \
+	(w)->wr.wr_mid = htonl(FW_WR_LEN16(DIV_ROUND_UP(wrlen, 16)) | \
+			       FW_WR_FLOWID(tid)); \
+	(w)->wr.wr_lo = cpu_to_be64(0); \
+} while (0)
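+
+/*
+ * Typical use of the helpers above (a sketch): a ULD reserves room for a
+ * CPL message in an skb, then initializes the embedded work-request header
+ * and the CPL opcode/TID in one step.  cpl_foo_req stands in for one of
+ * the CPL structures from t4_msg.h:
+ *
+ *	req = (struct cpl_foo_req *)__skb_put(skb, sizeof(*req));
+ *	INIT_TP_WR_CPL(req, CPL_FOO_REQ, tid);
+ */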
+
+/* Special asynchronous notification message */
+#define CXGB4_MSG_AN ((void *)1)
+
+struct serv_entry {
+	void *data;
+};
+
+union aopen_entry {
+	void *data;
+	union aopen_entry *next;
+};
+
+/*
+ * Holds the size, base address, free list start, etc. of the TID, server TID,
+ * and active-open TID tables.  The tables themselves are allocated dynamically.
+ */
+struct tid_info {
+	void **tid_tab;
+	unsigned int ntids;
+
+	struct serv_entry *stid_tab;
+	unsigned long *stid_bmap;
+	unsigned int nstids;
+	unsigned int stid_base;
+
+	union aopen_entry *atid_tab;
+	unsigned int natids;
+
+	unsigned int nftids;
+	unsigned int ftid_base;
+
+	spinlock_t atid_lock ____cacheline_aligned_in_smp;
+	union aopen_entry *afree;
+	unsigned int atids_in_use;
+
+	spinlock_t stid_lock;
+	unsigned int stids_in_use;
+
+	atomic_t tids_in_use;
+};
+
+static inline void *lookup_tid(const struct tid_info *t, unsigned int tid)
+{
+	return tid < t->ntids ? t->tid_tab[tid] : NULL;
+}
+
+static inline void *lookup_atid(const struct tid_info *t, unsigned int atid)
+{
+	return atid < t->natids ? t->atid_tab[atid].data : NULL;
+}
+
+static inline void *lookup_stid(const struct tid_info *t, unsigned int stid)
+{
+	stid -= t->stid_base;
+	return stid < t->nstids ? t->stid_tab[stid].data : NULL;
+}
+
+static inline void cxgb4_insert_tid(struct tid_info *t, void *data,
+				    unsigned int tid)
+{
+	t->tid_tab[tid] = data;
+	atomic_inc(&t->tids_in_use);
+}
+
+int cxgb4_alloc_atid(struct tid_info *t, void *data);
+int cxgb4_alloc_stid(struct tid_info *t, int family, void *data);
+void cxgb4_free_atid(struct tid_info *t, unsigned int atid);
+void cxgb4_free_stid(struct tid_info *t, unsigned int stid, int family);
+void cxgb4_remove_tid(struct tid_info *t, unsigned int qid, unsigned int tid);
+void cxgb4_queue_tid_release(struct tid_info *t, unsigned int chan,
+			     unsigned int tid);
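+
+/*
+ * Sketch of how a ULD typically drives the TID table (error handling
+ * elided; my_conn is a hypothetical per-connection object):
+ *
+ *	atid = cxgb4_alloc_atid(t, my_conn);	when starting an active open
+ *	...
+ *	cxgb4_insert_tid(t, my_conn, tid);	once the HW TID is known
+ *	my_conn = lookup_tid(t, tid);		on each ingress CPL message
+ *	...
+ *	cxgb4_remove_tid(t, qid, tid);		at connection teardown
+ */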
+
+struct in6_addr;
+
+int cxgb4_create_server(const struct net_device *dev, unsigned int stid,
+			__be32 sip, __be16 sport, unsigned int queue);
+int cxgb4_create_server6(const struct net_device *dev, unsigned int stid,
+			 const struct in6_addr *sip, __be16 sport,
+			 unsigned int queue);
+
+static inline void set_wr_txq(struct sk_buff *skb, int prio, int queue)
+{
+	skb_set_queue_mapping(skb, (queue << 1) | prio);
+}
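+
+/*
+ * For example, the L2T code elsewhere in this patch marks its L2T write
+ * requests with
+ *
+ *	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+ *
+ * i.e. CONTROL priority on offload Tx queue 0.
+ */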
+
+enum cxgb4_uld {
+	CXGB4_ULD_RDMA,
+	CXGB4_ULD_ISCSI,
+	CXGB4_ULD_MAX
+};
+
+enum cxgb4_state {
+	CXGB4_STATE_UP,
+	CXGB4_STATE_START_RECOVERY,
+	CXGB4_STATE_DOWN,
+	CXGB4_STATE_DETACH
+};
+
+struct pci_dev;
+struct l2t_data;
+struct net_device;
+struct pkt_gl;
+struct tp_tcp_stats;
+
+struct cxgb4_range {
+	unsigned int start;
+	unsigned int size;
+};
+
+struct cxgb4_virt_res {                      /* virtualized HW resources */
+	struct cxgb4_range ddp;
+	struct cxgb4_range iscsi;
+	struct cxgb4_range stag;
+	struct cxgb4_range rq;
+	struct cxgb4_range pbl;
+};
+
+/*
+ * Block of information the LLD provides to ULDs attaching to a device.
+ */
+struct cxgb4_lld_info {
+	struct pci_dev *pdev;                /* associated PCI device */
+	struct l2t_data *l2t;                /* L2 table */
+	struct tid_info *tids;               /* TID table */
+	struct net_device **ports;           /* device ports */
+	const struct cxgb4_virt_res *vr;     /* assorted HW resources */
+	const unsigned short *mtus;          /* MTU table */
+	const unsigned short *rxq_ids;       /* the ULD's Rx queue ids */
+	unsigned short nrxq;                 /* # of Rx queues */
+	unsigned short ntxq;                 /* # of Tx queues */
+	unsigned char nchan:4;               /* # of channels */
+	unsigned char nports:4;              /* # of ports */
+	unsigned char wr_cred;               /* WR 16-byte credits */
+	unsigned char adapter_type;          /* type of adapter */
+	unsigned char fw_api_ver;            /* FW API version */
+	unsigned int fw_vers;                /* FW version */
+	unsigned int iscsi_iolen;            /* iSCSI max I/O length */
+	unsigned short udb_density;          /* # of user DB/page */
+	unsigned short ucq_density;          /* # of user CQs/page */
+	void __iomem *gts_reg;               /* address of GTS register */
+	void __iomem *db_reg;                /* address of kernel doorbell */
+};
+
+struct cxgb4_uld_info {
+	const char *name;
+	void *(*add)(const struct cxgb4_lld_info *p);
+	int (*rx_handler)(void *handle, const __be64 *rsp,
+			  const struct pkt_gl *gl);
+	int (*state_change)(void *handle, enum cxgb4_state new_state);
+};
+
+int cxgb4_register_uld(enum cxgb4_uld type, const struct cxgb4_uld_info *p);
+int cxgb4_unregister_uld(enum cxgb4_uld type);
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb);
+unsigned int cxgb4_port_chan(const struct net_device *dev);
+unsigned int cxgb4_port_viid(const struct net_device *dev);
+unsigned int cxgb4_port_idx(const struct net_device *dev);
+struct net_device *cxgb4_netdev_by_hwid(struct pci_dev *pdev, unsigned int id);
+unsigned int cxgb4_best_mtu(const unsigned short *mtus, unsigned short mtu,
+			    unsigned int *idx);
+void cxgb4_get_tcp_stats(struct pci_dev *pdev, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6);
+void cxgb4_iscsi_init(struct net_device *dev, unsigned int tag_mask,
+		      const unsigned int *pgsz_order);
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+				   unsigned int skb_len, unsigned int pull_len);
+#endif  /* !__CXGB4_OFLD_H */
diff --git a/drivers/net/cxgb4/l2t.c b/drivers/net/cxgb4/l2t.c
new file mode 100644
index 0000000..9f96724
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.c
@@ -0,0 +1,624 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/jhash.h>
+#include <net/neighbour.h>
+#include "cxgb4.h"
+#include "l2t.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+#define VLAN_NONE 0xfff
+
+/* identifies sync vs async L2T_WRITE_REQs */
+#define F_SYNC_WR    (1 << 12)
+
+enum {
+	L2T_STATE_VALID,      /* entry is up to date */
+	L2T_STATE_STALE,      /* entry may be used but needs revalidation */
+	L2T_STATE_RESOLVING,  /* entry needs address resolution */
+	L2T_STATE_SYNC_WRITE, /* synchronous write of entry underway */
+
+	/* when state is one of the below the entry is not hashed */
+	L2T_STATE_SWITCHING,  /* entry is being used by a switching filter */
+	L2T_STATE_UNUSED      /* entry not in use */
+};
+
+struct l2t_data {
+	rwlock_t lock;
+	atomic_t nfree;             /* number of free entries */
+	struct l2t_entry *rover;    /* starting point for next allocation */
+	struct l2t_entry l2tab[L2T_SIZE];
+};
+
+static inline unsigned int vlan_prio(const struct l2t_entry *e)
+{
+	return e->vlan >> 13;
+}
+
+static inline void l2t_hold(struct l2t_data *d, struct l2t_entry *e)
+{
+	if (atomic_add_return(1, &e->refcnt) == 1)  /* 0 -> 1 transition */
+		atomic_dec(&d->nfree);
+}
+
+/*
+ * To avoid having to check address families we do not allow v4 and v6
+ * neighbors to be on the same hash chain.  We keep v4 entries in the first
+ * half of available hash buckets and v6 in the second.
+ */
+enum {
+	L2T_SZ_HALF = L2T_SIZE / 2,
+	L2T_HASH_MASK = L2T_SZ_HALF - 1
+};
+
+static inline unsigned int arp_hash(const u32 *key, int ifindex)
+{
+	return jhash_2words(*key, ifindex, 0) & L2T_HASH_MASK;
+}
+
+static inline unsigned int ipv6_hash(const u32 *key, int ifindex)
+{
+	u32 xor = key[0] ^ key[1] ^ key[2] ^ key[3];
+
+	return L2T_SZ_HALF + (jhash_2words(xor, ifindex, 0) & L2T_HASH_MASK);
+}
+
+static unsigned int addr_hash(const u32 *addr, int addr_len, int ifindex)
+{
+	return addr_len == 4 ? arp_hash(addr, ifindex) :
+			       ipv6_hash(addr, ifindex);
+}
+
+/*
+ * Checks if an L2T entry is for the given IP/IPv6 address.  It does not check
+ * whether the L2T entry and the address are of the same address family.
+ * Callers ensure an address is only checked against L2T entries of the same
+ * family, something made trivial by the separation of IP and IPv6 hash chains
+ * mentioned above.  Returns 0 if there's a match and non-zero otherwise.
+ */
+static int addreq(const struct l2t_entry *e, const u32 *addr)
+{
+	if (e->v6)
+		return (e->addr[0] ^ addr[0]) | (e->addr[1] ^ addr[1]) |
+		       (e->addr[2] ^ addr[2]) | (e->addr[3] ^ addr[3]);
+	return e->addr[0] ^ addr[0];
+}
+
+static void neigh_replace(struct l2t_entry *e, struct neighbour *n)
+{
+	neigh_hold(n);
+	if (e->neigh)
+		neigh_release(e->neigh);
+	e->neigh = n;
+}
+
+/*
+ * Write an L2T entry.  Must be called with the entry locked.
+ * The write may be synchronous or asynchronous.
+ */
+static int write_l2e(struct adapter *adap, struct l2t_entry *e, int sync)
+{
+	struct sk_buff *skb;
+	struct cpl_l2t_write_req *req;
+
+	skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
+	if (!skb)
+		return -ENOMEM;
+
+	req = (struct cpl_l2t_write_req *)__skb_put(skb, sizeof(*req));
+	INIT_TP_WR(req, 0);
+
+	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ,
+					e->idx | (sync ? F_SYNC_WR : 0) |
+					TID_QID(adap->sge.fw_evtq.abs_id)));
+	req->params = htons(L2T_W_PORT(e->lport) | L2T_W_NOREPLY(!sync));
+	req->l2t_idx = htons(e->idx);
+	req->vlan = htons(e->vlan);
+	if (e->neigh)
+		memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
+	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
+
+	set_wr_txq(skb, CPL_PRIORITY_CONTROL, 0);
+	t4_ofld_send(adap, skb);
+
+	if (sync && e->state != L2T_STATE_SWITCHING)
+		e->state = L2T_STATE_SYNC_WRITE;
+	return 0;
+}
+
+/*
+ * Send packets waiting in an L2T entry's ARP queue.  Must be called with the
+ * entry locked.
+ */
+static void send_pending(struct adapter *adap, struct l2t_entry *e)
+{
+	while (e->arpq_head) {
+		struct sk_buff *skb = e->arpq_head;
+
+		e->arpq_head = skb->next;
+		skb->next = NULL;
+		t4_ofld_send(adap, skb);
+	}
+	e->arpq_tail = NULL;
+}
+
+/*
+ * Process a CPL_L2T_WRITE_RPL.  Wake up the ARP queue if it completes a
+ * synchronous L2T_WRITE.  Note that the TID in the reply is really the L2T
+ * index it refers to.
+ */
+void do_l2t_write_rpl(struct adapter *adap, const struct cpl_l2t_write_rpl *rpl)
+{
+	unsigned int tid = GET_TID(rpl);
+	unsigned int idx = tid & (L2T_SIZE - 1);
+
+	if (unlikely(rpl->status != CPL_ERR_NONE)) {
+		dev_err(adap->pdev_dev,
+			"Unexpected L2T_WRITE_RPL status %u for entry %u\n",
+			rpl->status, idx);
+		return;
+	}
+
+	if (tid & F_SYNC_WR) {
+		struct l2t_entry *e = &adap->l2t->l2tab[idx];
+
+		spin_lock(&e->lock);
+		if (e->state != L2T_STATE_SWITCHING) {
+			send_pending(adap, e);
+			e->state = (e->neigh->nud_state & NUD_STALE) ?
+					L2T_STATE_STALE : L2T_STATE_VALID;
+		}
+		spin_unlock(&e->lock);
+	}
+}
+
+/*
+ * Add a packet to an L2T entry's queue of packets awaiting resolution.
+ * Must be called with the entry's lock held.
+ */
+static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
+{
+	skb->next = NULL;
+	if (e->arpq_head)
+		e->arpq_tail->next = skb;
+	else
+		e->arpq_head = skb;
+	e->arpq_tail = skb;
+}
+
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+		   struct l2t_entry *e)
+{
+	struct adapter *adap = netdev2adap(dev);
+
+again:
+	switch (e->state) {
+	case L2T_STATE_STALE:     /* entry is stale, kick off revalidation */
+		neigh_event_send(e->neigh, NULL);
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_STALE)
+			e->state = L2T_STATE_VALID;
+		spin_unlock_bh(&e->lock);
+		/* fall through */
+	case L2T_STATE_VALID:     /* fast-path, send the packet on */
+		return t4_ofld_send(adap, skb);
+	case L2T_STATE_RESOLVING:
+	case L2T_STATE_SYNC_WRITE:
+		spin_lock_bh(&e->lock);
+		if (e->state != L2T_STATE_SYNC_WRITE &&
+		    e->state != L2T_STATE_RESOLVING) {
+			spin_unlock_bh(&e->lock);
+			goto again;
+		}
+		arpq_enqueue(e, skb);
+		spin_unlock_bh(&e->lock);
+
+		if (e->state == L2T_STATE_RESOLVING &&
+		    !neigh_event_send(e->neigh, NULL)) {
+			spin_lock_bh(&e->lock);
+			if (e->state == L2T_STATE_RESOLVING && e->arpq_head)
+				write_l2e(adap, e, 1);
+			spin_unlock_bh(&e->lock);
+		}
+	}
+	return 0;
+}
+EXPORT_SYMBOL(cxgb4_l2t_send);
+
+/*
+ * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
+ */
+static struct l2t_entry *alloc_l2e(struct l2t_data *d)
+{
+	struct l2t_entry *end, *e, **p;
+
+	if (!atomic_read(&d->nfree))
+		return NULL;
+
+	/* there's definitely a free entry */
+	for (e = d->rover, end = &d->l2tab[L2T_SIZE]; e != end; ++e)
+		if (atomic_read(&e->refcnt) == 0)
+			goto found;
+
+	for (e = d->l2tab; atomic_read(&e->refcnt); ++e)
+		;
+found:
+	d->rover = e + 1;
+	atomic_dec(&d->nfree);
+
+	/*
+	 * The entry we found may be an inactive entry that is
+	 * presently in the hash table.  We need to remove it.
+	 */
+	if (e->state < L2T_STATE_SWITCHING)
+		for (p = &d->l2tab[e->hash].first; *p; p = &(*p)->next)
+			if (*p == e) {
+				*p = e->next;
+				e->next = NULL;
+				break;
+			}
+
+	e->state = L2T_STATE_UNUSED;
+	return e;
+}
+
+/*
+ * Called when an L2T entry has no more users.
+ */
+static void t4_l2e_free(struct l2t_entry *e)
+{
+	struct l2t_data *d;
+
+	spin_lock_bh(&e->lock);
+	if (atomic_read(&e->refcnt) == 0) {  /* hasn't been recycled */
+		if (e->neigh) {
+			neigh_release(e->neigh);
+			e->neigh = NULL;
+		}
+	}
+	spin_unlock_bh(&e->lock);
+
+	d = container_of(e, struct l2t_data, l2tab[e->idx]);
+	atomic_inc(&d->nfree);
+}
+
+void cxgb4_l2t_release(struct l2t_entry *e)
+{
+	if (atomic_dec_and_test(&e->refcnt))
+		t4_l2e_free(e);
+}
+EXPORT_SYMBOL(cxgb4_l2t_release);
+
+/*
+ * Update an L2T entry that was previously used for the same next hop as neigh.
+ * Must be called with softirqs disabled.
+ */
+static void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
+{
+	unsigned int nud_state;
+
+	spin_lock(&e->lock);                /* avoid race with t4_l2e_free */
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+	nud_state = neigh->nud_state;
+	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
+	    !(nud_state & NUD_VALID))
+		e->state = L2T_STATE_RESOLVING;
+	else if (nud_state & NUD_CONNECTED)
+		e->state = L2T_STATE_VALID;
+	else
+		e->state = L2T_STATE_STALE;
+	spin_unlock(&e->lock);
+}
+
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+				const struct net_device *physdev,
+				unsigned int priority)
+{
+	u8 lport;
+	u16 vlan;
+	struct l2t_entry *e;
+	int addr_len = neigh->tbl->key_len;
+	u32 *addr = (u32 *)neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = addr_hash(addr, addr_len, ifidx);
+
+	if (neigh->dev->flags & IFF_LOOPBACK)
+		lport = netdev2pinfo(physdev)->tx_chan + 4;
+	else
+		lport = netdev2pinfo(physdev)->lport;
+
+	if (neigh->dev->priv_flags & IFF_802_1Q_VLAN)
+		vlan = vlan_dev_vlan_id(neigh->dev);
+	else
+		vlan = VLAN_NONE;
+
+	write_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (!addreq(e, addr) && e->ifindex == ifidx &&
+		    e->vlan == vlan && e->lport == lport) {
+			l2t_hold(d, e);
+			if (atomic_read(&e->refcnt) == 1)
+				reuse_entry(e, neigh);
+			goto done;
+		}
+
+	/* Need to allocate a new entry */
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
+		e->state = L2T_STATE_RESOLVING;
+		memcpy(e->addr, addr, addr_len);
+		e->ifindex = ifidx;
+		e->hash = hash;
+		e->lport = lport;
+		e->v6 = addr_len == 16;
+		atomic_set(&e->refcnt, 1);
+		neigh_replace(e, neigh);
+		e->vlan = vlan;
+		e->next = d->l2tab[hash].first;
+		d->l2tab[hash].first = e;
+		spin_unlock(&e->lock);
+	}
+done:
+	write_unlock_bh(&d->lock);
+	return e;
+}
+EXPORT_SYMBOL(cxgb4_l2t_get);
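+
+/*
+ * Sketch of the intended calling sequence for the exported L2T entry
+ * points (the surrounding ULD code is hypothetical):
+ *
+ *	e = cxgb4_l2t_get(adap->l2t, neigh, physdev, 0);
+ *	if (!e)
+ *		return -ENOMEM;
+ *	...
+ *	err = cxgb4_l2t_send(dev, skb, e);
+ *	...
+ *	cxgb4_l2t_release(e);
+ *
+ * cxgb4_l2t_send() queues packets internally while the address resolves,
+ * and cxgb4_l2t_release() drops the reference taken by cxgb4_l2t_get().
+ */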
+
+/*
+ * Called when address resolution fails for an L2T entry to handle packets
+ * on the arpq head.  If a packet specifies a failure handler it is invoked,
+ * otherwise the packet is sent to the device.
+ */
+static void handle_failed_resolution(struct adapter *adap, struct sk_buff *arpq)
+{
+	while (arpq) {
+		struct sk_buff *skb = arpq;
+		const struct l2t_skb_cb *cb = L2T_SKB_CB(skb);
+
+		arpq = skb->next;
+		skb->next = NULL;
+		if (cb->arp_err_handler)
+			cb->arp_err_handler(cb->handle, skb);
+		else
+			t4_ofld_send(adap, skb);
+	}
+}
+
+/*
+ * Called when the host's neighbor layer makes a change to some entry that is
+ * loaded into the HW L2 table.
+ */
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh)
+{
+	struct l2t_entry *e;
+	struct sk_buff *arpq = NULL;
+	struct l2t_data *d = adap->l2t;
+	int addr_len = neigh->tbl->key_len;
+	u32 *addr = (u32 *) neigh->primary_key;
+	int ifidx = neigh->dev->ifindex;
+	int hash = addr_hash(addr, addr_len, ifidx);
+
+	read_lock_bh(&d->lock);
+	for (e = d->l2tab[hash].first; e; e = e->next)
+		if (!addreq(e, addr) && e->ifindex == ifidx) {
+			spin_lock(&e->lock);
+			if (atomic_read(&e->refcnt))
+				goto found;
+			spin_unlock(&e->lock);
+			break;
+		}
+	read_unlock_bh(&d->lock);
+	return;
+
+ found:
+	read_unlock(&d->lock);
+
+	if (neigh != e->neigh)
+		neigh_replace(e, neigh);
+
+	if (e->state == L2T_STATE_RESOLVING) {
+		if (neigh->nud_state & NUD_FAILED) {
+			arpq = e->arpq_head;
+			e->arpq_head = e->arpq_tail = NULL;
+		} else if ((neigh->nud_state & (NUD_CONNECTED | NUD_STALE)) &&
+			   e->arpq_head) {
+			write_l2e(adap, e, 1);
+		}
+	} else {
+		e->state = neigh->nud_state & NUD_CONNECTED ?
+			L2T_STATE_VALID : L2T_STATE_STALE;
+		if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)))
+			write_l2e(adap, e, 0);
+	}
+
+	spin_unlock_bh(&e->lock);
+
+	if (arpq)
+		handle_failed_resolution(adap, arpq);
+}
+
+/*
+ * Allocate an L2T entry for use by a switching rule.  Such entries need to be
+ * explicitly freed and while busy they are not on any hash chain, so normal
+ * address resolution updates do not see them.
+ */
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d)
+{
+	struct l2t_entry *e;
+
+	write_lock_bh(&d->lock);
+	e = alloc_l2e(d);
+	if (e) {
+		spin_lock(&e->lock);          /* avoid race with t4_l2e_free */
+		e->state = L2T_STATE_SWITCHING;
+		atomic_set(&e->refcnt, 1);
+		spin_unlock(&e->lock);
+	}
+	write_unlock_bh(&d->lock);
+	return e;
+}
+
+/*
+ * Sets/updates the contents of a switching L2T entry that has been allocated
+ * with an earlier call to @t4_l2t_alloc_switching.
+ */
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+			 u8 port, u8 *eth_addr)
+{
+	e->vlan = vlan;
+	e->lport = port;
+	memcpy(e->dmac, eth_addr, ETH_ALEN);
+	return write_l2e(adap, e, 0);
+}
+
+struct l2t_data *t4_init_l2t(void)
+{
+	int i;
+	struct l2t_data *d;
+
+	d = t4_alloc_mem(sizeof(*d));
+	if (!d)
+		return NULL;
+
+	d->rover = d->l2tab;
+	atomic_set(&d->nfree, L2T_SIZE);
+	rwlock_init(&d->lock);
+
+	for (i = 0; i < L2T_SIZE; ++i) {
+		d->l2tab[i].idx = i;
+		d->l2tab[i].state = L2T_STATE_UNUSED;
+		spin_lock_init(&d->l2tab[i].lock);
+		atomic_set(&d->l2tab[i].refcnt, 0);
+	}
+	return d;
+}
+
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+static inline void *l2t_get_idx(struct seq_file *seq, loff_t pos)
+{
+	struct l2t_entry *l2tab = seq->private;
+
+	return pos >= L2T_SIZE ? NULL : &l2tab[pos];
+}
+
+static void *l2t_seq_start(struct seq_file *seq, loff_t *pos)
+{
+	return *pos ? l2t_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
+}
+
+static void *l2t_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+	v = l2t_get_idx(seq, *pos);
+	if (v)
+		++*pos;
+	return v;
+}
+
+static void l2t_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static char l2e_state(const struct l2t_entry *e)
+{
+	switch (e->state) {
+	case L2T_STATE_VALID: return 'V';
+	case L2T_STATE_STALE: return 'S';
+	case L2T_STATE_SYNC_WRITE: return 'W';
+	case L2T_STATE_RESOLVING: return e->arpq_head ? 'A' : 'R';
+	case L2T_STATE_SWITCHING: return 'X';
+	default:
+		return 'U';
+	}
+}
+
+static int l2t_seq_show(struct seq_file *seq, void *v)
+{
+	if (v == SEQ_START_TOKEN)
+		seq_puts(seq, " Idx IP address                "
+			 "Ethernet address  VLAN/P LP State Users Port\n");
+	else {
+		char ip[60];
+		struct l2t_entry *e = v;
+
+		spin_lock_bh(&e->lock);
+		if (e->state == L2T_STATE_SWITCHING)
+			ip[0] = '\0';
+		else
+			sprintf(ip, e->v6 ? "%pI6c" : "%pI4", e->addr);
+		seq_printf(seq, "%4u %-25s %17pM %4d %u %2u   %c   %5u %s\n",
+			   e->idx, ip, e->dmac,
+			   e->vlan & VLAN_VID_MASK, vlan_prio(e), e->lport,
+			   l2e_state(e), atomic_read(&e->refcnt),
+			   e->neigh ? e->neigh->dev->name : "");
+		spin_unlock_bh(&e->lock);
+	}
+	return 0;
+}
+
+static const struct seq_operations l2t_seq_ops = {
+	.start = l2t_seq_start,
+	.next = l2t_seq_next,
+	.stop = l2t_seq_stop,
+	.show = l2t_seq_show
+};
+
+static int l2t_seq_open(struct inode *inode, struct file *file)
+{
+	int rc = seq_open(file, &l2t_seq_ops);
+
+	if (!rc) {
+		struct adapter *adap = inode->i_private;
+		struct seq_file *seq = file->private_data;
+
+		seq->private = adap->l2t->l2tab;
+	}
+	return rc;
+}
+
+const struct file_operations t4_l2t_fops = {
+	.owner = THIS_MODULE,
+	.open = l2t_seq_open,
+	.read = seq_read,
+	.llseek = seq_lseek,
+	.release = seq_release,
+};
diff --git a/drivers/net/cxgb4/l2t.h b/drivers/net/cxgb4/l2t.h
new file mode 100644
index 0000000..643f27e
--- /dev/null
+++ b/drivers/net/cxgb4/l2t.h
@@ -0,0 +1,110 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __CXGB4_L2T_H
+#define __CXGB4_L2T_H
+
+#include <linux/spinlock.h>
+#include <linux/if_ether.h>
+#include <asm/atomic.h>
+
+struct adapter;
+struct l2t_data;
+struct neighbour;
+struct net_device;
+struct file_operations;
+struct cpl_l2t_write_rpl;
+
+/*
+ * Each L2T entry plays multiple roles.  First of all, it keeps state for the
+ * corresponding entry of the HW L2 table and maintains a queue of offload
+ * packets awaiting address resolution.  Second, it is a node of a hash table
+ * chain, where the nodes of the chain are linked together through their next
+ * pointer.  Finally, each node is a bucket of a hash table, pointing to the
+ * first element in its chain through its first pointer.
+ */
+struct l2t_entry {
+	u16 state;                  /* entry state */
+	u16 idx;                    /* entry index */
+	u32 addr[4];                /* next hop IP or IPv6 address */
+	int ifindex;                /* neighbor's net_device's ifindex */
+	struct neighbour *neigh;    /* associated neighbour */
+	struct l2t_entry *first;    /* start of hash chain */
+	struct l2t_entry *next;     /* next l2t_entry on chain */
+	struct sk_buff *arpq_head;  /* queue of packets awaiting resolution */
+	struct sk_buff *arpq_tail;
+	spinlock_t lock;
+	atomic_t refcnt;            /* entry reference count */
+	u16 hash;                   /* hash bucket the entry is on */
+	u16 vlan;                   /* VLAN TCI (id: bits 0-11, prio: 13-15) */
+	u8 v6;                      /* whether entry is for IPv6 */
+	u8 lport;                   /* associated offload logical interface */
+	u8 dmac[ETH_ALEN];          /* neighbour's MAC address */
+};
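+
+/*
+ * Lookup sketch (illustrative only; the real implementation lives in
+ * l2t.c): because every entry doubles as a hash bucket head, a lookup
+ * walks the chain rooted at the bucket's first pointer, e.g.:
+ *
+ *	for (e = d->l2tab[hash].first; e; e = e->next)
+ *		if (!addreq(e, addr) && e->ifindex == ifindex)
+ *			return e;	// match on address and interface
+ *
+ * where addreq() is a hypothetical address comparator.
+ */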
+
+typedef void (*arp_err_handler_t)(void *handle, struct sk_buff *skb);
+
+/*
+ * Callback stored in an skb to handle address resolution failure.
+ */
+struct l2t_skb_cb {
+	void *handle;
+	arp_err_handler_t arp_err_handler;
+};
+
+#define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
+
+static inline void t4_set_arp_err_handler(struct sk_buff *skb, void *handle,
+					  arp_err_handler_t handler)
+{
+	L2T_SKB_CB(skb)->handle = handle;
+	L2T_SKB_CB(skb)->arp_err_handler = handler;
+}
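+
+/*
+ * Usage sketch (names hypothetical): a ULD arms the callback before
+ * handing the packet to cxgb4_l2t_send(), so the skb can be disposed of
+ * if address resolution ultimately fails:
+ *
+ *	static void uld_arp_failure(void *handle, struct sk_buff *skb)
+ *	{
+ *		kfree_skb(skb);		// e.g. simply drop the packet
+ *	}
+ *
+ *	t4_set_arp_err_handler(skb, uld_ctx, uld_arp_failure);
+ *	err = cxgb4_l2t_send(dev, skb, e);
+ */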
+
+void cxgb4_l2t_release(struct l2t_entry *e);
+int cxgb4_l2t_send(struct net_device *dev, struct sk_buff *skb,
+		   struct l2t_entry *e);
+struct l2t_entry *cxgb4_l2t_get(struct l2t_data *d, struct neighbour *neigh,
+				const struct net_device *physdev,
+				unsigned int priority);
+
+void t4_l2t_update(struct adapter *adap, struct neighbour *neigh);
+struct l2t_entry *t4_l2t_alloc_switching(struct l2t_data *d);
+int t4_l2t_set_switching(struct adapter *adap, struct l2t_entry *e, u16 vlan,
+			 u8 port, u8 *eth_addr);
+struct l2t_data *t4_init_l2t(void);
+void do_l2t_write_rpl(struct adapter *p, const struct cpl_l2t_write_rpl *rpl);
+
+extern const struct file_operations t4_l2t_fops;
+#endif  /* __CXGB4_L2T_H */
diff --git a/drivers/net/cxgb4/sge.c b/drivers/net/cxgb4/sge.c
new file mode 100644
index 0000000..14adc58
--- /dev/null
+++ b/drivers/net/cxgb4/sge.c
@@ -0,0 +1,2431 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/ip.h>
+#include <linux/dma-mapping.h>
+#include <linux/jiffies.h>
+#include <net/ipv6.h>
+#include <net/tcp.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4_msg.h"
+#include "t4fw_api.h"
+
+/*
+ * Rx buffer size.  We use largish buffers if possible but settle for single
+ * pages under memory shortage.
+ */
+#if PAGE_SHIFT >= 16
+# define FL_PG_ORDER 0
+#else
+# define FL_PG_ORDER (16 - PAGE_SHIFT)
+#endif
+
+/* RX_PULL_LEN should be <= RX_COPY_THRES */
+#define RX_COPY_THRES    256
+#define RX_PULL_LEN      128
+
+/*
+ * Main body length for sk_buffs used for Rx Ethernet packets with fragments.
+ * Should be >= RX_PULL_LEN but possibly bigger to give pskb_may_pull some room.
+ */
+#define RX_PKT_SKB_LEN   512
+
+/* Ethernet header padding prepended to RX_PKTs */
+#define RX_PKT_PAD 2
+
+/*
+ * Max number of Tx descriptors we clean up at a time.  Should be modest as
+ * freeing skbs isn't cheap and it happens while holding locks.  As long as
+ * we free packets faster than they arrive, we eventually catch up and keep
+ * the amortized cost reasonable.  Must be >= 2 * TXQ_STOP_THRES.
+ */
+#define MAX_TX_RECLAIM 16
+
+/*
+ * Max number of Rx buffers we replenish at a time.  Again keep this modest,
+ * allocating buffers isn't cheap either.
+ */
+#define MAX_RX_REFILL 16U
+
+/*
+ * Period of the Rx queue check timer.  This timer is infrequent as it has
+ * something to do only when the system experiences severe memory shortage.
+ */
+#define RX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Period of the Tx queue check timer.
+ */
+#define TX_QCHECK_PERIOD (HZ / 2)
+
+/*
+ * Max number of Tx descriptors to be reclaimed by the Tx timer.
+ */
+#define MAX_TIMER_TX_RECLAIM 100
+
+/*
+ * Timer index used when backing off due to memory shortage.
+ */
+#define NOMEM_TMR_IDX (SGE_NTIMERS - 1)
+
+/*
+ * An FL with <= FL_STARVE_THRES buffers is starving and a periodic timer will
+ * attempt to refill it.
+ */
+#define FL_STARVE_THRES 4
+
+/*
+ * Suspend an Ethernet Tx queue with fewer available descriptors than this.
+ * This is the same as calc_tx_descs() for a TSO packet with
+ * nr_frags == MAX_SKB_FRAGS.
+ */
+#define ETHTXQ_STOP_THRES \
+	(1 + DIV_ROUND_UP((3 * MAX_SKB_FRAGS) / 2 + (MAX_SKB_FRAGS & 1), 8))
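+
+/*
+ * Worked example for the expression above, assuming MAX_SKB_FRAGS == 18
+ * (typical with 4KB pages; the exact value is kernel-configuration
+ * dependent): (3 * 18) / 2 + (18 & 1) = 27 SGL flits, so the threshold is
+ * 1 + DIV_ROUND_UP(27, 8) = 5 descriptors.
+ */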
+
+/*
+ * Suspension threshold for non-Ethernet Tx queues.  We require enough room
+ * for a full sized WR.
+ */
+#define TXQ_STOP_THRES (SGE_MAX_WR_LEN / sizeof(struct tx_desc))
+
+/*
+ * Max Tx descriptor space we allow for an Ethernet packet to be inlined
+ * into a WR.
+ */
+#define MAX_IMM_TX_PKT_LEN 128
+
+/*
+ * Max size of a WR sent through a control Tx queue.
+ */
+#define MAX_CTRL_WR_LEN SGE_MAX_WR_LEN
+
+enum {
+	/* packet alignment in FL buffers */
+	FL_ALIGN = L1_CACHE_BYTES < 32 ? 32 : L1_CACHE_BYTES,
+	/* egress status entry size */
+	STAT_LEN = L1_CACHE_BYTES > 64 ? 128 : 64
+};
+
+struct tx_sw_desc {                /* SW state per Tx descriptor */
+	struct sk_buff *skb;
+	struct ulptx_sgl *sgl;
+};
+
+struct rx_sw_desc {                /* SW state per Rx descriptor */
+	struct page *page;
+	dma_addr_t dma_addr;
+};
+
+/*
+ * The low bits of rx_sw_desc.dma_addr have special meaning.
+ */
+enum {
+	RX_LARGE_BUF    = 1 << 0, /* buffer is larger than PAGE_SIZE */
+	RX_UNMAPPED_BUF = 1 << 1, /* buffer is not mapped */
+};
+
+static inline dma_addr_t get_buf_addr(const struct rx_sw_desc *d)
+{
+	return d->dma_addr & ~(dma_addr_t)(RX_LARGE_BUF | RX_UNMAPPED_BUF);
+}
+
+static inline bool is_buf_mapped(const struct rx_sw_desc *d)
+{
+	return !(d->dma_addr & RX_UNMAPPED_BUF);
+}
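+
+/*
+ * The flag encoding works because FL buffers are at least PAGE_SIZE
+ * aligned, leaving bits 0 and 1 of the bus address free to carry
+ * metadata.  For example (address hypothetical):
+ *
+ *	sd->dma_addr = 0x12340000 | RX_LARGE_BUF;	// stored with flag
+ *	get_buf_addr(sd)  == 0x12340000			// flag masked off
+ *	is_buf_mapped(sd) == true			// RX_UNMAPPED_BUF clear
+ */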
+
+/**
+ *	txq_avail - return the number of available slots in a Tx queue
+ *	@q: the Tx queue
+ *
+ *	Returns the number of descriptors in a Tx queue available to write new
+ *	packets.
+ */
+static inline unsigned int txq_avail(const struct sge_txq *q)
+{
+	return q->size - 1 - q->in_use;
+}
+
+/**
+ *	fl_cap - return the capacity of a free-buffer list
+ *	@fl: the FL
+ *
+ *	Returns the capacity of a free-buffer list.  The capacity is less than
+ *	the size because one descriptor needs to be left unpopulated, otherwise
+ *	HW will think the FL is empty.
+ */
+static inline unsigned int fl_cap(const struct sge_fl *fl)
+{
+	return fl->size - 8;   /* 1 descriptor = 8 buffers */
+}
+
+static inline bool fl_starving(const struct sge_fl *fl)
+{
+	return fl->avail - fl->pend_cred <= FL_STARVE_THRES;
+}
+
+static int map_skb(struct device *dev, const struct sk_buff *skb,
+		   dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	*addr = dma_map_single(dev, skb->data, skb_headlen(skb), DMA_TO_DEVICE);
+	if (dma_mapping_error(dev, *addr))
+		goto out_err;
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+
+	for (fp = si->frags; fp < end; fp++) {
+		*++addr = dma_map_page(dev, fp->page, fp->page_offset, fp->size,
+				       DMA_TO_DEVICE);
+		if (dma_mapping_error(dev, *addr))
+			goto unwind;
+	}
+	return 0;
+
+unwind:
+	while (fp-- > si->frags)
+		dma_unmap_page(dev, *--addr, fp->size, DMA_TO_DEVICE);
+
+	dma_unmap_single(dev, addr[-1], skb_headlen(skb), DMA_TO_DEVICE);
+out_err:
+	return -ENOMEM;
+}
+
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+static void unmap_skb(struct device *dev, const struct sk_buff *skb,
+		      const dma_addr_t *addr)
+{
+	const skb_frag_t *fp, *end;
+	const struct skb_shared_info *si;
+
+	dma_unmap_single(dev, *addr++, skb_headlen(skb), DMA_TO_DEVICE);
+
+	si = skb_shinfo(skb);
+	end = &si->frags[si->nr_frags];
+	for (fp = si->frags; fp < end; fp++)
+		dma_unmap_page(dev, *addr++, fp->size, DMA_TO_DEVICE);
+}
+
+/**
+ *	deferred_unmap_destructor - unmap a packet when it is freed
+ *	@skb: the packet
+ *
+ *	This is the packet destructor used for Tx packets that need to remain
+ *	mapped until they are freed rather than until their Tx descriptors are
+ *	freed.
+ */
+static void deferred_unmap_destructor(struct sk_buff *skb)
+{
+	unmap_skb(skb->dev->dev.parent, skb, (dma_addr_t *)skb->head);
+}
+#endif
+
+static void unmap_sgl(struct device *dev, const struct sk_buff *skb,
+		      const struct ulptx_sgl *sgl, const struct sge_txq *q)
+{
+	const struct ulptx_sge_pair *p;
+	unsigned int nfrags = skb_shinfo(skb)->nr_frags;
+
+	if (likely(skb_headlen(skb)))
+		dma_unmap_single(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+				 DMA_TO_DEVICE);
+	else {
+		dma_unmap_page(dev, be64_to_cpu(sgl->addr0), ntohl(sgl->len0),
+			       DMA_TO_DEVICE);
+		nfrags--;
+	}
+
+	/*
+	 * the complexity below is because of the possibility of a wrap-around
+	 * in the middle of an SGL
+	 */
+	for (p = sgl->sge; nfrags >= 2; nfrags -= 2) {
+		if (likely((u8 *)(p + 1) <= (u8 *)q->stat)) {
+unmap:			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(p->addr[1]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p++;
+		} else if ((u8 *)p == (u8 *)q->stat) {
+			p = (const struct ulptx_sge_pair *)q->desc;
+			goto unmap;
+		} else if ((u8 *)p + 8 == (u8 *)q->stat) {
+			const __be64 *addr = (const __be64 *)q->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[1]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[2];
+		} else {
+			const __be64 *addr = (const __be64 *)q->desc;
+
+			dma_unmap_page(dev, be64_to_cpu(p->addr[0]),
+				       ntohl(p->len[0]), DMA_TO_DEVICE);
+			dma_unmap_page(dev, be64_to_cpu(addr[0]),
+				       ntohl(p->len[1]), DMA_TO_DEVICE);
+			p = (const struct ulptx_sge_pair *)&addr[1];
+		}
+	}
+	if (nfrags) {
+		__be64 addr;
+
+		if ((u8 *)p == (u8 *)q->stat)
+			p = (const struct ulptx_sge_pair *)q->desc;
+		addr = (u8 *)p + 16 <= (u8 *)q->stat ? p->addr[0] :
+						       *(const __be64 *)q->desc;
+		dma_unmap_page(dev, be64_to_cpu(addr), ntohl(p->len[0]),
+			       DMA_TO_DEVICE);
+	}
+}
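+
+/*
+ * The four branches in the loop above correspond to where a ulptx_sge_pair
+ * (one length flit followed by two address flits) lands relative to the
+ * end-of-ring status page at q->stat: entirely before it (the common
+ * case), starting exactly at it (the whole pair wrapped to q->desc), one
+ * flit short of it (the length flit fits, both addresses wrapped), or two
+ * flits short of it (the length flit and first address fit, only the
+ * second address wrapped).
+ */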
+
+/**
+ *	free_tx_desc - reclaims Tx descriptors and their buffers
+ *	@adapter: the adapter
+ *	@q: the Tx queue to reclaim descriptors from
+ *	@n: the number of descriptors to reclaim
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims Tx descriptors from an SGE Tx queue and frees the associated
+ *	Tx buffers.  Called with the Tx queue lock held.
+ */
+static void free_tx_desc(struct adapter *adap, struct sge_txq *q,
+			 unsigned int n, bool unmap)
+{
+	struct tx_sw_desc *d;
+	unsigned int cidx = q->cidx;
+	struct device *dev = adap->pdev_dev;
+
+	d = &q->sdesc[cidx];
+	while (n--) {
+		if (d->skb) {                       /* an SGL is present */
+			if (unmap)
+				unmap_sgl(dev, d->skb, d->sgl, q);
+			kfree_skb(d->skb);
+			d->skb = NULL;
+		}
+		++d;
+		if (++cidx == q->size) {
+			cidx = 0;
+			d = q->sdesc;
+		}
+	}
+	q->cidx = cidx;
+}
+
+/*
+ * Return the number of reclaimable descriptors in a Tx queue.
+ */
+static inline int reclaimable(const struct sge_txq *q)
+{
+	int hw_cidx = ntohs(q->stat->cidx);
+	hw_cidx -= q->cidx;
+	return hw_cidx < 0 ? hw_cidx + q->size : hw_cidx;
+}
+
+/**
+ *	reclaim_completed_tx - reclaims completed Tx descriptors
+ *	@adap: the adapter
+ *	@q: the Tx queue to reclaim completed descriptors from
+ *	@unmap: whether the buffers should be unmapped for DMA
+ *
+ *	Reclaims Tx descriptors that the SGE has indicated it has processed,
+ *	and frees the associated buffers if possible.  Called with the Tx
+ *	queue locked.
+ */
+static inline void reclaim_completed_tx(struct adapter *adap, struct sge_txq *q,
+					bool unmap)
+{
+	int avail = reclaimable(q);
+
+	if (avail) {
+		/*
+		 * Limit the amount of clean up work we do at a time to keep
+		 * the Tx lock hold time O(1).
+		 */
+		if (avail > MAX_TX_RECLAIM)
+			avail = MAX_TX_RECLAIM;
+
+		free_tx_desc(adap, q, avail, unmap);
+		q->in_use -= avail;
+	}
+}
+
+static inline int get_buf_size(const struct rx_sw_desc *d)
+{
+#if FL_PG_ORDER > 0
+	return (d->dma_addr & RX_LARGE_BUF) ? (PAGE_SIZE << FL_PG_ORDER) :
+					      PAGE_SIZE;
+#else
+	return PAGE_SIZE;
+#endif
+}
+
+/**
+ *	free_rx_bufs - free the Rx buffers on an SGE free list
+ *	@adap: the adapter
+ *	@q: the SGE free list to free buffers from
+ *	@n: how many buffers to free
+ *
+ *	Release the next @n buffers on an SGE free-buffer Rx queue.  The
+ *	buffers must be made inaccessible to HW before calling this function.
+ */
+static void free_rx_bufs(struct adapter *adap, struct sge_fl *q, int n)
+{
+	while (n--) {
+		struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+		if (is_buf_mapped(d))
+			dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+				       get_buf_size(d), PCI_DMA_FROMDEVICE);
+		put_page(d->page);
+		d->page = NULL;
+		if (++q->cidx == q->size)
+			q->cidx = 0;
+		q->avail--;
+	}
+}
+
+/**
+ *	unmap_rx_buf - unmap the current Rx buffer on an SGE free list
+ *	@adap: the adapter
+ *	@q: the SGE free list
+ *
+ *	Unmap the current buffer on an SGE free-buffer Rx queue.  The
+ *	buffer must be made inaccessible to HW before calling this function.
+ *
+ *	This is similar to @free_rx_bufs above but does not free the buffer.
+ *	Note that the FL nevertheless loses any further access to the buffer.
+ */
+static void unmap_rx_buf(struct adapter *adap, struct sge_fl *q)
+{
+	struct rx_sw_desc *d = &q->sdesc[q->cidx];
+
+	if (is_buf_mapped(d))
+		dma_unmap_page(adap->pdev_dev, get_buf_addr(d),
+			       get_buf_size(d), PCI_DMA_FROMDEVICE);
+	d->page = NULL;
+	if (++q->cidx == q->size)
+		q->cidx = 0;
+	q->avail--;
+}
+
+static inline void ring_fl_db(struct adapter *adap, struct sge_fl *q)
+{
+	if (q->pend_cred >= 8) {
+		wmb();
+		t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), DBPRIO |
+			     QID(q->cntxt_id) | PIDX(q->pend_cred / 8));
+		q->pend_cred &= 7;
+	}
+}
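+
+/*
+ * FL doorbell credits are in units of 8 buffers (one HW descriptor).  As
+ * a worked example of the flow above: after 27 buffers are refilled,
+ * pend_cred is 27, the doorbell posts 27 / 8 = 3 credits, and the
+ * remainder 27 & 7 = 3 stays in pend_cred until more buffers accumulate.
+ * This is also why fl_starving() looks at avail - pend_cred: only buffers
+ * already announced through the doorbell are usable by HW.
+ */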
+
+static inline void set_rx_sw_desc(struct rx_sw_desc *sd, struct page *pg,
+				  dma_addr_t mapping)
+{
+	sd->page = pg;
+	sd->dma_addr = mapping;      /* includes size low bits */
+}
+
+/**
+ *	refill_fl - refill an SGE Rx buffer ring
+ *	@adap: the adapter
+ *	@q: the ring to refill
+ *	@n: the number of new buffers to allocate
+ *	@gfp: the gfp flags for the allocations
+ *
+ *	(Re)populate an SGE free-buffer queue with up to @n new packet buffers,
+ *	allocated with the supplied gfp flags.  The caller must ensure that
+ *	@n does not exceed the queue's capacity.  If the queue is found
+ *	critically low afterwards, it is marked as starving in the bitmap of
+ *	starving FLs.
+ *
+ *	Returns the number of buffers allocated.
+ */
+static unsigned int refill_fl(struct adapter *adap, struct sge_fl *q, int n,
+			      gfp_t gfp)
+{
+	struct page *pg;
+	dma_addr_t mapping;
+	unsigned int cred = q->avail;
+	__be64 *d = &q->desc[q->pidx];
+	struct rx_sw_desc *sd = &q->sdesc[q->pidx];
+
+	gfp |= __GFP_NOWARN;         /* failures are expected */
+
+#if FL_PG_ORDER > 0
+	/*
+	 * Prefer large buffers
+	 */
+	while (n) {
+		pg = alloc_pages(gfp | __GFP_COMP, FL_PG_ORDER);
+		if (unlikely(!pg)) {
+			q->large_alloc_failed++;
+			break;       /* fall back to single pages */
+		}
+
+		mapping = dma_map_page(adap->pdev_dev, pg, 0,
+				       PAGE_SIZE << FL_PG_ORDER,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+			__free_pages(pg, FL_PG_ORDER);
+			goto out;   /* do not try small pages for this error */
+		}
+		mapping |= RX_LARGE_BUF;
+		*d++ = cpu_to_be64(mapping);
+
+		set_rx_sw_desc(sd, pg, mapping);
+		sd++;
+
+		q->avail++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+		n--;
+	}
+#endif
+
+	while (n--) {
+		pg = __netdev_alloc_page(adap->port[0], gfp);
+		if (unlikely(!pg)) {
+			q->alloc_failed++;
+			break;
+		}
+
+		mapping = dma_map_page(adap->pdev_dev, pg, 0, PAGE_SIZE,
+				       PCI_DMA_FROMDEVICE);
+		if (unlikely(dma_mapping_error(adap->pdev_dev, mapping))) {
+			netdev_free_page(adap->port[0], pg);
+			goto out;
+		}
+		*d++ = cpu_to_be64(mapping);
+
+		set_rx_sw_desc(sd, pg, mapping);
+		sd++;
+
+		q->avail++;
+		if (++q->pidx == q->size) {
+			q->pidx = 0;
+			sd = q->sdesc;
+			d = q->desc;
+		}
+	}
+
+out:	cred = q->avail - cred;
+	q->pend_cred += cred;
+	ring_fl_db(adap, q);
+
+	if (unlikely(fl_starving(q))) {
+		smp_wmb();
+		set_bit(q->cntxt_id, adap->sge.starving_fl);
+	}
+
+	return cred;
+}
+
+static inline void __refill_fl(struct adapter *adap, struct sge_fl *fl)
+{
+	refill_fl(adap, fl, min(MAX_RX_REFILL, fl_cap(fl) - fl->avail),
+		  GFP_ATOMIC);
+}
+
+/**
+ *	alloc_ring - allocate resources for an SGE descriptor ring
+ *	@dev: the PCI device's core device
+ *	@nelem: the number of descriptors
+ *	@elem_size: the size of each descriptor
+ *	@sw_size: the size of the SW state associated with each ring element
+ *	@phys: the physical address of the allocated ring
+ *	@metadata: address of the array holding the SW state for the ring
+ *	@stat_size: extra space in HW ring for status information
+ *
+ *	Allocates resources for an SGE descriptor ring, such as Tx queues,
+ *	free buffer lists, or response queues.  Each SGE ring requires
+ *	space for its HW descriptors plus, optionally, space for the SW state
+ *	associated with each HW entry (the metadata).  The function returns
+ *	three values: the virtual address for the HW ring (the return value
+ *	of the function), the bus address of the HW ring, and the address
+ *	of the SW ring.
+ */
+static void *alloc_ring(struct device *dev, size_t nelem, size_t elem_size,
+			size_t sw_size, dma_addr_t *phys, void *metadata,
+			size_t stat_size)
+{
+	size_t len = nelem * elem_size + stat_size;
+	void *s = NULL;
+	void *p = dma_alloc_coherent(dev, len, phys, GFP_KERNEL);
+
+	if (!p)
+		return NULL;
+	if (sw_size) {
+		s = kcalloc(nelem, sw_size, GFP_KERNEL);
+
+		if (!s) {
+			dma_free_coherent(dev, len, p, *phys);
+			return NULL;
+		}
+	}
+	if (metadata)
+		*(void **)metadata = s;
+	memset(p, 0, len);
+	return p;
+}
+
+/**
+ *	sgl_len - calculates the size of an SGL of the given capacity
+ *	@n: the number of SGL entries
+ *
+ *	Calculates the number of flits needed for a scatter/gather list that
+ *	can hold the given number of entries.
+ */
+static inline unsigned int sgl_len(unsigned int n)
+{
+	n--;
+	return (3 * n) / 2 + (n & 1) + 2;
+}
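+
+/*
+ * The formula follows from the ULP_TX DSGL layout: the first entry takes
+ * 2 flits (the cmd_nsge/len0 flit plus addr0) and each later pair of
+ * entries takes 3 flits (one flit holding two lengths, two flits holding
+ * two addresses).  E.g. for n == 4: after n--, (3 * 3) / 2 + (3 & 1) + 2
+ * = 4 + 1 + 2 = 7 flits.
+ */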
+
+/**
+ *	flits_to_desc - returns the num of Tx descriptors for the given flits
+ *	@n: the number of flits
+ *
+ *	Returns the number of Tx descriptors needed for the supplied number
+ *	of flits.
+ */
+static inline unsigned int flits_to_desc(unsigned int n)
+{
+	BUG_ON(n > SGE_MAX_WR_LEN / 8);
+	return DIV_ROUND_UP(n, 8);
+}
+
+/**
+ *	is_eth_imm - can an Ethernet packet be sent as immediate data?
+ *	@skb: the packet
+ *
+ *	Returns whether an Ethernet packet is small enough to fit as
+ *	immediate data.
+ */
+static inline int is_eth_imm(const struct sk_buff *skb)
+{
+	return skb->len <= MAX_IMM_TX_PKT_LEN - sizeof(struct cpl_tx_pkt);
+}
+
+/**
+ *	calc_tx_flits - calculate the number of flits for a packet Tx WR
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for a Tx WR for the given Ethernet
+ *	packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_flits(const struct sk_buff *skb)
+{
+	unsigned int flits;
+
+	if (is_eth_imm(skb))
+		return DIV_ROUND_UP(skb->len + sizeof(struct cpl_tx_pkt), 8);
+
+	flits = sgl_len(skb_shinfo(skb)->nr_frags + 1) + 4;
+	if (skb_shinfo(skb)->gso_size)
+		flits += 2;
+	return flits;
+}
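+
+/*
+ * Example: a non-TSO packet with linear data plus 2 page fragments needs
+ * sgl_len(3) + 4 = 5 + 4 = 9 flits, where the 4 covers the WR header and
+ * the CPL; a TSO packet of the same shape needs 2 more flits for the LSO
+ * header, i.e. 11, which flits_to_desc() turns into 2 Tx descriptors.
+ */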
+
+/**
+ *	calc_tx_descs - calculate the number of Tx descriptors for a packet
+ *	@skb: the packet
+ *
+ *	Returns the number of Tx descriptors needed for the given Ethernet
+ *	packet, including the needed WR and CPL headers.
+ */
+static inline unsigned int calc_tx_descs(const struct sk_buff *skb)
+{
+	return flits_to_desc(calc_tx_flits(skb));
+}
+
+/**
+ *	write_sgl - populate a scatter/gather list for a packet
+ *	@skb: the packet
+ *	@q: the Tx queue we are writing into
+ *	@sgl: starting location for writing the SGL
+ *	@end: points right after the end of the SGL
+ *	@start: start offset into skb main-body data to include in the SGL
+ *	@addr: the list of bus addresses for the SGL elements
+ *
+ *	Generates a gather list for the buffers that make up a packet.
+ *	The caller must provide adequate space for the SGL that will be written.
+ *	The SGL includes all of the packet's page fragments and the data in its
+ *	main body except for the first @start bytes.  @sgl must be 16-byte
+ *	aligned and within a Tx descriptor with available space.  @end points
+ *	right after the end of the SGL but does not account for any potential
+ *	wrap around, i.e., @end > @sgl.
+ */
+static void write_sgl(const struct sk_buff *skb, struct sge_txq *q,
+		      struct ulptx_sgl *sgl, u64 *end, unsigned int start,
+		      const dma_addr_t *addr)
+{
+	unsigned int i, len;
+	struct ulptx_sge_pair *to;
+	const struct skb_shared_info *si = skb_shinfo(skb);
+	unsigned int nfrags = si->nr_frags;
+	struct ulptx_sge_pair buf[MAX_SKB_FRAGS / 2 + 1];
+
+	len = skb_headlen(skb) - start;
+	if (likely(len)) {
+		sgl->len0 = htonl(len);
+		sgl->addr0 = cpu_to_be64(addr[0] + start);
+		nfrags++;
+	} else {
+		sgl->len0 = htonl(si->frags[0].size);
+		sgl->addr0 = cpu_to_be64(addr[1]);
+	}
+
+	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nfrags));
+	if (likely(--nfrags == 0))
+		return;
+	/*
+	 * Most of the complexity below deals with the possibility we hit the
+	 * end of the queue in the middle of writing the SGL.  For this case
+	 * only we create the SGL in a temporary buffer and then copy it.
+	 */
+	to = (u8 *)end > (u8 *)q->stat ? buf : sgl->sge;
+
+	for (i = (nfrags != si->nr_frags); nfrags >= 2; nfrags -= 2, to++) {
+		to->len[0] = cpu_to_be32(si->frags[i].size);
+		to->len[1] = cpu_to_be32(si->frags[++i].size);
+		to->addr[0] = cpu_to_be64(addr[i]);
+		to->addr[1] = cpu_to_be64(addr[++i]);
+	}
+	if (nfrags) {
+		to->len[0] = cpu_to_be32(si->frags[i].size);
+		to->len[1] = cpu_to_be32(0);
+		to->addr[0] = cpu_to_be64(addr[i + 1]);
+	}
+	if (unlikely((u8 *)end > (u8 *)q->stat)) {
+		unsigned int part0 = (u8 *)q->stat - (u8 *)sgl->sge, part1;
+
+		if (likely(part0))
+			memcpy(sgl->sge, buf, part0);
+		part1 = (u8 *)end - (u8 *)q->stat;
+		memcpy(q->desc, (u8 *)buf + part0, part1);
+		end = (void *)q->desc + part1;
+	}
+	if ((uintptr_t)end & 8)           /* 0-pad to multiple of 16 */
+		*(u64 *)end = 0;
+}
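+
+/*
+ * Design note on the bounce buffer above: a wrap in the middle of an SGL
+ * happens at most once per trip around the ring, so it is cheaper to
+ * build such an SGL contiguously on the stack and memcpy the two pieces
+ * than to burden the common path with per-entry wrap checks.
+ */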
+
+/**
+ *	ring_tx_db - check and potentially ring a Tx queue's doorbell
+ *	@adap: the adapter
+ *	@q: the Tx queue
+ *	@n: number of new descriptors to give to HW
+ *
+ *	Ring the doorbell for a Tx queue.
+ */
+static inline void ring_tx_db(struct adapter *adap, struct sge_txq *q, int n)
+{
+	wmb();            /* write descriptors before telling HW */
+	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL),
+		     QID(q->cntxt_id) | PIDX(n));
+}
+
+/**
+ *	inline_tx_skb - inline a packet's data into Tx descriptors
+ *	@skb: the packet
+ *	@q: the Tx queue where the packet will be inlined
+ *	@pos: starting position in the Tx queue where to inline the packet
+ *
+ *	Inline a packet's contents directly into Tx descriptors, starting at
+ *	the given position within the Tx DMA ring.
+ *	Most of the complexity of this operation is dealing with wrap arounds
+ *	in the middle of the packet we want to inline.
+ */
+static void inline_tx_skb(const struct sk_buff *skb, const struct sge_txq *q,
+			  void *pos)
+{
+	u64 *p;
+	int left = (void *)q->stat - pos;
+
+	if (likely(skb->len <= left)) {
+		if (likely(!skb->data_len))
+			skb_copy_from_linear_data(skb, pos, skb->len);
+		else
+			skb_copy_bits(skb, 0, pos, skb->len);
+		pos += skb->len;
+	} else {
+		skb_copy_bits(skb, 0, pos, left);
+		skb_copy_bits(skb, left, q->desc, skb->len - left);
+		pos = (void *)q->desc + (skb->len - left);
+	}
+
+	/* 0-pad to multiple of 16 */
+	p = PTR_ALIGN(pos, 8);
+	if ((uintptr_t)p & 8)
+		*p = 0;
+}
+
+/*
+ * Figure out what HW csum a packet wants and return the appropriate control
+ * bits.
+ */
+static u64 hwcsum(const struct sk_buff *skb)
+{
+	int csum_type;
+	const struct iphdr *iph = ip_hdr(skb);
+
+	if (iph->version == 4) {
+		if (iph->protocol == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP;
+		else if (iph->protocol == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP;
+		else {
+nocsum:			/*
+			 * unknown protocol, disable HW csum
+			 * and hope a bad packet is detected
+			 */
+			return TXPKT_L4CSUM_DIS;
+		}
+	} else {
+		/*
+		 * this doesn't work with extension headers
+		 */
+		const struct ipv6hdr *ip6h = (const struct ipv6hdr *)iph;
+
+		if (ip6h->nexthdr == IPPROTO_TCP)
+			csum_type = TX_CSUM_TCPIP6;
+		else if (ip6h->nexthdr == IPPROTO_UDP)
+			csum_type = TX_CSUM_UDPIP6;
+		else
+			goto nocsum;
+	}
+
+	if (likely(csum_type >= TX_CSUM_TCPIP))
+		return TXPKT_CSUM_TYPE(csum_type) |
+			TXPKT_IPHDR_LEN(skb_network_header_len(skb)) |
+			TXPKT_ETHHDR_LEN(skb_network_offset(skb) - ETH_HLEN);
+	else {
+		int start = skb_transport_offset(skb);
+
+		return TXPKT_CSUM_TYPE(csum_type) | TXPKT_CSUM_START(start) |
+			TXPKT_CSUM_LOC(start + skb->csum_offset);
+	}
+}
+
+static void eth_txq_stop(struct sge_eth_txq *q)
+{
+	netif_tx_stop_queue(q->txq);
+	q->q.stops++;
+}
+
+static inline void txq_advance(struct sge_txq *q, unsigned int n)
+{
+	q->in_use += n;
+	q->pidx += n;
+	if (q->pidx >= q->size)
+		q->pidx -= q->size;
+}
+
+/**
+ *	t4_eth_xmit - add a packet to an Ethernet Tx queue
+ *	@skb: the packet
+ *	@dev: the egress net device
+ *
+ *	Add a packet to an SGE Ethernet Tx queue.  Runs with softirqs disabled.
+ */
+netdev_tx_t t4_eth_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	u32 wr_mid;
+	u64 cntrl, *end;
+	int qidx, credits;
+	unsigned int flits, ndesc;
+	struct adapter *adap;
+	struct sge_eth_txq *q;
+	const struct port_info *pi;
+	struct fw_eth_tx_pkt_wr *wr;
+	struct cpl_tx_pkt_core *cpl;
+	const struct skb_shared_info *ssi;
+	dma_addr_t addr[MAX_SKB_FRAGS + 1];
+
+	/*
+	 * The chip's minimum packet length is 10 octets, but we play it safe
+	 * and reject anything shorter than an Ethernet header.
+	 */
+	if (unlikely(skb->len < ETH_HLEN)) {
+out_free:	dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	pi = netdev_priv(dev);
+	adap = pi->adapter;
+	qidx = skb_get_queue_mapping(skb);
+	q = &adap->sge.ethtxq[qidx + pi->first_qset];
+
+	reclaim_completed_tx(adap, &q->q, true);
+
+	flits = calc_tx_flits(skb);
+	ndesc = flits_to_desc(flits);
+	credits = txq_avail(&q->q) - ndesc;
+
+	if (unlikely(credits < 0)) {
+		eth_txq_stop(q);
+		dev_err(adap->pdev_dev,
+			"%s: Tx ring %u full while queue awake!\n",
+			dev->name, qidx);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (!is_eth_imm(skb) &&
+	    unlikely(map_skb(adap->pdev_dev, skb, addr) < 0)) {
+		q->mapping_err++;
+		goto out_free;
+	}
+
+	wr_mid = FW_WR_LEN16(DIV_ROUND_UP(flits, 2));
+	if (unlikely(credits < ETHTXQ_STOP_THRES)) {
+		eth_txq_stop(q);
+		wr_mid |= FW_WR_EQUEQ | FW_WR_EQUIQ;
+	}
+
+	wr = (void *)&q->q.desc[q->q.pidx];
+	wr->equiq_to_len16 = htonl(wr_mid);
+	wr->r3 = cpu_to_be64(0);
+	end = (u64 *)wr + flits;
+
+	ssi = skb_shinfo(skb);
+	if (ssi->gso_size) {
+		struct cpl_tx_pkt_lso *lso = (void *)wr;
+		bool v6 = (ssi->gso_type & SKB_GSO_TCPV6) != 0;
+		int l3hdr_len = skb_network_header_len(skb);
+		int eth_xtra_len = skb_network_offset(skb) - ETH_HLEN;
+
+		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       FW_WR_IMMDLEN(sizeof(*lso)));
+		lso->lso_ctrl = htonl(LSO_OPCODE(CPL_TX_PKT_LSO) |
+				      LSO_FIRST_SLICE | LSO_LAST_SLICE |
+				      LSO_IPV6(v6) |
+				      LSO_ETHHDR_LEN(eth_xtra_len / 4) |
+				      LSO_IPHDR_LEN(l3hdr_len / 4) |
+				      LSO_TCPHDR_LEN(tcp_hdr(skb)->doff));
+		lso->ipid_ofst = htons(0);
+		lso->mss = htons(ssi->gso_size);
+		lso->seqno_offset = htonl(0);
+		lso->len = htonl(skb->len);
+		cpl = (void *)(lso + 1);
+		cntrl = TXPKT_CSUM_TYPE(v6 ? TX_CSUM_TCPIP6 : TX_CSUM_TCPIP) |
+			TXPKT_IPHDR_LEN(l3hdr_len) |
+			TXPKT_ETHHDR_LEN(eth_xtra_len);
+		q->tso++;
+		q->tx_cso += ssi->gso_segs;
+	} else {
+		int len;
+
+		len = is_eth_imm(skb) ? skb->len + sizeof(*cpl) : sizeof(*cpl);
+		wr->op_immdlen = htonl(FW_WR_OP(FW_ETH_TX_PKT_WR) |
+				       FW_WR_IMMDLEN(len));
+		cpl = (void *)(wr + 1);
+		if (skb->ip_summed == CHECKSUM_PARTIAL) {
+			cntrl = hwcsum(skb) | TXPKT_IPCSUM_DIS;
+			q->tx_cso++;
+		} else
+			cntrl = TXPKT_L4CSUM_DIS | TXPKT_IPCSUM_DIS;
+	}
+
+	if (vlan_tx_tag_present(skb)) {
+		q->vlan_ins++;
+		cntrl |= TXPKT_VLAN_VLD | TXPKT_VLAN(vlan_tx_tag_get(skb));
+	}
+
+	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) |
+			   TXPKT_INTF(pi->tx_chan) | TXPKT_PF(0));
+	cpl->pack = htons(0);
+	cpl->len = htons(skb->len);
+	cpl->ctrl1 = cpu_to_be64(cntrl);
+
+	if (is_eth_imm(skb)) {
+		inline_tx_skb(skb, &q->q, cpl + 1);
+		dev_kfree_skb(skb);
+	} else {
+		int last_desc;
+
+		write_sgl(skb, &q->q, (struct ulptx_sgl *)(cpl + 1), end, 0,
+			  addr);
+		skb_orphan(skb);
+
+		last_desc = q->q.pidx + ndesc - 1;
+		if (last_desc >= q->q.size)
+			last_desc -= q->q.size;
+		q->q.sdesc[last_desc].skb = skb;
+		q->q.sdesc[last_desc].sgl = (struct ulptx_sgl *)(cpl + 1);
+	}
+
+	txq_advance(&q->q, ndesc);
+
+	ring_tx_db(adap, &q->q, ndesc);
+	return NETDEV_TX_OK;
+}
+
+/**
+ *	reclaim_completed_tx_imm - reclaim completed control-queue Tx descs
+ *	@q: the SGE control Tx queue
+ *
+ *	This is a variant of reclaim_completed_tx() that is used for Tx queues
+ *	that send only immediate data (presently just the control queues) and
+ *	thus do not have any sk_buffs to release.
+ */
+static inline void reclaim_completed_tx_imm(struct sge_txq *q)
+{
+	int hw_cidx = ntohs(q->stat->cidx);
+	int reclaim = hw_cidx - q->cidx;
+
+	if (reclaim < 0)
+		reclaim += q->size;
+
+	q->in_use -= reclaim;
+	q->cidx = hw_cidx;
+}
+
+/**
+ *	is_imm - check whether a packet can be sent as immediate data
+ *	@skb: the packet
+ *
+ *	Returns true if a packet can be sent as a WR with immediate data.
+ */
+static inline int is_imm(const struct sk_buff *skb)
+{
+	return skb->len <= MAX_CTRL_WR_LEN;
+}
+
+/**
+ *	ctrlq_check_stop - check if a control queue is full and should stop
+ *	@q: the queue
+ *	@wr: most recent WR written to the queue
+ *
+ *	Check if a control queue has become full and should be stopped.
+ *	We clean up control queue descriptors very lazily, only when we are out.
+ *	If the queue is still full after reclaiming any completed descriptors
+ *	we suspend it and have the last WR wake it up.
+ */
+static void ctrlq_check_stop(struct sge_ctrl_txq *q, struct fw_wr_hdr *wr)
+{
+	reclaim_completed_tx_imm(&q->q);
+	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+		wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+		q->q.stops++;
+		q->full = 1;
+	}
+}
+
+/**
+ *	ctrl_xmit - send a packet through an SGE control Tx queue
+ *	@q: the control queue
+ *	@skb: the packet
+ *
+ *	Send a packet through an SGE control Tx queue.  Packets sent through
+ *	a control queue must fit entirely as immediate data.
+ */
+static int ctrl_xmit(struct sge_ctrl_txq *q, struct sk_buff *skb)
+{
+	unsigned int ndesc;
+	struct fw_wr_hdr *wr;
+
+	if (unlikely(!is_imm(skb))) {
+		WARN_ON(1);
+		dev_kfree_skb(skb);
+		return NET_XMIT_DROP;
+	}
+
+	ndesc = DIV_ROUND_UP(skb->len, sizeof(struct tx_desc));
+	spin_lock(&q->sendq.lock);
+
+	if (unlikely(q->full)) {
+		skb->priority = ndesc;                  /* save for restart */
+		__skb_queue_tail(&q->sendq, skb);
+		spin_unlock(&q->sendq.lock);
+		return NET_XMIT_CN;
+	}
+
+	wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+	inline_tx_skb(skb, &q->q, wr);
+
+	txq_advance(&q->q, ndesc);
+	if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES))
+		ctrlq_check_stop(q, wr);
+
+	ring_tx_db(q->adap, &q->q, ndesc);
+	spin_unlock(&q->sendq.lock);
+
+	kfree_skb(skb);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ctrlq - restart a suspended control queue
+ *	@data: the control queue to restart
+ *
+ *	Resumes transmission on a suspended Tx control queue.
+ */
+static void restart_ctrlq(unsigned long data)
+{
+	struct sk_buff *skb;
+	unsigned int written = 0;
+	struct sge_ctrl_txq *q = (struct sge_ctrl_txq *)data;
+
+	spin_lock(&q->sendq.lock);
+	reclaim_completed_tx_imm(&q->q);
+	BUG_ON(txq_avail(&q->q) < TXQ_STOP_THRES);  /* q should be empty */
+
+	while ((skb = __skb_dequeue(&q->sendq)) != NULL) {
+		struct fw_wr_hdr *wr;
+		unsigned int ndesc = skb->priority;     /* previously saved */
+
+		/*
+		 * Write descriptors and free skbs outside the lock to limit
+		 * wait times.  q->full is still set so new skbs will be queued.
+		 */
+		spin_unlock(&q->sendq.lock);
+
+		wr = (struct fw_wr_hdr *)&q->q.desc[q->q.pidx];
+		inline_tx_skb(skb, &q->q, wr);
+		kfree_skb(skb);
+
+		written += ndesc;
+		txq_advance(&q->q, ndesc);
+		if (unlikely(txq_avail(&q->q) < TXQ_STOP_THRES)) {
+			unsigned long old = q->q.stops;
+
+			ctrlq_check_stop(q, wr);
+			if (q->q.stops != old) {          /* suspended anew */
+				spin_lock(&q->sendq.lock);
+				goto ringdb;
+			}
+		}
+		if (written > 16) {
+			ring_tx_db(q->adap, &q->q, written);
+			written = 0;
+		}
+		spin_lock(&q->sendq.lock);
+	}
+	q->full = 0;
+ringdb: if (written)
+		ring_tx_db(q->adap, &q->q, written);
+	spin_unlock(&q->sendq.lock);
+}
+
+/**
+ *	t4_mgmt_tx - send a management message
+ *	@adap: the adapter
+ *	@skb: the packet containing the management message
+ *
+ *	Send a management message through control queue 0.
+ */
+int t4_mgmt_tx(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = ctrl_xmit(&adap->sge.ctrlq[0], skb);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	is_ofld_imm - check whether a packet can be sent as immediate data
+ *	@skb: the packet
+ *
+ *	Returns true if a packet can be sent as an offload WR with immediate
+ *	data.  We currently use the same limit as for Ethernet packets.
+ */
+static inline int is_ofld_imm(const struct sk_buff *skb)
+{
+	return skb->len <= MAX_IMM_TX_PKT_LEN;
+}
+
+/**
+ *	calc_tx_flits_ofld - calculate # of flits for an offload packet
+ *	@skb: the packet
+ *
+ *	Returns the number of flits needed for the given offload packet.
+ *	These packets are already fully constructed and no additional headers
+ *	will be added.
+ */
+static inline unsigned int calc_tx_flits_ofld(const struct sk_buff *skb)
+{
+	unsigned int flits, cnt;
+
+	if (is_ofld_imm(skb))
+		return DIV_ROUND_UP(skb->len, 8);
+
+	flits = skb_transport_offset(skb) / 8U;   /* headers */
+	cnt = skb_shinfo(skb)->nr_frags;
+	if (skb->tail != skb->transport_header)
+		cnt++;
+	return flits + sgl_len(cnt);
+}
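+
+/*
+ * Worked example: an offload WR with 16 bytes of headers, some linear
+ * payload beyond the transport header, and one page fragment needs
+ * 16 / 8 + sgl_len(2) = 2 + 4 = 6 flits.
+ */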
+
+/**
+ *	txq_stop_maperr - stop a Tx queue due to I/O MMU exhaustion
+ *	@q: the queue to stop
+ *
+ *	Mark a Tx queue stopped due to I/O MMU exhaustion and resulting
+ *	inability to map packets.  A periodic timer attempts to restart
+ *	queues so marked.
+ */
+static void txq_stop_maperr(struct sge_ofld_txq *q)
+{
+	q->mapping_err++;
+	q->q.stops++;
+	set_bit(q->q.cntxt_id, q->adap->sge.txq_maperr);
+}
+
+/**
+ *	ofldtxq_stop - stop an offload Tx queue that has become full
+ *	@q: the queue to stop
+ *	@skb: the packet causing the queue to become full
+ *
+ *	Stops an offload Tx queue that has become full and modifies the packet
+ *	being written to request a wakeup.
+ */
+static void ofldtxq_stop(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+	struct fw_wr_hdr *wr = (struct fw_wr_hdr *)skb->data;
+
+	wr->lo |= htonl(FW_WR_EQUEQ | FW_WR_EQUIQ);
+	q->q.stops++;
+	q->full = 1;
+}
+
+/**
+ *	service_ofldq - service an offload Tx queue
+ *	@q: the offload queue
+ *
+ *	Services an offload Tx queue by moving packets from its packet queue
+ *	to the HW Tx ring.  The function starts and ends with the queue locked.
+ */
+static void service_ofldq(struct sge_ofld_txq *q)
+{
+	u64 *pos;
+	int credits;
+	struct sk_buff *skb;
+	unsigned int written = 0;
+	unsigned int flits, ndesc;
+
+	while ((skb = skb_peek(&q->sendq)) != NULL && !q->full) {
+		/*
+		 * We drop the lock but leave skb on sendq, thus retaining
+		 * exclusive access to the state of the queue.
+		 */
+		spin_unlock(&q->sendq.lock);
+
+		reclaim_completed_tx(q->adap, &q->q, false);
+
+		flits = skb->priority;                /* previously saved */
+		ndesc = flits_to_desc(flits);
+		credits = txq_avail(&q->q) - ndesc;
+		BUG_ON(credits < 0);
+		if (unlikely(credits < TXQ_STOP_THRES))
+			ofldtxq_stop(q, skb);
+
+		pos = (u64 *)&q->q.desc[q->q.pidx];
+		if (is_ofld_imm(skb))
+			inline_tx_skb(skb, &q->q, pos);
+		else if (map_skb(q->adap->pdev_dev, skb,
+				 (dma_addr_t *)skb->head)) {
+			txq_stop_maperr(q);
+			spin_lock(&q->sendq.lock);
+			break;
+		} else {
+			int last_desc, hdr_len = skb_transport_offset(skb);
+
+			memcpy(pos, skb->data, hdr_len);
+			write_sgl(skb, &q->q, (void *)pos + hdr_len,
+				  pos + flits, hdr_len,
+				  (dma_addr_t *)skb->head);
+#ifdef CONFIG_NEED_DMA_MAP_STATE
+			skb->dev = q->adap->port[0];
+			skb->destructor = deferred_unmap_destructor;
+#endif
+			last_desc = q->q.pidx + ndesc - 1;
+			if (last_desc >= q->q.size)
+				last_desc -= q->q.size;
+			q->q.sdesc[last_desc].skb = skb;
+		}
+
+		txq_advance(&q->q, ndesc);
+		written += ndesc;
+		if (unlikely(written > 32)) {
+			ring_tx_db(q->adap, &q->q, written);
+			written = 0;
+		}
+
+		spin_lock(&q->sendq.lock);
+		__skb_unlink(skb, &q->sendq);
+		if (is_ofld_imm(skb))
+			kfree_skb(skb);
+	}
+	if (likely(written))
+		ring_tx_db(q->adap, &q->q, written);
+}
+
+/**
+ *	ofld_xmit - send a packet through an offload queue
+ *	@q: the Tx offload queue
+ *	@skb: the packet
+ *
+ *	Send an offload packet through an SGE offload queue.
+ */
+static int ofld_xmit(struct sge_ofld_txq *q, struct sk_buff *skb)
+{
+	skb->priority = calc_tx_flits_ofld(skb);       /* save for restart */
+	spin_lock(&q->sendq.lock);
+	__skb_queue_tail(&q->sendq, skb);
+	if (q->sendq.qlen == 1)
+		service_ofldq(q);
+	spin_unlock(&q->sendq.lock);
+	return NET_XMIT_SUCCESS;
+}
+
+/**
+ *	restart_ofldq - restart a suspended offload queue
+ *	@data: the offload queue to restart
+ *
+ *	Resumes transmission on a suspended Tx offload queue.
+ */
+static void restart_ofldq(unsigned long data)
+{
+	struct sge_ofld_txq *q = (struct sge_ofld_txq *)data;
+
+	spin_lock(&q->sendq.lock);
+	q->full = 0;            /* the queue actually is completely empty now */
+	service_ofldq(q);
+	spin_unlock(&q->sendq.lock);
+}
+
+/**
+ *	skb_txq - return the Tx queue an offload packet should use
+ *	@skb: the packet
+ *
+ *	Returns the Tx queue an offload packet should use as indicated by bits
+ *	1-15 in the packet's queue_mapping.
+ */
+static inline unsigned int skb_txq(const struct sk_buff *skb)
+{
+	return skb->queue_mapping >> 1;
+}
+
+/**
+ *	is_ctrl_pkt - return whether an offload packet is a control packet
+ *	@skb: the packet
+ *
+ *	Returns whether an offload packet should use an OFLD or a CTRL
+ *	Tx queue as indicated by bit 0 in the packet's queue_mapping.
+ */
+static inline unsigned int is_ctrl_pkt(const struct sk_buff *skb)
+{
+	return skb->queue_mapping & 1;
+}
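+
+/*
+ * The matching encode step, performed by whoever builds the offload
+ * packet, could look like this (sketch; the helper name is hypothetical):
+ *
+ *	static inline void set_ofld_txq(struct sk_buff *skb,
+ *					unsigned int queue, bool ctrl)
+ *	{
+ *		skb_set_queue_mapping(skb, (queue << 1) | ctrl);
+ *	}
+ */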
+
+static inline int ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+	unsigned int idx = skb_txq(skb);
+
+	if (unlikely(is_ctrl_pkt(skb)))
+		return ctrl_xmit(&adap->sge.ctrlq[idx], skb);
+	return ofld_xmit(&adap->sge.ofldtxq[idx], skb);
+}
+
+/**
+ *	t4_ofld_send - send an offload packet
+ *	@adap: the adapter
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  We use the packet queue_mapping to select the
+ *	appropriate Tx queue as follows: bit 0 indicates whether the packet
+ *	should be sent as regular or control, bits 1-15 select the queue.
+ */
+int t4_ofld_send(struct adapter *adap, struct sk_buff *skb)
+{
+	int ret;
+
+	local_bh_disable();
+	ret = ofld_send(adap, skb);
+	local_bh_enable();
+	return ret;
+}
+
+/**
+ *	cxgb4_ofld_send - send an offload packet
+ *	@dev: the net device
+ *	@skb: the packet
+ *
+ *	Sends an offload packet.  This is an exported version of @t4_ofld_send,
+ *	intended for ULDs.
+ */
+int cxgb4_ofld_send(struct net_device *dev, struct sk_buff *skb)
+{
+	return t4_ofld_send(netdev2adap(dev), skb);
+}
+EXPORT_SYMBOL(cxgb4_ofld_send);
+
+static inline void copy_frags(struct skb_shared_info *ssi,
+			      const struct pkt_gl *gl, unsigned int offset)
+{
+	unsigned int n;
+
+	/* usually there's just one frag */
+	ssi->frags[0].page = gl->frags[0].page;
+	ssi->frags[0].page_offset = gl->frags[0].page_offset + offset;
+	ssi->frags[0].size = gl->frags[0].size - offset;
+	ssi->nr_frags = gl->nfrags;
+	n = gl->nfrags - 1;
+	if (n)
+		memcpy(&ssi->frags[1], &gl->frags[1], n * sizeof(skb_frag_t));
+
+	/* get a reference to the last page, we don't own it */
+	get_page(gl->frags[n].page);
+}
+
+/**
+ *	cxgb4_pktgl_to_skb - build an sk_buff from a packet gather list
+ *	@gl: the gather list
+ *	@skb_len: size of sk_buff main body if it carries fragments
+ *	@pull_len: amount of data to move to the sk_buff's main body
+ *
+ *	Builds an sk_buff from the given packet gather list.  Returns the
+ *	sk_buff or %NULL if sk_buff allocation failed.
+ */
+struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
+				   unsigned int skb_len, unsigned int pull_len)
+{
+	struct sk_buff *skb;
+
+	/*
+	 * Below we rely on RX_COPY_THRES being less than the smallest Rx buffer
+	 * size, which is expected since buffers are at least PAGE_SIZE each.
+	 * In this case packets up to RX_COPY_THRES have only one fragment.
+	 */
+	if (gl->tot_len <= RX_COPY_THRES) {
+		skb = dev_alloc_skb(gl->tot_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, gl->tot_len);
+		skb_copy_to_linear_data(skb, gl->va, gl->tot_len);
+	} else {
+		skb = dev_alloc_skb(skb_len);
+		if (unlikely(!skb))
+			goto out;
+		__skb_put(skb, pull_len);
+		skb_copy_to_linear_data(skb, gl->va, pull_len);
+
+		copy_frags(skb_shinfo(skb), gl, pull_len);
+		skb->len = gl->tot_len;
+		skb->data_len = skb->len - pull_len;
+		skb->truesize += skb->data_len;
+	}
+out:	return skb;
+}
+EXPORT_SYMBOL(cxgb4_pktgl_to_skb);
+
+/**
+ *	t4_pktgl_free - free a packet gather list
+ *	@gl: the gather list
+ *
+ *	Releases the pages of a packet gather list.  We do not own the last
+ *	page on the list and do not free it.
+ */
+void t4_pktgl_free(const struct pkt_gl *gl)
+{
+	int n;
+	const skb_frag_t *p;
+
+	for (p = gl->frags, n = gl->nfrags - 1; n--; p++)
+		put_page(p->page);
+}
+
+/*
+ * Process an MPS trace packet.  Give it an unused protocol number so it won't
+ * be delivered to anyone and send it to the stack for capture.
+ */
+static noinline int handle_trace_pkt(struct adapter *adap,
+				     const struct pkt_gl *gl)
+{
+	struct sk_buff *skb;
+	struct cpl_trace_pkt *p;
+
+	skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(gl);
+		return 0;
+	}
+
+	p = (struct cpl_trace_pkt *)skb->data;
+	__skb_pull(skb, sizeof(*p));
+	skb_reset_mac_header(skb);
+	skb->protocol = htons(0xffff);
+	skb->dev = adap->port[0];
+	netif_receive_skb(skb);
+	return 0;
+}
+
+static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
+		   const struct cpl_rx_pkt *pkt)
+{
+	int ret;
+	struct sk_buff *skb;
+
+	skb = napi_get_frags(&rxq->rspq.napi);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(gl);
+		rxq->stats.rx_drops++;
+		return;
+	}
+
+	copy_frags(skb_shinfo(skb), gl, RX_PKT_PAD);
+	skb->len = gl->tot_len - RX_PKT_PAD;
+	skb->data_len = skb->len;
+	skb->truesize += skb->data_len;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	skb_record_rx_queue(skb, rxq->rspq.idx);
+
+	if (unlikely(pkt->vlan_ex)) {
+		struct port_info *pi = netdev_priv(rxq->rspq.netdev);
+		struct vlan_group *grp = pi->vlan_grp;
+
+		rxq->stats.vlan_ex++;
+		if (likely(grp)) {
+			ret = vlan_gro_frags(&rxq->rspq.napi, grp,
+					     ntohs(pkt->vlan));
+			goto stats;
+		}
+	}
+	ret = napi_gro_frags(&rxq->rspq.napi);
+stats:	if (ret == GRO_HELD)
+		rxq->stats.lro_pkts++;
+	else if (ret == GRO_MERGED || ret == GRO_MERGED_FREE)
+		rxq->stats.lro_merged++;
+	rxq->stats.pkts++;
+	rxq->stats.rx_cso++;
+}
+
+/**
+ *	t4_ethrx_handler - process an ingress ethernet packet
+ *	@q: the response queue that received the packet
+ *	@rsp: the response queue descriptor holding the RX_PKT message
+ *	@si: the gather list of packet fragments
+ *
+ *	Process an ingress ethernet packet and deliver it to the stack.
+ */
+int t4_ethrx_handler(struct sge_rspq *q, const __be64 *rsp,
+		     const struct pkt_gl *si)
+{
+	bool csum_ok;
+	struct sk_buff *skb;
+	struct port_info *pi;
+	const struct cpl_rx_pkt *pkt;
+	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+	if (unlikely(*(u8 *)rsp == CPL_TRACE_PKT))
+		return handle_trace_pkt(q->adap, si);
+
+	pkt = (void *)&rsp[1];
+	csum_ok = pkt->csum_calc && !pkt->err_vec;
+	if ((pkt->l2info & htonl(RXF_TCP)) &&
+	    (q->netdev->features & NETIF_F_GRO) && csum_ok && !pkt->ip_frag) {
+		do_gro(rxq, si, pkt);
+		return 0;
+	}
+
+	skb = cxgb4_pktgl_to_skb(si, RX_PKT_SKB_LEN, RX_PULL_LEN);
+	if (unlikely(!skb)) {
+		t4_pktgl_free(si);
+		rxq->stats.rx_drops++;
+		return 0;
+	}
+
+	__skb_pull(skb, RX_PKT_PAD);      /* remove ethernet header padding */
+	skb->protocol = eth_type_trans(skb, q->netdev);
+	skb_record_rx_queue(skb, q->idx);
+	pi = netdev_priv(skb->dev);
+	rxq->stats.pkts++;
+
+	if (csum_ok && (pi->rx_offload & RX_CSO) &&
+	    (pkt->l2info & htonl(RXF_UDP | RXF_TCP))) {
+		if (!pkt->ip_frag)
+			skb->ip_summed = CHECKSUM_UNNECESSARY;
+		else {
+			__sum16 c = (__force __sum16)pkt->csum;
+			skb->csum = csum_unfold(c);
+			skb->ip_summed = CHECKSUM_COMPLETE;
+		}
+		rxq->stats.rx_cso++;
+	} else
+		skb->ip_summed = CHECKSUM_NONE;
+
+	if (unlikely(pkt->vlan_ex)) {
+		struct vlan_group *grp = pi->vlan_grp;
+
+		rxq->stats.vlan_ex++;
+		if (likely(grp))
+			vlan_hwaccel_receive_skb(skb, grp, ntohs(pkt->vlan));
+		else
+			dev_kfree_skb_any(skb);
+	} else
+		netif_receive_skb(skb);
+
+	return 0;
+}
+
+/**
+ *	restore_rx_bufs - put back a packet's Rx buffers
+ *	@si: the packet gather list
+ *	@q: the SGE free list
+ *	@frags: number of FL buffers to restore
+ *
+ *	Puts back on an FL the Rx buffers associated with @si.  The buffers
+ *	have already been unmapped and are left unmapped, we mark them so to
+ *	prevent further unmapping attempts.
+ *
+ *	This function undoes a series of @unmap_rx_buf calls when we find out
+ *	that the current packet can't be processed right away after all and we
+ *	need to come back to it later.  This is a very rare event and there's
+ *	no effort to make this particularly efficient.
+ */
+static void restore_rx_bufs(const struct pkt_gl *si, struct sge_fl *q,
+			    int frags)
+{
+	struct rx_sw_desc *d;
+
+	while (frags--) {
+		if (q->cidx == 0)
+			q->cidx = q->size - 1;
+		else
+			q->cidx--;
+		d = &q->sdesc[q->cidx];
+		d->page = si->frags[frags].page;
+		d->dma_addr |= RX_UNMAPPED_BUF;
+		q->avail++;
+	}
+}
+
+/**
+ *	is_new_response - check if a response is newly written
+ *	@r: the response descriptor
+ *	@q: the response queue
+ *
+ *	Returns true if a response descriptor contains a yet unprocessed
+ *	response.
+ */
+static inline bool is_new_response(const struct rsp_ctrl *r,
+				   const struct sge_rspq *q)
+{
+	return RSPD_GEN(r->type_gen) == q->gen;
+}
+
+/**
+ *	rspq_next - advance to the next entry in a response queue
+ *	@q: the queue
+ *
+ *	Updates the state of a response queue to advance it to the next entry.
+ */
+static inline void rspq_next(struct sge_rspq *q)
+{
+	q->cur_desc = (void *)q->cur_desc + q->iqe_len;
+	if (unlikely(++q->cidx == q->size)) {
+		q->cidx = 0;
+		q->gen ^= 1;
+		q->cur_desc = q->desc;
+	}
+}
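+
+/*
+ * Together these two helpers implement the generation-bit scheme: the
+ * queue starts with gen == 1 and HW stamps each response it writes with
+ * the current generation.  Each time SW wraps past the end of the ring,
+ * rspq_next() flips q->gen, so entries left over from the previous pass
+ * compare stale in is_new_response() without HW and SW ever having to
+ * exchange an index.
+ */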
+
+/**
+ *	process_responses - process responses from an SGE response queue
+ *	@q: the ingress queue to process
+ *	@budget: how many responses can be processed in this round
+ *
+ *	Process responses from an SGE response queue up to the supplied budget.
+ *	Responses include received packets as well as control messages from FW
+ *	or HW.
+ *
+ *	Additionally choose the interrupt holdoff time for the next interrupt
+ *	on this queue.  If the system is under memory shortage use a fairly
+ *	long delay to help recovery.
+ */
+static int process_responses(struct sge_rspq *q, int budget)
+{
+	int ret, rsp_type;
+	int budget_left = budget;
+	const struct rsp_ctrl *rc;
+	struct sge_eth_rxq *rxq = container_of(q, struct sge_eth_rxq, rspq);
+
+	while (likely(budget_left)) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		rmb();
+		rsp_type = RSPD_TYPE(rc->type_gen);
+		if (likely(rsp_type == RSP_TYPE_FLBUF)) {
+			skb_frag_t *fp;
+			struct pkt_gl si;
+			const struct rx_sw_desc *rsd;
+			u32 len = ntohl(rc->pldbuflen_qid), bufsz, frags;
+
+			if (len & RSPD_NEWBUF) {
+				if (likely(q->offset > 0)) {
+					free_rx_bufs(q->adap, &rxq->fl, 1);
+					q->offset = 0;
+				}
+				len &= RSPD_LEN;
+			}
+			si.tot_len = len;
+
+			/* gather packet fragments */
+			for (frags = 0, fp = si.frags; ; frags++, fp++) {
+				rsd = &rxq->fl.sdesc[rxq->fl.cidx];
+				bufsz = get_buf_size(rsd);
+				fp->page = rsd->page;
+				fp->page_offset = q->offset;
+				fp->size = min(bufsz, len);
+				len -= fp->size;
+				if (!len)
+					break;
+				unmap_rx_buf(q->adap, &rxq->fl);
+			}
+
+			/*
+			 * Last buffer remains mapped so explicitly make it
+			 * coherent for CPU access.
+			 */
+			dma_sync_single_for_cpu(q->adap->pdev_dev,
+						get_buf_addr(rsd),
+						fp->size, DMA_FROM_DEVICE);
+
+			si.va = page_address(si.frags[0].page) +
+				si.frags[0].page_offset;
+			prefetch(si.va);
+
+			si.nfrags = frags + 1;
+			ret = q->handler(q, q->cur_desc, &si);
+			if (likely(ret == 0))
+				q->offset += ALIGN(fp->size, FL_ALIGN);
+			else
+				restore_rx_bufs(&si, &rxq->fl, frags);
+		} else if (likely(rsp_type == RSP_TYPE_CPL)) {
+			ret = q->handler(q, q->cur_desc, NULL);
+		} else {
+			ret = q->handler(q, (const __be64 *)rc, CXGB4_MSG_AN);
+		}
+
+		if (unlikely(ret)) {
+			/* couldn't process descriptor, back off for recovery */
+			q->next_intr_params = QINTR_TIMER_IDX(NOMEM_TMR_IDX);
+			break;
+		}
+
+		rspq_next(q);
+		budget_left--;
+	}
+
+	if (q->offset >= 0 && rxq->fl.size - rxq->fl.avail >= 16)
+		__refill_fl(q->adap, &rxq->fl);
+	return budget - budget_left;
+}
+
+/**
+ *	napi_rx_handler - the NAPI handler for Rx processing
+ *	@napi: the napi instance
+ *	@budget: how many packets we can process in this round
+ *
+ *	Handler for new data events when using NAPI.  This does not need any
+ *	locking or protection from interrupts as data interrupts are off at
+ *	this point and other adapter interrupts do not interfere (the latter
+ *	is not a concern at all with MSI-X as non-data interrupts then have
+ *	a separate handler).
+ */
+static int napi_rx_handler(struct napi_struct *napi, int budget)
+{
+	unsigned int params;
+	struct sge_rspq *q = container_of(napi, struct sge_rspq, napi);
+	int work_done = process_responses(q, budget);
+
+	if (likely(work_done < budget)) {
+		napi_complete(napi);
+		params = q->next_intr_params;
+		q->next_intr_params = q->intr_params;
+	} else
+		params = QINTR_TIMER_IDX(7);
+
+	t4_write_reg(q->adap, MYPF_REG(SGE_PF_GTS), CIDXINC(work_done) |
+		     INGRESSQID((u32)q->cntxt_id) | SEINTARM(params));
+	return work_done;
+}
+
+/*
+ * The MSI-X interrupt handler for an SGE response queue.
+ */
+irqreturn_t t4_sge_intr_msix(int irq, void *cookie)
+{
+	struct sge_rspq *q = cookie;
+
+	napi_schedule(&q->napi);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Process the indirect interrupt entries in the interrupt queue and kick off
+ * NAPI for each queue that has generated an entry.
+ */
+static unsigned int process_intrq(struct adapter *adap)
+{
+	unsigned int credits;
+	const struct rsp_ctrl *rc;
+	struct sge_rspq *q = &adap->sge.intrq;
+
+	spin_lock(&adap->sge.intrq_lock);
+	for (credits = 0; ; credits++) {
+		rc = (void *)q->cur_desc + (q->iqe_len - sizeof(*rc));
+		if (!is_new_response(rc, q))
+			break;
+
+		rmb();
+		if (RSPD_TYPE(rc->type_gen) == RSP_TYPE_INTR) {
+			unsigned int qid = ntohl(rc->pldbuflen_qid);
+
+			napi_schedule(&adap->sge.ingr_map[qid]->napi);
+		}
+
+		rspq_next(q);
+	}
+
+	t4_write_reg(adap, MYPF_REG(SGE_PF_GTS), CIDXINC(credits) |
+		     INGRESSQID(q->cntxt_id) | SEINTARM(q->intr_params));
+	spin_unlock(&adap->sge.intrq_lock);
+	return credits;
+}
+
+/*
+ * The MSI interrupt handler handles data events from SGE response queues as
+ * well as error and other async events, since they all use the same MSI
+ * vector.
+ */
+static irqreturn_t t4_intr_msi(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+
+	t4_slow_intr_handler(adap);
+	process_intrq(adap);
+	return IRQ_HANDLED;
+}
+
+/*
+ * Interrupt handler for legacy INTx interrupts.
+ * Handles data events from SGE response queues as well as error and other
+ * async events as they all use the same interrupt line.
+ */
+static irqreturn_t t4_intr_intx(int irq, void *cookie)
+{
+	struct adapter *adap = cookie;
+
+	t4_write_reg(adap, MYPF_REG(PCIE_PF_CLI), 0);
+	if (t4_slow_intr_handler(adap) | process_intrq(adap))
+		return IRQ_HANDLED;
+	return IRQ_NONE;             /* probably shared interrupt */
+}
+
+/**
+ *	t4_intr_handler - select the top-level interrupt handler
+ *	@adap: the adapter
+ *
+ *	Selects the top-level interrupt handler based on the type of interrupts
+ *	(MSI-X, MSI, or INTx).
+ */
+irq_handler_t t4_intr_handler(struct adapter *adap)
+{
+	if (adap->flags & USING_MSIX)
+		return t4_sge_intr_msix;
+	if (adap->flags & USING_MSI)
+		return t4_intr_msi;
+	return t4_intr_intx;
+}
+
+static void sge_rx_timer_cb(unsigned long data)
+{
+	unsigned long m;
+	unsigned int i, cnt[2];
+	struct adapter *adap = (struct adapter *)data;
+	struct sge *s = &adap->sge;
+
+	for (i = 0; i < ARRAY_SIZE(s->starving_fl); i++)
+		for (m = s->starving_fl[i]; m; m &= m - 1) {
+			struct sge_eth_rxq *rxq;
+			unsigned int id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_fl *fl = s->egr_map[id];
+
+			clear_bit(id, s->starving_fl);
+			smp_mb__after_clear_bit();
+
+			if (fl_starving(fl)) {
+				rxq = container_of(fl, struct sge_eth_rxq, fl);
+				if (napi_reschedule(&rxq->rspq.napi))
+					fl->starving++;
+				else
+					set_bit(id, s->starving_fl);
+			}
+		}
+
+	t4_write_reg(adap, SGE_DEBUG_INDEX, 13);
+	cnt[0] = t4_read_reg(adap, SGE_DEBUG_DATA_HIGH);
+	cnt[1] = t4_read_reg(adap, SGE_DEBUG_DATA_LOW);
+
+	for (i = 0; i < 2; i++)
+		if (cnt[i] >= s->starve_thres) {
+			if (s->idma_state[i] || cnt[i] == 0xffffffff)
+				continue;
+			s->idma_state[i] = 1;
+			t4_write_reg(adap, SGE_DEBUG_INDEX, 11);
+			m = t4_read_reg(adap, SGE_DEBUG_DATA_LOW) >> (i * 16);
+			dev_warn(adap->pdev_dev,
+				 "SGE idma%u starvation detected for "
+				 "queue %lu\n", i, m & 0xffff);
+		} else if (s->idma_state[i])
+			s->idma_state[i] = 0;
+
+	mod_timer(&s->rx_timer, jiffies + RX_QCHECK_PERIOD);
+}
+
+static void sge_tx_timer_cb(unsigned long data)
+{
+	unsigned long m;
+	unsigned int i, budget;
+	struct adapter *adap = (struct adapter *)data;
+	struct sge *s = &adap->sge;
+
+	for (i = 0; i < ARRAY_SIZE(s->txq_maperr); i++)
+		for (m = s->txq_maperr[i]; m; m &= m - 1) {
+			unsigned long id = __ffs(m) + i * BITS_PER_LONG;
+			struct sge_ofld_txq *txq = s->egr_map[id];
+
+			clear_bit(id, s->txq_maperr);
+			tasklet_schedule(&txq->qresume_tsk);
+		}
+
+	budget = MAX_TIMER_TX_RECLAIM;
+	i = s->ethtxq_rover;
+	do {
+		struct sge_eth_txq *q = &s->ethtxq[i];
+
+		if (q->q.in_use &&
+		    time_after_eq(jiffies, q->txq->trans_start + HZ / 100) &&
+		    __netif_tx_trylock(q->txq)) {
+			int avail = reclaimable(&q->q);
+
+			if (avail) {
+				if (avail > budget)
+					avail = budget;
+
+				free_tx_desc(adap, &q->q, avail, true);
+				q->q.in_use -= avail;
+				budget -= avail;
+			}
+			__netif_tx_unlock(q->txq);
+		}
+
+		if (++i >= s->ethqsets)
+			i = 0;
+	} while (budget && i != s->ethtxq_rover);
+	s->ethtxq_rover = i;
+	mod_timer(&s->tx_timer, jiffies + (budget ? TX_QCHECK_PERIOD : 2));
+}
+
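+/**
+ *	t4_sge_alloc_rxq - allocate an SGE ingress queue with optional free list
+ *	@adap: the adapter
+ *	@iq: the ingress queue to allocate
+ *	@fwevtq: true if this is the firmware event queue
+ *	@dev: the net device associated with the queue
+ *	@intr_idx: MSI-X vector to take interrupts on; if negative the queue's
+ *		interrupts are forwarded to the ingress queue with index
+ *		-@intr_idx - 1
+ *	@fl: the free list to pair with the queue, or %NULL for none
+ *	@hnd: the handler for the queue's responses
+ *
+ *	Allocates and starts an ingress queue (and optionally a free list)
+ *	through a FW mailbox command and sets up the corresponding SW state.
+ *	Returns 0 on success or a negative errno.
+ */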
+int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq,
+		     struct net_device *dev, int intr_idx,
+		     struct sge_fl *fl, rspq_handler_t hnd)
+{
+	int ret, flsz = 0;
+	struct fw_iq_cmd c;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Size needs to be a multiple of 16, including the status entry. */
+	iq->size = roundup(iq->size, 16);
+
+	iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0,
+			      &iq->phys_addr, NULL, 0);
+	if (!iq->desc)
+		return -ENOMEM;
+
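+	/* ALLOC creates the FW context and IQSTART enables the queue as soon
+	 * as the command completes.
+	 */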
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_IQ_CMD_PFN(0) | FW_IQ_CMD_VFN(0));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+				 FW_LEN16(c));
+	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(FW_IQ_TYPE_FL_INT_CAP) |
+		FW_IQ_CMD_IQASYNCH(fwevtq) | FW_IQ_CMD_VIID(pi->viid) |
+		FW_IQ_CMD_IQANDST(intr_idx < 0) | FW_IQ_CMD_IQANUD(1) |
+		FW_IQ_CMD_IQANDSTINDEX(intr_idx >= 0 ? intr_idx :
+							-intr_idx - 1));
+	c.iqdroprss_to_iqesize = htons(FW_IQ_CMD_IQPCIECH(pi->tx_chan) |
+		FW_IQ_CMD_IQGTSMODE |
+		FW_IQ_CMD_IQINTCNTTHRESH(iq->pktcnt_idx) |
+		FW_IQ_CMD_IQESIZE(ilog2(iq->iqe_len) - 4));
+	c.iqsize = htons(iq->size);
+	c.iqaddr = cpu_to_be64(iq->phys_addr);
+
+	if (fl) {
+		fl->size = roundup(fl->size, 8);
+		fl->desc = alloc_ring(adap->pdev_dev, fl->size, sizeof(__be64),
+				      sizeof(struct rx_sw_desc), &fl->addr,
+				      &fl->sdesc, STAT_LEN);
+		if (!fl->desc)
+			goto fl_nomem;
+
+		flsz = fl->size / 8 + STAT_LEN / sizeof(struct tx_desc);
+		c.iqns_to_fl0congen = htonl(FW_IQ_CMD_FL0PACKEN |
+					    FW_IQ_CMD_FL0PADEN);
+		c.fl0dcaen_to_fl0cidxfthresh = htons(FW_IQ_CMD_FL0FBMIN(2) |
+				FW_IQ_CMD_FL0FBMAX(3));
+		c.fl0size = htons(flsz);
+		c.fl0addr = cpu_to_be64(fl->addr);
+	}
+
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	if (ret)
+		goto err;
+
+	netif_napi_add(dev, &iq->napi, napi_rx_handler, 64);
+	iq->cur_desc = iq->desc;
+	iq->cidx = 0;
+	iq->gen = 1;
+	iq->next_intr_params = iq->intr_params;
+	iq->cntxt_id = ntohs(c.iqid);
+	iq->abs_id = ntohs(c.physiqid);
+	iq->size--;                           /* subtract status entry */
+	iq->adap = adap;
+	iq->netdev = dev;
+	iq->handler = hnd;
+
+	/* set offset to -1 to distinguish ingress queues without FL */
+	iq->offset = fl ? 0 : -1;
+
+	adap->sge.ingr_map[iq->cntxt_id] = iq;
+
+	if (fl) {
+		fl->cntxt_id = ntohs(c.fl0id);  /* FW returns the id big-endian */
+		fl->avail = fl->pend_cred = 0;
+		fl->pidx = fl->cidx = 0;
+		fl->alloc_failed = fl->large_alloc_failed = fl->starving = 0;
+		adap->sge.egr_map[fl->cntxt_id] = fl;
+		refill_fl(adap, fl, fl_cap(fl), GFP_KERNEL);
+	}
+	return 0;
+
+fl_nomem:
+	ret = -ENOMEM;
+err:
+	if (iq->desc) {
+		dma_free_coherent(adap->pdev_dev, iq->size * iq->iqe_len,
+				  iq->desc, iq->phys_addr);
+		iq->desc = NULL;
+	}
+	if (fl && fl->desc) {
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev, flsz * sizeof(struct tx_desc),
+				  fl->desc, fl->addr);
+		fl->desc = NULL;
+	}
+	return ret;
+}
+
+static void init_txq(struct adapter *adap, struct sge_txq *q, unsigned int id)
+{
+	q->in_use = 0;
+	q->cidx = q->pidx = 0;
+	q->stops = q->restarts = 0;
+	q->stat = (void *)&q->desc[q->size];
+	q->cntxt_id = id;
+	adap->sge.egr_map[id] = q;
+}
+
+int t4_sge_alloc_eth_txq(struct adapter *adap, struct sge_eth_txq *txq,
+			 struct net_device *dev, struct netdev_queue *netdevq,
+			 unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_eth_cmd c;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_EQ_ETH_CMD_PFN(0) | FW_EQ_ETH_CMD_VFN(0));
+	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_ALLOC |
+				 FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
+	c.viid_pkd = htonl(FW_EQ_ETH_CMD_VIID(pi->viid));
+	c.fetchszm_to_iqid = htonl(FW_EQ_ETH_CMD_HOSTFCMODE(2) |
+				   FW_EQ_ETH_CMD_PCIECHN(pi->tx_chan) |
+				   FW_EQ_ETH_CMD_IQID(iqid));
+	c.dcaen_to_eqsize = htonl(FW_EQ_ETH_CMD_FBMIN(2) |
+				  FW_EQ_ETH_CMD_FBMAX(3) |
+				  FW_EQ_ETH_CMD_CIDXFTHRESH(5) |
+				  FW_EQ_ETH_CMD_EQSIZE(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	if (ret) {
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_ETH_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+	txq->txq = netdevq;
+	txq->tso = txq->tx_cso = txq->vlan_ins = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+int t4_sge_alloc_ctrl_txq(struct adapter *adap, struct sge_ctrl_txq *txq,
+			  struct net_device *dev, unsigned int iqid,
+			  unsigned int cmplqid)
+{
+	int ret, nentries;
+	struct fw_eq_ctrl_cmd c;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, nentries,
+				 sizeof(struct tx_desc), 0, &txq->q.phys_addr,
+				 NULL, 0);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_EQ_CTRL_CMD_PFN(0) | FW_EQ_CTRL_CMD_VFN(0));
+	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_ALLOC |
+				 FW_EQ_CTRL_CMD_EQSTART | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_CMPLIQID(cmplqid));
+	c.physeqid_pkd = htonl(0);
+	c.fetchszm_to_iqid = htonl(FW_EQ_CTRL_CMD_HOSTFCMODE(2) |
+				   FW_EQ_CTRL_CMD_PCIECHN(pi->tx_chan) |
+				   FW_EQ_CTRL_CMD_IQID(iqid));
+	c.dcaen_to_eqsize = htonl(FW_EQ_CTRL_CMD_FBMIN(2) |
+				  FW_EQ_CTRL_CMD_FBMAX(3) |
+				  FW_EQ_CTRL_CMD_CIDXFTHRESH(5) |
+				  FW_EQ_CTRL_CMD_EQSIZE(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	if (ret) {
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_CTRL_CMD_EQID_GET(ntohl(c.cmpliqid_eqid)));
+	txq->adap = adap;
+	skb_queue_head_init(&txq->sendq);
+	tasklet_init(&txq->qresume_tsk, restart_ctrlq, (unsigned long)txq);
+	txq->full = 0;
+	return 0;
+}
+
+int t4_sge_alloc_ofld_txq(struct adapter *adap, struct sge_ofld_txq *txq,
+			  struct net_device *dev, unsigned int iqid)
+{
+	int ret, nentries;
+	struct fw_eq_ofld_cmd c;
+	struct port_info *pi = netdev_priv(dev);
+
+	/* Add status entries */
+	nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc);
+
+	txq->q.desc = alloc_ring(adap->pdev_dev, txq->q.size,
+			sizeof(struct tx_desc), sizeof(struct tx_sw_desc),
+			&txq->q.phys_addr, &txq->q.sdesc, STAT_LEN);
+	if (!txq->q.desc)
+		return -ENOMEM;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_EQ_OFLD_CMD_PFN(0) | FW_EQ_OFLD_CMD_VFN(0));
+	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
+				 FW_EQ_OFLD_CMD_EQSTART | FW_LEN16(c));
+	c.fetchszm_to_iqid = htonl(FW_EQ_OFLD_CMD_HOSTFCMODE(2) |
+				   FW_EQ_OFLD_CMD_PCIECHN(pi->tx_chan) |
+				   FW_EQ_OFLD_CMD_IQID(iqid));
+	c.dcaen_to_eqsize = htonl(FW_EQ_OFLD_CMD_FBMIN(2) |
+				  FW_EQ_OFLD_CMD_FBMAX(3) |
+				  FW_EQ_OFLD_CMD_CIDXFTHRESH(5) |
+				  FW_EQ_OFLD_CMD_EQSIZE(nentries));
+	c.eqaddr = cpu_to_be64(txq->q.phys_addr);
+
+	ret = t4_wr_mbox(adap, 0, &c, sizeof(c), &c);
+	if (ret) {
+		kfree(txq->q.sdesc);
+		txq->q.sdesc = NULL;
+		dma_free_coherent(adap->pdev_dev,
+				  nentries * sizeof(struct tx_desc),
+				  txq->q.desc, txq->q.phys_addr);
+		txq->q.desc = NULL;
+		return ret;
+	}
+
+	init_txq(adap, &txq->q, FW_EQ_OFLD_CMD_EQID_GET(ntohl(c.eqid_pkd)));
+	txq->adap = adap;
+	skb_queue_head_init(&txq->sendq);
+	tasklet_init(&txq->qresume_tsk, restart_ofldq, (unsigned long)txq);
+	txq->full = 0;
+	txq->mapping_err = 0;
+	return 0;
+}
+
+static void free_txq(struct adapter *adap, struct sge_txq *q)
+{
+	dma_free_coherent(adap->pdev_dev,
+			  q->size * sizeof(struct tx_desc) + STAT_LEN,
+			  q->desc, q->phys_addr);
+	q->cntxt_id = 0;
+	q->sdesc = NULL;
+	q->desc = NULL;
+}
+
+static void free_rspq_fl(struct adapter *adap, struct sge_rspq *rq,
+			 struct sge_fl *fl)
+{
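+	/* 0xffff is the "no such queue" id, used when there is no free list */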
+	unsigned int fl_id = fl ? fl->cntxt_id : 0xffff;
+
+	adap->sge.ingr_map[rq->cntxt_id] = NULL;
+	t4_iq_free(adap, 0, 0, 0, FW_IQ_TYPE_FL_INT_CAP, rq->cntxt_id, fl_id,
+		   0xffff);
+	dma_free_coherent(adap->pdev_dev, (rq->size + 1) * rq->iqe_len,
+			  rq->desc, rq->phys_addr);
+	netif_napi_del(&rq->napi);
+	rq->netdev = NULL;
+	rq->cntxt_id = rq->abs_id = 0;
+	rq->desc = NULL;
+
+	if (fl) {
+		free_rx_bufs(adap, fl, fl->avail);
+		dma_free_coherent(adap->pdev_dev, fl->size * 8 + STAT_LEN,
+				  fl->desc, fl->addr);
+		kfree(fl->sdesc);
+		fl->sdesc = NULL;
+		fl->cntxt_id = 0;
+		fl->desc = NULL;
+	}
+}
+
+/**
+ *	t4_free_sge_resources - free SGE resources
+ *	@adap: the adapter
+ *
+ *	Frees resources used by the SGE queue sets.
+ */
+void t4_free_sge_resources(struct adapter *adap)
+{
+	int i;
+	struct sge_eth_rxq *eq = adap->sge.ethrxq;
+	struct sge_eth_txq *etq = adap->sge.ethtxq;
+	struct sge_ofld_rxq *oq = adap->sge.ofldrxq;
+
+	/* clean up Ethernet Tx/Rx queues */
+	for (i = 0; i < adap->sge.ethqsets; i++, eq++, etq++) {
+		if (eq->rspq.desc)
+			free_rspq_fl(adap, &eq->rspq, &eq->fl);
+		if (etq->q.desc) {
+			t4_eth_eq_free(adap, 0, 0, 0, etq->q.cntxt_id);
+			free_tx_desc(adap, &etq->q, etq->q.in_use, true);
+			kfree(etq->q.sdesc);
+			free_txq(adap, &etq->q);
+		}
+	}
+
+	/* clean up RDMA and iSCSI Rx queues */
+	for (i = 0; i < adap->sge.ofldqsets; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
+	for (i = 0, oq = adap->sge.rdmarxq; i < adap->sge.rdmaqs; i++, oq++) {
+		if (oq->rspq.desc)
+			free_rspq_fl(adap, &oq->rspq, &oq->fl);
+	}
+
+	/* clean up offload Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ofldtxq); i++) {
+		struct sge_ofld_txq *q = &adap->sge.ofldtxq[i];
+
+		if (q->q.desc) {
+			tasklet_kill(&q->qresume_tsk);
+			t4_ofld_eq_free(adap, 0, 0, 0, q->q.cntxt_id);
+			free_tx_desc(adap, &q->q, q->q.in_use, false);
+			kfree(q->q.sdesc);
+			__skb_queue_purge(&q->sendq);
+			free_txq(adap, &q->q);
+		}
+	}
+
+	/* clean up control Tx queues */
+	for (i = 0; i < ARRAY_SIZE(adap->sge.ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &adap->sge.ctrlq[i];
+
+		if (cq->q.desc) {
+			tasklet_kill(&cq->qresume_tsk);
+			t4_ctrl_eq_free(adap, 0, 0, 0, cq->q.cntxt_id);
+			__skb_queue_purge(&cq->sendq);
+			free_txq(adap, &cq->q);
+		}
+	}
+
+	if (adap->sge.fw_evtq.desc)
+		free_rspq_fl(adap, &adap->sge.fw_evtq, NULL);
+
+	if (adap->sge.intrq.desc)
+		free_rspq_fl(adap, &adap->sge.intrq, NULL);
+
+	/* clear the reverse egress queue map */
+	memset(adap->sge.egr_map, 0, sizeof(adap->sge.egr_map));
+}
+
+void t4_sge_start(struct adapter *adap)
+{
+	adap->sge.ethtxq_rover = 0;
+	mod_timer(&adap->sge.rx_timer, jiffies + RX_QCHECK_PERIOD);
+	mod_timer(&adap->sge.tx_timer, jiffies + TX_QCHECK_PERIOD);
+}
+
+/**
+ *	t4_sge_stop - disable SGE operation
+ *	@adap: the adapter
+ *
+ *	Stop tasklets and timers associated with the DMA engine.  Note that
+ *	this is effective only if measures have been taken to disable any HW
+ *	events that may restart them.
+ */
+void t4_sge_stop(struct adapter *adap)
+{
+	int i;
+	struct sge *s = &adap->sge;
+
+	if (in_interrupt())  /* actions below require waiting */
+		return;
+
+	if (s->rx_timer.function)
+		del_timer_sync(&s->rx_timer);
+	if (s->tx_timer.function)
+		del_timer_sync(&s->tx_timer);
+
+	for (i = 0; i < ARRAY_SIZE(s->ofldtxq); i++) {
+		struct sge_ofld_txq *q = &s->ofldtxq[i];
+
+		if (q->q.desc)
+			tasklet_kill(&q->qresume_tsk);
+	}
+	for (i = 0; i < ARRAY_SIZE(s->ctrlq); i++) {
+		struct sge_ctrl_txq *cq = &s->ctrlq[i];
+
+		if (cq->q.desc)
+			tasklet_kill(&cq->qresume_tsk);
+	}
+}
+
+/**
+ *	t4_sge_init - initialize SGE
+ *	@adap: the adapter
+ *
+ *	Performs SGE initialization needed every time after a chip reset.
+ *	We do not initialize any of the queues here; instead the driver
+ *	top-level must request them individually.
+ */
+void t4_sge_init(struct adapter *adap)
+{
+	struct sge *s = &adap->sge;
+	unsigned int fl_align_log = ilog2(FL_ALIGN);
+
+	t4_set_reg_field(adap, SGE_CONTROL, PKTSHIFT_MASK |
+			 INGPADBOUNDARY_MASK | EGRSTATUSPAGESIZE,
+			 INGPADBOUNDARY(fl_align_log - 5) | PKTSHIFT(2) |
+			 RXPKTCPLMODE |
+			 (STAT_LEN == 128 ? EGRSTATUSPAGESIZE : 0));
+	t4_set_reg_field(adap, SGE_HOST_PAGE_SIZE, HOSTPAGESIZEPF0_MASK,
+			 HOSTPAGESIZEPF0(PAGE_SHIFT - 10));
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE0, PAGE_SIZE);
+#if FL_PG_ORDER > 0
+	t4_write_reg(adap, SGE_FL_BUFFER_SIZE1, PAGE_SIZE << FL_PG_ORDER);
+#endif
+	t4_write_reg(adap, SGE_INGRESS_RX_THRESHOLD,
+		     THRESHOLD_0(s->counter_val[0]) |
+		     THRESHOLD_1(s->counter_val[1]) |
+		     THRESHOLD_2(s->counter_val[2]) |
+		     THRESHOLD_3(s->counter_val[3]));
+	t4_write_reg(adap, SGE_TIMER_VALUE_0_AND_1,
+		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[0])) |
+		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[1])));
+	t4_write_reg(adap, SGE_TIMER_VALUE_2_AND_3,
+		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[2])) |
+		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[3])));
+	t4_write_reg(adap, SGE_TIMER_VALUE_4_AND_5,
+		     TIMERVALUE0(us_to_core_ticks(adap, s->timer_val[4])) |
+		     TIMERVALUE1(us_to_core_ticks(adap, s->timer_val[5])));
+	setup_timer(&s->rx_timer, sge_rx_timer_cb, (unsigned long)adap);
+	setup_timer(&s->tx_timer, sge_tx_timer_cb, (unsigned long)adap);
+	s->starve_thres = core_ticks_per_usec(adap) * 1000000;  /* 1 s */
+	s->idma_state[0] = s->idma_state[1] = 0;
+	spin_lock_init(&s->intrq_lock);
+}
diff --git a/drivers/net/cxgb4/t4_hw.c b/drivers/net/cxgb4/t4_hw.c
new file mode 100644
index 0000000..a814a3a
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.c
@@ -0,0 +1,3131 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "cxgb4.h"
+#include "t4_regs.h"
+#include "t4fw_api.h"
+
+/**
+ *	t4_wait_op_done_val - wait until an operation is completed
+ *	@adapter: the adapter performing the operation
+ *	@reg: the register to check for completion
+ *	@mask: a single-bit field within @reg that indicates completion
+ *	@polarity: the value of the field when the operation is completed
+ *	@attempts: number of check iterations
+ *	@delay: delay in usecs between iterations
+ *	@valp: where to store the value of the register at completion time
+ *
+ *	Wait until an operation is completed by checking a bit in a register
+ *	up to @attempts times.  If @valp is not NULL the value of the register
+ *	at the time it indicated completion is stored there.  Returns 0 if the
+ *	operation completes and -EAGAIN otherwise.
+ */
+int t4_wait_op_done_val(struct adapter *adapter, int reg, u32 mask,
+			int polarity, int attempts, int delay, u32 *valp)
+{
+	while (1) {
+		u32 val = t4_read_reg(adapter, reg);
+
+		if (!!(val & mask) == polarity) {
+			if (valp)
+				*valp = val;
+			return 0;
+		}
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			udelay(delay);
+	}
+}
+
+static inline int t4_wait_op_done(struct adapter *adapter, int reg, u32 mask,
+				  int polarity, int attempts, int delay)
+{
+	return t4_wait_op_done_val(adapter, reg, mask, polarity, attempts,
+				   delay, NULL);
+}
+
+/**
+ *	t4_set_reg_field - set a register field to a value
+ *	@adapter: the adapter to program
+ *	@addr: the register address
+ *	@mask: specifies the portion of the register to modify
+ *	@val: the new value for the register field
+ *
+ *	Sets a register field specified by the supplied mask to the
+ *	given value.
+ */
+void t4_set_reg_field(struct adapter *adapter, unsigned int addr, u32 mask,
+		      u32 val)
+{
+	u32 v = t4_read_reg(adapter, addr) & ~mask;
+
+	t4_write_reg(adapter, addr, v | val);
+	(void) t4_read_reg(adapter, addr);      /* flush */
+}
+
+/**
+ *	t4_read_indirect - read indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect address
+ *	@data_reg: register holding the value of the indirect register
+ *	@vals: where the read register values are stored
+ *	@nregs: how many indirect registers to read
+ *	@start_idx: index of first indirect register to read
+ *
+ *	Reads registers that are accessed indirectly through an address/data
+ *	register pair.
+ */
+void t4_read_indirect(struct adapter *adap, unsigned int addr_reg,
+		      unsigned int data_reg, u32 *vals, unsigned int nregs,
+		      unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx);
+		*vals++ = t4_read_reg(adap, data_reg);
+		start_idx++;
+	}
+}
+
+/**
+ *	t4_write_indirect - write indirectly addressed registers
+ *	@adap: the adapter
+ *	@addr_reg: register holding the indirect addresses
+ *	@data_reg: register holding the value for the indirect registers
+ *	@vals: values to write
+ *	@nregs: how many indirect registers to write
+ *	@start_idx: address of first indirect register to write
+ *
+ *	Writes a sequential block of registers that are accessed indirectly
+ *	through an address/data register pair.
+ */
+void t4_write_indirect(struct adapter *adap, unsigned int addr_reg,
+		       unsigned int data_reg, const u32 *vals,
+		       unsigned int nregs, unsigned int start_idx)
+{
+	while (nregs--) {
+		t4_write_reg(adap, addr_reg, start_idx++);
+		t4_write_reg(adap, data_reg, *vals++);
+	}
+}
+
+/*
+ * Get the reply to a mailbox command and store it in @rpl in big-endian order.
+ */
+static void get_mbox_rpl(struct adapter *adap, __be64 *rpl, int nflit,
+			 u32 mbox_addr)
+{
+	for ( ; nflit; nflit--, mbox_addr += 8)
+		*rpl++ = cpu_to_be64(t4_read_reg64(adap, mbox_addr));
+}
+
+/*
+ * Handle a FW assertion reported in a mailbox.
+ */
+static void fw_asrt(struct adapter *adap, u32 mbox_addr)
+{
+	struct fw_debug_cmd asrt;
+
+	get_mbox_rpl(adap, (__be64 *)&asrt, sizeof(asrt) / 8, mbox_addr);
+	dev_alert(adap->pdev_dev,
+		  "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
+		  asrt.u.assert.filename_0_7, ntohl(asrt.u.assert.line),
+		  ntohl(asrt.u.assert.x), ntohl(asrt.u.assert.y));
+}
+
+static void dump_mbox(struct adapter *adap, int mbox, u32 data_reg)
+{
+	dev_err(adap->pdev_dev,
+		"mbox %d: %llx %llx %llx %llx %llx %llx %llx %llx\n", mbox,
+		(unsigned long long)t4_read_reg64(adap, data_reg),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 8),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 16),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 24),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 32),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 40),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 48),
+		(unsigned long long)t4_read_reg64(adap, data_reg + 56));
+}
+
+/**
+ *	t4_wr_mbox_meat - send a command to FW through the given mailbox
+ *	@adap: the adapter
+ *	@mbox: index of the mailbox to use
+ *	@cmd: the command to write
+ *	@size: command length in bytes
+ *	@rpl: where to optionally store the reply
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sends the given command to FW through the selected mailbox and waits
+ *	for the FW to execute the command.  If @rpl is not %NULL it is used to
+ *	store the FW's reply to the command.  The command and its optional
+ *	reply are of the same length.  FW can take up to %FW_CMD_MAX_TIMEOUT ms
+ *	to respond.  @sleep_ok determines whether we may sleep while awaiting
+ *	the response.  If sleeping is allowed we use progressive backoff;
+ *	otherwise we spin.
+ *
+ *	The return value is 0 on success or a negative errno on failure.  A
+ *	failure can happen either because we are not able to execute the
+ *	command or FW executes it but signals an error.  In the latter case
+ *	the return value is the error code indicated by FW (negated).
+ */
+int t4_wr_mbox_meat(struct adapter *adap, int mbox, const void *cmd, int size,
+		    void *rpl, bool sleep_ok)
+{
+	static const int delay[] = {
+		1, 1, 3, 5, 10, 10, 20, 50, 100, 200
+	};
+
+	u32 v;
+	u64 res;
+	int i, ms, delay_idx;
+	const __be64 *p = cmd;
+	u32 data_reg = PF_REG(mbox, CIM_PF_MAILBOX_DATA);
+	u32 ctl_reg = PF_REG(mbox, CIM_PF_MAILBOX_CTRL);
+
+	if ((size & 15) || size > MBOX_LEN)
+		return -EINVAL;
+
+	v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+	for (i = 0; v == MBOX_OWNER_NONE && i < 3; i++)
+		v = MBOWNER_GET(t4_read_reg(adap, ctl_reg));
+
+	if (v != MBOX_OWNER_DRV)
+		return v ? -EBUSY : -ETIMEDOUT;
+
+	for (i = 0; i < size; i += 8)
+		t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p++));
+
+	t4_write_reg(adap, ctl_reg, MBMSGVALID | MBOWNER(MBOX_OWNER_FW));
+	t4_read_reg(adap, ctl_reg);          /* flush write */
+
+	delay_idx = 0;
+	ms = delay[0];
+
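+	/* Poll for FW to return mailbox ownership, backing off progressively;
+	 * sleep between polls if allowed, otherwise spin.
+	 */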
+	for (i = 0; i < FW_CMD_MAX_TIMEOUT; i += ms) {
+		if (sleep_ok) {
+			ms = delay[delay_idx];  /* last element may repeat */
+			if (delay_idx < ARRAY_SIZE(delay) - 1)
+				delay_idx++;
+			msleep(ms);
+		} else
+			mdelay(ms);
+
+		v = t4_read_reg(adap, ctl_reg);
+		if (MBOWNER_GET(v) == MBOX_OWNER_DRV) {
+			if (!(v & MBMSGVALID)) {
+				t4_write_reg(adap, ctl_reg, 0);
+				continue;
+			}
+
+			res = t4_read_reg64(adap, data_reg);
+			if (FW_CMD_OP_GET(res >> 32) == FW_DEBUG_CMD) {
+				fw_asrt(adap, data_reg);
+				res = FW_CMD_RETVAL(EIO);
+			} else if (rpl)
+				get_mbox_rpl(adap, rpl, size / 8, data_reg);
+
+			if (FW_CMD_RETVAL_GET((int)res))
+				dump_mbox(adap, mbox, data_reg);
+			t4_write_reg(adap, ctl_reg, 0);
+			return -FW_CMD_RETVAL_GET((int)res);
+		}
+	}
+
+	dump_mbox(adap, mbox, data_reg);
+	dev_err(adap->pdev_dev, "command %#x in mailbox %d timed out\n",
+		*(const u8 *)cmd, mbox);
+	return -ETIMEDOUT;
+}
+
+/**
+ *	t4_mc_read - read from MC through backdoor accesses
+ *	@adap: the adapter
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from MC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
+ */
+int t4_mc_read(struct adapter *adap, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	if (t4_read_reg(adap, MC_BIST_CMD) & START_BIST)
+		return -EBUSY;
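+
+	/* drive the MC BIST engine through a single 64-byte backdoor read */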
+	t4_write_reg(adap, MC_BIST_CMD_ADDR, addr & ~0x3fU);
+	t4_write_reg(adap, MC_BIST_CMD_LEN, 64);
+	t4_write_reg(adap, MC_BIST_DATA_PATTERN, 0xc);
+	t4_write_reg(adap, MC_BIST_CMD, BIST_OPCODE(1) | START_BIST |
+		     BIST_CMD_GAP(1));
+	i = t4_wait_op_done(adap, MC_BIST_CMD, START_BIST, 0, 10, 1);
+	if (i)
+		return i;
+
+#define MC_DATA(i) MC_BIST_STATUS_REG(MC_BIST_STATUS_RDATA, i)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, MC_DATA(i)));
+	if (ecc)
+		*ecc = t4_read_reg64(adap, MC_DATA(16));
+#undef MC_DATA
+	return 0;
+}
+
+/**
+ *	t4_edc_read - read from EDC through backdoor accesses
+ *	@adap: the adapter
+ *	@idx: which EDC to access
+ *	@addr: address of first byte requested
+ *	@data: 64 bytes of data containing the requested address
+ *	@ecc: where to store the corresponding 64-bit ECC word
+ *
+ *	Read 64 bytes of data from EDC starting at a 64-byte-aligned address
+ *	that covers the requested address @addr.  If @ecc is not %NULL it
+ *	is assigned the 64-bit ECC word for the read data.
+ */
+int t4_edc_read(struct adapter *adap, int idx, u32 addr, __be32 *data, u64 *ecc)
+{
+	int i;
+
+	idx *= EDC_STRIDE;
+	if (t4_read_reg(adap, EDC_BIST_CMD + idx) & START_BIST)
+		return -EBUSY;
+	t4_write_reg(adap, EDC_BIST_CMD_ADDR + idx, addr & ~0x3fU);
+	t4_write_reg(adap, EDC_BIST_CMD_LEN + idx, 64);
+	t4_write_reg(adap, EDC_BIST_DATA_PATTERN + idx, 0xc);
+	t4_write_reg(adap, EDC_BIST_CMD + idx,
+		     BIST_OPCODE(1) | BIST_CMD_GAP(1) | START_BIST);
+	i = t4_wait_op_done(adap, EDC_BIST_CMD + idx, START_BIST, 0, 10, 1);
+	if (i)
+		return i;
+
+#define EDC_DATA(i) (EDC_BIST_STATUS_REG(EDC_BIST_STATUS_RDATA, i) + idx)
+
+	for (i = 15; i >= 0; i--)
+		*data++ = htonl(t4_read_reg(adap, EDC_DATA(i)));
+	if (ecc)
+		*ecc = t4_read_reg64(adap, EDC_DATA(16));
+#undef EDC_DATA
+	return 0;
+}
+
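+/*
+ * Each VPD keyword entry is a 2-byte keyword tag, a 1-byte length, and the
+ * payload itself.
+ */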
+#define VPD_ENTRY(name, len) \
+	u8 name##_kword[2]; u8 name##_len; u8 name##_data[len]
+
+/*
+ * Partial EEPROM Vital Product Data structure.  Includes only the ID and
+ * VPD-R sections.
+ */
+struct t4_vpd {
+	u8  id_tag;
+	u8  id_len[2];
+	u8  id_data[ID_LEN];
+	u8  vpdr_tag;
+	u8  vpdr_len[2];
+	VPD_ENTRY(pn, 16);                     /* part number */
+	VPD_ENTRY(ec, EC_LEN);                 /* EC level */
+	VPD_ENTRY(sn, SERNUM_LEN);             /* serial number */
+	VPD_ENTRY(na, 12);                     /* MAC address base */
+	VPD_ENTRY(port_type, 8);               /* port types */
+	VPD_ENTRY(gpio, 14);                   /* GPIO usage */
+	VPD_ENTRY(cclk, 6);                    /* core clock */
+	VPD_ENTRY(port_addr, 8);               /* port MDIO addresses */
+	VPD_ENTRY(rv, 1);                      /* csum */
+	u32 pad;                  /* for multiple-of-4 sizing and alignment */
+};
+
+#define EEPROM_STAT_ADDR   0x7bfc
+#define VPD_BASE           0
+
+/**
+ *	t4_seeprom_wp - enable/disable EEPROM write protection
+ *	@adapter: the adapter
+ *	@enable: whether to enable or disable write protection
+ *
+ *	Enables or disables write protection on the serial EEPROM.
+ */
+int t4_seeprom_wp(struct adapter *adapter, bool enable)
+{
+	unsigned int v = enable ? 0xc : 0;
+	int ret = pci_write_vpd(adapter->pdev, EEPROM_STAT_ADDR, 4, &v);
+	return ret < 0 ? ret : 0;
+}
+
+/**
+ *	get_vpd_params - read VPD parameters from VPD EEPROM
+ *	@adapter: adapter to read
+ *	@p: where to store the parameters
+ *
+ *	Reads card parameters stored in VPD EEPROM.
+ */
+static int get_vpd_params(struct adapter *adapter, struct vpd_params *p)
+{
+	int ret;
+	struct t4_vpd vpd;
+	u8 *q = (u8 *)&vpd, csum;
+
+	ret = pci_read_vpd(adapter->pdev, VPD_BASE, sizeof(vpd), &vpd);
+	if (ret < 0)
+		return ret;
+
+	for (csum = 0; q <= vpd.rv_data; q++)
+		csum += *q;
+
+	if (csum) {
+		dev_err(adapter->pdev_dev,
+			"corrupted VPD EEPROM, actual csum %u\n", csum);
+		return -EINVAL;
+	}
+
+	p->cclk = simple_strtoul(vpd.cclk_data, NULL, 10);
+	memcpy(p->id, vpd.id_data, sizeof(vpd.id_data));
+	strim(p->id);
+	memcpy(p->ec, vpd.ec_data, sizeof(vpd.ec_data));
+	strim(p->ec);
+	memcpy(p->sn, vpd.sn_data, sizeof(vpd.sn_data));
+	strim(p->sn);
+	return 0;
+}
+
+/* serial flash and firmware constants */
+enum {
+	SF_ATTEMPTS = 10,             /* max retries for SF operations */
+
+	/* flash command opcodes */
+	SF_PROG_PAGE    = 2,          /* program page */
+	SF_WR_DISABLE   = 4,          /* disable writes */
+	SF_RD_STATUS    = 5,          /* read status register */
+	SF_WR_ENABLE    = 6,          /* enable writes */
+	SF_RD_DATA_FAST = 0xb,        /* read flash */
+	SF_ERASE_SECTOR = 0xd8,       /* erase sector */
+
+	FW_START_SEC = 8,             /* first flash sector for FW */
+	FW_END_SEC = 15,              /* last flash sector for FW */
+	FW_IMG_START = FW_START_SEC * SF_SEC_SIZE,
+	FW_MAX_SIZE = (FW_END_SEC - FW_START_SEC + 1) * SF_SEC_SIZE,
+};
+
+/**
+ *	sf1_read - read data from the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to read
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@valp: where to store the read data
+ *
+ *	Reads up to 4 bytes of data from the serial flash.  The location of
+ *	the read needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_read(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		    int lock, u32 *valp)
+{
+	int ret;
+
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP) & BUSY)
+		return -EBUSY;
+	cont = cont ? SF_CONT : 0;
+	lock = lock ? SF_LOCK : 0;
+	t4_write_reg(adapter, SF_OP, lock | cont | BYTECNT(byte_cnt - 1));
+	ret = t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+	if (!ret)
+		*valp = t4_read_reg(adapter, SF_DATA);
+	return ret;
+}
+
+/**
+ *	sf1_write - write data to the serial flash
+ *	@adapter: the adapter
+ *	@byte_cnt: number of bytes to write
+ *	@cont: whether another operation will be chained
+ *	@lock: whether to lock SF for PL access only
+ *	@val: value to write
+ *
+ *	Writes up to 4 bytes of data to the serial flash.  The location of
+ *	the write needs to be specified prior to calling this by issuing the
+ *	appropriate commands to the serial flash.
+ */
+static int sf1_write(struct adapter *adapter, unsigned int byte_cnt, int cont,
+		     int lock, u32 val)
+{
+	if (!byte_cnt || byte_cnt > 4)
+		return -EINVAL;
+	if (t4_read_reg(adapter, SF_OP) & BUSY)
+		return -EBUSY;
+	cont = cont ? SF_CONT : 0;
+	lock = lock ? SF_LOCK : 0;
+	t4_write_reg(adapter, SF_DATA, val);
+	t4_write_reg(adapter, SF_OP, lock |
+		     cont | BYTECNT(byte_cnt - 1) | OP_WR);
+	return t4_wait_op_done(adapter, SF_OP, BUSY, 0, SF_ATTEMPTS, 5);
+}
+
+/**
+ *	flash_wait_op - wait for a flash operation to complete
+ *	@adapter: the adapter
+ *	@attempts: max number of polls of the status register
+ *	@delay: delay between polls in ms
+ *
+ *	Wait for a flash operation to complete by polling the status register.
+ */
+static int flash_wait_op(struct adapter *adapter, int attempts, int delay)
+{
+	int ret;
+	u32 status;
+
+	while (1) {
+		if ((ret = sf1_write(adapter, 1, 1, 1, SF_RD_STATUS)) != 0 ||
+		    (ret = sf1_read(adapter, 1, 0, 1, &status)) != 0)
+			return ret;
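+		/* bit 0 of the flash status register is write-in-progress */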
+		if (!(status & 1))
+			return 0;
+		if (--attempts == 0)
+			return -EAGAIN;
+		if (delay)
+			msleep(delay);
+	}
+}
+
+/**
+ *	t4_read_flash - read words from serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address for the read
+ *	@nwords: how many 32-bit words to read
+ *	@data: where to store the read data
+ *	@byte_oriented: whether to store data as bytes or as words
+ *
+ *	Read the specified number of 32-bit words from the serial flash.
+ *	If @byte_oriented is set the read data is stored as a byte array
+ *	(i.e., big-endian), otherwise as 32-bit words in the platform's
+ *	natural endianness.
+ */
+int t4_read_flash(struct adapter *adapter, unsigned int addr,
+		  unsigned int nwords, u32 *data, int byte_oriented)
+{
+	int ret;
+
+	if (addr + nwords * sizeof(u32) > SF_SIZE || (addr & 3))
+		return -EINVAL;
+
+	addr = swab32(addr) | SF_RD_DATA_FAST;
+
+	if ((ret = sf1_write(adapter, 4, 1, 0, addr)) != 0 ||
+	    (ret = sf1_read(adapter, 1, 1, 0, data)) != 0)
+		return ret;
+
+	for ( ; nwords; nwords--, data++) {
+		ret = sf1_read(adapter, 4, nwords > 1, nwords == 1, data);
+		if (nwords == 1)
+			t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+		if (ret)
+			return ret;
+		if (byte_oriented)
+			*data = htonl(*data);
+	}
+	return 0;
+}
+
+/**
+ *	t4_write_flash - write up to a page of data to the serial flash
+ *	@adapter: the adapter
+ *	@addr: the start address to write
+ *	@n: length of data to write in bytes
+ *	@data: the data to write
+ *
+ *	Writes up to a page of data (256 bytes) to the serial flash starting
+ *	at the given address.  All the data must be written to the same page.
+ */
+static int t4_write_flash(struct adapter *adapter, unsigned int addr,
+			  unsigned int n, const u8 *data)
+{
+	int ret;
+	u32 buf[64];
+	unsigned int i, c, left, val, offset = addr & 0xff;
+
+	if (addr >= SF_SIZE || offset + n > SF_PAGE_SIZE)
+		return -EINVAL;
+
+	val = swab32(addr) | SF_PROG_PAGE;
+
+	if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+	    (ret = sf1_write(adapter, 4, 1, 1, val)) != 0)
+		goto unlock;
+
+	for (left = n; left; left -= c) {
+		c = min(left, 4U);
+		for (val = 0, i = 0; i < c; ++i)
+			val = (val << 8) + *data++;
+
+		ret = sf1_write(adapter, c, c != left, 1, val);
+		if (ret)
+			goto unlock;
+	}
+	ret = flash_wait_op(adapter, 5, 1);
+	if (ret)
+		goto unlock;
+
+	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+
+	/* Read the page to verify the write succeeded */
+	ret = t4_read_flash(adapter, addr & ~0xff, ARRAY_SIZE(buf), buf, 1);
+	if (ret)
+		return ret;
+
+	if (memcmp(data - n, (u8 *)buf + offset, n)) {
+		dev_err(adapter->pdev_dev,
+			"failed to correctly write the flash page at %#x\n",
+			addr);
+		return -EIO;
+	}
+	return 0;
+
+unlock:
+	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+	return ret;
+}
+
+/**
+ *	get_fw_version - read the firmware version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the FW version from flash.
+ */
+static int get_fw_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter,
+			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver), 1,
+			     vers, 0);
+}
+
+/**
+ *	get_tp_version - read the TP microcode version
+ *	@adapter: the adapter
+ *	@vers: where to place the version
+ *
+ *	Reads the TP microcode version from flash.
+ */
+static int get_tp_version(struct adapter *adapter, u32 *vers)
+{
+	return t4_read_flash(adapter, FW_IMG_START + offsetof(struct fw_hdr,
+							      tp_microcode_ver),
+			     1, vers, 0);
+}
+
+/**
+ *	t4_check_fw_version - check if the FW is compatible with this driver
+ *	@adapter: the adapter
+ *
+ *	Checks if an adapter's FW is compatible with the driver.  Returns 0
+ *	if there's exact match, a negative error if the version could not be
+ *	read or there's a major version mismatch, and a positive value if the
+ *	expected major version is found but there's a minor version mismatch.
+ */
+int t4_check_fw_version(struct adapter *adapter)
+{
+	u32 api_vers[2];
+	int ret, major, minor, micro;
+
+	ret = get_fw_version(adapter, &adapter->params.fw_vers);
+	if (!ret)
+		ret = get_tp_version(adapter, &adapter->params.tp_vers);
+	if (!ret)
+		ret = t4_read_flash(adapter,
+			FW_IMG_START + offsetof(struct fw_hdr, intfver_nic),
+			2, api_vers, 1);
+	if (ret)
+		return ret;
+
+	major = FW_HDR_FW_VER_MAJOR_GET(adapter->params.fw_vers);
+	minor = FW_HDR_FW_VER_MINOR_GET(adapter->params.fw_vers);
+	micro = FW_HDR_FW_VER_MICRO_GET(adapter->params.fw_vers);
+	memcpy(adapter->params.api_vers, api_vers,
+	       sizeof(adapter->params.api_vers));
+
+	if (major != FW_VERSION_MAJOR) {            /* major mismatch - fail */
+		dev_err(adapter->pdev_dev,
+			"card FW has major version %u, driver wants %u\n",
+			major, FW_VERSION_MAJOR);
+		return -EINVAL;
+	}
+
+	if (minor == FW_VERSION_MINOR && micro == FW_VERSION_MICRO)
+		return 0;                                   /* perfect match */
+
+	/* Minor/micro version mismatch.  Report it but often it's OK. */
+	return 1;
+}
+
+/**
+ *	t4_flash_erase_sectors - erase a range of flash sectors
+ *	@adapter: the adapter
+ *	@start: the first sector to erase
+ *	@end: the last sector to erase
+ *
+ *	Erases the sectors in the given inclusive range.
+ */
+static int t4_flash_erase_sectors(struct adapter *adapter, int start, int end)
+{
+	int ret = 0;
+
+	while (start <= end) {
+		if ((ret = sf1_write(adapter, 1, 0, 1, SF_WR_ENABLE)) != 0 ||
+		    (ret = sf1_write(adapter, 4, 0, 1,
+				     SF_ERASE_SECTOR | (start << 8))) != 0 ||
+		    (ret = flash_wait_op(adapter, 5, 500)) != 0) {
+			dev_err(adapter->pdev_dev,
+				"erase of flash sector %d failed, error %d\n",
+				start, ret);
+			break;
+		}
+		start++;
+	}
+	t4_write_reg(adapter, SF_OP, 0);    /* unlock SF */
+	return ret;
+}
+
+/**
+ *	t4_load_fw - download firmware
+ *	@adap: the adapter
+ *	@fw_data: the firmware image to write
+ *	@size: image size
+ *
+ *	Write the supplied firmware image to the card's serial flash.
+ */
+int t4_load_fw(struct adapter *adap, const u8 *fw_data, unsigned int size)
+{
+	u32 csum;
+	int ret, addr;
+	unsigned int i;
+	u8 first_page[SF_PAGE_SIZE];
+	const u32 *p = (const u32 *)fw_data;
+	const struct fw_hdr *hdr = (const struct fw_hdr *)fw_data;
+
+	if (!size) {
+		dev_err(adap->pdev_dev, "FW image has no data\n");
+		return -EINVAL;
+	}
+	if (size & 511) {
+		dev_err(adap->pdev_dev,
+			"FW image size not multiple of 512 bytes\n");
+		return -EINVAL;
+	}
+	if (ntohs(hdr->len512) * 512 != size) {
+		dev_err(adap->pdev_dev,
+			"FW image size differs from size in FW header\n");
+		return -EINVAL;
+	}
+	if (size > FW_MAX_SIZE) {
+		dev_err(adap->pdev_dev, "FW image too large, max is %u bytes\n",
+			FW_MAX_SIZE);
+		return -EFBIG;
+	}
+
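+	/* the image must sum to 0xffffffff over its 32-bit big-endian words */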
+	for (csum = 0, i = 0; i < size / sizeof(csum); i++)
+		csum += ntohl(p[i]);
+
+	if (csum != 0xffffffff) {
+		dev_err(adap->pdev_dev,
+			"corrupted firmware image, checksum %#x\n", csum);
+		return -EINVAL;
+	}
+
+	i = DIV_ROUND_UP(size, SF_SEC_SIZE);        /* # of sectors spanned */
+	ret = t4_flash_erase_sectors(adap, FW_START_SEC, FW_START_SEC + i - 1);
+	if (ret)
+		goto out;
+
+	/*
+	 * We write the correct version at the end so the driver can see a bad
+	 * version if the FW write fails.  Start by writing a copy of the
+	 * first page with a bad version.
+	 */
+	memcpy(first_page, fw_data, SF_PAGE_SIZE);
+	((struct fw_hdr *)first_page)->fw_ver = htonl(0xffffffff);
+	ret = t4_write_flash(adap, FW_IMG_START, SF_PAGE_SIZE, first_page);
+	if (ret)
+		goto out;
+
+	addr = FW_IMG_START;
+	for (size -= SF_PAGE_SIZE; size; size -= SF_PAGE_SIZE) {
+		addr += SF_PAGE_SIZE;
+		fw_data += SF_PAGE_SIZE;
+		ret = t4_write_flash(adap, addr, SF_PAGE_SIZE, fw_data);
+		if (ret)
+			goto out;
+	}
+
+	ret = t4_write_flash(adap,
+			     FW_IMG_START + offsetof(struct fw_hdr, fw_ver),
+			     sizeof(hdr->fw_ver), (const u8 *)&hdr->fw_ver);
+out:
+	if (ret)
+		dev_err(adap->pdev_dev, "firmware download failed, error %d\n",
+			ret);
+	return ret;
+}
+
+#define ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
+		     FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
+
+/**
+ *	t4_link_start - apply link configuration to MAC/PHY
+ *	@adap: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@port: the port id
+ *	@lc: the requested link configuration
+ *
+ *	Set up a port's MAC and PHY according to a desired link configuration.
+ *	- If the PHY can auto-negotiate first decide what to advertise, then
+ *	  enable/disable auto-negotiation as desired, and reset.
+ *	- If the PHY does not auto-negotiate just reset it.
+ *	- If auto-negotiation is off set the MAC to the proper speed/duplex/FC,
+ *	  otherwise do it later based on the outcome of auto-negotiation.
+ */
+int t4_link_start(struct adapter *adap, unsigned int mbox, unsigned int port,
+		  struct link_config *lc)
+{
+	struct fw_port_cmd c;
+	unsigned int fc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
+
+	lc->link_ok = 0;
+	if (lc->requested_fc & PAUSE_RX)
+		fc |= FW_PORT_CAP_FC_RX;
+	if (lc->requested_fc & PAUSE_TX)
+		fc |= FW_PORT_CAP_FC_TX;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+				  FW_LEN16(c));
+
+	if (!(lc->supported & FW_PORT_CAP_ANEG)) {
+		c.u.l1cfg.rcap = htonl((lc->supported & ADVERT_MASK) | fc);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else if (lc->autoneg == AUTONEG_DISABLE) {
+		c.u.l1cfg.rcap = htonl(lc->requested_speed | fc | mdi);
+		lc->fc = lc->requested_fc & (PAUSE_RX | PAUSE_TX);
+	} else
+		c.u.l1cfg.rcap = htonl(lc->advertising | fc | mdi);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_restart_aneg - restart autonegotiation
+ *	@adap: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@port: the port id
+ *
+ *	Restarts autonegotiation for the selected port.
+ */
+int t4_restart_aneg(struct adapter *adap, unsigned int mbox, unsigned int port)
+{
+	struct fw_port_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) | FW_CMD_REQUEST |
+			       FW_CMD_EXEC | FW_PORT_CMD_PORTID(port));
+	c.action_to_len16 = htonl(FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
+				  FW_LEN16(c));
+	c.u.l1cfg.rcap = htonl(FW_PORT_CAP_ANEG);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_set_vlan_accel - configure HW VLAN extraction
+ *	@adap: the adapter
+ *	@ports: bitmap of adapter ports to operate on
+ *	@on: enable (1) or disable (0) HW VLAN extraction
+ *
+ *	Enables or disables HW extraction of VLAN tags for the ports specified
+ *	by @ports.  @ports is a bitmap with the ith bit designating the port
+ *	associated with the ith adapter channel.
+ */
+void t4_set_vlan_accel(struct adapter *adap, unsigned int ports, int on)
+{
+	ports <<= VLANEXTENABLE_SHIFT;
+	t4_set_reg_field(adap, TP_OUT_CONFIG, ports, on ? ports : 0);
+}
+
+struct intr_info {
+	unsigned int mask;       /* bits to check in interrupt status */
+	const char *msg;         /* message to print or NULL */
+	short stat_idx;          /* stat counter to increment or -1 */
+	unsigned short fatal;    /* whether the condition reported is fatal */
+};
+
+/**
+ *	t4_handle_intr_status - table-driven interrupt handler
+ *	@adapter: the adapter that generated the interrupt
+ *	@reg: the interrupt status register to process
+ *	@acts: table of interrupt actions
+ *
+ *	A table-driven interrupt handler that applies a set of masks to an
+ *	interrupt status word and performs the corresponding actions if the
+ *	interrupts described by the mask have occurred.  The actions include
+ *	optionally emitting a warning or alert message.  The table is terminated
+ *	by an entry specifying mask 0.  Returns the number of fatal interrupt
+ *	conditions.
+ */
+static int t4_handle_intr_status(struct adapter *adapter, unsigned int reg,
+				 const struct intr_info *acts)
+{
+	int fatal = 0;
+	unsigned int mask = 0;
+	unsigned int status = t4_read_reg(adapter, reg);
+
+	for ( ; acts->mask; ++acts) {
+		if (!(status & acts->mask))
+			continue;
+		if (acts->fatal) {
+			fatal++;
+			dev_alert(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+				  status & acts->mask);
+		} else if (acts->msg && printk_ratelimit())
+			dev_warn(adapter->pdev_dev, "%s (0x%x)\n", acts->msg,
+				 status & acts->mask);
+		mask |= acts->mask;
+	}
+	status &= mask;
+	if (status)                           /* clear processed interrupts */
+		t4_write_reg(adapter, reg, status);
+	return fatal;
+}
+
+/*
+ * Interrupt handler for the PCIE module.
+ */
+static void pcie_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info sysbus_intr_info[] = {
+		{ RNPP, "RXNP array parity error", -1, 1 },
+		{ RPCP, "RXPC array parity error", -1, 1 },
+		{ RCIP, "RXCIF array parity error", -1, 1 },
+		{ RCCP, "Rx completions control array parity error", -1, 1 },
+		{ RFTP, "RXFT array parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info pcie_port_intr_info[] = {
+		{ TPCP, "TXPC array parity error", -1, 1 },
+		{ TNPP, "TXNP array parity error", -1, 1 },
+		{ TFTP, "TXFT array parity error", -1, 1 },
+		{ TCAP, "TXCA array parity error", -1, 1 },
+		{ TCIP, "TXCIF array parity error", -1, 1 },
+		{ RCAP, "RXCA array parity error", -1, 1 },
+		{ OTDD, "outbound request TLP discarded", -1, 1 },
+		{ RDPE, "Rx data parity error", -1, 1 },
+		{ TDUE, "Tx uncorrectable data error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info pcie_intr_info[] = {
+		{ MSIADDRLPERR, "MSI AddrL parity error", -1, 1 },
+		{ MSIADDRHPERR, "MSI AddrH parity error", -1, 1 },
+		{ MSIDATAPERR, "MSI data parity error", -1, 1 },
+		{ MSIXADDRLPERR, "MSI-X AddrL parity error", -1, 1 },
+		{ MSIXADDRHPERR, "MSI-X AddrH parity error", -1, 1 },
+		{ MSIXDATAPERR, "MSI-X data parity error", -1, 1 },
+		{ MSIXDIPERR, "MSI-X DI parity error", -1, 1 },
+		{ PIOCPLPERR, "PCI PIO completion FIFO parity error", -1, 1 },
+		{ PIOREQPERR, "PCI PIO request FIFO parity error", -1, 1 },
+		{ TARTAGPERR, "PCI PCI target tag FIFO parity error", -1, 1 },
+		{ CCNTPERR, "PCI CMD channel count parity error", -1, 1 },
+		{ CREQPERR, "PCI CMD channel request parity error", -1, 1 },
+		{ CRSPPERR, "PCI CMD channel response parity error", -1, 1 },
+		{ DCNTPERR, "PCI DMA channel count parity error", -1, 1 },
+		{ DREQPERR, "PCI DMA channel request parity error", -1, 1 },
+		{ DRSPPERR, "PCI DMA channel response parity error", -1, 1 },
+		{ HCNTPERR, "PCI HMA channel count parity error", -1, 1 },
+		{ HREQPERR, "PCI HMA channel request parity error", -1, 1 },
+		{ HRSPPERR, "PCI HMA channel response parity error", -1, 1 },
+		{ CFGSNPPERR, "PCI config snoop FIFO parity error", -1, 1 },
+		{ FIDPERR, "PCI FID parity error", -1, 1 },
+		{ INTXCLRPERR, "PCI INTx clear parity error", -1, 1 },
+		{ MATAGPERR, "PCI MA tag parity error", -1, 1 },
+		{ PIOTAGPERR, "PCI PIO tag parity error", -1, 1 },
+		{ RXCPLPERR, "PCI Rx completion parity error", -1, 1 },
+		{ RXWRPERR, "PCI Rx write parity error", -1, 1 },
+		{ RPLPERR, "PCI replay buffer parity error", -1, 1 },
+		{ PCIESINT, "PCI core secondary fault", -1, 1 },
+		{ PCIEPINT, "PCI core primary fault", -1, 1 },
+		{ UNXSPLCPLERR, "PCI unexpected split completion error", -1, 0 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter,
+				    PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+				    sysbus_intr_info) +
+	      t4_handle_intr_status(adapter,
+				    PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+				    pcie_port_intr_info) +
+	      t4_handle_intr_status(adapter, PCIE_INT_CAUSE, pcie_intr_info);
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * TP interrupt handler.
+ */
+static void tp_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info tp_intr_info[] = {
+		{ 0x3fffffff, "TP parity error", -1, 1 },
+		{ FLMTXFLSTEMPTY, "TP out of Tx pages", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, TP_INT_CAUSE, tp_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * SGE interrupt handler.
+ */
+static void sge_intr_handler(struct adapter *adapter)
+{
+	u64 v;
+
+	static struct intr_info sge_intr_info[] = {
+		{ ERR_CPL_EXCEED_IQE_SIZE,
+		  "SGE received CPL exceeding IQE size", -1, 1 },
+		{ ERR_INVALID_CIDX_INC,
+		  "SGE GTS CIDX increment too large", -1, 0 },
+		{ ERR_CPL_OPCODE_0, "SGE received 0-length CPL", -1, 0 },
+		{ ERR_DROPPED_DB, "SGE doorbell dropped", -1, 0 },
+		{ ERR_DATA_CPL_ON_HIGH_QID1 | ERR_DATA_CPL_ON_HIGH_QID0,
+		  "SGE IQID > 1023 received CPL for FL", -1, 0 },
+		{ ERR_BAD_DB_PIDX3, "SGE DBP 3 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX2, "SGE DBP 2 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX1, "SGE DBP 1 pidx increment too large", -1,
+		  0 },
+		{ ERR_BAD_DB_PIDX0, "SGE DBP 0 pidx increment too large", -1,
+		  0 },
+		{ ERR_ING_CTXT_PRIO,
+		  "SGE too many priority ingress contexts", -1, 0 },
+		{ ERR_EGR_CTXT_PRIO,
+		  "SGE too many priority egress contexts", -1, 0 },
+		{ INGRESS_SIZE_ERR, "SGE illegal ingress QID", -1, 0 },
+		{ EGRESS_SIZE_ERR, "SGE illegal egress QID", -1, 0 },
+		{ 0 }
+	};
+
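+	/* SGE_INT_CAUSE1/2 report parity errors; fold them into one 64-bit
+	 * word so a single check covers both.
+	 */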
+	v = (u64)t4_read_reg(adapter, SGE_INT_CAUSE1) |
+	    ((u64)t4_read_reg(adapter, SGE_INT_CAUSE2) << 32);
+	if (v) {
+		dev_alert(adapter->pdev_dev, "SGE parity error (%#llx)\n",
+			 (unsigned long long)v);
+		t4_write_reg(adapter, SGE_INT_CAUSE1, v);
+		t4_write_reg(adapter, SGE_INT_CAUSE2, v >> 32);
+	}
+
+	if (t4_handle_intr_status(adapter, SGE_INT_CAUSE3, sge_intr_info) ||
+	    v != 0)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * CIM interrupt handler.
+ */
+static void cim_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info cim_intr_info[] = {
+		{ PREFDROPINT, "CIM control register prefetch drop", -1, 1 },
+		{ OBQPARERR, "CIM OBQ parity error", -1, 1 },
+		{ IBQPARERR, "CIM IBQ parity error", -1, 1 },
+		{ MBUPPARERR, "CIM mailbox uP parity error", -1, 1 },
+		{ MBHOSTPARERR, "CIM mailbox host parity error", -1, 1 },
+		{ TIEQINPARERRINT, "CIM TIEQ outgoing parity error", -1, 1 },
+		{ TIEQOUTPARERRINT, "CIM TIEQ incoming parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info cim_upintr_info[] = {
+		{ RSVDSPACEINT, "CIM reserved space access", -1, 1 },
+		{ ILLTRANSINT, "CIM illegal transaction", -1, 1 },
+		{ ILLWRINT, "CIM illegal write", -1, 1 },
+		{ ILLRDINT, "CIM illegal read", -1, 1 },
+		{ ILLRDBEINT, "CIM illegal read BE", -1, 1 },
+		{ ILLWRBEINT, "CIM illegal write BE", -1, 1 },
+		{ SGLRDBOOTINT, "CIM single read from boot space", -1, 1 },
+		{ SGLWRBOOTINT, "CIM single write to boot space", -1, 1 },
+		{ BLKWRBOOTINT, "CIM block write to boot space", -1, 1 },
+		{ SGLRDFLASHINT, "CIM single read from flash space", -1, 1 },
+		{ SGLWRFLASHINT, "CIM single write to flash space", -1, 1 },
+		{ BLKWRFLASHINT, "CIM block write to flash space", -1, 1 },
+		{ SGLRDEEPROMINT, "CIM single EEPROM read", -1, 1 },
+		{ SGLWREEPROMINT, "CIM single EEPROM write", -1, 1 },
+		{ BLKRDEEPROMINT, "CIM block EEPROM read", -1, 1 },
+		{ BLKWREEPROMINT, "CIM block EEPROM write", -1, 1 },
+		{ SGLRDCTLINT , "CIM single read from CTL space", -1, 1 },
+		{ SGLWRCTLINT , "CIM single write to CTL space", -1, 1 },
+		{ BLKRDCTLINT , "CIM block read from CTL space", -1, 1 },
+		{ BLKWRCTLINT , "CIM block write to CTL space", -1, 1 },
+		{ SGLRDPLINT , "CIM single read from PL space", -1, 1 },
+		{ SGLWRPLINT , "CIM single write to PL space", -1, 1 },
+		{ BLKRDPLINT , "CIM block read from PL space", -1, 1 },
+		{ BLKWRPLINT , "CIM block write to PL space", -1, 1 },
+		{ REQOVRLOOKUPINT , "CIM request FIFO overwrite", -1, 1 },
+		{ RSPOVRLOOKUPINT , "CIM response FIFO overwrite", -1, 1 },
+		{ TIMEOUTINT , "CIM PIF timeout", -1, 1 },
+		{ TIMEOUTMAINT , "CIM PIF MA timeout", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, CIM_HOST_INT_CAUSE,
+				    cim_intr_info) +
+	      t4_handle_intr_status(adapter, CIM_HOST_UPACC_INT_CAUSE,
+				    cim_upintr_info);
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP RX interrupt handler.
+ */
+static void ulprx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulprx_intr_info[] = {
+		{ 0x7fffff, "ULPRX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_RX_INT_CAUSE, ulprx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * ULP TX interrupt handler.
+ */
+static void ulptx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info ulptx_intr_info[] = {
+		{ PBL_BOUND_ERR_CH3, "ULPTX channel 3 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH2, "ULPTX channel 2 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH1, "ULPTX channel 1 PBL out of bounds", -1,
+		  0 },
+		{ PBL_BOUND_ERR_CH0, "ULPTX channel 0 PBL out of bounds", -1,
+		  0 },
+		{ 0xfffffff, "ULPTX parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, ULP_TX_INT_CAUSE, ulptx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM TX interrupt handler.
+ */
+static void pmtx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info pmtx_intr_info[] = {
+		{ PCMD_LEN_OVFL0, "PMTX channel 0 pcmd too large", -1, 1 },
+		{ PCMD_LEN_OVFL1, "PMTX channel 1 pcmd too large", -1, 1 },
+		{ PCMD_LEN_OVFL2, "PMTX channel 2 pcmd too large", -1, 1 },
+		{ ZERO_C_CMD_ERROR, "PMTX 0-length pcmd", -1, 1 },
+		{ PMTX_FRAMING_ERROR, "PMTX framing error", -1, 1 },
+		{ OESPI_PAR_ERROR, "PMTX oespi parity error", -1, 1 },
+		{ DB_OPTIONS_PAR_ERROR, "PMTX db_options parity error", -1, 1 },
+		{ ICSPI_PAR_ERROR, "PMTX icspi parity error", -1, 1 },
+		{ C_PCMD_PAR_ERROR, "PMTX c_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, PM_TX_INT_CAUSE, pmtx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * PM RX interrupt handler.
+ */
+static void pmrx_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info pmrx_intr_info[] = {
+		{ ZERO_E_CMD_ERROR, "PMRX 0-length pcmd", -1, 1 },
+		{ PMRX_FRAMING_ERROR, "PMRX framing error", -1, 1 },
+		{ OCSPI_PAR_ERROR, "PMRX ocspi parity error", -1, 1 },
+		{ DB_OPTIONS_PAR_ERROR, "PMRX db_options parity error", -1, 1 },
+		{ IESPI_PAR_ERROR, "PMRX iespi parity error", -1, 1 },
+		{ E_PCMD_PAR_ERROR, "PMRX e_pcmd parity error", -1, 1},
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, PM_RX_INT_CAUSE, pmrx_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * CPL switch interrupt handler.
+ */
+static void cplsw_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info cplsw_intr_info[] = {
+		{ CIM_OP_MAP_PERR, "CPLSW CIM op_map parity error", -1, 1 },
+		{ CIM_OVFL_ERROR, "CPLSW CIM overflow", -1, 1 },
+		{ TP_FRAMING_ERROR, "CPLSW TP framing error", -1, 1 },
+		{ SGE_FRAMING_ERROR, "CPLSW SGE framing error", -1, 1 },
+		{ CIM_FRAMING_ERROR, "CPLSW CIM framing error", -1, 1 },
+		{ ZERO_SWITCH_ERROR, "CPLSW no-switch error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adapter, CPL_INTR_CAUSE, cplsw_intr_info))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * LE interrupt handler.
+ */
+static void le_intr_handler(struct adapter *adap)
+{
+	static struct intr_info le_intr_info[] = {
+		{ LIPMISS, "LE LIP miss", -1, 0 },
+		{ LIP0, "LE 0 LIP error", -1, 0 },
+		{ PARITYERR, "LE parity error", -1, 1 },
+		{ UNKNOWNCMD, "LE unknown command", -1, 1 },
+		{ REQQPARERR, "LE request queue parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, LE_DB_INT_CAUSE, le_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * MPS interrupt handler.
+ */
+static void mps_intr_handler(struct adapter *adapter)
+{
+	static struct intr_info mps_rx_intr_info[] = {
+		{ 0xffffff, "MPS Rx parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_tx_intr_info[] = {
+		{ TPFIFO, "MPS Tx TP FIFO parity error", -1, 1 },
+		{ NCSIFIFO, "MPS Tx NC-SI FIFO parity error", -1, 1 },
+		{ TXDATAFIFO, "MPS Tx data FIFO parity error", -1, 1 },
+		{ TXDESCFIFO, "MPS Tx desc FIFO parity error", -1, 1 },
+		{ BUBBLE, "MPS Tx underflow", -1, 1 },
+		{ SECNTERR, "MPS Tx SOP/EOP error", -1, 1 },
+		{ FRMERR, "MPS Tx framing error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_trc_intr_info[] = {
+		{ FILTMEM, "MPS TRC filter parity error", -1, 1 },
+		{ PKTFIFO, "MPS TRC packet FIFO parity error", -1, 1 },
+		{ MISCPERR, "MPS TRC misc parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_sram_intr_info[] = {
+		{ 0x1fffff, "MPS statistics SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_tx_intr_info[] = {
+		{ 0xfffff, "MPS statistics Tx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_stat_rx_intr_info[] = {
+		{ 0xffffff, "MPS statistics Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+	static struct intr_info mps_cls_intr_info[] = {
+		{ MATCHSRAM, "MPS match SRAM parity error", -1, 1 },
+		{ MATCHTCAM, "MPS match TCAM parity error", -1, 1 },
+		{ HASHSRAM, "MPS hash SRAM parity error", -1, 1 },
+		{ 0 }
+	};
+
+	int fat;
+
+	fat = t4_handle_intr_status(adapter, MPS_RX_PERR_INT_CAUSE,
+				    mps_rx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_TX_INT_CAUSE,
+				    mps_tx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_TRC_INT_CAUSE,
+				    mps_trc_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_SRAM,
+				    mps_stat_sram_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_TX_FIFO,
+				    mps_stat_tx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_STAT_PERR_INT_CAUSE_RX_FIFO,
+				    mps_stat_rx_intr_info) +
+	      t4_handle_intr_status(adapter, MPS_CLS_INT_CAUSE,
+				    mps_cls_intr_info);
+
+	t4_write_reg(adapter, MPS_INT_CAUSE, CLSINT | TRCINT |
+		     RXINT | TXINT | STATINT);
+	t4_read_reg(adapter, MPS_INT_CAUSE);                    /* flush */
+	if (fat)
+		t4_fatal_err(adapter);
+}
+
+#define MEM_INT_MASK (PERR_INT_CAUSE | ECC_CE_INT_CAUSE | ECC_UE_INT_CAUSE)
+
+/*
+ * EDC/MC interrupt handler.
+ */
+static void mem_intr_handler(struct adapter *adapter, int idx)
+{
+	static const char name[3][5] = { "EDC0", "EDC1", "MC" };
+
+	unsigned int addr, cnt_addr, v;
+
+	if (idx <= MEM_EDC1) {
+		addr = EDC_REG(EDC_INT_CAUSE, idx);
+		cnt_addr = EDC_REG(EDC_ECC_STATUS, idx);
+	} else {
+		addr = MC_INT_CAUSE;
+		cnt_addr = MC_ECC_STATUS;
+	}
+
+	v = t4_read_reg(adapter, addr) & MEM_INT_MASK;
+	if (v & PERR_INT_CAUSE)
+		dev_alert(adapter->pdev_dev, "%s FIFO parity error\n",
+			  name[idx]);
+	if (v & ECC_CE_INT_CAUSE) {
+		u32 cnt = ECC_CECNT_GET(t4_read_reg(adapter, cnt_addr));
+
+		t4_write_reg(adapter, cnt_addr, ECC_CECNT_MASK);
+		if (printk_ratelimit())
+			dev_warn(adapter->pdev_dev,
+				 "%u %s correctable ECC data error%s\n",
+				 cnt, name[idx], cnt > 1 ? "s" : "");
+	}
+	if (v & ECC_UE_INT_CAUSE)
+		dev_alert(adapter->pdev_dev,
+			  "%s uncorrectable ECC data error\n", name[idx]);
+
+	t4_write_reg(adapter, addr, v);
+	if (v & (PERR_INT_CAUSE | ECC_UE_INT_CAUSE))
+		t4_fatal_err(adapter);
+}
+
+/*
+ * MA interrupt handler.
+ */
+static void ma_intr_handler(struct adapter *adap)
+{
+	u32 v, status = t4_read_reg(adap, MA_INT_CAUSE);
+
+	if (status & MEM_PERR_INT_CAUSE)
+		dev_alert(adap->pdev_dev,
+			  "MA parity error, parity status %#x\n",
+			  t4_read_reg(adap, MA_PARITY_ERROR_STATUS));
+	if (status & MEM_WRAP_INT_CAUSE) {
+		v = t4_read_reg(adap, MA_INT_WRAP_STATUS);
+		dev_alert(adap->pdev_dev, "MA address wrap-around error by "
+			  "client %u to address %#x\n",
+			  MEM_WRAP_CLIENT_NUM_GET(v),
+			  MEM_WRAP_ADDRESS_GET(v) << 4);
+	}
+	t4_write_reg(adap, MA_INT_CAUSE, status);
+	t4_fatal_err(adap);
+}
+
+/*
+ * SMB interrupt handler.
+ */
+static void smb_intr_handler(struct adapter *adap)
+{
+	static struct intr_info smb_intr_info[] = {
+		{ MSTTXFIFOPARINT, "SMB master Tx FIFO parity error", -1, 1 },
+		{ MSTRXFIFOPARINT, "SMB master Rx FIFO parity error", -1, 1 },
+		{ SLVFIFOPARINT, "SMB slave FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, SMB_INT_CAUSE, smb_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * NC-SI interrupt handler.
+ */
+static void ncsi_intr_handler(struct adapter *adap)
+{
+	static struct intr_info ncsi_intr_info[] = {
+		{ CIM_DM_PRTY_ERR, "NC-SI CIM parity error", -1, 1 },
+		{ MPS_DM_PRTY_ERR, "NC-SI MPS parity error", -1, 1 },
+		{ TXFIFO_PRTY_ERR, "NC-SI Tx FIFO parity error", -1, 1 },
+		{ RXFIFO_PRTY_ERR, "NC-SI Rx FIFO parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, NCSI_INT_CAUSE, ncsi_intr_info))
+		t4_fatal_err(adap);
+}
+
+/*
+ * XGMAC interrupt handler.
+ */
+static void xgmac_intr_handler(struct adapter *adap, int port)
+{
+	u32 v = t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE));
+
+	v &= TXFIFO_PRTY_ERR | RXFIFO_PRTY_ERR;
+	if (!v)
+		return;
+
+	if (v & TXFIFO_PRTY_ERR)
+		dev_alert(adap->pdev_dev, "XGMAC %d Tx FIFO parity error\n",
+			  port);
+	if (v & RXFIFO_PRTY_ERR)
+		dev_alert(adap->pdev_dev, "XGMAC %d Rx FIFO parity error\n",
+			  port);
+	t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_INT_CAUSE), v);
+	t4_fatal_err(adap);
+}
+
+/*
+ * PL interrupt handler.
+ */
+static void pl_intr_handler(struct adapter *adap)
+{
+	static struct intr_info pl_intr_info[] = {
+		{ FATALPERR, "T4 fatal parity error", -1, 1 },
+		{ PERRVFID, "PL VFID_MAP parity error", -1, 1 },
+		{ 0 }
+	};
+
+	if (t4_handle_intr_status(adap, PL_PL_INT_CAUSE, pl_intr_info))
+		t4_fatal_err(adap);
+}
+
+#define PF_INTR_MASK (PFSW | PFCIM)
+#define GLBL_INTR_MASK (CIM | MPS | PL | PCIE | MC | EDC0 | \
+		EDC1 | LE | TP | MA | PM_TX | PM_RX | ULP_RX | \
+		CPL_SWITCH | SGE | ULP_TX)
+
+/**
+ *	t4_slow_intr_handler - control path interrupt handler
+ *	@adapter: the adapter
+ *
+ *	T4 interrupt handler for non-data global interrupt events, e.g., errors.
+ *	The designation 'slow' is because it involves register reads, while
+ *	data interrupts typically don't involve any MMIOs.
+ */
+int t4_slow_intr_handler(struct adapter *adapter)
+{
+	u32 cause = t4_read_reg(adapter, PL_INT_CAUSE);
+
+	if (!(cause & GLBL_INTR_MASK))
+		return 0;
+	if (cause & CIM)
+		cim_intr_handler(adapter);
+	if (cause & MPS)
+		mps_intr_handler(adapter);
+	if (cause & NCSI)
+		ncsi_intr_handler(adapter);
+	if (cause & PL)
+		pl_intr_handler(adapter);
+	if (cause & SMB)
+		smb_intr_handler(adapter);
+	if (cause & XGMAC0)
+		xgmac_intr_handler(adapter, 0);
+	if (cause & XGMAC1)
+		xgmac_intr_handler(adapter, 1);
+	if (cause & XGMAC_KR0)
+		xgmac_intr_handler(adapter, 2);
+	if (cause & XGMAC_KR1)
+		xgmac_intr_handler(adapter, 3);
+	if (cause & PCIE)
+		pcie_intr_handler(adapter);
+	if (cause & MC)
+		mem_intr_handler(adapter, MEM_MC);
+	if (cause & EDC0)
+		mem_intr_handler(adapter, MEM_EDC0);
+	if (cause & EDC1)
+		mem_intr_handler(adapter, MEM_EDC1);
+	if (cause & LE)
+		le_intr_handler(adapter);
+	if (cause & TP)
+		tp_intr_handler(adapter);
+	if (cause & MA)
+		ma_intr_handler(adapter);
+	if (cause & PM_TX)
+		pmtx_intr_handler(adapter);
+	if (cause & PM_RX)
+		pmrx_intr_handler(adapter);
+	if (cause & ULP_RX)
+		ulprx_intr_handler(adapter);
+	if (cause & CPL_SWITCH)
+		cplsw_intr_handler(adapter);
+	if (cause & SGE)
+		sge_intr_handler(adapter);
+	if (cause & ULP_TX)
+		ulptx_intr_handler(adapter);
+
+	/* Clear the interrupts just processed for which we are the master. */
+	t4_write_reg(adapter, PL_INT_CAUSE, cause & GLBL_INTR_MASK);
+	(void) t4_read_reg(adapter, PL_INT_CAUSE); /* flush */
+	return 1;
+}
+
+/**
+ *	t4_intr_enable - enable interrupts
+ *	@adapter: the adapter whose interrupts should be enabled
+ *
+ *	Enable PF-specific interrupts for the calling function and the top-level
+ *	interrupt concentrator for global interrupts.  Interrupts are already
+ *	enabled at each module; here we just enable the roots of the interrupt
+ *	hierarchies.
+ *
+ *	Note: this function should be called only when the driver manages
+ *	non PF-specific interrupts from the various HW modules.  Only one PCI
+ *	function at a time should be doing this.
+ */
+void t4_intr_enable(struct adapter *adapter)
+{
+	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+	t4_write_reg(adapter, SGE_INT_ENABLE3, ERR_CPL_EXCEED_IQE_SIZE |
+		     ERR_INVALID_CIDX_INC | ERR_CPL_OPCODE_0 |
+		     ERR_DROPPED_DB | ERR_DATA_CPL_ON_HIGH_QID1 |
+		     ERR_DATA_CPL_ON_HIGH_QID0 | ERR_BAD_DB_PIDX3 |
+		     ERR_BAD_DB_PIDX2 | ERR_BAD_DB_PIDX1 |
+		     ERR_BAD_DB_PIDX0 | ERR_ING_CTXT_PRIO |
+		     ERR_EGR_CTXT_PRIO | INGRESS_SIZE_ERR |
+		     EGRESS_SIZE_ERR);
+	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), PF_INTR_MASK);
+	t4_set_reg_field(adapter, PL_INT_MAP0, 0, 1 << pf);
+}
+
+/**
+ *	t4_intr_disable - disable interrupts
+ *	@adapter: the adapter whose interrupts should be disabled
+ *
+ *	Disable interrupts.  We only disable the top-level interrupt
+ *	concentrators.  The caller must be a PCI function managing global
+ *	interrupts.
+ */
+void t4_intr_disable(struct adapter *adapter)
+{
+	u32 pf = SOURCEPF_GET(t4_read_reg(adapter, PL_WHOAMI));
+
+	t4_write_reg(adapter, MYPF_REG(PL_PF_INT_ENABLE), 0);
+	t4_set_reg_field(adapter, PL_INT_MAP0, 1 << pf, 0);
+}
+
+/**
+ *	t4_intr_clear - clear all interrupts
+ *	@adapter: the adapter whose interrupts should be cleared
+ *
+ *	Clears all interrupts.  The caller must be a PCI function managing
+ *	global interrupts.
+ */
+void t4_intr_clear(struct adapter *adapter)
+{
+	static const unsigned int cause_reg[] = {
+		SGE_INT_CAUSE1, SGE_INT_CAUSE2, SGE_INT_CAUSE3,
+		PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS,
+		PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS,
+		PCIE_NONFAT_ERR, PCIE_INT_CAUSE,
+		MC_INT_CAUSE,
+		MA_INT_WRAP_STATUS, MA_PARITY_ERROR_STATUS, MA_INT_CAUSE,
+		EDC_INT_CAUSE, EDC_REG(EDC_INT_CAUSE, 1),
+		CIM_HOST_INT_CAUSE, CIM_HOST_UPACC_INT_CAUSE,
+		MYPF_REG(CIM_PF_HOST_INT_CAUSE),
+		TP_INT_CAUSE,
+		ULP_RX_INT_CAUSE, ULP_TX_INT_CAUSE,
+		PM_RX_INT_CAUSE, PM_TX_INT_CAUSE,
+		MPS_RX_PERR_INT_CAUSE,
+		CPL_INTR_CAUSE,
+		MYPF_REG(PL_PF_INT_CAUSE),
+		PL_PL_INT_CAUSE,
+		LE_DB_INT_CAUSE,
+	};
+
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(cause_reg); ++i)
+		t4_write_reg(adapter, cause_reg[i], 0xffffffff);
+
+	t4_write_reg(adapter, PL_INT_CAUSE, GLBL_INTR_MASK);
+	(void) t4_read_reg(adapter, PL_INT_CAUSE);          /* flush */
+}
+
+/**
+ *	hash_mac_addr - return the hash value of a MAC address
+ *	@addr: the 48-bit Ethernet MAC address
+ *
+ *	Hashes a MAC address according to the hash function used by HW inexact
+ *	(hash) address matching.
+ */
+static int hash_mac_addr(const u8 *addr)
+{
+	u32 a = ((u32)addr[0] << 16) | ((u32)addr[1] << 8) | addr[2];
+	u32 b = ((u32)addr[3] << 16) | ((u32)addr[4] << 8) | addr[5];
+	a ^= b;
+	a ^= (a >> 12);
+	a ^= (a >> 6);
+	return a & 0x3f;
+}
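+
+/*
+ * Illustrative use: addresses that miss the exact-match filters are
+ * folded into the 64-bit inexact-match vector one bit at a time, as
+ * t4_alloc_mac_filt() below does (mc_addr is a hypothetical 6-byte
+ * multicast address):
+ *
+ *	u64 vec = 0;
+ *	vec |= 1ULL << hash_mac_addr(mc_addr);
+ */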
+
+/**
+ *	t4_config_rss_range - configure a portion of the RSS mapping table
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@viid: virtual interface whose RSS subtable is to be written
+ *	@start: start entry in the table to write
+ *	@n: how many table entries to write
+ *	@rspq: values for the response queue lookup table
+ *	@nrspq: number of values in @rspq
+ *
+ *	Programs the selected part of the VI's RSS mapping table with the
+ *	provided values.  If @nrspq < @n the supplied values are used repeatedly
+ *	until the full table range is populated.
+ *
+ *	The caller must ensure the values in @rspq are in the range allowed for
+ *	@viid.
+ */
+int t4_config_rss_range(struct adapter *adapter, int mbox, unsigned int viid,
+			int start, int n, const u16 *rspq, unsigned int nrspq)
+{
+	int ret;
+	const u16 *rsp = rspq;
+	const u16 *rsp_end = rspq + nrspq;
+	struct fw_rss_ind_tbl_cmd cmd;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.op_to_viid = htonl(FW_CMD_OP(FW_RSS_IND_TBL_CMD) |
+			       FW_CMD_REQUEST | FW_CMD_WRITE |
+			       FW_RSS_IND_TBL_CMD_VIID(viid));
+	cmd.retval_len16 = htonl(FW_LEN16(cmd));
+
+	/* each fw_rss_ind_tbl_cmd takes up to 32 entries */
+	while (n > 0) {
+		int nq = min(n, 32);
+		__be32 *qp = &cmd.iq0_to_iq2;
+
+		cmd.niqid = htons(nq);
+		cmd.startidx = htons(start);
+
+		start += nq;
+		n -= nq;
+
+		while (nq > 0) {
+			unsigned int v;
+
+			v = FW_RSS_IND_TBL_CMD_IQ0(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= FW_RSS_IND_TBL_CMD_IQ1(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+			v |= FW_RSS_IND_TBL_CMD_IQ2(*rsp);
+			if (++rsp >= rsp_end)
+				rsp = rspq;
+
+			*qp++ = htonl(v);
+			nq -= 3;
+		}
+
+		ret = t4_wr_mbox(adapter, mbox, &cmd, sizeof(cmd), NULL);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
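+
+/*
+ * Illustrative call with hypothetical ingress queue ids: replicate a
+ * four-entry response-queue list across a 64-entry RSS slice of a VI.
+ * The ids are packed three per 32-bit word, and @rspq is reused
+ * cyclically once its @nrspq entries are consumed:
+ *
+ *	static const u16 rspq[] = { 8, 9, 10, 11 };
+ *
+ *	ret = t4_config_rss_range(adap, mbox, viid, 0, 64, rspq, 4);
+ */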
+
+/**
+ *	t4_config_glbl_rss - configure the global RSS mode
+ *	@adapter: the adapter
+ *	@mbox: mbox to use for the FW command
+ *	@mode: global RSS mode
+ *	@flags: mode-specific flags
+ *
+ *	Sets the global RSS mode.
+ */
+int t4_config_glbl_rss(struct adapter *adapter, int mbox, unsigned int mode,
+		       unsigned int flags)
+{
+	struct fw_rss_glb_config_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
+			      FW_CMD_REQUEST | FW_CMD_WRITE);
+	c.retval_len16 = htonl(FW_LEN16(c));
+	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
+		c.u.manual.mode_pkd = htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
+		c.u.basicvirtual.mode_pkd =
+			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
+		c.u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
+	} else
+		return -EINVAL;
+	return t4_wr_mbox(adapter, mbox, &c, sizeof(c), NULL);
+}
+
+/* Read an RSS table row */
+static int rd_rss_row(struct adapter *adap, int row, u32 *val)
+{
+	t4_write_reg(adap, TP_RSS_LKP_TABLE, 0xfff00000 | row);
+	return t4_wait_op_done_val(adap, TP_RSS_LKP_TABLE, LKPTBLROWVLD, 1,
+				   5, 0, val);
+}
+
+/**
+ *	t4_read_rss - read the contents of the RSS mapping table
+ *	@adapter: the adapter
+ *	@map: holds the contents of the RSS mapping table
+ *
+ *	Reads the contents of the RSS hash->queue mapping table.
+ */
+int t4_read_rss(struct adapter *adapter, u16 *map)
+{
+	u32 val;
+	int i, ret;
+
+	for (i = 0; i < RSS_NENTRIES / 2; ++i) {
+		ret = rd_rss_row(adapter, i, &val);
+		if (ret)
+			return ret;
+		*map++ = LKPTBLQUEUE0_GET(val);
+		*map++ = LKPTBLQUEUE1_GET(val);
+	}
+	return 0;
+}
+
+/**
+ *	t4_tp_get_tcp_stats - read TP's TCP MIB counters
+ *	@adap: the adapter
+ *	@v4: holds the TCP/IP counter values
+ *	@v6: holds the TCP/IPv6 counter values
+ *
+ *	Returns the values of TP's TCP/IP and TCP/IPv6 MIB counters.
+ *	Either @v4 or @v6 may be %NULL to skip the corresponding stats.
+ */
+void t4_tp_get_tcp_stats(struct adapter *adap, struct tp_tcp_stats *v4,
+			 struct tp_tcp_stats *v6)
+{
+	u32 val[TP_MIB_TCP_RXT_SEG_LO - TP_MIB_TCP_OUT_RST + 1];
+
+#define STAT_IDX(x) ((TP_MIB_TCP_##x) - TP_MIB_TCP_OUT_RST)
+#define STAT(x)     val[STAT_IDX(x)]
+#define STAT64(x)   (((u64)STAT(x##_HI) << 32) | STAT(x##_LO))
+
+	if (v4) {
+		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+				 ARRAY_SIZE(val), TP_MIB_TCP_OUT_RST);
+		v4->tcpOutRsts = STAT(OUT_RST);
+		v4->tcpInSegs  = STAT64(IN_SEG);
+		v4->tcpOutSegs = STAT64(OUT_SEG);
+		v4->tcpRetransSegs = STAT64(RXT_SEG);
+	}
+	if (v6) {
+		t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, val,
+				 ARRAY_SIZE(val), TP_MIB_TCP_V6OUT_RST);
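+		/*
+		 * Reading at the TCP/IPv6 MIB base means the same
+		 * OUT_RST-relative STAT() indices now pick out the v6
+		 * counters; the two MIB blocks share one layout.
+		 */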
+		v6->tcpOutRsts = STAT(OUT_RST);
+		v6->tcpInSegs  = STAT64(IN_SEG);
+		v6->tcpOutSegs = STAT64(OUT_SEG);
+		v6->tcpRetransSegs = STAT64(RXT_SEG);
+	}
+#undef STAT64
+#undef STAT
+#undef STAT_IDX
+}
+
+/**
+ *	t4_tp_get_err_stats - read TP's error MIB counters
+ *	@adap: the adapter
+ *	@st: holds the counter values
+ *
+ *	Returns the values of TP's error counters.
+ */
+void t4_tp_get_err_stats(struct adapter *adap, struct tp_err_stats *st)
+{
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->macInErrs,
+			 12, TP_MIB_MAC_IN_ERR_0);
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlCongDrops,
+			 8, TP_MIB_TNL_CNG_DROP_0);
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tnlTxDrops,
+			 4, TP_MIB_TNL_DROP_0);
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->ofldVlanDrops,
+			 4, TP_MIB_OFD_VLN_DROP_0);
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, st->tcp6InErrs,
+			 4, TP_MIB_TCP_V6IN_ERR_0);
+	t4_read_indirect(adap, TP_MIB_INDEX, TP_MIB_DATA, &st->ofldNoNeigh,
+			 2, TP_MIB_OFD_ARP_DROP);
+}
+
+/**
+ *	t4_read_mtu_tbl - returns the values in the HW path MTU table
+ *	@adap: the adapter
+ *	@mtus: where to store the MTU values
+ *	@mtu_log: where to store the MTU base-2 log (may be %NULL)
+ *
+ *	Reads the HW path MTU table.
+ */
+void t4_read_mtu_tbl(struct adapter *adap, u16 *mtus, u8 *mtu_log)
+{
+	u32 v;
+	int i;
+
+	for (i = 0; i < NMTUS; ++i) {
+		t4_write_reg(adap, TP_MTU_TABLE,
+			     MTUINDEX(0xff) | MTUVALUE(i));
+		v = t4_read_reg(adap, TP_MTU_TABLE);
+		mtus[i] = MTUVALUE_GET(v);
+		if (mtu_log)
+			mtu_log[i] = MTUWIDTH_GET(v);
+	}
+}
+
+/**
+ *	init_cong_ctrl - initialize congestion control parameters
+ *	@a: the alpha values for congestion control
+ *	@b: the beta values for congestion control
+ *
+ *	Initialize the congestion control parameters.
+ */
+static void __devinit init_cong_ctrl(unsigned short *a, unsigned short *b)
+{
+	a[0] = a[1] = a[2] = a[3] = a[4] = a[5] = a[6] = a[7] = a[8] = 1;
+	a[9] = 2;
+	a[10] = 3;
+	a[11] = 4;
+	a[12] = 5;
+	a[13] = 6;
+	a[14] = 7;
+	a[15] = 8;
+	a[16] = 9;
+	a[17] = 10;
+	a[18] = 14;
+	a[19] = 17;
+	a[20] = 21;
+	a[21] = 25;
+	a[22] = 30;
+	a[23] = 35;
+	a[24] = 45;
+	a[25] = 60;
+	a[26] = 80;
+	a[27] = 100;
+	a[28] = 200;
+	a[29] = 300;
+	a[30] = 400;
+	a[31] = 500;
+
+	b[0] = b[1] = b[2] = b[3] = b[4] = b[5] = b[6] = b[7] = b[8] = 0;
+	b[9] = b[10] = 1;
+	b[11] = b[12] = 2;
+	b[13] = b[14] = b[15] = b[16] = 3;
+	b[17] = b[18] = b[19] = b[20] = b[21] = 4;
+	b[22] = b[23] = b[24] = b[25] = b[26] = b[27] = 5;
+	b[28] = b[29] = 6;
+	b[30] = b[31] = 7;
+}
+
+/* The minimum additive increment value for the congestion control table */
+#define CC_MIN_INCR 2U
+
+/**
+ *	t4_load_mtus - write the MTU and congestion control HW tables
+ *	@adap: the adapter
+ *	@mtus: the values for the MTU table
+ *	@alpha: the values for the congestion control alpha parameter
+ *	@beta: the values for the congestion control beta parameter
+ *
+ *	Write the HW MTU table with the supplied MTUs and the high-speed
+ *	congestion control table with the supplied alpha, beta, and MTUs.
+ *	We write the two tables together because the additive increments
+ *	depend on the MTUs.
+ */
+void t4_load_mtus(struct adapter *adap, const unsigned short *mtus,
+		  const unsigned short *alpha, const unsigned short *beta)
+{
+	static const unsigned int avg_pkts[NCCTRL_WIN] = {
+		2, 6, 10, 14, 20, 28, 40, 56, 80, 112, 160, 224, 320, 448, 640,
+		896, 1281, 1792, 2560, 3584, 5120, 7168, 10240, 14336, 20480,
+		28672, 40960, 57344, 81920, 114688, 163840, 229376
+	};
+
+	unsigned int i, w;
+
+	for (i = 0; i < NMTUS; ++i) {
+		unsigned int mtu = mtus[i];
+		unsigned int log2 = fls(mtu);
+
+		if (!(mtu & ((1 << log2) >> 2)))     /* round */
+			log2--;
+		t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(i) |
+			     MTUWIDTH(log2) | MTUVALUE(mtu));
+
+		for (w = 0; w < NCCTRL_WIN; ++w) {
+			unsigned int inc;
+
+			inc = max(((mtu - 40) * alpha[w]) / avg_pkts[w],
+				  CC_MIN_INCR);
+
+			t4_write_reg(adap, TP_CCTRL_TABLE, (i << 21) |
+				     (w << 16) | (beta[w] << 13) | inc);
+		}
+	}
+}
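+
+/*
+ * Sketch of a typical invocation (the MTU values below are
+ * illustrative, not driver defaults); the alpha/beta tables would
+ * normally come from init_cong_ctrl() above:
+ *
+ *	unsigned short mtus[NMTUS] = { 88, 256, 512, 576, 808, 1024,
+ *		1280, 1488, 1500, 2002, 2048, 4096, 4352, 8192, 9000,
+ *		9600 };
+ *	unsigned short a[NCCTRL_WIN], b[NCCTRL_WIN];
+ *
+ *	init_cong_ctrl(a, b);
+ *	t4_load_mtus(adap, mtus, a, b);
+ */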
+
+/**
+ *	t4_set_trace_filter - configure one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the desired trace filter parameters
+ *	@idx: which filter to configure
+ *	@enable: whether to enable or disable the filter
+ *
+ *	Configures one of the tracing filters available in HW.  If @enable is
+ *	%0 @tp is not examined and may be %NULL.
+ */
+int t4_set_trace_filter(struct adapter *adap, const struct trace_params *tp,
+			int idx, int enable)
+{
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg, cfg;
+	u32 multitrc = TRCMULTIFILTER;
+
+	if (!enable) {
+		t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+		goto out;
+	}
+
+	if (tp->port > 11 || tp->invert > 1 || tp->skip_len > 0x1f ||
+	    tp->skip_ofst > 0x1f || tp->min_len > 0x1ff ||
+	    tp->snap_len > 9600 || (idx && tp->snap_len > 256))
+		return -EINVAL;
+
+	if (tp->snap_len > 256) {            /* must be tracer 0 */
+		if ((t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 4) |
+		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 8) |
+		     t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + 12)) & TFEN)
+			return -EINVAL;  /* other tracers are enabled */
+		multitrc = 0;
+	} else if (idx) {
+		i = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B);
+		if (TFCAPTUREMAX_GET(i) > 256 &&
+		    (t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A) & TFEN))
+			return -EINVAL;
+	}
+
+	/* stop the tracer we'll be changing */
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst, 0);
+
+	/* disable tracing globally if running in the wrong single/multi mode */
+	cfg = t4_read_reg(adap, MPS_TRC_CFG);
+	if ((cfg & TRCEN) && multitrc != (cfg & TRCMULTIFILTER)) {
+		t4_write_reg(adap, MPS_TRC_CFG, cfg ^ TRCEN);
+		t4_read_reg(adap, MPS_TRC_CFG);                  /* flush */
+		msleep(1);
+		if (!(t4_read_reg(adap, MPS_TRC_CFG) & TRCFIFOEMPTY))
+			return -ETIMEDOUT;
+	}
+	/*
+	 * At this point tracing is either disabled or enabled in the
+	 * requested single/multi mode.
+	 */
+
+	idx *= (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH);
+	data_reg = MPS_TRC_FILTER0_MATCH + idx;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE + idx;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		t4_write_reg(adap, data_reg, tp->data[i]);
+		t4_write_reg(adap, mask_reg, ~tp->mask[i]);
+	}
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst,
+		     TFCAPTUREMAX(tp->snap_len) |
+		     TFMINPKTSIZE(tp->min_len));
+	t4_write_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst,
+		     TFOFFSET(tp->skip_ofst) | TFLENGTH(tp->skip_len) |
+		     TFPORT(tp->port) | TFEN |
+		     (tp->invert ? TFINVERTMATCH : 0));
+
+	cfg &= ~TRCMULTIFILTER;
+	t4_write_reg(adap, MPS_TRC_CFG, cfg | TRCEN | multitrc);
+out:	t4_read_reg(adap, MPS_TRC_CFG);  /* flush */
+	return 0;
+}
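+
+/*
+ * Illustrative only: program tracer 1 to capture the first 128 bytes
+ * of every packet on physical port 0.  With tp.mask left zero every
+ * byte is don't-care, so all packets match; the remaining fields stay
+ * 0 from the initializer:
+ *
+ *	struct trace_params tp = { 0 };
+ *
+ *	tp.port = 0;
+ *	tp.snap_len = 128;
+ *	tp.min_len = 64;
+ *	ret = t4_set_trace_filter(adap, &tp, 1, 1);
+ */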
+
+/**
+ *	t4_get_trace_filter - query one of the tracing filters
+ *	@adap: the adapter
+ *	@tp: the current trace filter parameters
+ *	@idx: which trace filter to query
+ *	@enabled: non-zero if the filter is enabled
+ *
+ *	Returns the current settings of one of the HW tracing filters.
+ */
+void t4_get_trace_filter(struct adapter *adap, struct trace_params *tp, int idx,
+			 int *enabled)
+{
+	u32 ctla, ctlb;
+	int i, ofst = idx * 4;
+	u32 data_reg, mask_reg;
+
+	ctla = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_A + ofst);
+	ctlb = t4_read_reg(adap, MPS_TRC_FILTER_MATCH_CTL_B + ofst);
+
+	*enabled = !!(ctla & TFEN);
+	tp->snap_len = TFCAPTUREMAX_GET(ctlb);
+	tp->min_len = TFMINPKTSIZE_GET(ctlb);
+	tp->skip_ofst = TFOFFSET_GET(ctla);
+	tp->skip_len = TFLENGTH_GET(ctla);
+	tp->invert = !!(ctla & TFINVERTMATCH);
+	tp->port = TFPORT_GET(ctla);
+
+	ofst = (MPS_TRC_FILTER1_MATCH - MPS_TRC_FILTER0_MATCH) * idx;
+	data_reg = MPS_TRC_FILTER0_MATCH + ofst;
+	mask_reg = MPS_TRC_FILTER0_DONT_CARE + ofst;
+
+	for (i = 0; i < TRACE_LEN / 4; i++, data_reg += 4, mask_reg += 4) {
+		tp->mask[i] = ~t4_read_reg(adap, mask_reg);
+		tp->data[i] = t4_read_reg(adap, data_reg) & tp->mask[i];
+	}
+}
+
+/**
+ *	get_mps_bg_map - return the buffer groups associated with a port
+ *	@adap: the adapter
+ *	@idx: the port index
+ *
+ *	Returns a bitmap indicating which MPS buffer groups are associated
+ *	with the given port.  Bit i is set if buffer group i is used by the
+ *	port.
+ */
+static unsigned int get_mps_bg_map(struct adapter *adap, int idx)
+{
+	u32 n = NUMPORTS_GET(t4_read_reg(adap, MPS_CMN_CTL));
+
+	if (n == 0)
+		return idx == 0 ? 0xf : 0;
+	if (n == 1)
+		return idx < 2 ? (3 << (2 * idx)) : 0;
+	return 1 << idx;
+}
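+
+/*
+ * For reference, get_mps_bg_map() above reduces to the following,
+ * reading NUMPORTS as a 1/2/4-port encoding:
+ *	1-port configuration: port 0 -> buffer groups 0-3
+ *	2-port configuration: port 0 -> groups 0-1, port 1 -> groups 2-3
+ *	4-port configuration: port i -> group i
+ */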
+
+/**
+ *	t4_get_port_stats - collect port statistics
+ *	@adap: the adapter
+ *	@idx: the port index
+ *	@p: the stats structure to fill
+ *
+ *	Collect statistics related to the given port from HW.
+ */
+void t4_get_port_stats(struct adapter *adap, int idx, struct port_stats *p)
+{
+	u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+	p->tx_octets           = GET_STAT(TX_PORT_BYTES);
+	p->tx_frames           = GET_STAT(TX_PORT_FRAMES);
+	p->tx_bcast_frames     = GET_STAT(TX_PORT_BCAST);
+	p->tx_mcast_frames     = GET_STAT(TX_PORT_MCAST);
+	p->tx_ucast_frames     = GET_STAT(TX_PORT_UCAST);
+	p->tx_error_frames     = GET_STAT(TX_PORT_ERROR);
+	p->tx_frames_64        = GET_STAT(TX_PORT_64B);
+	p->tx_frames_65_127    = GET_STAT(TX_PORT_65B_127B);
+	p->tx_frames_128_255   = GET_STAT(TX_PORT_128B_255B);
+	p->tx_frames_256_511   = GET_STAT(TX_PORT_256B_511B);
+	p->tx_frames_512_1023  = GET_STAT(TX_PORT_512B_1023B);
+	p->tx_frames_1024_1518 = GET_STAT(TX_PORT_1024B_1518B);
+	p->tx_frames_1519_max  = GET_STAT(TX_PORT_1519B_MAX);
+	p->tx_drop             = GET_STAT(TX_PORT_DROP);
+	p->tx_pause            = GET_STAT(TX_PORT_PAUSE);
+	p->tx_ppp0             = GET_STAT(TX_PORT_PPP0);
+	p->tx_ppp1             = GET_STAT(TX_PORT_PPP1);
+	p->tx_ppp2             = GET_STAT(TX_PORT_PPP2);
+	p->tx_ppp3             = GET_STAT(TX_PORT_PPP3);
+	p->tx_ppp4             = GET_STAT(TX_PORT_PPP4);
+	p->tx_ppp5             = GET_STAT(TX_PORT_PPP5);
+	p->tx_ppp6             = GET_STAT(TX_PORT_PPP6);
+	p->tx_ppp7             = GET_STAT(TX_PORT_PPP7);
+
+	p->rx_octets           = GET_STAT(RX_PORT_BYTES);
+	p->rx_frames           = GET_STAT(RX_PORT_FRAMES);
+	p->rx_bcast_frames     = GET_STAT(RX_PORT_BCAST);
+	p->rx_mcast_frames     = GET_STAT(RX_PORT_MCAST);
+	p->rx_ucast_frames     = GET_STAT(RX_PORT_UCAST);
+	p->rx_too_long         = GET_STAT(RX_PORT_MTU_ERROR);
+	p->rx_jabber           = GET_STAT(RX_PORT_MTU_CRC_ERROR);
+	p->rx_fcs_err          = GET_STAT(RX_PORT_CRC_ERROR);
+	p->rx_len_err          = GET_STAT(RX_PORT_LEN_ERROR);
+	p->rx_symbol_err       = GET_STAT(RX_PORT_SYM_ERROR);
+	p->rx_runt             = GET_STAT(RX_PORT_LESS_64B);
+	p->rx_frames_64        = GET_STAT(RX_PORT_64B);
+	p->rx_frames_65_127    = GET_STAT(RX_PORT_65B_127B);
+	p->rx_frames_128_255   = GET_STAT(RX_PORT_128B_255B);
+	p->rx_frames_256_511   = GET_STAT(RX_PORT_256B_511B);
+	p->rx_frames_512_1023  = GET_STAT(RX_PORT_512B_1023B);
+	p->rx_frames_1024_1518 = GET_STAT(RX_PORT_1024B_1518B);
+	p->rx_frames_1519_max  = GET_STAT(RX_PORT_1519B_MAX);
+	p->rx_pause            = GET_STAT(RX_PORT_PAUSE);
+	p->rx_ppp0             = GET_STAT(RX_PORT_PPP0);
+	p->rx_ppp1             = GET_STAT(RX_PORT_PPP1);
+	p->rx_ppp2             = GET_STAT(RX_PORT_PPP2);
+	p->rx_ppp3             = GET_STAT(RX_PORT_PPP3);
+	p->rx_ppp4             = GET_STAT(RX_PORT_PPP4);
+	p->rx_ppp5             = GET_STAT(RX_PORT_PPP5);
+	p->rx_ppp6             = GET_STAT(RX_PORT_PPP6);
+	p->rx_ppp7             = GET_STAT(RX_PORT_PPP7);
+
+	p->rx_ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_DROP_FRAME) : 0;
+	p->rx_ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_DROP_FRAME) : 0;
+	p->rx_trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_MAC_TRUNC_FRAME) : 0;
+	p->rx_trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_MAC_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ *	t4_get_lb_stats - collect loopback port statistics
+ *	@adap: the adapter
+ *	@idx: the loopback port index
+ *	@p: the stats structure to fill
+ *
+ *	Return HW statistics for the given loopback port.
+ */
+void t4_get_lb_stats(struct adapter *adap, int idx, struct lb_port_stats *p)
+{
+	u32 bgmap = get_mps_bg_map(adap, idx);
+
+#define GET_STAT(name) \
+	t4_read_reg64(adap, PORT_REG(idx, MPS_PORT_STAT_LB_PORT_##name##_L))
+#define GET_STAT_COM(name) t4_read_reg64(adap, MPS_STAT_##name##_L)
+
+	p->octets           = GET_STAT(BYTES);
+	p->frames           = GET_STAT(FRAMES);
+	p->bcast_frames     = GET_STAT(BCAST);
+	p->mcast_frames     = GET_STAT(MCAST);
+	p->ucast_frames     = GET_STAT(UCAST);
+	p->error_frames     = GET_STAT(ERROR);
+
+	p->frames_64        = GET_STAT(64B);
+	p->frames_65_127    = GET_STAT(65B_127B);
+	p->frames_128_255   = GET_STAT(128B_255B);
+	p->frames_256_511   = GET_STAT(256B_511B);
+	p->frames_512_1023  = GET_STAT(512B_1023B);
+	p->frames_1024_1518 = GET_STAT(1024B_1518B);
+	p->frames_1519_max  = GET_STAT(1519B_MAX);
+	p->drop             = t4_read_reg(adap, PORT_REG(idx,
+					  MPS_PORT_STAT_LB_PORT_DROP_FRAMES));
+
+	p->ovflow0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_DROP_FRAME) : 0;
+	p->ovflow1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_DROP_FRAME) : 0;
+	p->ovflow2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_DROP_FRAME) : 0;
+	p->ovflow3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_DROP_FRAME) : 0;
+	p->trunc0 = (bgmap & 1) ? GET_STAT_COM(RX_BG_0_LB_TRUNC_FRAME) : 0;
+	p->trunc1 = (bgmap & 2) ? GET_STAT_COM(RX_BG_1_LB_TRUNC_FRAME) : 0;
+	p->trunc2 = (bgmap & 4) ? GET_STAT_COM(RX_BG_2_LB_TRUNC_FRAME) : 0;
+	p->trunc3 = (bgmap & 8) ? GET_STAT_COM(RX_BG_3_LB_TRUNC_FRAME) : 0;
+
+#undef GET_STAT
+#undef GET_STAT_COM
+}
+
+/**
+ *	t4_wol_magic_enable - enable/disable magic packet WoL
+ *	@adap: the adapter
+ *	@port: the physical port index
+ *	@addr: MAC address expected in magic packets, %NULL to disable
+ *
+ *	Enables/disables magic packet wake-on-LAN for the selected port.
+ */
+void t4_wol_magic_enable(struct adapter *adap, unsigned int port,
+			 const u8 *addr)
+{
+	if (addr) {
+		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_LO),
+			     (addr[2] << 24) | (addr[3] << 16) |
+			     (addr[4] << 8) | addr[5]);
+		t4_write_reg(adap, PORT_REG(port, XGMAC_PORT_MAGIC_MACID_HI),
+			     (addr[0] << 8) | addr[1]);
+	}
+	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), MAGICEN,
+			 addr ? MAGICEN : 0);
+}
+
+/**
+ *	t4_wol_pat_enable - enable/disable pattern-based WoL
+ *	@adap: the adapter
+ *	@port: the physical port index
+ *	@map: bitmap of which HW pattern filters to set
+ *	@mask0: byte mask for bytes 0-63 of a packet
+ *	@mask1: byte mask for bytes 64-127 of a packet
+ *	@crc: Ethernet CRC for selected bytes
+ *	@enable: enable/disable switch
+ *
+ *	Sets the pattern filters indicated in @map to mask out the bytes
+ *	specified in @mask0/@mask1 in received packets and compare the CRC of
+ *	the resulting packet against @crc.  If @enable is %true pattern-based
+ *	WoL is enabled, otherwise disabled.
+ */
+int t4_wol_pat_enable(struct adapter *adap, unsigned int port, unsigned int map,
+		      u64 mask0, u64 mask1, unsigned int crc, bool enable)
+{
+	int i;
+
+	if (!enable) {
+		t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2),
+				 PATEN, 0);
+		return 0;
+	}
+	if (map > 0xff)
+		return -EINVAL;
+
+#define EPIO_REG(name) PORT_REG(port, XGMAC_PORT_EPIO_##name)
+
+	t4_write_reg(adap, EPIO_REG(DATA1), mask0 >> 32);
+	t4_write_reg(adap, EPIO_REG(DATA2), mask1);
+	t4_write_reg(adap, EPIO_REG(DATA3), mask1 >> 32);
+
+	for (i = 0; i < NWOL_PAT; i++, map >>= 1) {
+		if (!(map & 1))
+			continue;
+
+		/* write byte masks */
+		t4_write_reg(adap, EPIO_REG(DATA0), mask0);
+		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i) | EPIOWR);
+		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
+		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+			return -ETIMEDOUT;
+
+		/* write CRC */
+		t4_write_reg(adap, EPIO_REG(DATA0), crc);
+		t4_write_reg(adap, EPIO_REG(OP), ADDRESS(i + 32) | EPIOWR);
+		t4_read_reg(adap, EPIO_REG(OP));                /* flush */
+		if (t4_read_reg(adap, EPIO_REG(OP)) & BUSY)
+			return -ETIMEDOUT;
+	}
+#undef EPIO_REG
+
+	t4_set_reg_field(adap, PORT_REG(port, XGMAC_PORT_CFG2), 0, PATEN);
+	return 0;
+}
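+
+/*
+ * Illustrative only: arm pattern filter 0 with a byte mask covering
+ * bytes 0-13 of the packet (per the @mask0 semantics above) and a
+ * precomputed CRC value:
+ *
+ *	ret = t4_wol_pat_enable(adap, port, 1, 0x3fffULL, 0, crc, true);
+ */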
+
+#define INIT_CMD(var, cmd, rd_wr) do { \
+	(var).op_to_write = htonl(FW_CMD_OP(FW_##cmd##_CMD) | \
+				  FW_CMD_REQUEST | FW_CMD_##rd_wr); \
+	(var).retval_len16 = htonl(FW_LEN16(var)); \
+} while (0)
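+
+/*
+ * For example, INIT_CMD(c, BYE, WRITE) expands to:
+ *
+ *	c.op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
+ *			      FW_CMD_REQUEST | FW_CMD_WRITE);
+ *	c.retval_len16 = htonl(FW_LEN16(c));
+ */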
+
+/**
+ *	t4_mdio_rd - read a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to read
+ *	@valp: where to store the value
+ *
+ *	Issues a FW command through the given mailbox to read a PHY register.
+ */
+int t4_mdio_rd(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 *valp)
+{
+	int ret;
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+		FW_CMD_READ | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+				   FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0)
+		*valp = ntohs(c.u.mdio.rval);
+	return ret;
+}
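+
+/*
+ * Illustrative call using the generic constants from <linux/mdio.h>:
+ * read the PMA/PMD status register of the clause-45 PHY at MDIO
+ * address 0:
+ *
+ *	u16 stat;
+ *
+ *	ret = t4_mdio_rd(adap, mbox, 0, MDIO_MMD_PMAPMD, MDIO_STAT1,
+ *			 &stat);
+ */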
+
+/**
+ *	t4_mdio_wr - write a PHY register through MDIO
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@phy_addr: the PHY address
+ *	@mmd: the PHY MMD to access (0 for clause 22 PHYs)
+ *	@reg: the register to write
+ *	@val: value to write
+ *
+ *	Issues a FW command through the given mailbox to write a PHY register.
+ */
+int t4_mdio_wr(struct adapter *adap, unsigned int mbox, unsigned int phy_addr,
+	       unsigned int mmd, unsigned int reg, u16 val)
+{
+	struct fw_ldst_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_addrspace = htonl(FW_CMD_OP(FW_LDST_CMD) | FW_CMD_REQUEST |
+		FW_CMD_WRITE | FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_MDIO));
+	c.cycles_to_len16 = htonl(FW_LEN16(c));
+	c.u.mdio.paddr_mmd = htons(FW_LDST_CMD_PADDR(phy_addr) |
+				   FW_LDST_CMD_MMD(mmd));
+	c.u.mdio.raddr = htons(reg);
+	c.u.mdio.rval = htons(val);
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_fw_hello - establish communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@evt_mbox: mailbox to receive async FW events
+ *	@master: specifies the caller's willingness to be the device master
+ *	@state: returns the current device state
+ *
+ *	Issues a command to establish communication with FW.
+ */
+int t4_fw_hello(struct adapter *adap, unsigned int mbox, unsigned int evt_mbox,
+		enum dev_master master, enum dev_state *state)
+{
+	int ret;
+	struct fw_hello_cmd c;
+
+	INIT_CMD(c, HELLO, WRITE);
+	c.err_to_mbasyncnot = htonl(
+		FW_HELLO_CMD_MASTERDIS(master == MASTER_CANT) |
+		FW_HELLO_CMD_MASTERFORCE(master == MASTER_MUST) |
+		FW_HELLO_CMD_MBMASTER(master == MASTER_MUST ? mbox : 0xff) |
+		FW_HELLO_CMD_MBASYNCNOT(evt_mbox));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0 && state) {
+		u32 v = ntohl(c.err_to_mbasyncnot);
+		if (v & FW_HELLO_CMD_INIT)
+			*state = DEV_STATE_INIT;
+		else if (v & FW_HELLO_CMD_ERR)
+			*state = DEV_STATE_ERR;
+		else
+			*state = DEV_STATE_UNINIT;
+	}
+	return ret;
+}
+
+/**
+ *	t4_fw_bye - end communication with FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to terminate communication with FW.
+ */
+int t4_fw_bye(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_bye_cmd c;
+
+	INIT_CMD(c, BYE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_early_init - ask FW to initialize the device
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *
+ *	Issues a command to FW to partially initialize the device.  This
+ *	performs initialization that generally doesn't depend on user input.
+ */
+int t4_early_init(struct adapter *adap, unsigned int mbox)
+{
+	struct fw_initialize_cmd c;
+
+	INIT_CMD(c, INITIALIZE, WRITE);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_fw_reset - issue a reset to FW
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@reset: specifies the type of reset to perform
+ *
+ *	Issues a reset command of the specified type to FW.
+ */
+int t4_fw_reset(struct adapter *adap, unsigned int mbox, int reset)
+{
+	struct fw_reset_cmd c;
+
+	INIT_CMD(c, RESET, WRITE);
+	c.val = htonl(reset);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_query_params - query FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Reads the value of FW or device parameters.  Up to 7 parameters can be
+ *	queried at once.
+ */
+int t4_query_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int nparams, const u32 *params,
+		    u32 *val)
+{
+	int i, ret;
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_READ | FW_PARAMS_CMD_PFN(pf) |
+			    FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	for (i = 0; i < nparams; i++, p += 2)
+		*p = htonl(*params++);
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0)
+		for (i = 0, p = &c.param[0].val; i < nparams; i++, p += 2)
+			*val++ = ntohl(*p);
+	return ret;
+}
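+
+/*
+ * Illustrative only; param_mnem0/param_mnem1 stand in for whatever
+ * FW_PARAMS_* mnemonic encodings the caller has built:
+ *
+ *	u32 params[2] = { param_mnem0, param_mnem1 };
+ *	u32 val[2];
+ *
+ *	ret = t4_query_params(adap, mbox, pf, 0, 2, params, val);
+ */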
+
+/**
+ *	t4_set_params - sets FW or device parameters
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF
+ *	@vf: the VF
+ *	@nparams: the number of parameters
+ *	@params: the parameter names
+ *	@val: the parameter values
+ *
+ *	Sets the value of FW or device parameters.  Up to 7 parameters can be
+ *	specified at once.
+ */
+int t4_set_params(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		  unsigned int vf, unsigned int nparams, const u32 *params,
+		  const u32 *val)
+{
+	struct fw_params_cmd c;
+	__be32 *p = &c.param[0].mnem;
+
+	if (nparams > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_PARAMS_CMD_PFN(pf) |
+			    FW_PARAMS_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	while (nparams--) {
+		*p++ = htonl(*params++);
+		*p++ = htonl(*val++);
+	}
+
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_cfg_pfvf - configure PF/VF resource limits
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF being configured
+ *	@vf: the VF being configured
+ *	@txq: the max number of egress queues
+ *	@txq_eth_ctrl: the max number of egress Ethernet or control queues
+ *	@rxqi: the max number of interrupt-capable ingress queues
+ *	@rxq: the max number of interruptless ingress queues
+ *	@tc: the PCI traffic class
+ *	@vi: the max number of virtual interfaces
+ *	@cmask: the channel access rights mask for the PF/VF
+ *	@pmask: the port access rights mask for the PF/VF
+ *	@nexact: the maximum number of exact MPS filters
+ *	@rcaps: read capabilities
+ *	@wxcaps: write/execute capabilities
+ *
+ *	Configures resource limits and capabilities for a physical or virtual
+ *	function.
+ */
+int t4_cfg_pfvf(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		unsigned int vf, unsigned int txq, unsigned int txq_eth_ctrl,
+		unsigned int rxqi, unsigned int rxq, unsigned int tc,
+		unsigned int vi, unsigned int cmask, unsigned int pmask,
+		unsigned int nexact, unsigned int rcaps, unsigned int wxcaps)
+{
+	struct fw_pfvf_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_PFVF_CMD_PFN(pf) |
+			    FW_PFVF_CMD_VFN(vf));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
+			       FW_PFVF_CMD_NIQ(rxq));
+	c.cmask_to_neq = htonl(FW_PFVF_CMD_CMASK(cmask) |
+			       FW_PFVF_CMD_PMASK(pmask) |
+			       FW_PFVF_CMD_NEQ(txq));
+	c.tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) | FW_PFVF_CMD_NVI(vi) |
+				FW_PFVF_CMD_NEXACTF(nexact));
+	c.r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
+				     FW_PFVF_CMD_WX_CAPS(wxcaps) |
+				     FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_alloc_vi - allocate a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@port: physical port associated with the VI
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@nmac: number of MAC addresses needed (1 to 5)
+ *	@mac: the MAC addresses of the VI
+ *	@rss_size: size of RSS table slice associated with this VI
+ *
+ *	Allocates a virtual interface for the given physical port.  If @mac is
+ *	not %NULL it contains the MAC addresses of the VI as assigned by FW.
+ *	@mac should be large enough to hold @nmac Ethernet addresses; they are
+ *	stored consecutively, so the space needed is @nmac * 6 bytes.
+ *	Returns a negative error number or the non-negative VI id.
+ */
+int t4_alloc_vi(struct adapter *adap, unsigned int mbox, unsigned int port,
+		unsigned int pf, unsigned int vf, unsigned int nmac, u8 *mac,
+		unsigned int *rss_size)
+{
+	int ret;
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_WRITE | FW_CMD_EXEC |
+			    FW_VI_CMD_PFN(pf) | FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_VI_CMD_ALLOC | FW_LEN16(c));
+	c.portid_pkd = FW_VI_CMD_PORTID(port);
+	c.nmac = nmac - 1;
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret)
+		return ret;
+
+	if (mac) {
+		memcpy(mac, c.mac, sizeof(c.mac));
+		switch (nmac) {
+		case 5:
+			memcpy(mac + 24, c.nmac3, sizeof(c.nmac3));
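+			/* fall through */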
+		case 4:
+			memcpy(mac + 18, c.nmac2, sizeof(c.nmac2));
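+			/* fall through */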
+		case 3:
+			memcpy(mac + 12, c.nmac1, sizeof(c.nmac1));
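+			/* fall through */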
+		case 2:
+			memcpy(mac + 6,  c.nmac0, sizeof(c.nmac0));
+		}
+	}
+	if (rss_size)
+		*rss_size = FW_VI_CMD_RSSSIZE_GET(ntohs(c.rsssize_pkd));
+	return ntohs(c.viid_pkd);
+}
+
+/**
+ *	t4_free_vi - free a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the VI
+ *	@vf: the VF owning the VI
+ *	@viid: virtual interface identifier
+ *
+ *	Free a previously allocated virtual interface.
+ */
+int t4_free_vi(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int viid)
+{
+	struct fw_vi_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_VI_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_VI_CMD_PFN(pf) |
+			    FW_VI_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_VI_CMD_FREE | FW_LEN16(c));
+	c.viid_pkd = htons(FW_VI_CMD_VIID(viid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+}
+
+/**
+ *	t4_set_rxmode - set Rx properties of a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@mtu: the new MTU or -1
+ *	@promisc: 1 to enable promiscuous mode, 0 to disable it, -1 no change
+ *	@all_multi: 1 to enable all-multi mode, 0 to disable it, -1 no change
+ *	@bcast: 1 to enable broadcast Rx, 0 to disable it, -1 no change
+ *	@sleep_ok: if true we may sleep while awaiting command completion
+ *
+ *	Sets Rx properties of a virtual interface.
+ */
+int t4_set_rxmode(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int mtu, int promisc, int all_multi, int bcast, bool sleep_ok)
+{
+	struct fw_vi_rxmode_cmd c;
+
+	/* convert to FW values */
+	if (mtu < 0)
+		mtu = FW_RXMODE_MTU_NO_CHG;
+	if (promisc < 0)
+		promisc = FW_VI_RXMODE_CMD_PROMISCEN_MASK;
+	if (all_multi < 0)
+		all_multi = FW_VI_RXMODE_CMD_ALLMULTIEN_MASK;
+	if (bcast < 0)
+		bcast = FW_VI_RXMODE_CMD_BROADCASTEN_MASK;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_RXMODE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_WRITE | FW_VI_RXMODE_CMD_VIID(viid));
+	c.retval_len16 = htonl(FW_LEN16(c));
+	c.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+				     FW_VI_RXMODE_CMD_PROMISCEN(promisc) |
+				     FW_VI_RXMODE_CMD_ALLMULTIEN(all_multi) |
+				     FW_VI_RXMODE_CMD_BROADCASTEN(bcast));
+	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ *	t4_alloc_mac_filt - allocates exact-match filters for MAC addresses
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@free: if true any existing filters for this VI id are first removed
+ *	@naddr: the number of MAC addresses to allocate filters for (up to 7)
+ *	@addr: the MAC address(es)
+ *	@idx: where to store the index of each allocated filter
+ *	@hash: pointer to hash address filter bitmap
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Allocates an exact-match filter for each of the supplied addresses and
+ *	sets it to the corresponding address.  If @idx is not %NULL it should
+ *	have at least @naddr entries, each of which will be set to the index of
+ *	the filter allocated for the corresponding MAC address.  If a filter
+ *	could not be allocated for an address its index is set to 0xffff.
+ *	If @hash is not %NULL addresses that fail to allocate an exact filter
+ *	are hashed and update the hash filter bitmap pointed at by @hash.
+ *
+ *	Returns a negative error number or the number of filters allocated.
+ */
+int t4_alloc_mac_filt(struct adapter *adap, unsigned int mbox,
+		      unsigned int viid, bool free, unsigned int naddr,
+		      const u8 **addr, u16 *idx, u64 *hash, bool sleep_ok)
+{
+	int i, ret;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_exact *p;
+
+	if (naddr > 7)
+		return -EINVAL;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_WRITE | (free ? FW_CMD_EXEC : 0) |
+			     FW_VI_MAC_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_FREEMACS(free) |
+				    FW_CMD_LEN16((naddr + 2) / 2));
+
+	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+		p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+				      FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+		memcpy(p->macaddr, addr[i], sizeof(p->macaddr));
+	}
+
+	ret = t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), &c, sleep_ok);
+	if (ret)
+		return ret;
+
+	for (i = 0, p = c.u.exact; i < naddr; i++, p++) {
+		u16 index = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+
+		if (idx)
+			idx[i] = index >= NEXACT_MAC ? 0xffff : index;
+		if (index < NEXACT_MAC)
+			ret++;
+		else if (hash)
+			*hash |= (1ULL << hash_mac_addr(addr[i]));
+	}
+	return ret;
+}
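+
+/*
+ * Illustrative call: install exact-match filters for two unicast
+ * addresses on a VI, letting overflow addresses fall back into the
+ * inexact hash (addr0/addr1 are hypothetical 6-byte arrays):
+ *
+ *	const u8 *maclist[] = { addr0, addr1 };
+ *	u16 filt_idx[2];
+ *	u64 mhash = 0;
+ *
+ *	ret = t4_alloc_mac_filt(adap, mbox, viid, false, 2, maclist,
+ *				filt_idx, &mhash, true);
+ */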
+
+/**
+ *	t4_change_mac - modifies the exact-match filter for a MAC address
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@idx: index of existing filter for old value of MAC address, or -1
+ *	@addr: the new MAC address value
+ *	@persist: whether a new MAC allocation should be persistent
+ *	@add_smt: if true also add the address to the HW SMT
+ *
+ *	Modifies an exact-match filter and sets it to the new MAC address.
+ *	Note that in general it is not possible to modify the value of a given
+ *	filter, so the generic way to modify an address filter is to free the one
+ *	being used by the old address value and allocate a new filter for the
+ *	new address value.  @idx can be -1 if the address is a new addition.
+ *
+ *	Returns a negative error number or the index of the filter with the new
+ *	MAC value.
+ */
+int t4_change_mac(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		  int idx, const u8 *addr, bool persist, bool add_smt)
+{
+	int ret, mode;
+	struct fw_vi_mac_cmd c;
+	struct fw_vi_mac_exact *p = c.u.exact;
+
+	if (idx < 0)                             /* new allocation */
+		idx = persist ? FW_VI_MAC_ADD_PERSIST_MAC : FW_VI_MAC_ADD_MAC;
+	mode = add_smt ? FW_VI_MAC_SMT_AND_MPSTCAM : FW_VI_MAC_MPS_TCAM_ENTRY;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(FW_CMD_LEN16(1));
+	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+				FW_VI_MAC_CMD_SMAC_RESULT(mode) |
+				FW_VI_MAC_CMD_IDX(idx));
+	memcpy(p->macaddr, addr, sizeof(p->macaddr));
+
+	ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+	if (ret == 0) {
+		ret = FW_VI_MAC_CMD_IDX_GET(ntohs(p->valid_to_idx));
+		if (ret >= NEXACT_MAC)
+			ret = -ENOMEM;
+	}
+	return ret;
+}
+
+/**
+ *	t4_set_addr_hash - program the MAC inexact-match hash filter
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@ucast: whether the hash filter should also match unicast addresses
+ *	@vec: the value to be written to the hash filter
+ *	@sleep_ok: call is allowed to sleep
+ *
+ *	Sets the 64-bit inexact-match hash filter for a virtual interface.
+ */
+int t4_set_addr_hash(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     bool ucast, u64 vec, bool sleep_ok)
+{
+	struct fw_vi_mac_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_MAC_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_WRITE | FW_VI_MAC_CMD_VIID(viid));
+	c.freemacs_to_len16 = htonl(FW_VI_MAC_CMD_HASHVECEN |
+				    FW_VI_MAC_CMD_HASHUNIEN(ucast) |
+				    FW_CMD_LEN16(1));
+	c.u.hash.hashvec = cpu_to_be64(vec);
+	return t4_wr_mbox_meat(adap, mbox, &c, sizeof(c), NULL, sleep_ok);
+}
+
+/**
+ *	t4_enable_vi - enable/disable a virtual interface
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@rx_en: 1=enable Rx, 0=disable Rx
+ *	@tx_en: 1=enable Tx, 0=disable Tx
+ *
+ *	Enables/disables a virtual interface.
+ */
+int t4_enable_vi(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		 bool rx_en, bool tx_en)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_IEN(rx_en) |
+			       FW_VI_ENABLE_CMD_EEN(tx_en) | FW_LEN16(c));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_identify_port - identify a VI's port by blinking its LED
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@viid: the VI id
+ *	@nblinks: how many times to blink LED at 2.5 Hz
+ *
+ *	Identifies a VI's port by blinking its LED.
+ */
+int t4_identify_port(struct adapter *adap, unsigned int mbox, unsigned int viid,
+		     unsigned int nblinks)
+{
+	struct fw_vi_enable_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_viid = htonl(FW_CMD_OP(FW_VI_ENABLE_CMD) | FW_CMD_REQUEST |
+			     FW_CMD_EXEC | FW_VI_ENABLE_CMD_VIID(viid));
+	c.ien_to_len16 = htonl(FW_VI_ENABLE_CMD_LED | FW_LEN16(c));
+	c.blinkdur = htons(nblinks);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_iq_start_stop - enable/disable an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@start: %true to enable the queues, %false to disable them
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Starts or stops an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_start_stop(struct adapter *adap, unsigned int mbox, bool start,
+		     unsigned int pf, unsigned int vf, unsigned int iqid,
+		     unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+			    FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_IQSTART(start) |
+				 FW_IQ_CMD_IQSTOP(!start) | FW_LEN16(c));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_iq_free - free an ingress queue and its FLs
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queues
+ *	@vf: the VF owning the queues
+ *	@iqtype: the ingress queue type
+ *	@iqid: ingress queue id
+ *	@fl0id: FL0 queue id or 0xffff if no attached FL0
+ *	@fl1id: FL1 queue id or 0xffff if no attached FL1
+ *
+ *	Frees an ingress queue and its associated FLs, if any.
+ */
+int t4_iq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+	       unsigned int vf, unsigned int iqtype, unsigned int iqid,
+	       unsigned int fl0id, unsigned int fl1id)
+{
+	struct fw_iq_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_IQ_CMD_PFN(pf) |
+			    FW_IQ_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_IQ_CMD_FREE | FW_LEN16(c));
+	c.type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iqtype));
+	c.iqid = htons(iqid);
+	c.fl0id = htons(fl0id);
+	c.fl1id = htons(fl1id);
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_eth_eq_free - free an Ethernet egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an Ethernet egress queue.
+ */
+int t4_eth_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		   unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_eth_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_ETH_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_ETH_CMD_PFN(pf) |
+			    FW_EQ_ETH_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_ETH_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(FW_EQ_ETH_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_ctrl_eq_free - free a control egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees a control egress queue.
+ */
+int t4_ctrl_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ctrl_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_CTRL_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_CTRL_CMD_PFN(pf) |
+			    FW_EQ_CTRL_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_CTRL_CMD_FREE | FW_LEN16(c));
+	c.cmpliqid_eqid = htonl(FW_EQ_CTRL_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_ofld_eq_free - free an offload egress queue
+ *	@adap: the adapter
+ *	@mbox: mailbox to use for the FW command
+ *	@pf: the PF owning the queue
+ *	@vf: the VF owning the queue
+ *	@eqid: egress queue id
+ *
+ *	Frees an offload egress queue.
+ */
+int t4_ofld_eq_free(struct adapter *adap, unsigned int mbox, unsigned int pf,
+		    unsigned int vf, unsigned int eqid)
+{
+	struct fw_eq_ofld_cmd c;
+
+	memset(&c, 0, sizeof(c));
+	c.op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) | FW_CMD_REQUEST |
+			    FW_CMD_EXEC | FW_EQ_OFLD_CMD_PFN(pf) |
+			    FW_EQ_OFLD_CMD_VFN(vf));
+	c.alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE | FW_LEN16(c));
+	c.eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eqid));
+	return t4_wr_mbox(adap, mbox, &c, sizeof(c), NULL);
+}
+
+/**
+ *	t4_handle_fw_rpl - process a FW reply message
+ *	@adap: the adapter
+ *	@rpl: start of the FW message
+ *
+ *	Processes a FW message, such as link state change messages.
+ */
+int t4_handle_fw_rpl(struct adapter *adap, const __be64 *rpl)
+{
+	u8 opcode = *(const u8 *)rpl;
+
+	if (opcode == FW_PORT_CMD) {    /* link/module state change message */
+		int speed = 0, fc = 0;
+		const struct fw_port_cmd *p = (void *)rpl;
+		int chan = FW_PORT_CMD_PORTID_GET(ntohl(p->op_to_portid));
+		int port = adap->chan_map[chan];
+		struct port_info *pi = adap2pinfo(adap, port);
+		struct link_config *lc = &pi->link_cfg;
+		u32 stat = ntohl(p->u.info.lstatus_to_modtype);
+		int link_ok = (stat & FW_PORT_CMD_LSTATUS) != 0;
+		u32 mod = FW_PORT_CMD_MODTYPE_GET(stat);
+
+		if (stat & FW_PORT_CMD_RXPAUSE)
+			fc |= PAUSE_RX;
+		if (stat & FW_PORT_CMD_TXPAUSE)
+			fc |= PAUSE_TX;
+		if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_100M))
+			speed = SPEED_100;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_1G))
+			speed = SPEED_1000;
+		else if (stat & FW_PORT_CMD_LSPEED(FW_PORT_CAP_SPEED_10G))
+			speed = SPEED_10000;
+
+		if (link_ok != lc->link_ok || speed != lc->speed ||
+		    fc != lc->fc) {                    /* something changed */
+			lc->link_ok = link_ok;
+			lc->speed = speed;
+			lc->fc = fc;
+			t4_os_link_changed(adap, port, link_ok);
+		}
+		if (mod != pi->mod_type) {
+			pi->mod_type = mod;
+			t4_os_portmod_changed(adap, port);
+		}
+	}
+	return 0;
+}
+
+static void __devinit get_pci_mode(struct adapter *adapter,
+				   struct pci_params *p)
+{
+	u16 val;
+	u32 pcie_cap = pci_pcie_cap(adapter->pdev);
+
+	if (pcie_cap) {
+		pci_read_config_word(adapter->pdev, pcie_cap + PCI_EXP_LNKSTA,
+				     &val);
+		p->speed = val & PCI_EXP_LNKSTA_CLS;
+		p->width = (val & PCI_EXP_LNKSTA_NLW) >> 4;
+	}
+}
+
+/**
+ *	init_link_config - initialize a link's SW state
+ *	@lc: structure holding the link state
+ *	@caps: link capabilities
+ *
+ *	Initializes the SW state maintained for each link, including the link's
+ *	capabilities and default speed/flow-control/autonegotiation settings.
+ */
+static void __devinit init_link_config(struct link_config *lc,
+				       unsigned int caps)
+{
+	lc->supported = caps;
+	lc->requested_speed = 0;
+	lc->speed = 0;
+	lc->requested_fc = lc->fc = PAUSE_RX | PAUSE_TX;
+	if (lc->supported & FW_PORT_CAP_ANEG) {
+		lc->advertising = lc->supported & ADVERT_MASK;
+		lc->autoneg = AUTONEG_ENABLE;
+		lc->requested_fc |= PAUSE_AUTONEG;
+	} else {
+		lc->advertising = 0;
+		lc->autoneg = AUTONEG_DISABLE;
+	}
+}
+
+static int __devinit wait_dev_ready(struct adapter *adap)
+{
+	if (t4_read_reg(adap, PL_WHOAMI) != 0xffffffff)
+		return 0;
+	msleep(500);
+	return t4_read_reg(adap, PL_WHOAMI) != 0xffffffff ? 0 : -EIO;
+}
+
+/**
+ *	t4_prep_adapter - prepare SW and HW for operation
+ *	@adapter: the adapter
+ *
+ *	Initializes adapter SW state for the various HW modules and sets
+ *	initial values for some adapter tunables.
+ */
+int __devinit t4_prep_adapter(struct adapter *adapter)
+{
+	int ret;
+
+	ret = wait_dev_ready(adapter);
+	if (ret < 0)
+		return ret;
+
+	get_pci_mode(adapter, &adapter->params.pci);
+	adapter->params.rev = t4_read_reg(adapter, PL_REV);
+
+	ret = get_vpd_params(adapter, &adapter->params.vpd);
+	if (ret < 0)
+		return ret;
+
+	init_cong_ctrl(adapter->params.a_wnd, adapter->params.b_wnd);
+
+	/*
+	 * Default port for debugging in case we can't reach FW.
+	 */
+	adapter->params.nports = 1;
+	adapter->params.portvec = 1;
+	return 0;
+}
+
+int __devinit t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
+{
+	u8 addr[6];
+	int ret, i, j = 0;
+	struct fw_port_cmd c;
+
+	memset(&c, 0, sizeof(c));
+
+	for_each_port(adap, i) {
+		unsigned int rss_size;
+		struct port_info *p = adap2pinfo(adap, i);
+
+		while ((adap->params.portvec & (1 << j)) == 0)
+			j++;
+
+		c.op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
+				       FW_CMD_REQUEST | FW_CMD_READ |
+				       FW_PORT_CMD_PORTID(j));
+		c.action_to_len16 = htonl(
+			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
+			FW_LEN16(c));
+		ret = t4_wr_mbox(adap, mbox, &c, sizeof(c), &c);
+		if (ret)
+			return ret;
+
+		ret = t4_alloc_vi(adap, mbox, j, pf, vf, 1, addr, &rss_size);
+		if (ret < 0)
+			return ret;
+
+		p->viid = ret;
+		p->tx_chan = j;
+		p->lport = j;
+		p->rss_size = rss_size;
+		memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
+		memcpy(adap->port[i]->perm_addr, addr, ETH_ALEN);
+
+		ret = ntohl(c.u.info.lstatus_to_modtype);
+		p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
+			FW_PORT_CMD_MDIOADDR_GET(ret) : -1;
+		p->port_type = FW_PORT_CMD_PTYPE_GET(ret);
+		p->mod_type = FW_PORT_CMD_MODTYPE_GET(ret);
+
+		init_link_config(&p->link_cfg, ntohs(c.u.info.pcap));
+		j++;
+	}
+	return 0;
+}
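
The while loop above skips cleared bits in params.portvec so that logical port i binds to the i-th enabled channel. A standalone sketch of that mapping, illustrative only and not driver code:

/* e.g. portvec = 0xa (channels 1 and 3): port 0 -> chan 1, port 1 -> chan 3 */
static int example_port_to_chan(unsigned int portvec, int port)
{
	int i, chan = 0;

	for (i = 0; i <= port; i++) {
		while ((portvec & (1 << chan)) == 0)
			chan++;		/* skip channels with no port */
		if (i < port)
			chan++;		/* advance past this match */
	}
	return chan;
}
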
diff --git a/drivers/net/cxgb4/t4_hw.h b/drivers/net/cxgb4/t4_hw.h
new file mode 100644
index 0000000..0256232
--- /dev/null
+++ b/drivers/net/cxgb4/t4_hw.h
@@ -0,0 +1,100 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_HW_H
+#define __T4_HW_H
+
+#include <linux/types.h>
+
+enum {
+	NCHAN          = 4,     /* # of HW channels */
+	MAX_MTU        = 9600,  /* max MAC MTU, excluding header + FCS */
+	EEPROMSIZE     = 17408, /* Serial EEPROM physical size */
+	EEPROMVSIZE    = 32768, /* Serial EEPROM virtual address space size */
+	RSS_NENTRIES   = 2048,  /* # of entries in RSS mapping table */
+	TCB_SIZE       = 128,   /* TCB size */
+	NMTUS          = 16,    /* size of MTU table */
+	NCCTRL_WIN     = 32,    /* # of congestion control windows */
+	NEXACT_MAC     = 336,   /* # of exact MAC address filters */
+	L2T_SIZE       = 4096,  /* # of L2T entries */
+	MBOX_LEN       = 64,    /* mailbox size in bytes */
+	TRACE_LEN      = 112,   /* length of trace data and mask */
+	FILTER_OPT_LEN = 36,    /* filter tuple width for optional components */
+	NWOL_PAT       = 8,     /* # of WoL patterns */
+	WOL_PAT_LEN    = 128,   /* length of WoL patterns */
+};
+
+enum {
+	SF_PAGE_SIZE = 256,           /* serial flash page size */
+	SF_SEC_SIZE = 64 * 1024,      /* serial flash sector size */
+	SF_SIZE = SF_SEC_SIZE * 16,   /* serial flash size */
+};
+
+enum { RSP_TYPE_FLBUF, RSP_TYPE_CPL, RSP_TYPE_INTR }; /* response entry types */
+
+enum { MBOX_OWNER_NONE, MBOX_OWNER_FW, MBOX_OWNER_DRV };    /* mailbox owners */
+
+enum {
+	SGE_MAX_WR_LEN = 512,     /* max WR size in bytes */
+	SGE_NTIMERS = 6,          /* # of interrupt holdoff timer values */
+	SGE_NCOUNTERS = 4,        /* # of interrupt packet counter values */
+};
+
+struct sge_qstat {                /* data written to SGE queue status entries */
+	__be32 qid;
+	__be16 cidx;
+	__be16 pidx;
+};
+
+/*
+ * Structure for last 128 bits of response descriptors
+ */
+struct rsp_ctrl {
+	__be32 hdrbuflen_pidx;
+	__be32 pldbuflen_qid;
+	union {
+		u8 type_gen;
+		__be64 last_flit;
+	};
+};
+
+#define RSPD_NEWBUF 0x80000000U
+#define RSPD_LEN    0x7fffffffU
+
+#define RSPD_GEN(x)  ((x) >> 7)
+#define RSPD_TYPE(x) (((x) >> 4) & 3)
+
+#define QINTR_CNT_EN       0x1
+#define QINTR_TIMER_IDX(x) ((x) << 1)
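
For illustration, the RSPD_* accessors apply to the type_gen byte of struct rsp_ctrl; a minimal sketch of classifying a response entry, assuming the driver tracks the expected generation bit per queue:

static inline int example_rsp_type(const struct rsp_ctrl *rc, int expected_gen)
{
	u8 tg = rc->type_gen;

	if (RSPD_GEN(tg) != expected_gen)
		return -1;		/* stale descriptor, nothing new */
	return RSPD_TYPE(tg);		/* RSP_TYPE_FLBUF, _CPL or _INTR */
}
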
+#endif /* __T4_HW_H */
diff --git a/drivers/net/cxgb4/t4_msg.h b/drivers/net/cxgb4/t4_msg.h
new file mode 100644
index 0000000..fdb1174
--- /dev/null
+++ b/drivers/net/cxgb4/t4_msg.h
@@ -0,0 +1,664 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2003-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_MSG_H
+#define __T4_MSG_H
+
+#include <linux/types.h>
+
+enum {
+	CPL_PASS_OPEN_REQ     = 0x1,
+	CPL_PASS_ACCEPT_RPL   = 0x2,
+	CPL_ACT_OPEN_REQ      = 0x3,
+	CPL_SET_TCB_FIELD     = 0x5,
+	CPL_GET_TCB           = 0x6,
+	CPL_CLOSE_CON_REQ     = 0x8,
+	CPL_CLOSE_LISTSRV_REQ = 0x9,
+	CPL_ABORT_REQ         = 0xA,
+	CPL_ABORT_RPL         = 0xB,
+	CPL_RX_DATA_ACK       = 0xD,
+	CPL_TX_PKT            = 0xE,
+	CPL_L2T_WRITE_REQ     = 0x12,
+	CPL_TID_RELEASE       = 0x1A,
+
+	CPL_CLOSE_LISTSRV_RPL = 0x20,
+	CPL_L2T_WRITE_RPL     = 0x23,
+	CPL_PASS_OPEN_RPL     = 0x24,
+	CPL_ACT_OPEN_RPL      = 0x25,
+	CPL_PEER_CLOSE        = 0x26,
+	CPL_ABORT_REQ_RSS     = 0x2B,
+	CPL_ABORT_RPL_RSS     = 0x2D,
+
+	CPL_CLOSE_CON_RPL     = 0x32,
+	CPL_ISCSI_HDR         = 0x33,
+	CPL_RDMA_CQE          = 0x35,
+	CPL_RDMA_CQE_READ_RSP = 0x36,
+	CPL_RDMA_CQE_ERR      = 0x37,
+	CPL_RX_DATA           = 0x39,
+	CPL_SET_TCB_RPL       = 0x3A,
+	CPL_RX_PKT            = 0x3B,
+	CPL_RX_DDP_COMPLETE   = 0x3F,
+
+	CPL_ACT_ESTABLISH     = 0x40,
+	CPL_PASS_ESTABLISH    = 0x41,
+	CPL_RX_DATA_DDP       = 0x42,
+	CPL_PASS_ACCEPT_REQ   = 0x44,
+
+	CPL_RDMA_READ_REQ     = 0x60,
+
+	CPL_PASS_OPEN_REQ6    = 0x81,
+	CPL_ACT_OPEN_REQ6     = 0x83,
+
+	CPL_RDMA_TERMINATE    = 0xA2,
+	CPL_RDMA_WRITE        = 0xA4,
+	CPL_SGE_EGR_UPDATE    = 0xA5,
+
+	CPL_TRACE_PKT         = 0xB0,
+
+	CPL_FW4_MSG           = 0xC0,
+	CPL_FW4_PLD           = 0xC1,
+	CPL_FW4_ACK           = 0xC3,
+
+	CPL_FW6_MSG           = 0xE0,
+	CPL_FW6_PLD           = 0xE1,
+	CPL_TX_PKT_LSO        = 0xED,
+	CPL_TX_PKT_XT         = 0xEE,
+
+	NUM_CPL_CMDS
+};
+
+enum CPL_error {
+	CPL_ERR_NONE               = 0,
+	CPL_ERR_TCAM_FULL          = 3,
+	CPL_ERR_BAD_LENGTH         = 15,
+	CPL_ERR_BAD_ROUTE          = 18,
+	CPL_ERR_CONN_RESET         = 20,
+	CPL_ERR_CONN_EXIST_SYNRECV = 21,
+	CPL_ERR_CONN_EXIST         = 22,
+	CPL_ERR_ARP_MISS           = 23,
+	CPL_ERR_BAD_SYN            = 24,
+	CPL_ERR_CONN_TIMEDOUT      = 30,
+	CPL_ERR_XMIT_TIMEDOUT      = 31,
+	CPL_ERR_PERSIST_TIMEDOUT   = 32,
+	CPL_ERR_FINWAIT2_TIMEDOUT  = 33,
+	CPL_ERR_KEEPALIVE_TIMEDOUT = 34,
+	CPL_ERR_RTX_NEG_ADVICE     = 35,
+	CPL_ERR_PERSIST_NEG_ADVICE = 36,
+	CPL_ERR_ABORT_FAILED       = 42,
+	CPL_ERR_IWARP_FLM          = 50,
+};
+
+enum {
+	ULP_MODE_NONE          = 0,
+	ULP_MODE_ISCSI         = 2,
+	ULP_MODE_RDMA          = 4,
+	ULP_MODE_FCOE          = 6,
+};
+
+enum {
+	ULP_CRC_HEADER = 1 << 0,
+	ULP_CRC_DATA   = 1 << 1
+};
+
+enum {
+	CPL_ABORT_SEND_RST = 0,
+	CPL_ABORT_NO_RST,
+};
+
+enum {                     /* TX_PKT_XT checksum types */
+	TX_CSUM_TCP    = 0,
+	TX_CSUM_UDP    = 1,
+	TX_CSUM_CRC16  = 4,
+	TX_CSUM_CRC32  = 5,
+	TX_CSUM_CRC32C = 6,
+	TX_CSUM_FCOE   = 7,
+	TX_CSUM_TCPIP  = 8,
+	TX_CSUM_UDPIP  = 9,
+	TX_CSUM_TCPIP6 = 10,
+	TX_CSUM_UDPIP6 = 11,
+	TX_CSUM_IP     = 12,
+};
+
+union opcode_tid {
+	__be32 opcode_tid;
+	u8 opcode;
+};
+
+#define CPL_OPCODE(x) ((x) << 24)
+#define MK_OPCODE_TID(opcode, tid) (CPL_OPCODE(opcode) | (tid))
+#define OPCODE_TID(cmd) ((cmd)->ot.opcode_tid)
+#define GET_TID(cmd) (ntohl(OPCODE_TID(cmd)) & 0xFFFFFF)
+
+/* partitioning of TID fields that also carry a queue id */
+#define GET_TID_TID(x) ((x) & 0x3fff)
+#define GET_TID_QID(x) (((x) >> 14) & 0x3ff)
+#define TID_QID(x)     ((x) << 14)
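
A worked example of the partitioning, illustrative only: TID_QID(5) | 0x1234 yields 0x15234, from which GET_TID_QID() recovers 5 and GET_TID_TID() recovers 0x1234.

static inline unsigned int example_pack_tid(unsigned int qid, unsigned int tid)
{
	/* e.g. example_pack_tid(5, 0x1234) == 0x15234 */
	return TID_QID(qid) | GET_TID_TID(tid);
}
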
+
+struct rss_header {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 channel:2;
+	u8 filter_hit:1;
+	u8 filter_tid:1;
+	u8 hash_type:2;
+	u8 ipv6:1;
+	u8 send2fw:1;
+#else
+	u8 send2fw:1;
+	u8 ipv6:1;
+	u8 hash_type:2;
+	u8 filter_tid:1;
+	u8 filter_hit:1;
+	u8 channel:2;
+#endif
+	__be16 qid;
+	__be32 hash_val;
+};
+
+struct work_request_hdr {
+	__be32 wr_hi;
+	__be32 wr_mid;
+	__be64 wr_lo;
+};
+
+#define WR_HDR struct work_request_hdr wr
+
+struct cpl_pass_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+#define TX_CHAN(x)    ((x) << 2)
+#define DELACK(x)     ((x) << 5)
+#define ULP_MODE(x)   ((x) << 8)
+#define RCV_BUFSIZ(x) ((x) << 12)
+#define DSCP(x)       ((x) << 22)
+#define SMAC_SEL(x)   ((u64)(x) << 28)
+#define L2T_IDX(x)    ((u64)(x) << 36)
+#define NAGLE(x)      ((u64)(x) << 49)
+#define WND_SCALE(x)  ((u64)(x) << 50)
+#define KEEP_ALIVE(x) ((u64)(x) << 54)
+#define MSS_IDX(x)    ((u64)(x) << 60)
+	__be64 opt1;
+#define SYN_RSS_ENABLE   (1 << 0)
+#define SYN_RSS_QUEUE(x) ((x) << 2)
+#define CONN_POLICY_ASK  (1 << 22)
+};
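
The opt0/opt1 field macros shift values into place within the 64-bit words; a hedged sketch of composing an opt0, where every field value is an arbitrary example:

static inline __be64 example_build_opt0(void)
{
	u64 opt0 = TX_CHAN(0) | ULP_MODE(ULP_MODE_NONE) | RCV_BUFSIZ(4) |
		   SMAC_SEL(1) | L2T_IDX(7) | WND_SCALE(2) |
		   KEEP_ALIVE(1) | MSS_IDX(3);

	return cpu_to_be64(opt0);
}
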
+
+struct cpl_pass_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be64 opt1;
+};
+
+struct cpl_pass_open_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_pass_accept_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 opt2;
+#define RSS_QUEUE(x)         ((x) << 0)
+#define RSS_QUEUE_VALID      (1 << 10)
+#define RX_COALESCE_VALID(x) ((x) << 11)
+#define RX_COALESCE(x)       ((x) << 12)
+#define TX_QUEUE(x)          ((x) << 23)
+#define RX_CHANNEL(x)        ((x) << 26)
+#define WND_SCALE_EN(x)      ((x) << 28)
+#define TSTAMPS_EN(x)        ((x) << 29)
+#define SACK_EN(x)           ((x) << 30)
+	__be64 opt0;
+};
+
+struct cpl_act_open_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be32 local_ip;
+	__be32 peer_ip;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+struct cpl_act_open_req6 {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 local_port;
+	__be16 peer_port;
+	__be64 local_ip_hi;
+	__be64 local_ip_lo;
+	__be64 peer_ip_hi;
+	__be64 peer_ip_lo;
+	__be64 opt0;
+	__be32 params;
+	__be32 opt2;
+};
+
+struct cpl_act_open_rpl {
+	union opcode_tid ot;
+	__be32 atid_status;
+#define GET_AOPEN_STATUS(x) ((x) & 0xff)
+#define GET_AOPEN_ATID(x)   (((x) >> 8) & 0xffffff)
+};
+
+struct cpl_pass_establish {
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_stid;
+#define GET_POPEN_TID(x) ((x) & 0xffffff)
+#define GET_POPEN_TOS(x) (((x) >> 24) & 0xff)
+	__be16 mac_idx;
+	__be16 tcp_opt;
+#define GET_TCPOPT_WSCALE_OK(x)  (((x) >> 5) & 1)
+#define GET_TCPOPT_SACK(x)       (((x) >> 6) & 1)
+#define GET_TCPOPT_TSTAMP(x)     (((x) >> 7) & 1)
+#define GET_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf)
+#define GET_TCPOPT_MSS(x)        (((x) >> 12) & 0xf)
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_act_establish {
+	union opcode_tid ot;
+	__be32 rsvd;
+	__be32 tos_atid;
+	__be16 mac_idx;
+	__be16 tcp_opt;
+	__be32 snd_isn;
+	__be32 rcv_isn;
+};
+
+struct cpl_get_tcb {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+#define QUEUENO(x)    ((x) << 0)
+#define REPLY_CHAN(x) ((x) << 14)
+#define NO_REPLY(x)   ((x) << 15)
+	__be16 cookie;
+};
+
+struct cpl_set_tcb_field {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+	__be16 word_cookie;
+#define TCB_WORD(x)   ((x) << 0)
+#define TCB_COOKIE(x) ((x) << 5)
+	__be64 mask;
+	__be64 val;
+};
+
+struct cpl_set_tcb_rpl {
+	union opcode_tid ot;
+	__be16 rsvd;
+	u8 cookie;
+	u8 status;
+	__be64 oldval;
+};
+
+struct cpl_close_con_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_close_con_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+	__be32 snd_nxt;
+	__be32 rcv_nxt;
+};
+
+struct cpl_close_listsvr_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 reply_ctrl;
+#define LISTSVR_IPV6 (1 << 14)
+	__be16 rsvd;
+};
+
+struct cpl_close_listsvr_rpl {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req_rss {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	u8 rsvd1;
+	u8 cmd;
+	u8 rsvd2[6];
+};
+
+struct cpl_abort_rpl_rss {
+	union opcode_tid ot;
+	u8 rsvd[3];
+	u8 status;
+};
+
+struct cpl_abort_rpl {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd0;
+	u8 rsvd1;
+	u8 cmd;
+	u8 rsvd2[6];
+};
+
+struct cpl_peer_close {
+	union opcode_tid ot;
+	__be32 rcv_nxt;
+};
+
+struct cpl_tid_release {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 rsvd;
+};
+
+struct cpl_tx_pkt_core {
+	__be32 ctrl0;
+#define TXPKT_VF(x)        ((x) << 0)
+#define TXPKT_PF(x)        ((x) << 8)
+#define TXPKT_VF_VLD       (1 << 11)
+#define TXPKT_OVLAN_IDX(x) ((x) << 12)
+#define TXPKT_INTF(x)      ((x) << 16)
+#define TXPKT_INS_OVLAN    (1 << 21)
+#define TXPKT_OPCODE(x)    ((x) << 24)
+	__be16 pack;
+	__be16 len;
+	__be64 ctrl1;
+#define TXPKT_CSUM_END(x)   ((x) << 12)
+#define TXPKT_CSUM_START(x) ((x) << 20)
+#define TXPKT_IPHDR_LEN(x)  ((u64)(x) << 20)
+#define TXPKT_CSUM_LOC(x)   ((u64)(x) << 30)
+#define TXPKT_ETHHDR_LEN(x) ((u64)(x) << 34)
+#define TXPKT_CSUM_TYPE(x)  ((u64)(x) << 40)
+#define TXPKT_VLAN(x)       ((u64)(x) << 44)
+#define TXPKT_VLAN_VLD      (1ULL << 60)
+#define TXPKT_IPCSUM_DIS    (1ULL << 62)
+#define TXPKT_L4CSUM_DIS    (1ULL << 63)
+};
+
+struct cpl_tx_pkt {
+	WR_HDR;
+	struct cpl_tx_pkt_core c;
+};
+
+#define cpl_tx_pkt_xt cpl_tx_pkt
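
A hedged sketch of filling cpl_tx_pkt_core for a plain TCP/IPv4 frame with checksum offload; port and length are caller-supplied examples, and TXPKT_ETHHDR_LEN counts L2 bytes beyond the basic 14-byte Ethernet header (0 here, i.e. no VLAN tag):

static void example_fill_tx_pkt(struct cpl_tx_pkt_core *cpl, int port, u16 len)
{
	cpl->ctrl0 = htonl(TXPKT_OPCODE(CPL_TX_PKT_XT) | TXPKT_INTF(port) |
			   TXPKT_PF(0));
	cpl->pack = htons(0);
	cpl->len = htons(len);
	cpl->ctrl1 = cpu_to_be64(TXPKT_CSUM_TYPE(TX_CSUM_TCPIP) |
				 TXPKT_ETHHDR_LEN(0) |
				 TXPKT_IPHDR_LEN(20));	/* bare IPv4 header */
}
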
+
+struct cpl_tx_pkt_lso {
+	WR_HDR;
+	__be32 lso_ctrl;
+#define LSO_TCPHDR_LEN(x) ((x) << 0)
+#define LSO_IPHDR_LEN(x)  ((x) << 4)
+#define LSO_ETHHDR_LEN(x) ((x) << 16)
+#define LSO_IPV6(x)       ((x) << 20)
+#define LSO_LAST_SLICE    (1 << 22)
+#define LSO_FIRST_SLICE   (1 << 23)
+#define LSO_OPCODE(x)     ((x) << 24)
+	__be16 ipid_ofst;
+	__be16 mss;
+	__be32 seqno_offset;
+	__be32 len;
+	/* encapsulated CPL (TX_PKT, TX_PKT_XT or TX_DATA) follows here */
+};
+
+struct cpl_iscsi_hdr {
+	union opcode_tid ot;
+	__be16 pdu_len_ddp;
+#define ISCSI_PDU_LEN(x) ((x) & 0x7FFF)
+#define ISCSI_DDP        (1 << 15)
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+	u8 rsvd;
+	u8 status;
+};
+
+struct cpl_rx_data {
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+	__be32 seq;
+	__be16 urg;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 dack_mode:2;
+	u8 psh:1;
+	u8 heartbeat:1;
+	u8 ddp_off:1;
+	u8 :3;
+#else
+	u8 :3;
+	u8 ddp_off:1;
+	u8 heartbeat:1;
+	u8 psh:1;
+	u8 dack_mode:2;
+#endif
+	u8 status;
+};
+
+struct cpl_rx_data_ack {
+	WR_HDR;
+	union opcode_tid ot;
+	__be32 credit_dack;
+#define RX_CREDITS(x)   ((x) << 0)
+#define RX_FORCE_ACK(x) ((x) << 28)
+};
+
+struct cpl_rx_pkt {
+	u8 opcode;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 iff:4;
+	u8 csum_calc:1;
+	u8 ipmi_pkt:1;
+	u8 vlan_ex:1;
+	u8 ip_frag:1;
+#else
+	u8 ip_frag:1;
+	u8 vlan_ex:1;
+	u8 ipmi_pkt:1;
+	u8 csum_calc:1;
+	u8 iff:4;
+#endif
+	__be16 csum;
+	__be16 vlan;
+	__be16 len;
+	__be32 l2info;
+#define RXF_UDP (1 << 22)
+#define RXF_TCP (1 << 23)
+	__be16 hdr_len;
+	__be16 err_vec;
+};
+
+struct cpl_trace_pkt {
+	u8 opcode;
+	u8 intf;
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 runt:4;
+	u8 filter_hit:4;
+	u8 :6;
+	u8 err:1;
+	u8 trunc:1;
+#else
+	u8 filter_hit:4;
+	u8 runt:4;
+	u8 trunc:1;
+	u8 err:1;
+	u8 :6;
+#endif
+	__be16 rsvd;
+	__be16 len;
+	__be64 tstamp;
+};
+
+struct cpl_l2t_write_req {
+	WR_HDR;
+	union opcode_tid ot;
+	__be16 params;
+#define L2T_W_INFO(x)    ((x) << 2)
+#define L2T_W_PORT(x)    ((x) << 8)
+#define L2T_W_NOREPLY(x) ((x) << 15)
+	__be16 l2t_idx;
+	__be16 vlan;
+	u8 dst_mac[6];
+};
+
+struct cpl_l2t_write_rpl {
+	union opcode_tid ot;
+	u8 status;
+	u8 rsvd[3];
+};
+
+struct cpl_rdma_terminate {
+	union opcode_tid ot;
+	__be16 rsvd;
+	__be16 len;
+};
+
+struct cpl_sge_egr_update {
+	__be32 opcode_qid;
+#define EGR_QID(x) ((x) & 0x1FFFF)
+	__be16 cidx;
+	__be16 pidx;
+};
+
+struct cpl_fw4_pld {
+	u8 opcode;
+	u8 rsvd0[3];
+	u8 type;
+	u8 rsvd1;
+	__be16 len;
+	__be64 data;
+	__be64 rsvd2;
+};
+
+struct cpl_fw6_pld {
+	u8 opcode;
+	u8 rsvd[5];
+	__be16 len;
+	__be64 data[4];
+};
+
+struct cpl_fw4_msg {
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[2];
+};
+
+struct cpl_fw4_ack {
+	union opcode_tid ot;
+	u8 credits;
+	u8 rsvd0[2];
+	u8 seq_vld;
+	__be32 snd_nxt;
+	__be32 snd_una;
+	__be64 rsvd1;
+};
+
+struct cpl_fw6_msg {
+	u8 opcode;
+	u8 type;
+	__be16 rsvd0;
+	__be32 rsvd1;
+	__be64 data[4];
+};
+
+enum {
+	ULP_TX_MEM_READ = 2,
+	ULP_TX_MEM_WRITE = 3,
+	ULP_TX_PKT = 4
+};
+
+enum {
+	ULP_TX_SC_NOOP = 0x80,
+	ULP_TX_SC_IMM  = 0x81,
+	ULP_TX_SC_DSGL = 0x82,
+	ULP_TX_SC_ISGL = 0x83
+};
+
+struct ulptx_sge_pair {
+	__be32 len[2];
+	__be64 addr[2];
+};
+
+struct ulptx_sgl {
+	__be32 cmd_nsge;
+#define ULPTX_CMD(x) ((x) << 24)
+#define ULPTX_NSGE(x) ((x) << 0)
+	__be32 len0;
+	__be64 addr0;
+	struct ulptx_sge_pair sge[0];
+};
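
The list stores its first buffer inline (len0/addr0) and the remainder as packed len/addr pairs; a minimal illustration of composing the command word for an n-buffer DSGL:

static inline __be32 example_sgl_cmd(unsigned int nsge)
{
	return htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_NSGE(nsge));
}
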
+
+struct ulp_mem_io {
+	WR_HDR;
+	__be32 cmd;
+#define ULP_MEMIO_ORDER(x) ((x) << 23)
+	__be32 len16;             /* command length */
+	__be32 dlen;              /* data length in 32-byte units */
+#define ULP_MEMIO_DATA_LEN(x) ((x) << 0)
+	__be32 lock_addr;
+#define ULP_MEMIO_ADDR(x) ((x) << 0)
+#define ULP_MEMIO_LOCK(x) ((x) << 31)
+};
+
+#endif  /* __T4_MSG_H */
diff --git a/drivers/net/cxgb4/t4_regs.h b/drivers/net/cxgb4/t4_regs.h
new file mode 100644
index 0000000..5ed5648
--- /dev/null
+++ b/drivers/net/cxgb4/t4_regs.h
@@ -0,0 +1,878 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef __T4_REGS_H
+#define __T4_REGS_H
+
+#define MYPF_BASE 0x1b000
+#define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr))
+
+#define PF0_BASE 0x1e000
+#define PF0_REG(reg_addr) (PF0_BASE + (reg_addr))
+
+#define PF_STRIDE 0x400
+#define PF_BASE(idx) (PF0_BASE + (idx) * PF_STRIDE)
+#define PF_REG(idx, reg) (PF_BASE(idx) + (reg))
+
+#define MYPORT_BASE 0x1c000
+#define MYPORT_REG(reg_addr) (MYPORT_BASE + (reg_addr))
+
+#define PORT0_BASE 0x20000
+#define PORT0_REG(reg_addr) (PORT0_BASE + (reg_addr))
+
+#define PORT_STRIDE 0x2000
+#define PORT_BASE(idx) (PORT0_BASE + (idx) * PORT_STRIDE)
+#define PORT_REG(idx, reg) (PORT_BASE(idx) + (reg))
+
+#define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR)
+#define EDC_REG(reg, idx) ((reg) + EDC_STRIDE * (idx))
+
+#define PCIE_MEM_ACCESS_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define PCIE_MAILBOX_REG(reg_addr, idx) ((reg_addr) + (idx) * 8)
+#define MC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
+#define EDC_BIST_STATUS_REG(reg_addr, idx) ((reg_addr) + (idx) * 4)
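
Worked example of the address arithmetic, for illustration: PF_REG(2, PCIE_PF_CLI) resolves to 0x1e000 + 2 * 0x400 + 0x44 = 0x1e844, and a per-port read might look like:

static inline u32 example_read_port_cfg2(struct adapter *adap, int port)
{
	/* PORT_REG(1, XGMAC_PORT_CFG2) = 0x20000 + 0x2000 + 0x1018 = 0x23018 */
	return t4_read_reg(adap, PORT_REG(port, XGMAC_PORT_CFG2));
}
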
+
+#define SGE_PF_KDOORBELL 0x0
+#define  QID_MASK    0xffff8000U
+#define  QID_SHIFT   15
+#define  QID(x)      ((x) << QID_SHIFT)
+#define  DBPRIO      0x00004000U
+#define  PIDX_MASK   0x00003fffU
+#define  PIDX_SHIFT  0
+#define  PIDX(x)     ((x) << PIDX_SHIFT)
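
An illustrative sketch of ringing an egress-queue doorbell with these fields, assuming the driver's t4_write_reg() helper:

static inline void example_ring_db(struct adapter *adap, unsigned int qid,
				   unsigned int ndesc)
{
	/* tell the SGE that 'ndesc' new descriptors are ready on 'qid' */
	t4_write_reg(adap, MYPF_REG(SGE_PF_KDOORBELL), QID(qid) | PIDX(ndesc));
}
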
+
+#define SGE_PF_GTS 0x4
+#define  INGRESSQID_MASK   0xffff0000U
+#define  INGRESSQID_SHIFT  16
+#define  INGRESSQID(x)     ((x) << INGRESSQID_SHIFT)
+#define  TIMERREG_MASK     0x0000e000U
+#define  TIMERREG_SHIFT    13
+#define  TIMERREG(x)       ((x) << TIMERREG_SHIFT)
+#define  SEINTARM_MASK     0x00001000U
+#define  SEINTARM_SHIFT    12
+#define  SEINTARM(x)       ((x) << SEINTARM_SHIFT)
+#define  CIDXINC_MASK      0x00000fffU
+#define  CIDXINC_SHIFT     0
+#define  CIDXINC(x)        ((x) << CIDXINC_SHIFT)
+
+#define SGE_CONTROL 0x1008
+#define  DCASYSTYPE             0x00080000U
+#define  RXPKTCPLMODE           0x00040000U
+#define  EGRSTATUSPAGESIZE      0x00020000U
+#define  PKTSHIFT_MASK          0x00001c00U
+#define  PKTSHIFT_SHIFT         10
+#define  PKTSHIFT(x)            ((x) << PKTSHIFT_SHIFT)
+#define  INGPCIEBOUNDARY_MASK   0x00000380U
+#define  INGPCIEBOUNDARY_SHIFT  7
+#define  INGPCIEBOUNDARY(x)     ((x) << INGPCIEBOUNDARY_SHIFT)
+#define  INGPADBOUNDARY_MASK    0x00000070U
+#define  INGPADBOUNDARY_SHIFT   4
+#define  INGPADBOUNDARY(x)      ((x) << INGPADBOUNDARY_SHIFT)
+#define  EGRPCIEBOUNDARY_MASK   0x0000000eU
+#define  EGRPCIEBOUNDARY_SHIFT  1
+#define  EGRPCIEBOUNDARY(x)     ((x) << EGRPCIEBOUNDARY_SHIFT)
+#define  GLOBALENABLE           0x00000001U
+
+#define SGE_HOST_PAGE_SIZE 0x100c
+#define  HOSTPAGESIZEPF0_MASK   0x0000000fU
+#define  HOSTPAGESIZEPF0_SHIFT  0
+#define  HOSTPAGESIZEPF0(x)     ((x) << HOSTPAGESIZEPF0_SHIFT)
+
+#define SGE_EGRESS_QUEUES_PER_PAGE_PF 0x1010
+#define  QUEUESPERPAGEPF0_MASK   0x0000000fU
+#define  QUEUESPERPAGEPF0_GET(x) ((x) & QUEUESPERPAGEPF0_MASK)
+
+#define SGE_INT_CAUSE1 0x1024
+#define SGE_INT_CAUSE2 0x1030
+#define SGE_INT_CAUSE3 0x103c
+#define  ERR_FLM_DBP               0x80000000U
+#define  ERR_FLM_IDMA1             0x40000000U
+#define  ERR_FLM_IDMA0             0x20000000U
+#define  ERR_FLM_HINT              0x10000000U
+#define  ERR_PCIE_ERROR3           0x08000000U
+#define  ERR_PCIE_ERROR2           0x04000000U
+#define  ERR_PCIE_ERROR1           0x02000000U
+#define  ERR_PCIE_ERROR0           0x01000000U
+#define  ERR_TIMER_ABOVE_MAX_QID   0x00800000U
+#define  ERR_CPL_EXCEED_IQE_SIZE   0x00400000U
+#define  ERR_INVALID_CIDX_INC      0x00200000U
+#define  ERR_ITP_TIME_PAUSED       0x00100000U
+#define  ERR_CPL_OPCODE_0          0x00080000U
+#define  ERR_DROPPED_DB            0x00040000U
+#define  ERR_DATA_CPL_ON_HIGH_QID1 0x00020000U
+#define  ERR_DATA_CPL_ON_HIGH_QID0 0x00010000U
+#define  ERR_BAD_DB_PIDX3          0x00008000U
+#define  ERR_BAD_DB_PIDX2          0x00004000U
+#define  ERR_BAD_DB_PIDX1          0x00002000U
+#define  ERR_BAD_DB_PIDX0          0x00001000U
+#define  ERR_ING_PCIE_CHAN         0x00000800U
+#define  ERR_ING_CTXT_PRIO         0x00000400U
+#define  ERR_EGR_CTXT_PRIO         0x00000200U
+#define  DBFIFO_HP_INT             0x00000100U
+#define  DBFIFO_LP_INT             0x00000080U
+#define  REG_ADDRESS_ERR           0x00000040U
+#define  INGRESS_SIZE_ERR          0x00000020U
+#define  EGRESS_SIZE_ERR           0x00000010U
+#define  ERR_INV_CTXT3             0x00000008U
+#define  ERR_INV_CTXT2             0x00000004U
+#define  ERR_INV_CTXT1             0x00000002U
+#define  ERR_INV_CTXT0             0x00000001U
+
+#define SGE_INT_ENABLE3 0x1040
+#define SGE_FL_BUFFER_SIZE0 0x1044
+#define SGE_FL_BUFFER_SIZE1 0x1048
+#define SGE_INGRESS_RX_THRESHOLD 0x10a0
+#define  THRESHOLD_0_MASK   0x3f000000U
+#define  THRESHOLD_0_SHIFT  24
+#define  THRESHOLD_0(x)     ((x) << THRESHOLD_0_SHIFT)
+#define  THRESHOLD_0_GET(x) (((x) & THRESHOLD_0_MASK) >> THRESHOLD_0_SHIFT)
+#define  THRESHOLD_1_MASK   0x003f0000U
+#define  THRESHOLD_1_SHIFT  16
+#define  THRESHOLD_1(x)     ((x) << THRESHOLD_1_SHIFT)
+#define  THRESHOLD_1_GET(x) (((x) & THRESHOLD_1_MASK) >> THRESHOLD_1_SHIFT)
+#define  THRESHOLD_2_MASK   0x00003f00U
+#define  THRESHOLD_2_SHIFT  8
+#define  THRESHOLD_2(x)     ((x) << THRESHOLD_2_SHIFT)
+#define  THRESHOLD_2_GET(x) (((x) & THRESHOLD_2_MASK) >> THRESHOLD_2_SHIFT)
+#define  THRESHOLD_3_MASK   0x0000003fU
+#define  THRESHOLD_3_SHIFT  0
+#define  THRESHOLD_3(x)     ((x) << THRESHOLD_3_SHIFT)
+#define  THRESHOLD_3_GET(x) (((x) & THRESHOLD_3_MASK) >> THRESHOLD_3_SHIFT)
+
+#define SGE_TIMER_VALUE_0_AND_1 0x10b8
+#define  TIMERVALUE0_MASK   0xffff0000U
+#define  TIMERVALUE0_SHIFT  16
+#define  TIMERVALUE0(x)     ((x) << TIMERVALUE0_SHIFT)
+#define  TIMERVALUE0_GET(x) (((x) & TIMERVALUE0_MASK) >> TIMERVALUE0_SHIFT)
+#define  TIMERVALUE1_MASK   0x0000ffffU
+#define  TIMERVALUE1_SHIFT  0
+#define  TIMERVALUE1(x)     ((x) << TIMERVALUE1_SHIFT)
+#define  TIMERVALUE1_GET(x) (((x) & TIMERVALUE1_MASK) >> TIMERVALUE1_SHIFT)
+
+#define SGE_TIMER_VALUE_2_AND_3 0x10bc
+#define SGE_TIMER_VALUE_4_AND_5 0x10c0
+#define SGE_DEBUG_INDEX 0x10cc
+#define SGE_DEBUG_DATA_HIGH 0x10d0
+#define SGE_DEBUG_DATA_LOW 0x10d4
+#define SGE_INGRESS_QUEUES_PER_PAGE_PF 0x10f4
+
+#define PCIE_PF_CLI 0x44
+#define PCIE_INT_CAUSE 0x3004
+#define  UNXSPLCPLERR  0x20000000U
+#define  PCIEPINT      0x10000000U
+#define  PCIESINT      0x08000000U
+#define  RPLPERR       0x04000000U
+#define  RXWRPERR      0x02000000U
+#define  RXCPLPERR     0x01000000U
+#define  PIOTAGPERR    0x00800000U
+#define  MATAGPERR     0x00400000U
+#define  INTXCLRPERR   0x00200000U
+#define  FIDPERR       0x00100000U
+#define  CFGSNPPERR    0x00080000U
+#define  HRSPPERR      0x00040000U
+#define  HREQPERR      0x00020000U
+#define  HCNTPERR      0x00010000U
+#define  DRSPPERR      0x00008000U
+#define  DREQPERR      0x00004000U
+#define  DCNTPERR      0x00002000U
+#define  CRSPPERR      0x00001000U
+#define  CREQPERR      0x00000800U
+#define  CCNTPERR      0x00000400U
+#define  TARTAGPERR    0x00000200U
+#define  PIOREQPERR    0x00000100U
+#define  PIOCPLPERR    0x00000080U
+#define  MSIXDIPERR    0x00000040U
+#define  MSIXDATAPERR  0x00000020U
+#define  MSIXADDRHPERR 0x00000010U
+#define  MSIXADDRLPERR 0x00000008U
+#define  MSIDATAPERR   0x00000004U
+#define  MSIADDRHPERR  0x00000002U
+#define  MSIADDRLPERR  0x00000001U
+
+#define PCIE_NONFAT_ERR 0x3010
+#define PCIE_MEM_ACCESS_BASE_WIN 0x3068
+#define  PCIEOFST_MASK   0xfffffc00U
+#define  BIR_MASK        0x00000300U
+#define  BIR_SHIFT       8
+#define  BIR(x)          ((x) << BIR_SHIFT)
+#define  WINDOW_MASK     0x000000ffU
+#define  WINDOW_SHIFT    0
+#define  WINDOW(x)       ((x) << WINDOW_SHIFT)
+
+#define PCIE_CORE_UTL_SYSTEM_BUS_AGENT_STATUS 0x5908
+#define  RNPP 0x80000000U
+#define  RPCP 0x20000000U
+#define  RCIP 0x08000000U
+#define  RCCP 0x04000000U
+#define  RFTP 0x00800000U
+#define  PTRP 0x00100000U
+
+#define PCIE_CORE_UTL_PCI_EXPRESS_PORT_STATUS 0x59a4
+#define  TPCP 0x40000000U
+#define  TNPP 0x20000000U
+#define  TFTP 0x10000000U
+#define  TCAP 0x08000000U
+#define  TCIP 0x04000000U
+#define  RCAP 0x02000000U
+#define  PLUP 0x00800000U
+#define  PLDN 0x00400000U
+#define  OTDD 0x00200000U
+#define  GTRP 0x00100000U
+#define  RDPE 0x00040000U
+#define  TDCE 0x00020000U
+#define  TDUE 0x00010000U
+
+#define MC_INT_CAUSE 0x7518
+#define  ECC_UE_INT_CAUSE 0x00000004U
+#define  ECC_CE_INT_CAUSE 0x00000002U
+#define  PERR_INT_CAUSE   0x00000001U
+
+#define MC_ECC_STATUS 0x751c
+#define  ECC_CECNT_MASK   0xffff0000U
+#define  ECC_CECNT_SHIFT  16
+#define  ECC_CECNT(x)     ((x) << ECC_CECNT_SHIFT)
+#define  ECC_CECNT_GET(x) (((x) & ECC_CECNT_MASK) >> ECC_CECNT_SHIFT)
+#define  ECC_UECNT_MASK   0x0000ffffU
+#define  ECC_UECNT_SHIFT  0
+#define  ECC_UECNT(x)     ((x) << ECC_UECNT_SHIFT)
+#define  ECC_UECNT_GET(x) (((x) & ECC_UECNT_MASK) >> ECC_UECNT_SHIFT)
+
+#define MC_BIST_CMD 0x7600
+#define  START_BIST          0x80000000U
+#define  BIST_CMD_GAP_MASK   0x0000ff00U
+#define  BIST_CMD_GAP_SHIFT  8
+#define  BIST_CMD_GAP(x)     ((x) << BIST_CMD_GAP_SHIFT)
+#define  BIST_OPCODE_MASK    0x00000003U
+#define  BIST_OPCODE_SHIFT   0
+#define  BIST_OPCODE(x)      ((x) << BIST_OPCODE_SHIFT)
+
+#define MC_BIST_CMD_ADDR 0x7604
+#define MC_BIST_CMD_LEN 0x7608
+#define MC_BIST_DATA_PATTERN 0x760c
+#define  BIST_DATA_TYPE_MASK   0x0000000fU
+#define  BIST_DATA_TYPE_SHIFT  0
+#define  BIST_DATA_TYPE(x)     ((x) << BIST_DATA_TYPE_SHIFT)
+
+#define MC_BIST_STATUS_RDATA 0x7688
+
+#define MA_EXT_MEMORY_BAR 0x77c8
+#define  EXT_MEM_SIZE_MASK   0x00000fffU
+#define  EXT_MEM_SIZE_SHIFT  0
+#define  EXT_MEM_SIZE_GET(x) (((x) & EXT_MEM_SIZE_MASK) >> EXT_MEM_SIZE_SHIFT)
+
+#define MA_TARGET_MEM_ENABLE 0x77d8
+#define  EXT_MEM_ENABLE 0x00000004U
+#define  EDRAM1_ENABLE  0x00000002U
+#define  EDRAM0_ENABLE  0x00000001U
+
+#define MA_INT_CAUSE 0x77e0
+#define  MEM_PERR_INT_CAUSE 0x00000002U
+#define  MEM_WRAP_INT_CAUSE 0x00000001U
+
+#define MA_INT_WRAP_STATUS 0x77e4
+#define  MEM_WRAP_ADDRESS_MASK   0xfffffff0U
+#define  MEM_WRAP_ADDRESS_SHIFT  4
+#define  MEM_WRAP_ADDRESS_GET(x) (((x) & MEM_WRAP_ADDRESS_MASK) >> MEM_WRAP_ADDRESS_SHIFT)
+#define  MEM_WRAP_CLIENT_NUM_MASK   0x0000000fU
+#define  MEM_WRAP_CLIENT_NUM_SHIFT  0
+#define  MEM_WRAP_CLIENT_NUM_GET(x) (((x) & MEM_WRAP_CLIENT_NUM_MASK) >> MEM_WRAP_CLIENT_NUM_SHIFT)
+
+#define MA_PARITY_ERROR_STATUS 0x77f4
+
+#define EDC_0_BASE_ADDR 0x7900
+
+#define EDC_BIST_CMD 0x7904
+#define EDC_BIST_CMD_ADDR 0x7908
+#define EDC_BIST_CMD_LEN 0x790c
+#define EDC_BIST_DATA_PATTERN 0x7910
+#define EDC_BIST_STATUS_RDATA 0x7928
+#define EDC_INT_CAUSE 0x7978
+#define  ECC_UE_PAR     0x00000020U
+#define  ECC_CE_PAR     0x00000010U
+#define  PERR_PAR_CAUSE 0x00000008U
+
+#define EDC_ECC_STATUS 0x797c
+
+#define EDC_1_BASE_ADDR 0x7980
+
+#define CIM_PF_MAILBOX_DATA 0x240
+#define CIM_PF_MAILBOX_CTRL 0x280
+#define  MBMSGVALID     0x00000008U
+#define  MBINTREQ       0x00000004U
+#define  MBOWNER_MASK   0x00000003U
+#define  MBOWNER_SHIFT  0
+#define  MBOWNER(x)     ((x) << MBOWNER_SHIFT)
+#define  MBOWNER_GET(x) (((x) & MBOWNER_MASK) >> MBOWNER_SHIFT)
+
+#define CIM_PF_HOST_INT_CAUSE 0x28c
+#define  MBMSGRDYINT 0x00080000U
+
+#define CIM_HOST_INT_CAUSE 0x7b2c
+#define  TIEQOUTPARERRINT  0x00100000U
+#define  TIEQINPARERRINT   0x00080000U
+#define  MBHOSTPARERR      0x00040000U
+#define  MBUPPARERR        0x00020000U
+#define  IBQPARERR         0x0001f800U
+#define  IBQTP0PARERR      0x00010000U
+#define  IBQTP1PARERR      0x00008000U
+#define  IBQULPPARERR      0x00004000U
+#define  IBQSGELOPARERR    0x00002000U
+#define  IBQSGEHIPARERR    0x00001000U
+#define  IBQNCSIPARERR     0x00000800U
+#define  OBQPARERR         0x000007e0U
+#define  OBQULP0PARERR     0x00000400U
+#define  OBQULP1PARERR     0x00000200U
+#define  OBQULP2PARERR     0x00000100U
+#define  OBQULP3PARERR     0x00000080U
+#define  OBQSGEPARERR      0x00000040U
+#define  OBQNCSIPARERR     0x00000020U
+#define  PREFDROPINT       0x00000002U
+#define  UPACCNONZERO      0x00000001U
+
+#define CIM_HOST_UPACC_INT_CAUSE 0x7b34
+#define  EEPROMWRINT      0x40000000U
+#define  TIMEOUTMAINT     0x20000000U
+#define  TIMEOUTINT       0x10000000U
+#define  RSPOVRLOOKUPINT  0x08000000U
+#define  REQOVRLOOKUPINT  0x04000000U
+#define  BLKWRPLINT       0x02000000U
+#define  BLKRDPLINT       0x01000000U
+#define  SGLWRPLINT       0x00800000U
+#define  SGLRDPLINT       0x00400000U
+#define  BLKWRCTLINT      0x00200000U
+#define  BLKRDCTLINT      0x00100000U
+#define  SGLWRCTLINT      0x00080000U
+#define  SGLRDCTLINT      0x00040000U
+#define  BLKWREEPROMINT   0x00020000U
+#define  BLKRDEEPROMINT   0x00010000U
+#define  SGLWREEPROMINT   0x00008000U
+#define  SGLRDEEPROMINT   0x00004000U
+#define  BLKWRFLASHINT    0x00002000U
+#define  BLKRDFLASHINT    0x00001000U
+#define  SGLWRFLASHINT    0x00000800U
+#define  SGLRDFLASHINT    0x00000400U
+#define  BLKWRBOOTINT     0x00000200U
+#define  BLKRDBOOTINT     0x00000100U
+#define  SGLWRBOOTINT     0x00000080U
+#define  SGLRDBOOTINT     0x00000040U
+#define  ILLWRBEINT       0x00000020U
+#define  ILLRDBEINT       0x00000010U
+#define  ILLRDINT         0x00000008U
+#define  ILLWRINT         0x00000004U
+#define  ILLTRANSINT      0x00000002U
+#define  RSVDSPACEINT     0x00000001U
+
+#define TP_OUT_CONFIG 0x7d04
+#define  VLANEXTENABLE_MASK  0x0000f000U
+#define  VLANEXTENABLE_SHIFT 12
+
+#define TP_PARA_REG2 0x7d68
+#define  MAXRXDATA_MASK    0xffff0000U
+#define  MAXRXDATA_SHIFT   16
+#define  MAXRXDATA_GET(x) (((x) & MAXRXDATA_MASK) >> MAXRXDATA_SHIFT)
+
+#define TP_TIMER_RESOLUTION 0x7d90
+#define  TIMERRESOLUTION_MASK   0x00ff0000U
+#define  TIMERRESOLUTION_SHIFT  16
+#define  TIMERRESOLUTION_GET(x) (((x) & TIMERRESOLUTION_MASK) >> TIMERRESOLUTION_SHIFT)
+
+#define TP_SHIFT_CNT 0x7dc0
+
+#define TP_CCTRL_TABLE 0x7ddc
+#define TP_MTU_TABLE 0x7de4
+#define  MTUINDEX_MASK   0xff000000U
+#define  MTUINDEX_SHIFT  24
+#define  MTUINDEX(x)     ((x) << MTUINDEX_SHIFT)
+#define  MTUWIDTH_MASK   0x000f0000U
+#define  MTUWIDTH_SHIFT  16
+#define  MTUWIDTH(x)     ((x) << MTUWIDTH_SHIFT)
+#define  MTUWIDTH_GET(x) (((x) & MTUWIDTH_MASK) >> MTUWIDTH_SHIFT)
+#define  MTUVALUE_MASK   0x00003fffU
+#define  MTUVALUE_SHIFT  0
+#define  MTUVALUE(x)     ((x) << MTUVALUE_SHIFT)
+#define  MTUVALUE_GET(x) (((x) & MTUVALUE_MASK) >> MTUVALUE_SHIFT)
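
A hedged sketch of reading back one of the NMTUS entries through this indirect register; writing MTUINDEX(0xff) with the entry number in the value field follows the driver's select-then-read convention (an assumption here, mirrored from its table-read path):

static unsigned int example_read_mtu(struct adapter *adap, unsigned int idx)
{
	u32 v;

	t4_write_reg(adap, TP_MTU_TABLE, MTUINDEX(0xff) | MTUVALUE(idx));
	v = t4_read_reg(adap, TP_MTU_TABLE);
	return MTUVALUE_GET(v);
}
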
+
+#define TP_RSS_LKP_TABLE 0x7dec
+#define  LKPTBLROWVLD        0x80000000U
+#define  LKPTBLQUEUE1_MASK   0x000ffc00U
+#define  LKPTBLQUEUE1_SHIFT  10
+#define  LKPTBLQUEUE1(x)     ((x) << LKPTBLQUEUE1_SHIFT)
+#define  LKPTBLQUEUE1_GET(x) (((x) & LKPTBLQUEUE1_MASK) >> LKPTBLQUEUE1_SHIFT)
+#define  LKPTBLQUEUE0_MASK   0x000003ffU
+#define  LKPTBLQUEUE0_SHIFT  0
+#define  LKPTBLQUEUE0(x)     ((x) << LKPTBLQUEUE0_SHIFT)
+#define  LKPTBLQUEUE0_GET(x) (((x) & LKPTBLQUEUE0_MASK) >> LKPTBLQUEUE0_SHIFT)
+
+#define TP_PIO_ADDR 0x7e40
+#define TP_PIO_DATA 0x7e44
+#define TP_MIB_INDEX 0x7e50
+#define TP_MIB_DATA 0x7e54
+#define TP_INT_CAUSE 0x7e74
+#define  FLMTXFLSTEMPTY 0x40000000U
+
+#define TP_INGRESS_CONFIG 0x141
+#define  VNIC                0x00000800U
+#define  CSUM_HAS_PSEUDO_HDR 0x00000400U
+#define  RM_OVLAN            0x00000200U
+#define  LOOKUPEVERYPKT      0x00000100U
+
+#define TP_MIB_MAC_IN_ERR_0 0x0
+#define TP_MIB_TCP_OUT_RST 0xc
+#define TP_MIB_TCP_IN_SEG_HI 0x10
+#define TP_MIB_TCP_IN_SEG_LO 0x11
+#define TP_MIB_TCP_OUT_SEG_HI 0x12
+#define TP_MIB_TCP_OUT_SEG_LO 0x13
+#define TP_MIB_TCP_RXT_SEG_HI 0x14
+#define TP_MIB_TCP_RXT_SEG_LO 0x15
+#define TP_MIB_TNL_CNG_DROP_0 0x18
+#define TP_MIB_TCP_V6IN_ERR_0 0x28
+#define TP_MIB_TCP_V6OUT_RST 0x2c
+#define TP_MIB_OFD_ARP_DROP 0x36
+#define TP_MIB_TNL_DROP_0 0x44
+#define TP_MIB_OFD_VLN_DROP_0 0x58
+
+#define ULP_TX_INT_CAUSE 0x8dcc
+#define  PBL_BOUND_ERR_CH3 0x80000000U
+#define  PBL_BOUND_ERR_CH2 0x40000000U
+#define  PBL_BOUND_ERR_CH1 0x20000000U
+#define  PBL_BOUND_ERR_CH0 0x10000000U
+
+#define PM_RX_INT_CAUSE 0x8fdc
+#define  ZERO_E_CMD_ERROR     0x00400000U
+#define  PMRX_FRAMING_ERROR   0x003ffff0U
+#define  OCSPI_PAR_ERROR      0x00000008U
+#define  DB_OPTIONS_PAR_ERROR 0x00000004U
+#define  IESPI_PAR_ERROR      0x00000002U
+#define  E_PCMD_PAR_ERROR     0x00000001U
+
+#define PM_TX_INT_CAUSE 0x8ffc
+#define  PCMD_LEN_OVFL0     0x80000000U
+#define  PCMD_LEN_OVFL1     0x40000000U
+#define  PCMD_LEN_OVFL2     0x20000000U
+#define  ZERO_C_CMD_ERROR   0x10000000U
+#define  PMTX_FRAMING_ERROR 0x0ffffff0U
+#define  OESPI_PAR_ERROR    0x00000008U
+#define  ICSPI_PAR_ERROR    0x00000002U
+#define  C_PCMD_PAR_ERROR   0x00000001U
+
+#define MPS_PORT_STAT_TX_PORT_BYTES_L 0x400
+#define MPS_PORT_STAT_TX_PORT_BYTES_H 0x404
+#define MPS_PORT_STAT_TX_PORT_FRAMES_L 0x408
+#define MPS_PORT_STAT_TX_PORT_FRAMES_H 0x40c
+#define MPS_PORT_STAT_TX_PORT_BCAST_L 0x410
+#define MPS_PORT_STAT_TX_PORT_BCAST_H 0x414
+#define MPS_PORT_STAT_TX_PORT_MCAST_L 0x418
+#define MPS_PORT_STAT_TX_PORT_MCAST_H 0x41c
+#define MPS_PORT_STAT_TX_PORT_UCAST_L 0x420
+#define MPS_PORT_STAT_TX_PORT_UCAST_H 0x424
+#define MPS_PORT_STAT_TX_PORT_ERROR_L 0x428
+#define MPS_PORT_STAT_TX_PORT_ERROR_H 0x42c
+#define MPS_PORT_STAT_TX_PORT_64B_L 0x430
+#define MPS_PORT_STAT_TX_PORT_64B_H 0x434
+#define MPS_PORT_STAT_TX_PORT_65B_127B_L 0x438
+#define MPS_PORT_STAT_TX_PORT_65B_127B_H 0x43c
+#define MPS_PORT_STAT_TX_PORT_128B_255B_L 0x440
+#define MPS_PORT_STAT_TX_PORT_128B_255B_H 0x444
+#define MPS_PORT_STAT_TX_PORT_256B_511B_L 0x448
+#define MPS_PORT_STAT_TX_PORT_256B_511B_H 0x44c
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_L 0x450
+#define MPS_PORT_STAT_TX_PORT_512B_1023B_H 0x454
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_L 0x458
+#define MPS_PORT_STAT_TX_PORT_1024B_1518B_H 0x45c
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_L 0x460
+#define MPS_PORT_STAT_TX_PORT_1519B_MAX_H 0x464
+#define MPS_PORT_STAT_TX_PORT_DROP_L 0x468
+#define MPS_PORT_STAT_TX_PORT_DROP_H 0x46c
+#define MPS_PORT_STAT_TX_PORT_PAUSE_L 0x470
+#define MPS_PORT_STAT_TX_PORT_PAUSE_H 0x474
+#define MPS_PORT_STAT_TX_PORT_PPP0_L 0x478
+#define MPS_PORT_STAT_TX_PORT_PPP0_H 0x47c
+#define MPS_PORT_STAT_TX_PORT_PPP1_L 0x480
+#define MPS_PORT_STAT_TX_PORT_PPP1_H 0x484
+#define MPS_PORT_STAT_TX_PORT_PPP2_L 0x488
+#define MPS_PORT_STAT_TX_PORT_PPP2_H 0x48c
+#define MPS_PORT_STAT_TX_PORT_PPP3_L 0x490
+#define MPS_PORT_STAT_TX_PORT_PPP3_H 0x494
+#define MPS_PORT_STAT_TX_PORT_PPP4_L 0x498
+#define MPS_PORT_STAT_TX_PORT_PPP4_H 0x49c
+#define MPS_PORT_STAT_TX_PORT_PPP5_L 0x4a0
+#define MPS_PORT_STAT_TX_PORT_PPP5_H 0x4a4
+#define MPS_PORT_STAT_TX_PORT_PPP6_L 0x4a8
+#define MPS_PORT_STAT_TX_PORT_PPP6_H 0x4ac
+#define MPS_PORT_STAT_TX_PORT_PPP7_L 0x4b0
+#define MPS_PORT_STAT_TX_PORT_PPP7_H 0x4b4
+#define MPS_PORT_STAT_LB_PORT_BYTES_L 0x4c0
+#define MPS_PORT_STAT_LB_PORT_BYTES_H 0x4c4
+#define MPS_PORT_STAT_LB_PORT_FRAMES_L 0x4c8
+#define MPS_PORT_STAT_LB_PORT_FRAMES_H 0x4cc
+#define MPS_PORT_STAT_LB_PORT_BCAST_L 0x4d0
+#define MPS_PORT_STAT_LB_PORT_BCAST_H 0x4d4
+#define MPS_PORT_STAT_LB_PORT_MCAST_L 0x4d8
+#define MPS_PORT_STAT_LB_PORT_MCAST_H 0x4dc
+#define MPS_PORT_STAT_LB_PORT_UCAST_L 0x4e0
+#define MPS_PORT_STAT_LB_PORT_UCAST_H 0x4e4
+#define MPS_PORT_STAT_LB_PORT_ERROR_L 0x4e8
+#define MPS_PORT_STAT_LB_PORT_ERROR_H 0x4ec
+#define MPS_PORT_STAT_LB_PORT_64B_L 0x4f0
+#define MPS_PORT_STAT_LB_PORT_64B_H 0x4f4
+#define MPS_PORT_STAT_LB_PORT_65B_127B_L 0x4f8
+#define MPS_PORT_STAT_LB_PORT_65B_127B_H 0x4fc
+#define MPS_PORT_STAT_LB_PORT_128B_255B_L 0x500
+#define MPS_PORT_STAT_LB_PORT_128B_255B_H 0x504
+#define MPS_PORT_STAT_LB_PORT_256B_511B_L 0x508
+#define MPS_PORT_STAT_LB_PORT_256B_511B_H 0x50c
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_L 0x510
+#define MPS_PORT_STAT_LB_PORT_512B_1023B_H 0x514
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_L 0x518
+#define MPS_PORT_STAT_LB_PORT_1024B_1518B_H 0x51c
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_L 0x520
+#define MPS_PORT_STAT_LB_PORT_1519B_MAX_H 0x524
+#define MPS_PORT_STAT_LB_PORT_DROP_FRAMES 0x528
+#define MPS_PORT_STAT_RX_PORT_BYTES_L 0x540
+#define MPS_PORT_STAT_RX_PORT_BYTES_H 0x544
+#define MPS_PORT_STAT_RX_PORT_FRAMES_L 0x548
+#define MPS_PORT_STAT_RX_PORT_FRAMES_H 0x54c
+#define MPS_PORT_STAT_RX_PORT_BCAST_L 0x550
+#define MPS_PORT_STAT_RX_PORT_BCAST_H 0x554
+#define MPS_PORT_STAT_RX_PORT_MCAST_L 0x558
+#define MPS_PORT_STAT_RX_PORT_MCAST_H 0x55c
+#define MPS_PORT_STAT_RX_PORT_UCAST_L 0x560
+#define MPS_PORT_STAT_RX_PORT_UCAST_H 0x564
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_L 0x568
+#define MPS_PORT_STAT_RX_PORT_MTU_ERROR_H 0x56c
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_L 0x570
+#define MPS_PORT_STAT_RX_PORT_MTU_CRC_ERROR_H 0x574
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_L 0x578
+#define MPS_PORT_STAT_RX_PORT_CRC_ERROR_H 0x57c
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_L 0x580
+#define MPS_PORT_STAT_RX_PORT_LEN_ERROR_H 0x584
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_L 0x588
+#define MPS_PORT_STAT_RX_PORT_SYM_ERROR_H 0x58c
+#define MPS_PORT_STAT_RX_PORT_64B_L 0x590
+#define MPS_PORT_STAT_RX_PORT_64B_H 0x594
+#define MPS_PORT_STAT_RX_PORT_65B_127B_L 0x598
+#define MPS_PORT_STAT_RX_PORT_65B_127B_H 0x59c
+#define MPS_PORT_STAT_RX_PORT_128B_255B_L 0x5a0
+#define MPS_PORT_STAT_RX_PORT_128B_255B_H 0x5a4
+#define MPS_PORT_STAT_RX_PORT_256B_511B_L 0x5a8
+#define MPS_PORT_STAT_RX_PORT_256B_511B_H 0x5ac
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_L 0x5b0
+#define MPS_PORT_STAT_RX_PORT_512B_1023B_H 0x5b4
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_L 0x5b8
+#define MPS_PORT_STAT_RX_PORT_1024B_1518B_H 0x5bc
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_L 0x5c0
+#define MPS_PORT_STAT_RX_PORT_1519B_MAX_H 0x5c4
+#define MPS_PORT_STAT_RX_PORT_PAUSE_L 0x5c8
+#define MPS_PORT_STAT_RX_PORT_PAUSE_H 0x5cc
+#define MPS_PORT_STAT_RX_PORT_PPP0_L 0x5d0
+#define MPS_PORT_STAT_RX_PORT_PPP0_H 0x5d4
+#define MPS_PORT_STAT_RX_PORT_PPP1_L 0x5d8
+#define MPS_PORT_STAT_RX_PORT_PPP1_H 0x5dc
+#define MPS_PORT_STAT_RX_PORT_PPP2_L 0x5e0
+#define MPS_PORT_STAT_RX_PORT_PPP2_H 0x5e4
+#define MPS_PORT_STAT_RX_PORT_PPP3_L 0x5e8
+#define MPS_PORT_STAT_RX_PORT_PPP3_H 0x5ec
+#define MPS_PORT_STAT_RX_PORT_PPP4_L 0x5f0
+#define MPS_PORT_STAT_RX_PORT_PPP4_H 0x5f4
+#define MPS_PORT_STAT_RX_PORT_PPP5_L 0x5f8
+#define MPS_PORT_STAT_RX_PORT_PPP5_H 0x5fc
+#define MPS_PORT_STAT_RX_PORT_PPP6_L 0x600
+#define MPS_PORT_STAT_RX_PORT_PPP6_H 0x604
+#define MPS_PORT_STAT_RX_PORT_PPP7_L 0x608
+#define MPS_PORT_STAT_RX_PORT_PPP7_H 0x60c
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_L 0x610
+#define MPS_PORT_STAT_RX_PORT_LESS_64B_H 0x614
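
Each MPS counter above is a 64-bit value split across a low/high register pair 4 bytes apart; a hedged sketch of assembling one from two 32-bit reads (the driver itself may use a native 64-bit read helper):

static u64 example_read_stat(struct adapter *adap, int port, u32 lo_reg)
{
	/* e.g. example_read_stat(adap, 0, MPS_PORT_STAT_TX_PORT_BYTES_L) */
	u32 lo = t4_read_reg(adap, PORT_REG(port, lo_reg));
	u32 hi = t4_read_reg(adap, PORT_REG(port, lo_reg + 4));

	return ((u64)hi << 32) | lo;
}
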
+#define MPS_CMN_CTL 0x9000
+#define  NUMPORTS_MASK   0x00000003U
+#define  NUMPORTS_SHIFT  0
+#define  NUMPORTS_GET(x) (((x) & NUMPORTS_MASK) >> NUMPORTS_SHIFT)
+
+#define MPS_INT_CAUSE 0x9008
+#define  STATINT 0x00000020U
+#define  TXINT   0x00000010U
+#define  RXINT   0x00000008U
+#define  TRCINT  0x00000004U
+#define  CLSINT  0x00000002U
+#define  PLINT   0x00000001U
+
+#define MPS_TX_INT_CAUSE 0x9408
+#define  PORTERR    0x00010000U
+#define  FRMERR     0x00008000U
+#define  SECNTERR   0x00004000U
+#define  BUBBLE     0x00002000U
+#define  TXDESCFIFO 0x00001e00U
+#define  TXDATAFIFO 0x000001e0U
+#define  NCSIFIFO   0x00000010U
+#define  TPFIFO     0x0000000fU
+
+#define MPS_STAT_PERR_INT_CAUSE_SRAM 0x9614
+#define MPS_STAT_PERR_INT_CAUSE_TX_FIFO 0x9620
+#define MPS_STAT_PERR_INT_CAUSE_RX_FIFO 0x962c
+
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_L 0x9640
+#define MPS_STAT_RX_BG_0_MAC_DROP_FRAME_H 0x9644
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_L 0x9648
+#define MPS_STAT_RX_BG_1_MAC_DROP_FRAME_H 0x964c
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_L 0x9650
+#define MPS_STAT_RX_BG_2_MAC_DROP_FRAME_H 0x9654
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_L 0x9658
+#define MPS_STAT_RX_BG_3_MAC_DROP_FRAME_H 0x965c
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_L 0x9660
+#define MPS_STAT_RX_BG_0_LB_DROP_FRAME_H 0x9664
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_L 0x9668
+#define MPS_STAT_RX_BG_1_LB_DROP_FRAME_H 0x966c
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_L 0x9670
+#define MPS_STAT_RX_BG_2_LB_DROP_FRAME_H 0x9674
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_L 0x9678
+#define MPS_STAT_RX_BG_3_LB_DROP_FRAME_H 0x967c
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_L 0x9680
+#define MPS_STAT_RX_BG_0_MAC_TRUNC_FRAME_H 0x9684
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_L 0x9688
+#define MPS_STAT_RX_BG_1_MAC_TRUNC_FRAME_H 0x968c
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_L 0x9690
+#define MPS_STAT_RX_BG_2_MAC_TRUNC_FRAME_H 0x9694
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_L 0x9698
+#define MPS_STAT_RX_BG_3_MAC_TRUNC_FRAME_H 0x969c
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_L 0x96a0
+#define MPS_STAT_RX_BG_0_LB_TRUNC_FRAME_H 0x96a4
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_L 0x96a8
+#define MPS_STAT_RX_BG_1_LB_TRUNC_FRAME_H 0x96ac
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_L 0x96b0
+#define MPS_STAT_RX_BG_2_LB_TRUNC_FRAME_H 0x96b4
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_L 0x96b8
+#define MPS_STAT_RX_BG_3_LB_TRUNC_FRAME_H 0x96bc
+#define MPS_TRC_CFG 0x9800
+#define  TRCFIFOEMPTY       0x00000010U
+#define  TRCIGNOREDROPINPUT 0x00000008U
+#define  TRCKEEPDUPLICATES  0x00000004U
+#define  TRCEN              0x00000002U
+#define  TRCMULTIFILTER     0x00000001U
+
+#define MPS_TRC_RSS_CONTROL 0x9808
+#define  RSSCONTROL_MASK    0x00ff0000U
+#define  RSSCONTROL_SHIFT   16
+#define  RSSCONTROL(x)      ((x) << RSSCONTROL_SHIFT)
+#define  QUEUENUMBER_MASK   0x0000ffffU
+#define  QUEUENUMBER_SHIFT  0
+#define  QUEUENUMBER(x)     ((x) << QUEUENUMBER_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_A 0x9810
+#define  TFINVERTMATCH   0x01000000U
+#define  TFPKTTOOLARGE   0x00800000U
+#define  TFEN            0x00400000U
+#define  TFPORT_MASK     0x003c0000U
+#define  TFPORT_SHIFT    18
+#define  TFPORT(x)       ((x) << TFPORT_SHIFT)
+#define  TFPORT_GET(x)   (((x) & TFPORT_MASK) >> TFPORT_SHIFT)
+#define  TFDROP          0x00020000U
+#define  TFSOPEOPERR     0x00010000U
+#define  TFLENGTH_MASK   0x00001f00U
+#define  TFLENGTH_SHIFT  8
+#define  TFLENGTH(x)     ((x) << TFLENGTH_SHIFT)
+#define  TFLENGTH_GET(x) (((x) & TFLENGTH_MASK) >> TFLENGTH_SHIFT)
+#define  TFOFFSET_MASK   0x0000001fU
+#define  TFOFFSET_SHIFT  0
+#define  TFOFFSET(x)     ((x) << TFOFFSET_SHIFT)
+#define  TFOFFSET_GET(x) (((x) & TFOFFSET_MASK) >> TFOFFSET_SHIFT)
+
+#define MPS_TRC_FILTER_MATCH_CTL_B 0x9820
+#define  TFMINPKTSIZE_MASK   0x01ff0000U
+#define  TFMINPKTSIZE_SHIFT  16
+#define  TFMINPKTSIZE(x)     ((x) << TFMINPKTSIZE_SHIFT)
+#define  TFMINPKTSIZE_GET(x) (((x) & TFMINPKTSIZE_MASK) >> TFMINPKTSIZE_SHIFT)
+#define  TFCAPTUREMAX_MASK   0x00003fffU
+#define  TFCAPTUREMAX_SHIFT  0
+#define  TFCAPTUREMAX(x)     ((x) << TFCAPTUREMAX_SHIFT)
+#define  TFCAPTUREMAX_GET(x) (((x) & TFCAPTUREMAX_MASK) >> TFCAPTUREMAX_SHIFT)
+
+#define MPS_TRC_INT_CAUSE 0x985c
+#define  MISCPERR 0x00000100U
+#define  PKTFIFO  0x000000f0U
+#define  FILTMEM  0x0000000fU
+
+#define MPS_TRC_FILTER0_MATCH 0x9c00
+#define MPS_TRC_FILTER0_DONT_CARE 0x9c80
+#define MPS_TRC_FILTER1_MATCH 0x9d00
+#define MPS_CLS_INT_CAUSE 0xd028
+#define  PLERRENB  0x00000008U
+#define  HASHSRAM  0x00000004U
+#define  MATCHTCAM 0x00000002U
+#define  MATCHSRAM 0x00000001U
+
+#define MPS_RX_PERR_INT_CAUSE 0x11074
+
+#define CPL_INTR_CAUSE 0x19054
+#define  CIM_OP_MAP_PERR   0x00000020U
+#define  CIM_OVFL_ERROR    0x00000010U
+#define  TP_FRAMING_ERROR  0x00000008U
+#define  SGE_FRAMING_ERROR 0x00000004U
+#define  CIM_FRAMING_ERROR 0x00000002U
+#define  ZERO_SWITCH_ERROR 0x00000001U
+
+#define SMB_INT_CAUSE 0x19090
+#define  MSTTXFIFOPARINT 0x00200000U
+#define  MSTRXFIFOPARINT 0x00100000U
+#define  SLVFIFOPARINT   0x00080000U
+
+#define ULP_RX_INT_CAUSE 0x19158
+#define ULP_RX_ISCSI_TAGMASK 0x19164
+#define ULP_RX_ISCSI_PSZ 0x19168
+#define  HPZ3_MASK   0x0f000000U
+#define  HPZ3_SHIFT  24
+#define  HPZ3(x)     ((x) << HPZ3_SHIFT)
+#define  HPZ2_MASK   0x000f0000U
+#define  HPZ2_SHIFT  16
+#define  HPZ2(x)     ((x) << HPZ2_SHIFT)
+#define  HPZ1_MASK   0x00000f00U
+#define  HPZ1_SHIFT  8
+#define  HPZ1(x)     ((x) << HPZ1_SHIFT)
+#define  HPZ0_MASK   0x0000000fU
+#define  HPZ0_SHIFT  0
+#define  HPZ0(x)     ((x) << HPZ0_SHIFT)
+
+#define ULP_RX_TDDP_PSZ 0x19178
+
+#define SF_DATA 0x193f8
+#define SF_OP 0x193fc
+#define  BUSY          0x80000000U
+#define  SF_LOCK       0x00000010U
+#define  SF_CONT       0x00000008U
+#define  BYTECNT_MASK  0x00000006U
+#define  BYTECNT_SHIFT 1
+#define  BYTECNT(x)    ((x) << BYTECNT_SHIFT)
+#define  OP_WR         0x00000001U
+
+#define PL_PF_INT_CAUSE 0x3c0
+#define  PFSW  0x00000008U
+#define  PFSGE 0x00000004U
+#define  PFCIM 0x00000002U
+#define  PFMPS 0x00000001U
+
+#define PL_PF_INT_ENABLE 0x3c4
+#define PL_PF_CTL 0x3c8
+#define  SWINT 0x00000001U
+
+#define PL_WHOAMI 0x19400
+#define  SOURCEPF_MASK   0x00000700U
+#define  SOURCEPF_SHIFT  8
+#define  SOURCEPF(x)     ((x) << SOURCEPF_SHIFT)
+#define  SOURCEPF_GET(x) (((x) & SOURCEPF_MASK) >> SOURCEPF_SHIFT)
+#define  ISVF            0x00000080U
+#define  VFID_MASK       0x0000007fU
+#define  VFID_SHIFT      0
+#define  VFID(x)         ((x) << VFID_SHIFT)
+#define  VFID_GET(x)     (((x) & VFID_MASK) >> VFID_SHIFT)
+
+#define PL_INT_CAUSE 0x1940c
+#define  ULP_TX     0x08000000U
+#define  SGE        0x04000000U
+#define  HMA        0x02000000U
+#define  CPL_SWITCH 0x01000000U
+#define  ULP_RX     0x00800000U
+#define  PM_RX      0x00400000U
+#define  PM_TX      0x00200000U
+#define  MA         0x00100000U
+#define  TP         0x00080000U
+#define  LE         0x00040000U
+#define  EDC1       0x00020000U
+#define  EDC0       0x00010000U
+#define  MC         0x00008000U
+#define  PCIE       0x00004000U
+#define  PMU        0x00002000U
+#define  XGMAC_KR1  0x00001000U
+#define  XGMAC_KR0  0x00000800U
+#define  XGMAC1     0x00000400U
+#define  XGMAC0     0x00000200U
+#define  SMB        0x00000100U
+#define  SF         0x00000080U
+#define  PL         0x00000040U
+#define  NCSI       0x00000020U
+#define  MPS        0x00000010U
+#define  MI         0x00000008U
+#define  DBG        0x00000004U
+#define  I2CM       0x00000002U
+#define  CIM        0x00000001U
+
+#define PL_INT_MAP0 0x19414
+#define PL_RST 0x19428
+#define  PIORST     0x00000002U
+#define  PIORSTMODE 0x00000001U
+
+#define PL_PL_INT_CAUSE 0x19430
+#define  FATALPERR 0x00000010U
+#define  PERRVFID  0x00000001U
+
+#define PL_REV 0x1943c
+
+#define LE_DB_CONFIG 0x19c04
+#define  HASHEN 0x00100000U
+
+#define LE_DB_SERVER_INDEX 0x19c18
+#define LE_DB_ACT_CNT_IPV4 0x19c20
+#define LE_DB_ACT_CNT_IPV6 0x19c24
+
+#define LE_DB_INT_CAUSE 0x19c3c
+#define  REQQPARERR 0x00010000U
+#define  UNKNOWNCMD 0x00008000U
+#define  PARITYERR  0x00000040U
+#define  LIPMISS    0x00000020U
+#define  LIP0       0x00000010U
+
+#define LE_DB_TID_HASHBASE 0x19df8
+
+#define NCSI_INT_CAUSE 0x1a0d8
+#define  CIM_DM_PRTY_ERR 0x00000100U
+#define  MPS_DM_PRTY_ERR 0x00000080U
+#define  TXFIFO_PRTY_ERR 0x00000002U
+#define  RXFIFO_PRTY_ERR 0x00000001U
+
+#define XGMAC_PORT_CFG2 0x1018
+#define  PATEN   0x00040000U
+#define  MAGICEN 0x00020000U
+
+#define XGMAC_PORT_MAGIC_MACID_LO 0x1024
+#define XGMAC_PORT_MAGIC_MACID_HI 0x1028
+
+#define XGMAC_PORT_EPIO_DATA0 0x10c0
+#define XGMAC_PORT_EPIO_DATA1 0x10c4
+#define XGMAC_PORT_EPIO_DATA2 0x10c8
+#define XGMAC_PORT_EPIO_DATA3 0x10cc
+#define XGMAC_PORT_EPIO_OP 0x10d0
+#define  EPIOWR         0x00000100U
+#define  ADDRESS_MASK   0x000000ffU
+#define  ADDRESS_SHIFT  0
+#define  ADDRESS(x)     ((x) << ADDRESS_SHIFT)
+
+#define XGMAC_PORT_INT_CAUSE 0x10dc
+#endif /* __T4_REGS_H */
diff --git a/drivers/net/cxgb4/t4fw_api.h b/drivers/net/cxgb4/t4fw_api.h
new file mode 100644
index 0000000..3393d05
--- /dev/null
+++ b/drivers/net/cxgb4/t4fw_api.h
@@ -0,0 +1,1580 @@
+/*
+ * This file is part of the Chelsio T4 Ethernet driver for Linux.
+ *
+ * Copyright (c) 2009-2010 Chelsio Communications, Inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses.  You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ *     Redistribution and use in source and binary forms, with or
+ *     without modification, are permitted provided that the following
+ *     conditions are met:
+ *
+ *      - Redistributions of source code must retain the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer.
+ *
+ *      - Redistributions in binary form must reproduce the above
+ *        copyright notice, this list of conditions and the following
+ *        disclaimer in the documentation and/or other materials
+ *        provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _T4FW_INTERFACE_H_
+#define _T4FW_INTERFACE_H_
+
+#define FW_T4VF_SGE_BASE_ADDR      0x0000
+#define FW_T4VF_MPS_BASE_ADDR      0x0100
+#define FW_T4VF_PL_BASE_ADDR       0x0200
+#define FW_T4VF_MBDATA_BASE_ADDR   0x0240
+#define FW_T4VF_CIM_BASE_ADDR      0x0300
+
+enum fw_wr_opcodes {
+	FW_FILTER_WR                   = 0x02,
+	FW_ULPTX_WR                    = 0x04,
+	FW_TP_WR                       = 0x05,
+	FW_ETH_TX_PKT_WR               = 0x08,
+	FW_FLOWC_WR                    = 0x0a,
+	FW_OFLD_TX_DATA_WR             = 0x0b,
+	FW_CMD_WR                      = 0x10,
+	FW_ETH_TX_PKT_VM_WR            = 0x11,
+	FW_RI_RES_WR                   = 0x0c,
+	FW_RI_INIT_WR                  = 0x0d,
+	FW_RI_RDMA_WRITE_WR            = 0x14,
+	FW_RI_SEND_WR                  = 0x15,
+	FW_RI_RDMA_READ_WR             = 0x16,
+	FW_RI_RECV_WR                  = 0x17,
+	FW_RI_BIND_MW_WR               = 0x18,
+	FW_RI_FR_NSMR_WR               = 0x19,
+	FW_RI_INV_LSTAG_WR             = 0x1a,
+	FW_LASTC2E_WR                  = 0x40
+};
+
+struct fw_wr_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+#define FW_WR_OP(x)	 ((x) << 24)
+#define FW_WR_ATOMIC(x)	 ((x) << 23)
+#define FW_WR_FLUSH(x)   ((x) << 22)
+#define FW_WR_COMPL(x)   ((x) << 21)
+#define FW_WR_IMMDLEN(x) ((x) << 0)
+
+#define FW_WR_EQUIQ	(1U << 31)
+#define FW_WR_EQUEQ	(1U << 30)
+#define FW_WR_FLOWID(x)	((x) << 8)
+#define FW_WR_LEN16(x)	((x) << 0)
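
A minimal sketch of composing the two header words of a work request from these macros; the opcode and field values are arbitrary examples:

static inline void example_init_wr_hdr(struct fw_wr_hdr *wr, u32 flowid,
				       u32 len16)
{
	wr->hi = htonl(FW_WR_OP(FW_FLOWC_WR) | FW_WR_COMPL(1));
	wr->lo = htonl(FW_WR_FLOWID(flowid) | FW_WR_LEN16(len16));
}
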
+
+struct fw_ulptx_wr {
+	__be32 op_to_compl;
+	__be32 flowid_len16;
+	u64 cookie;
+};
+
+struct fw_tp_wr {
+	__be32 op_to_immdlen;
+	__be32 flowid_len16;
+	u64 cookie;
+};
+
+struct fw_eth_tx_pkt_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be64 r3;
+};
+
+enum fw_flowc_mnem {
+	FW_FLOWC_MNEM_PFNVFN,		/* PFN [15:8] VFN [7:0] */
+	FW_FLOWC_MNEM_CH,
+	FW_FLOWC_MNEM_PORT,
+	FW_FLOWC_MNEM_IQID,
+	FW_FLOWC_MNEM_SNDNXT,
+	FW_FLOWC_MNEM_RCVNXT,
+	FW_FLOWC_MNEM_SNDBUF,
+	FW_FLOWC_MNEM_MSS,
+};
+
+struct fw_flowc_mnemval {
+	u8 mnemonic;
+	u8 r4[3];
+	__be32 val;
+};
+
+struct fw_flowc_wr {
+	__be32 op_to_nparams;
+#define FW_FLOWC_WR_NPARAMS(x)	((x) << 0)
+	__be32 flowid_len16;
+	struct fw_flowc_mnemval mnemval[0];
+};
+
+struct fw_ofld_tx_data_wr {
+	__be32 op_to_immdlen;
+	__be32 flowid_len16;
+	__be32 plen;
+	__be32 tunnel_to_proxy;
+#define FW_OFLD_TX_DATA_WR_TUNNEL(x)	 ((x) << 19)
+#define FW_OFLD_TX_DATA_WR_SAVE(x)	 ((x) << 18)
+#define FW_OFLD_TX_DATA_WR_FLUSH(x)	 ((x) << 17)
+#define FW_OFLD_TX_DATA_WR_URGENT(x)	 ((x) << 16)
+#define FW_OFLD_TX_DATA_WR_MORE(x)	 ((x) << 15)
+#define FW_OFLD_TX_DATA_WR_SHOVE(x)	 ((x) << 14)
+#define FW_OFLD_TX_DATA_WR_ULPMODE(x)	 ((x) << 10)
+#define FW_OFLD_TX_DATA_WR_ULPSUBMODE(x) ((x) << 6)
+};
+
+struct fw_cmd_wr {
+	__be32 op_dma;
+#define FW_CMD_WR_DMA (1U << 17)
+	__be32 len16_pkd;
+	__be64 cookie_daddr;
+};
+
+struct fw_eth_tx_pkt_vm_wr {
+	__be32 op_immdlen;
+	__be32 equiq_to_len16;
+	__be32 r3[2];
+	u8 ethmacdst[6];
+	u8 ethmacsrc[6];
+	__be16 ethtype;
+	__be16 vlantci;
+};
+
+#define FW_CMD_MAX_TIMEOUT 3000
+
+enum fw_cmd_opcodes {
+	FW_LDST_CMD                    = 0x01,
+	FW_RESET_CMD                   = 0x03,
+	FW_HELLO_CMD                   = 0x04,
+	FW_BYE_CMD                     = 0x05,
+	FW_INITIALIZE_CMD              = 0x06,
+	FW_CAPS_CONFIG_CMD             = 0x07,
+	FW_PARAMS_CMD                  = 0x08,
+	FW_PFVF_CMD                    = 0x09,
+	FW_IQ_CMD                      = 0x10,
+	FW_EQ_MNGT_CMD                 = 0x11,
+	FW_EQ_ETH_CMD                  = 0x12,
+	FW_EQ_CTRL_CMD                 = 0x13,
+	FW_EQ_OFLD_CMD                 = 0x21,
+	FW_VI_CMD                      = 0x14,
+	FW_VI_MAC_CMD                  = 0x15,
+	FW_VI_RXMODE_CMD               = 0x16,
+	FW_VI_ENABLE_CMD               = 0x17,
+	FW_ACL_MAC_CMD                 = 0x18,
+	FW_ACL_VLAN_CMD                = 0x19,
+	FW_VI_STATS_CMD                = 0x1a,
+	FW_PORT_CMD                    = 0x1b,
+	FW_PORT_STATS_CMD              = 0x1c,
+	FW_PORT_LB_STATS_CMD           = 0x1d,
+	FW_PORT_TRACE_CMD              = 0x1e,
+	FW_PORT_TRACE_MMAP_CMD         = 0x1f,
+	FW_RSS_IND_TBL_CMD             = 0x20,
+	FW_RSS_GLB_CONFIG_CMD          = 0x22,
+	FW_RSS_VI_CONFIG_CMD           = 0x23,
+	FW_LASTC2E_CMD                 = 0x40,
+	FW_ERROR_CMD                   = 0x80,
+	FW_DEBUG_CMD                   = 0x81,
+};
+
+enum fw_cmd_cap {
+	FW_CMD_CAP_PF                  = 0x01,
+	FW_CMD_CAP_DMAQ                = 0x02,
+	FW_CMD_CAP_PORT                = 0x04,
+	FW_CMD_CAP_PORTPROMISC         = 0x08,
+	FW_CMD_CAP_PORTSTATS           = 0x10,
+	FW_CMD_CAP_VF                  = 0x80,
+};
+
+/*
+ * Generic command header flit0
+ */
+struct fw_cmd_hdr {
+	__be32 hi;
+	__be32 lo;
+};
+
+#define FW_CMD_OP(x)		((x) << 24)
+#define FW_CMD_OP_GET(x)        (((x) >> 24) & 0xff)
+#define FW_CMD_REQUEST          (1U << 23)
+#define FW_CMD_READ		(1U << 22)
+#define FW_CMD_WRITE		(1U << 21)
+#define FW_CMD_EXEC		(1U << 20)
+#define FW_CMD_RAMASK(x)	((x) << 20)
+#define FW_CMD_RETVAL(x)	((x) << 8)
+#define FW_CMD_RETVAL_GET(x)	(((x) >> 8) & 0xff)
+#define FW_CMD_LEN16(x)         ((x) << 0)
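+
+/*
+ * Illustrative sketch: a read request such as FW_PARAMS_CMD would
+ * typically fill flit0 with the macros above ("hdr" is a placeholder
+ * struct fw_cmd_hdr pointer, "cmd" the full command structure):
+ *
+ *	hdr->hi = htonl(FW_CMD_OP(FW_PARAMS_CMD) | FW_CMD_REQUEST |
+ *			FW_CMD_READ);
+ *	hdr->lo = htonl(FW_CMD_LEN16(sizeof(cmd) / 16));
+ *
+ * A reply's status is then FW_CMD_RETVAL_GET(ntohl(hdr->lo)).
+ */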
+
+enum fw_ldst_addrspc {
+	FW_LDST_ADDRSPC_FIRMWARE  = 0x0001,
+	FW_LDST_ADDRSPC_SGE_EGRC  = 0x0008,
+	FW_LDST_ADDRSPC_SGE_INGC  = 0x0009,
+	FW_LDST_ADDRSPC_SGE_FLMC  = 0x000a,
+	FW_LDST_ADDRSPC_SGE_CONMC = 0x000b,
+	FW_LDST_ADDRSPC_TP_PIO    = 0x0010,
+	FW_LDST_ADDRSPC_TP_TM_PIO = 0x0011,
+	FW_LDST_ADDRSPC_TP_MIB    = 0x0012,
+	FW_LDST_ADDRSPC_MDIO      = 0x0018,
+	FW_LDST_ADDRSPC_MPS       = 0x0020,
+	FW_LDST_ADDRSPC_FUNC      = 0x0028
+};
+
+enum fw_ldst_mps_fid {
+	FW_LDST_MPS_ATRB,
+	FW_LDST_MPS_RPLC
+};
+
+enum fw_ldst_func_access_ctl {
+	FW_LDST_FUNC_ACC_CTL_VIID,
+	FW_LDST_FUNC_ACC_CTL_FID
+};
+
+enum fw_ldst_func_mod_index {
+	FW_LDST_FUNC_MPS
+};
+
+struct fw_ldst_cmd {
+	__be32 op_to_addrspace;
+#define FW_LDST_CMD_ADDRSPACE(x) ((x) << 0)
+	__be32 cycles_to_len16;
+	union fw_ldst {
+		struct fw_ldst_addrval {
+			__be32 addr;
+			__be32 val;
+		} addrval;
+		struct fw_ldst_idctxt {
+			__be32 physid;
+			__be32 msg_pkd;
+			__be32 ctxt_data7;
+			__be32 ctxt_data6;
+			__be32 ctxt_data5;
+			__be32 ctxt_data4;
+			__be32 ctxt_data3;
+			__be32 ctxt_data2;
+			__be32 ctxt_data1;
+			__be32 ctxt_data0;
+		} idctxt;
+		struct fw_ldst_mdio {
+			__be16 paddr_mmd;
+			__be16 raddr;
+			__be16 vctl;
+			__be16 rval;
+		} mdio;
+		struct fw_ldst_mps {
+			__be16 fid_ctl;
+			__be16 rplcpf_pkd;
+			__be32 rplc127_96;
+			__be32 rplc95_64;
+			__be32 rplc63_32;
+			__be32 rplc31_0;
+			__be32 atrb;
+			__be16 vlan[16];
+		} mps;
+		struct fw_ldst_func {
+			u8 access_ctl;
+			u8 mod_index;
+			__be16 ctl_id;
+			__be32 offset;
+			__be64 data0;
+			__be64 data1;
+		} func;
+	} u;
+};
+
+#define FW_LDST_CMD_MSG(x)	((x) << 31)
+#define FW_LDST_CMD_PADDR(x)	((x) << 8)
+#define FW_LDST_CMD_MMD(x)	((x) << 0)
+#define FW_LDST_CMD_FID(x)	((x) << 15)
+#define FW_LDST_CMD_CTL(x)	((x) << 0)
+#define FW_LDST_CMD_RPLCPF(x)	((x) << 0)
+
+struct fw_reset_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 val;
+	__be32 r3;
+};
+
+struct fw_hello_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 err_to_mbasyncnot;
+#define FW_HELLO_CMD_ERR	    (1U << 31)
+#define FW_HELLO_CMD_INIT	    (1U << 30)
+#define FW_HELLO_CMD_MASTERDIS(x)   ((x) << 29)
+#define FW_HELLO_CMD_MASTERFORCE(x) ((x) << 28)
+#define FW_HELLO_CMD_MBMASTER(x)    ((x) << 24)
+#define FW_HELLO_CMD_MBASYNCNOT(x)  ((x) << 20)
+	__be32 fwrev;
+};
+
+struct fw_bye_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be64 r3;
+};
+
+struct fw_initialize_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be64 r3;
+};
+
+enum fw_caps_config_hm {
+	FW_CAPS_CONFIG_HM_PCIE		= 0x00000001,
+	FW_CAPS_CONFIG_HM_PL		= 0x00000002,
+	FW_CAPS_CONFIG_HM_SGE		= 0x00000004,
+	FW_CAPS_CONFIG_HM_CIM		= 0x00000008,
+	FW_CAPS_CONFIG_HM_ULPTX		= 0x00000010,
+	FW_CAPS_CONFIG_HM_TP		= 0x00000020,
+	FW_CAPS_CONFIG_HM_ULPRX		= 0x00000040,
+	FW_CAPS_CONFIG_HM_PMRX		= 0x00000080,
+	FW_CAPS_CONFIG_HM_PMTX		= 0x00000100,
+	FW_CAPS_CONFIG_HM_MC		= 0x00000200,
+	FW_CAPS_CONFIG_HM_LE		= 0x00000400,
+	FW_CAPS_CONFIG_HM_MPS		= 0x00000800,
+	FW_CAPS_CONFIG_HM_XGMAC		= 0x00001000,
+	FW_CAPS_CONFIG_HM_CPLSWITCH	= 0x00002000,
+	FW_CAPS_CONFIG_HM_T4DBG		= 0x00004000,
+	FW_CAPS_CONFIG_HM_MI		= 0x00008000,
+	FW_CAPS_CONFIG_HM_I2CM		= 0x00010000,
+	FW_CAPS_CONFIG_HM_NCSI		= 0x00020000,
+	FW_CAPS_CONFIG_HM_SMB		= 0x00040000,
+	FW_CAPS_CONFIG_HM_MA		= 0x00080000,
+	FW_CAPS_CONFIG_HM_EDRAM		= 0x00100000,
+	FW_CAPS_CONFIG_HM_PMU		= 0x00200000,
+	FW_CAPS_CONFIG_HM_UART		= 0x00400000,
+	FW_CAPS_CONFIG_HM_SF		= 0x00800000,
+};
+
+enum fw_caps_config_nbm {
+	FW_CAPS_CONFIG_NBM_IPMI		= 0x00000001,
+	FW_CAPS_CONFIG_NBM_NCSI		= 0x00000002,
+};
+
+enum fw_caps_config_link {
+	FW_CAPS_CONFIG_LINK_PPP		= 0x00000001,
+	FW_CAPS_CONFIG_LINK_QFC		= 0x00000002,
+	FW_CAPS_CONFIG_LINK_DCBX	= 0x00000004,
+};
+
+enum fw_caps_config_switch {
+	FW_CAPS_CONFIG_SWITCH_INGRESS	= 0x00000001,
+	FW_CAPS_CONFIG_SWITCH_EGRESS	= 0x00000002,
+};
+
+enum fw_caps_config_nic {
+	FW_CAPS_CONFIG_NIC		= 0x00000001,
+	FW_CAPS_CONFIG_NIC_VM		= 0x00000002,
+};
+
+enum fw_caps_config_ofld {
+	FW_CAPS_CONFIG_OFLD		= 0x00000001,
+};
+
+enum fw_caps_config_rdma {
+	FW_CAPS_CONFIG_RDMA_RDDP	= 0x00000001,
+	FW_CAPS_CONFIG_RDMA_RDMAC	= 0x00000002,
+};
+
+enum fw_caps_config_iscsi {
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_PDU = 0x00000001,
+	FW_CAPS_CONFIG_ISCSI_TARGET_PDU = 0x00000002,
+	FW_CAPS_CONFIG_ISCSI_INITIATOR_CNXOFLD = 0x00000004,
+	FW_CAPS_CONFIG_ISCSI_TARGET_CNXOFLD = 0x00000008,
+};
+
+enum fw_caps_config_fcoe {
+	FW_CAPS_CONFIG_FCOE_INITIATOR	= 0x00000001,
+	FW_CAPS_CONFIG_FCOE_TARGET	= 0x00000002,
+};
+
+struct fw_caps_config_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	__be32 r2;
+	__be32 hwmbitmap;
+	__be16 nbmcaps;
+	__be16 linkcaps;
+	__be16 switchcaps;
+	__be16 r3;
+	__be16 niccaps;
+	__be16 ofldcaps;
+	__be16 rdmacaps;
+	__be16 r4;
+	__be16 iscsicaps;
+	__be16 fcoecaps;
+	__be32 r5;
+	__be64 r6;
+};
+
+/*
+ * params command mnemonics
+ */
+enum fw_params_mnem {
+	FW_PARAMS_MNEM_DEV		= 1,	/* device params */
+	FW_PARAMS_MNEM_PFVF		= 2,	/* function params */
+	FW_PARAMS_MNEM_REG		= 3,	/* limited register access */
+	FW_PARAMS_MNEM_DMAQ		= 4,	/* dma queue params */
+	FW_PARAMS_MNEM_LAST
+};
+
+/*
+ * device parameters
+ */
+enum fw_params_param_dev {
+	FW_PARAMS_PARAM_DEV_CCLK	= 0x00, /* chip core clock in khz */
+	FW_PARAMS_PARAM_DEV_PORTVEC	= 0x01, /* the port vector */
+	FW_PARAMS_PARAM_DEV_NTID	= 0x02, /* reads the number of TIDs
+						 * allocated by the device's
+						 * Lookup Engine
+						 */
+	FW_PARAMS_PARAM_DEV_FLOWC_BUFFIFO_SZ = 0x03,
+	FW_PARAMS_PARAM_DEV_INTVER_NIC	= 0x04,
+	FW_PARAMS_PARAM_DEV_INTVER_VNIC = 0x05,
+	FW_PARAMS_PARAM_DEV_INTVER_OFLD = 0x06,
+	FW_PARAMS_PARAM_DEV_INTVER_RI	= 0x07,
+	FW_PARAMS_PARAM_DEV_INTVER_ISCSIPDU = 0x08,
+	FW_PARAMS_PARAM_DEV_INTVER_ISCSI = 0x09,
+	FW_PARAMS_PARAM_DEV_INTVER_FCOE = 0x0A
+};
+
+/*
+ * physical and virtual function parameters
+ */
+enum fw_params_param_pfvf {
+	FW_PARAMS_PARAM_PFVF_RWXCAPS	= 0x00,
+	FW_PARAMS_PARAM_PFVF_ROUTE_START = 0x01,
+	FW_PARAMS_PARAM_PFVF_ROUTE_END = 0x02,
+	FW_PARAMS_PARAM_PFVF_CLIP_START = 0x03,
+	FW_PARAMS_PARAM_PFVF_CLIP_END = 0x04,
+	FW_PARAMS_PARAM_PFVF_FILTER_START = 0x05,
+	FW_PARAMS_PARAM_PFVF_FILTER_END = 0x06,
+	FW_PARAMS_PARAM_PFVF_SERVER_START = 0x07,
+	FW_PARAMS_PARAM_PFVF_SERVER_END = 0x08,
+	FW_PARAMS_PARAM_PFVF_TDDP_START = 0x09,
+	FW_PARAMS_PARAM_PFVF_TDDP_END = 0x0A,
+	FW_PARAMS_PARAM_PFVF_ISCSI_START = 0x0B,
+	FW_PARAMS_PARAM_PFVF_ISCSI_END = 0x0C,
+	FW_PARAMS_PARAM_PFVF_STAG_START = 0x0D,
+	FW_PARAMS_PARAM_PFVF_STAG_END = 0x0E,
+	FW_PARAMS_PARAM_PFVF_RQ_START = 0x1F,
+	FW_PARAMS_PARAM_PFVF_RQ_END	= 0x10,
+	FW_PARAMS_PARAM_PFVF_PBL_START = 0x11,
+	FW_PARAMS_PARAM_PFVF_PBL_END	= 0x12,
+	FW_PARAMS_PARAM_PFVF_L2T_START = 0x13,
+	FW_PARAMS_PARAM_PFVF_L2T_END = 0x14,
+	FW_PARAMS_PARAM_PFVF_SCHEDCLASS_ETH = 0x20,
+};
+
+/*
+ * dma queue parameters
+ */
+enum fw_params_param_dmaq {
+	FW_PARAMS_PARAM_DMAQ_IQ_DCAEN_DCACPU = 0x00,
+	FW_PARAMS_PARAM_DMAQ_IQ_INTCNTTHRESH = 0x01,
+	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_MNGT = 0x10,
+	FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL = 0x11,
+	FW_PARAMS_PARAM_DMAQ_EQ_SCHEDCLASS_ETH = 0x12,
+};
+
+#define FW_PARAMS_MNEM(x)      ((x) << 24)
+#define FW_PARAMS_PARAM_X(x)   ((x) << 16)
+#define FW_PARAMS_PARAM_Y(x)   ((x) << 8)
+#define FW_PARAMS_PARAM_Z(x)   ((x) << 0)
+#define FW_PARAMS_PARAM_XYZ(x) ((x) << 0)
+#define FW_PARAMS_PARAM_YZ(x)  ((x) << 0)
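+
+/*
+ * Illustrative sketch: a device-parameter query composes its mnemonic
+ * word from the macros above, e.g. to read the port vector:
+ *
+ *	param = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
+ *		FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
+ */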
+
+struct fw_params_cmd {
+	__be32 op_to_vfn;
+	__be32 retval_len16;
+	struct fw_params_param {
+		__be32 mnem;
+		__be32 val;
+	} param[7];
+};
+
+#define FW_PARAMS_CMD_PFN(x) ((x) << 8)
+#define FW_PARAMS_CMD_VFN(x) ((x) << 0)
+
+struct fw_pfvf_cmd {
+	__be32 op_to_vfn;
+	__be32 retval_len16;
+	__be32 niqflint_niq;
+	__be32 cmask_to_neq;
+	__be32 tc_to_nexactf;
+	__be32 r_caps_to_nethctrl;
+	__be16 nricq;
+	__be16 nriqp;
+	__be32 r4;
+};
+
+#define FW_PFVF_CMD_PFN(x) ((x) << 8)
+#define FW_PFVF_CMD_VFN(x) ((x) << 0)
+
+#define FW_PFVF_CMD_NIQFLINT(x) ((x) << 20)
+#define FW_PFVF_CMD_NIQFLINT_GET(x) (((x) >> 20) & 0xfff)
+
+#define FW_PFVF_CMD_NIQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NIQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_CMASK(x) ((x) << 24)
+#define FW_PFVF_CMD_CMASK_GET(x) (((x) >> 24) & 0xf)
+
+#define FW_PFVF_CMD_PMASK(x) ((x) << 20)
+#define FW_PFVF_CMD_PMASK_GET(x) (((x) >> 20) & 0xf)
+
+#define FW_PFVF_CMD_NEQ(x) ((x) << 0)
+#define FW_PFVF_CMD_NEQ_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_PFVF_CMD_TC(x) ((x) << 24)
+#define FW_PFVF_CMD_TC_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_NVI(x) ((x) << 16)
+#define FW_PFVF_CMD_NVI_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NEXACTF(x) ((x) << 0)
+#define FW_PFVF_CMD_NEXACTF_GET(x) (((x) >> 0) & 0xffff)
+
+#define FW_PFVF_CMD_R_CAPS(x) ((x) << 24)
+#define FW_PFVF_CMD_R_CAPS_GET(x) (((x) >> 24) & 0xff)
+
+#define FW_PFVF_CMD_WX_CAPS(x) ((x) << 16)
+#define FW_PFVF_CMD_WX_CAPS_GET(x) (((x) >> 16) & 0xff)
+
+#define FW_PFVF_CMD_NETHCTRL(x) ((x) << 0)
+#define FW_PFVF_CMD_NETHCTRL_GET(x) (((x) >> 0) & 0xffff)
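+
+/*
+ * Illustrative decode: given a firmware reply "rpl" (a placeholder
+ * struct fw_pfvf_cmd pointer), the resource counts are recovered with
+ * the _GET variants above, e.g.
+ *
+ *	niq = FW_PFVF_CMD_NIQ_GET(ntohl(rpl->niqflint_niq));
+ *	neq = FW_PFVF_CMD_NEQ_GET(ntohl(rpl->cmask_to_neq));
+ */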
+
+enum fw_iq_type {
+	FW_IQ_TYPE_FL_INT_CAP,
+	FW_IQ_TYPE_NO_FL_INT_CAP
+};
+
+struct fw_iq_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be16 physiqid;
+	__be16 iqid;
+	__be16 fl0id;
+	__be16 fl1id;
+	__be32 type_to_iqandstindex;
+	__be16 iqdroprss_to_iqesize;
+	__be16 iqsize;
+	__be64 iqaddr;
+	__be32 iqns_to_fl0congen;
+	__be16 fl0dcaen_to_fl0cidxfthresh;
+	__be16 fl0size;
+	__be64 fl0addr;
+	__be32 fl1cngchmap_to_fl1congen;
+	__be16 fl1dcaen_to_fl1cidxfthresh;
+	__be16 fl1size;
+	__be64 fl1addr;
+};
+
+#define FW_IQ_CMD_PFN(x) ((x) << 8)
+#define FW_IQ_CMD_VFN(x) ((x) << 0)
+
+#define FW_IQ_CMD_ALLOC (1U << 31)
+#define FW_IQ_CMD_FREE (1U << 30)
+#define FW_IQ_CMD_MODIFY (1U << 29)
+#define FW_IQ_CMD_IQSTART(x) ((x) << 28)
+#define FW_IQ_CMD_IQSTOP(x) ((x) << 27)
+
+#define FW_IQ_CMD_TYPE(x) ((x) << 29)
+#define FW_IQ_CMD_IQASYNCH(x) ((x) << 28)
+#define FW_IQ_CMD_VIID(x) ((x) << 16)
+#define FW_IQ_CMD_IQANDST(x) ((x) << 15)
+#define FW_IQ_CMD_IQANUS(x) ((x) << 14)
+#define FW_IQ_CMD_IQANUD(x) ((x) << 12)
+#define FW_IQ_CMD_IQANDSTINDEX(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQDROPRSS (1U << 15)
+#define FW_IQ_CMD_IQGTSMODE (1U << 14)
+#define FW_IQ_CMD_IQPCIECH(x) ((x) << 12)
+#define FW_IQ_CMD_IQDCAEN(x) ((x) << 11)
+#define FW_IQ_CMD_IQDCACPU(x) ((x) << 6)
+#define FW_IQ_CMD_IQINTCNTTHRESH(x) ((x) << 4)
+#define FW_IQ_CMD_IQO (1U << 3)
+#define FW_IQ_CMD_IQCPRIO(x) ((x) << 2)
+#define FW_IQ_CMD_IQESIZE(x) ((x) << 0)
+
+#define FW_IQ_CMD_IQNS(x) ((x) << 31)
+#define FW_IQ_CMD_IQRO(x) ((x) << 30)
+#define FW_IQ_CMD_IQFLINTIQHSEN(x) ((x) << 28)
+#define FW_IQ_CMD_IQFLINTCONGEN(x) ((x) << 27)
+#define FW_IQ_CMD_IQFLINTISCSIC(x) ((x) << 26)
+#define FW_IQ_CMD_FL0CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL0CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL0DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL0DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL0CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL0ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL0STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL0STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL0FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL0HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL0PADEN (1U << 2)
+#define FW_IQ_CMD_FL0PACKEN (1U << 1)
+#define FW_IQ_CMD_FL0CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL0DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL0DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL0FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL0FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL0CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL0CIDXFTHRESH(x) ((x) << 0)
+
+#define FW_IQ_CMD_FL1CNGCHMAP(x) ((x) << 20)
+#define FW_IQ_CMD_FL1CACHELOCK(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DBP(x) ((x) << 14)
+#define FW_IQ_CMD_FL1DATANS(x) ((x) << 13)
+#define FW_IQ_CMD_FL1DATARO(x) ((x) << 12)
+#define FW_IQ_CMD_FL1CONGCIF(x) ((x) << 11)
+#define FW_IQ_CMD_FL1ONCHIP(x) ((x) << 10)
+#define FW_IQ_CMD_FL1STATUSPGNS(x) ((x) << 9)
+#define FW_IQ_CMD_FL1STATUSPGRO(x) ((x) << 8)
+#define FW_IQ_CMD_FL1FETCHNS(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FETCHRO(x) ((x) << 6)
+#define FW_IQ_CMD_FL1HOSTFCMODE(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CPRIO(x) ((x) << 3)
+#define FW_IQ_CMD_FL1PADEN (1U << 2)
+#define FW_IQ_CMD_FL1PACKEN (1U << 1)
+#define FW_IQ_CMD_FL1CONGEN (1U << 0)
+
+#define FW_IQ_CMD_FL1DCAEN(x) ((x) << 15)
+#define FW_IQ_CMD_FL1DCACPU(x) ((x) << 10)
+#define FW_IQ_CMD_FL1FBMIN(x) ((x) << 7)
+#define FW_IQ_CMD_FL1FBMAX(x) ((x) << 4)
+#define FW_IQ_CMD_FL1CIDXFTHRESHO (1U << 3)
+#define FW_IQ_CMD_FL1CIDXFTHRESH(x) ((x) << 0)
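+
+/*
+ * Illustrative sketch: allocating and starting an ingress queue
+ * typically combines the generic command header macros with the
+ * ALLOC and IQSTART flags above ("c", "pf" and "vf" are
+ * placeholders):
+ *
+ *	c.op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) | FW_CMD_REQUEST |
+ *			    FW_CMD_WRITE | FW_CMD_EXEC |
+ *			    FW_IQ_CMD_PFN(pf) | FW_IQ_CMD_VFN(vf));
+ *	c.alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC | FW_IQ_CMD_IQSTART(1) |
+ *				 FW_CMD_LEN16(sizeof(c) / 16));
+ */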
+
+struct fw_eq_eth_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 eqid_pkd;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+	__be32 viid_pkd;
+	__be32 r8_lo;
+	__be64 r9;
+};
+
+#define FW_EQ_ETH_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_ETH_CMD_VFN(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_ALLOC (1U << 31)
+#define FW_EQ_ETH_CMD_FREE (1U << 30)
+#define FW_EQ_ETH_CMD_MODIFY (1U << 29)
+#define FW_EQ_ETH_CMD_EQSTART (1U << 28)
+#define FW_EQ_ETH_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_ETH_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_ETH_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_ETH_CMD_PHYSEQID(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_ETH_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_ETH_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_ETH_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_ETH_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_ETH_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_ETH_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_ETH_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_ETH_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_ETH_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_ETH_CMD_EQSIZE(x) ((x) << 0)
+
+#define FW_EQ_ETH_CMD_VIID(x) ((x) << 16)
+
+struct fw_eq_ctrl_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 cmpliqid_eqid;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+};
+
+#define FW_EQ_CTRL_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_CTRL_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_ALLOC (1U << 31)
+#define FW_EQ_CTRL_CMD_FREE (1U << 30)
+#define FW_EQ_CTRL_CMD_MODIFY (1U << 29)
+#define FW_EQ_CTRL_CMD_EQSTART (1U << 28)
+#define FW_EQ_CTRL_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_CTRL_CMD_CMPLIQID(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_CTRL_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_CTRL_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_CTRL_CMD_FETCHSZM (1U << 26)
+#define FW_EQ_CTRL_CMD_STATUSPGNS (1U << 25)
+#define FW_EQ_CTRL_CMD_STATUSPGRO (1U << 24)
+#define FW_EQ_CTRL_CMD_FETCHNS (1U << 23)
+#define FW_EQ_CTRL_CMD_FETCHRO (1U << 22)
+#define FW_EQ_CTRL_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_CTRL_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_CTRL_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_CTRL_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_CTRL_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_CTRL_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_CTRL_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_CTRL_CMD_EQSIZE(x) ((x) << 0)
+
+struct fw_eq_ofld_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be32 eqid_pkd;
+	__be32 physeqid_pkd;
+	__be32 fetchszm_to_iqid;
+	__be32 dcaen_to_eqsize;
+	__be64 eqaddr;
+};
+
+#define FW_EQ_OFLD_CMD_PFN(x) ((x) << 8)
+#define FW_EQ_OFLD_CMD_VFN(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_ALLOC (1U << 31)
+#define FW_EQ_OFLD_CMD_FREE (1U << 30)
+#define FW_EQ_OFLD_CMD_MODIFY (1U << 29)
+#define FW_EQ_OFLD_CMD_EQSTART (1U << 28)
+#define FW_EQ_OFLD_CMD_EQSTOP (1U << 27)
+
+#define FW_EQ_OFLD_CMD_EQID(x) ((x) << 0)
+#define FW_EQ_OFLD_CMD_EQID_GET(x) (((x) >> 0) & 0xfffff)
+#define FW_EQ_OFLD_CMD_PHYSEQID_GET(x) (((x) >> 0) & 0xfffff)
+
+#define FW_EQ_OFLD_CMD_FETCHSZM(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_STATUSPGNS(x) ((x) << 25)
+#define FW_EQ_OFLD_CMD_STATUSPGRO(x) ((x) << 24)
+#define FW_EQ_OFLD_CMD_FETCHNS(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FETCHRO(x) ((x) << 22)
+#define FW_EQ_OFLD_CMD_HOSTFCMODE(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CPRIO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_ONCHIP(x) ((x) << 18)
+#define FW_EQ_OFLD_CMD_PCIECHN(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_IQID(x) ((x) << 0)
+
+#define FW_EQ_OFLD_CMD_DCAEN(x) ((x) << 31)
+#define FW_EQ_OFLD_CMD_DCACPU(x) ((x) << 26)
+#define FW_EQ_OFLD_CMD_FBMIN(x) ((x) << 23)
+#define FW_EQ_OFLD_CMD_FBMAX(x) ((x) << 20)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESHO(x) ((x) << 19)
+#define FW_EQ_OFLD_CMD_CIDXFTHRESH(x) ((x) << 16)
+#define FW_EQ_OFLD_CMD_EQSIZE(x) ((x) << 0)
+
+/*
+ * Macros for VIID parsing:
+ * VIID - [10:8] PFN, [7] VI Valid, [6:0] VI number
+ */
+#define FW_VIID_PFN_GET(x) (((x) >> 8) & 0x7)
+#define FW_VIID_VIVLD_GET(x) (((x) >> 7) & 0x1)
+#define FW_VIID_VIN_GET(x) (((x) >> 0) & 0x7F)
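+
+/*
+ * Worked example (illustrative): for viid = 0x285 the macros above
+ * yield PFN 2 (bits 10:8), VI Valid 1 (bit 7) and VI number 5
+ * (bits 6:0).
+ */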
+
+struct fw_vi_cmd {
+	__be32 op_to_vfn;
+	__be32 alloc_to_len16;
+	__be16 viid_pkd;
+	u8 mac[6];
+	u8 portid_pkd;
+	u8 nmac;
+	u8 nmac0[6];
+	__be16 rsssize_pkd;
+	u8 nmac1[6];
+	__be16 r7;
+	u8 nmac2[6];
+	__be16 r8;
+	u8 nmac3[6];
+	__be64 r9;
+	__be64 r10;
+};
+
+#define FW_VI_CMD_PFN(x) ((x) << 8)
+#define FW_VI_CMD_VFN(x) ((x) << 0)
+#define FW_VI_CMD_ALLOC (1U << 31)
+#define FW_VI_CMD_FREE (1U << 30)
+#define FW_VI_CMD_VIID(x) ((x) << 0)
+#define FW_VI_CMD_PORTID(x) ((x) << 4)
+#define FW_VI_CMD_RSSSIZE_GET(x) (((x) >> 0) & 0x7ff)
+
+/* Special VI_MAC command index ids */
+#define FW_VI_MAC_ADD_MAC		0x3FF
+#define FW_VI_MAC_ADD_PERSIST_MAC	0x3FE
+#define FW_VI_MAC_MAC_BASED_FREE	0x3FD
+
+enum fw_vi_mac_smac {
+	FW_VI_MAC_MPS_TCAM_ENTRY,
+	FW_VI_MAC_MPS_TCAM_ONLY,
+	FW_VI_MAC_SMT_ONLY,
+	FW_VI_MAC_SMT_AND_MPSTCAM
+};
+
+enum fw_vi_mac_result {
+	FW_VI_MAC_R_SUCCESS,
+	FW_VI_MAC_R_F_NONEXISTENT_NOMEM,
+	FW_VI_MAC_R_SMAC_FAIL,
+	FW_VI_MAC_R_F_ACL_CHECK
+};
+
+struct fw_vi_mac_cmd {
+	__be32 op_to_viid;
+	__be32 freemacs_to_len16;
+	union fw_vi_mac {
+		struct fw_vi_mac_exact {
+			__be16 valid_to_idx;
+			u8 macaddr[6];
+		} exact[7];
+		struct fw_vi_mac_hash {
+			__be64 hashvec;
+		} hash;
+	} u;
+};
+
+#define FW_VI_MAC_CMD_VIID(x) ((x) << 0)
+#define FW_VI_MAC_CMD_FREEMACS(x) ((x) << 31)
+#define FW_VI_MAC_CMD_HASHVECEN (1U << 23)
+#define FW_VI_MAC_CMD_HASHUNIEN(x) ((x) << 22)
+#define FW_VI_MAC_CMD_VALID (1U << 15)
+#define FW_VI_MAC_CMD_PRIO(x) ((x) << 12)
+#define FW_VI_MAC_CMD_SMAC_RESULT(x) ((x) << 10)
+#define FW_VI_MAC_CMD_SMAC_RESULT_GET(x) (((x) >> 10) & 0x3)
+#define FW_VI_MAC_CMD_IDX(x) ((x) << 0)
+#define FW_VI_MAC_CMD_IDX_GET(x) (((x) >> 0) & 0x3ff)
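+
+/*
+ * Illustrative sketch: appending a new exact-match MAC entry uses the
+ * special FW_VI_MAC_ADD_MAC index rather than a fixed slot ("p" is a
+ * placeholder pointer to one of the "exact" entries above):
+ *
+ *	p->valid_to_idx = htons(FW_VI_MAC_CMD_VALID |
+ *				FW_VI_MAC_CMD_IDX(FW_VI_MAC_ADD_MAC));
+ */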
+
+#define FW_RXMODE_MTU_NO_CHG	65535
+
+struct fw_vi_rxmode_cmd {
+	__be32 op_to_viid;
+	__be32 retval_len16;
+	__be32 mtu_to_broadcasten;
+	__be32 r4_lo;
+};
+
+#define FW_VI_RXMODE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_RXMODE_CMD_MTU(x) ((x) << 16)
+#define FW_VI_RXMODE_CMD_PROMISCEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_PROMISCEN(x) ((x) << 14)
+#define FW_VI_RXMODE_CMD_ALLMULTIEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_ALLMULTIEN(x) ((x) << 12)
+#define FW_VI_RXMODE_CMD_BROADCASTEN_MASK 0x3
+#define FW_VI_RXMODE_CMD_BROADCASTEN(x) ((x) << 10)
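+
+/*
+ * Illustrative note: each enable field above is 2 bits wide, and
+ * writing the all-ones _MASK value is the conventional "leave
+ * unchanged" encoding (FW_RXMODE_MTU_NO_CHG plays the same role for
+ * the MTU field).  E.g. changing only the MTU:
+ *
+ *	cmd.mtu_to_broadcasten = htonl(FW_VI_RXMODE_CMD_MTU(mtu) |
+ *		FW_VI_RXMODE_CMD_PROMISCEN(FW_VI_RXMODE_CMD_PROMISCEN_MASK) |
+ *		FW_VI_RXMODE_CMD_ALLMULTIEN(FW_VI_RXMODE_CMD_ALLMULTIEN_MASK) |
+ *		FW_VI_RXMODE_CMD_BROADCASTEN(FW_VI_RXMODE_CMD_BROADCASTEN_MASK));
+ */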
+
+struct fw_vi_enable_cmd {
+	__be32 op_to_viid;
+	__be32 ien_to_len16;
+	__be16 blinkdur;
+	__be16 r3;
+	__be32 r4;
+};
+
+#define FW_VI_ENABLE_CMD_VIID(x) ((x) << 0)
+#define FW_VI_ENABLE_CMD_IEN(x) ((x) << 31)
+#define FW_VI_ENABLE_CMD_EEN(x) ((x) << 30)
+#define FW_VI_ENABLE_CMD_LED (1U << 29)
+
+/* VI VF stats offset definitions */
+#define VI_VF_NUM_STATS	16
+enum fw_vi_stats_vf_index {
+	FW_VI_VF_STAT_TX_BCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_BCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_MCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_MCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_UCAST_BYTES_IX,
+	FW_VI_VF_STAT_TX_UCAST_FRAMES_IX,
+	FW_VI_VF_STAT_TX_DROP_FRAMES_IX,
+	FW_VI_VF_STAT_TX_OFLD_BYTES_IX,
+	FW_VI_VF_STAT_TX_OFLD_FRAMES_IX,
+	FW_VI_VF_STAT_RX_BCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_BCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_MCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_MCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_UCAST_BYTES_IX,
+	FW_VI_VF_STAT_RX_UCAST_FRAMES_IX,
+	FW_VI_VF_STAT_RX_ERR_FRAMES_IX
+};
+
+/* VI PF stats offset definitions */
+#define VI_PF_NUM_STATS	17
+enum fw_vi_stats_pf_index {
+	FW_VI_PF_STAT_TX_BCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_BCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_MCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_MCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_UCAST_BYTES_IX,
+	FW_VI_PF_STAT_TX_UCAST_FRAMES_IX,
+	FW_VI_PF_STAT_TX_OFLD_BYTES_IX,
+	FW_VI_PF_STAT_TX_OFLD_FRAMES_IX,
+	FW_VI_PF_STAT_RX_BYTES_IX,
+	FW_VI_PF_STAT_RX_FRAMES_IX,
+	FW_VI_PF_STAT_RX_BCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_BCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_MCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_MCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_UCAST_BYTES_IX,
+	FW_VI_PF_STAT_RX_UCAST_FRAMES_IX,
+	FW_VI_PF_STAT_RX_ERR_FRAMES_IX
+};
+
+struct fw_vi_stats_cmd {
+	__be32 op_to_viid;
+	__be32 retval_len16;
+	union fw_vi_stats {
+		struct fw_vi_stats_ctl {
+			__be16 nstats_ix;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_vi_stats_pf {
+			__be64 tx_bcast_bytes;
+			__be64 tx_bcast_frames;
+			__be64 tx_mcast_bytes;
+			__be64 tx_mcast_frames;
+			__be64 tx_ucast_bytes;
+			__be64 tx_ucast_frames;
+			__be64 tx_offload_bytes;
+			__be64 tx_offload_frames;
+			__be64 rx_pf_bytes;
+			__be64 rx_pf_frames;
+			__be64 rx_bcast_bytes;
+			__be64 rx_bcast_frames;
+			__be64 rx_mcast_bytes;
+			__be64 rx_mcast_frames;
+			__be64 rx_ucast_bytes;
+			__be64 rx_ucast_frames;
+			__be64 rx_err_frames;
+		} pf;
+		struct fw_vi_stats_vf {
+			__be64 tx_bcast_bytes;
+			__be64 tx_bcast_frames;
+			__be64 tx_mcast_bytes;
+			__be64 tx_mcast_frames;
+			__be64 tx_ucast_bytes;
+			__be64 tx_ucast_frames;
+			__be64 tx_drop_frames;
+			__be64 tx_offload_bytes;
+			__be64 tx_offload_frames;
+			__be64 rx_bcast_bytes;
+			__be64 rx_bcast_frames;
+			__be64 rx_mcast_bytes;
+			__be64 rx_mcast_frames;
+			__be64 rx_ucast_bytes;
+			__be64 rx_ucast_frames;
+			__be64 rx_err_frames;
+		} vf;
+	} u;
+};
+
+#define FW_VI_STATS_CMD_VIID(x) ((x) << 0)
+#define FW_VI_STATS_CMD_NSTATS(x) ((x) << 12)
+#define FW_VI_STATS_CMD_IX(x) ((x) << 0)
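+
+/*
+ * Illustrative note: the "ctl" form of struct fw_vi_stats above
+ * returns at most six counters (stat0..stat5) per command, so a
+ * caller would typically fetch a stats block in chunks, e.g. ("ix"
+ * is a placeholder fw_vi_stats_vf/pf_index value):
+ *
+ *	ctl.nstats_ix = htons(FW_VI_STATS_CMD_IX(ix) |
+ *			      FW_VI_STATS_CMD_NSTATS(6));
+ */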
+
+struct fw_acl_mac_cmd {
+	__be32 op_to_vfn;
+	__be32 en_to_len16;
+	u8 nmac;
+	u8 r3[7];
+	__be16 r4;
+	u8 macaddr0[6];
+	__be16 r5;
+	u8 macaddr1[6];
+	__be16 r6;
+	u8 macaddr2[6];
+	__be16 r7;
+	u8 macaddr3[6];
+};
+
+#define FW_ACL_MAC_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_MAC_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_MAC_CMD_EN(x) ((x) << 31)
+
+struct fw_acl_vlan_cmd {
+	__be32 op_to_vfn;
+	__be32 en_to_len16;
+	u8 nvlan;
+	u8 dropnovlan_fm;
+	u8 r3_lo[6];
+	__be16 vlanid[16];
+};
+
+#define FW_ACL_VLAN_CMD_PFN(x) ((x) << 8)
+#define FW_ACL_VLAN_CMD_VFN(x) ((x) << 0)
+#define FW_ACL_VLAN_CMD_EN(x) ((x) << 31)
+#define FW_ACL_VLAN_CMD_DROPNOVLAN(x) ((x) << 7)
+#define FW_ACL_VLAN_CMD_FM(x) ((x) << 6)
+
+enum fw_port_cap {
+	FW_PORT_CAP_SPEED_100M		= 0x0001,
+	FW_PORT_CAP_SPEED_1G		= 0x0002,
+	FW_PORT_CAP_SPEED_2_5G		= 0x0004,
+	FW_PORT_CAP_SPEED_10G		= 0x0008,
+	FW_PORT_CAP_SPEED_40G		= 0x0010,
+	FW_PORT_CAP_SPEED_100G		= 0x0020,
+	FW_PORT_CAP_FC_RX		= 0x0040,
+	FW_PORT_CAP_FC_TX		= 0x0080,
+	FW_PORT_CAP_ANEG		= 0x0100,
+	FW_PORT_CAP_MDI_0		= 0x0200,
+	FW_PORT_CAP_MDI_1		= 0x0400,
+	FW_PORT_CAP_BEAN		= 0x0800,
+	FW_PORT_CAP_PMA_LPBK		= 0x1000,
+	FW_PORT_CAP_PCS_LPBK		= 0x2000,
+	FW_PORT_CAP_PHYXS_LPBK		= 0x4000,
+	FW_PORT_CAP_FAR_END_LPBK	= 0x8000,
+};
+
+enum fw_port_mdi {
+	FW_PORT_MDI_UNCHANGED,
+	FW_PORT_MDI_AUTO,
+	FW_PORT_MDI_F_STRAIGHT,
+	FW_PORT_MDI_F_CROSSOVER
+};
+
+#define FW_PORT_MDI(x) ((x) << 9)
+
+enum fw_port_action {
+	FW_PORT_ACTION_L1_CFG		= 0x0001,
+	FW_PORT_ACTION_L2_CFG		= 0x0002,
+	FW_PORT_ACTION_GET_PORT_INFO	= 0x0003,
+	FW_PORT_ACTION_L2_PPP_CFG	= 0x0004,
+	FW_PORT_ACTION_L2_DCB_CFG	= 0x0005,
+	FW_PORT_ACTION_LOW_PWR_TO_NORMAL = 0x0010,
+	FW_PORT_ACTION_L1_LOW_PWR_EN	= 0x0011,
+	FW_PORT_ACTION_L2_WOL_MODE_EN	= 0x0012,
+	FW_PORT_ACTION_LPBK_TO_NORMAL	= 0x0020,
+	FW_PORT_ACTION_L1_LPBK		= 0x0021,
+	FW_PORT_ACTION_L1_PMA_LPBK	= 0x0022,
+	FW_PORT_ACTION_L1_PCS_LPBK	= 0x0023,
+	FW_PORT_ACTION_L1_PHYXS_CSIDE_LPBK = 0x0024,
+	FW_PORT_ACTION_L1_PHYXS_ESIDE_LPBK = 0x0025,
+	FW_PORT_ACTION_PHY_RESET	= 0x0040,
+	FW_PORT_ACTION_PMA_RESET	= 0x0041,
+	FW_PORT_ACTION_PCS_RESET	= 0x0042,
+	FW_PORT_ACTION_PHYXS_RESET	= 0x0043,
+	FW_PORT_ACTION_DTEXS_REEST	= 0x0044,
+	FW_PORT_ACTION_AN_RESET		= 0x0045
+};
+
+enum fw_port_l2cfg_ctlbf {
+	FW_PORT_L2_CTLBF_OVLAN0	= 0x01,
+	FW_PORT_L2_CTLBF_OVLAN1	= 0x02,
+	FW_PORT_L2_CTLBF_OVLAN2	= 0x04,
+	FW_PORT_L2_CTLBF_OVLAN3	= 0x08,
+	FW_PORT_L2_CTLBF_IVLAN	= 0x10,
+	FW_PORT_L2_CTLBF_TXIPG	= 0x20
+};
+
+enum fw_port_dcb_cfg {
+	FW_PORT_DCB_CFG_PG	= 0x01,
+	FW_PORT_DCB_CFG_PFC	= 0x02,
+	FW_PORT_DCB_CFG_APPL	= 0x04
+};
+
+enum fw_port_dcb_cfg_rc {
+	FW_PORT_DCB_CFG_SUCCESS	= 0x0,
+	FW_PORT_DCB_CFG_ERROR	= 0x1
+};
+
+struct fw_port_cmd {
+	__be32 op_to_portid;
+	__be32 action_to_len16;
+	union fw_port {
+		struct fw_port_l1cfg {
+			__be32 rcap;
+			__be32 r;
+		} l1cfg;
+		struct fw_port_l2cfg {
+			__be16 ctlbf_to_ivlan0;
+			__be16 ivlantype;
+			__be32 txipg_pkd;
+			__be16 ovlan0mask;
+			__be16 ovlan0type;
+			__be16 ovlan1mask;
+			__be16 ovlan1type;
+			__be16 ovlan2mask;
+			__be16 ovlan2type;
+			__be16 ovlan3mask;
+			__be16 ovlan3type;
+		} l2cfg;
+		struct fw_port_info {
+			__be32 lstatus_to_modtype;
+			__be16 pcap;
+			__be16 acap;
+		} info;
+		struct fw_port_ppp {
+			__be32 pppen_to_ncsich;
+			__be32 r11;
+		} ppp;
+		struct fw_port_dcb {
+			__be16 cfg;
+			u8 up_map;
+			u8 sf_cfgrc;
+			__be16 prot_ix;
+			u8 pe7_to_pe0;
+			u8 numTCPFCs;
+			__be32 pgid0_to_pgid7;
+			__be32 numTCs_oui;
+			u8 pgpc[8];
+		} dcb;
+	} u;
+};
+
+#define FW_PORT_CMD_READ (1U << 22)
+
+#define FW_PORT_CMD_PORTID(x) ((x) << 0)
+#define FW_PORT_CMD_PORTID_GET(x) (((x) >> 0) & 0xf)
+
+#define FW_PORT_CMD_ACTION(x) ((x) << 16)
+
+#define FW_PORT_CMD_CTLBF(x) ((x) << 10)
+#define FW_PORT_CMD_OVLAN3(x) ((x) << 7)
+#define FW_PORT_CMD_OVLAN2(x) ((x) << 6)
+#define FW_PORT_CMD_OVLAN1(x) ((x) << 5)
+#define FW_PORT_CMD_OVLAN0(x) ((x) << 4)
+#define FW_PORT_CMD_IVLAN0(x) ((x) << 3)
+
+#define FW_PORT_CMD_TXIPG(x) ((x) << 19)
+
+#define FW_PORT_CMD_LSTATUS (1U << 31)
+#define FW_PORT_CMD_LSPEED(x) ((x) << 24)
+#define FW_PORT_CMD_LSPEED_GET(x) (((x) >> 24) & 0x3f)
+#define FW_PORT_CMD_TXPAUSE (1U << 23)
+#define FW_PORT_CMD_RXPAUSE (1U << 22)
+#define FW_PORT_CMD_MDIOCAP (1U << 21)
+#define FW_PORT_CMD_MDIOADDR_GET(x) (((x) >> 16) & 0x1f)
+#define FW_PORT_CMD_LPTXPAUSE (1U << 15)
+#define FW_PORT_CMD_LPRXPAUSE (1U << 14)
+#define FW_PORT_CMD_PTYPE_MASK 0x1f
+#define FW_PORT_CMD_PTYPE_GET(x) (((x) >> 8) & FW_PORT_CMD_PTYPE_MASK)
+#define FW_PORT_CMD_MODTYPE_MASK 0x1f
+#define FW_PORT_CMD_MODTYPE_GET(x) (((x) >> 0) & FW_PORT_CMD_MODTYPE_MASK)
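+
+/*
+ * Illustrative decode of fw_port_info.lstatus_to_modtype (after
+ * ntohl()): the link is up when FW_PORT_CMD_LSTATUS is set,
+ * FW_PORT_CMD_LSPEED_GET() returns the negotiated speed as a
+ * fw_port_cap speed bit, and FW_PORT_CMD_MODTYPE_GET() returns the
+ * fw_port_module_type of any inserted module.
+ */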
+
+#define FW_PORT_CMD_PPPEN(x) ((x) << 31)
+#define FW_PORT_CMD_TPSRC(x) ((x) << 28)
+#define FW_PORT_CMD_NCSISRC(x) ((x) << 24)
+
+#define FW_PORT_CMD_CH0(x) ((x) << 20)
+#define FW_PORT_CMD_CH1(x) ((x) << 16)
+#define FW_PORT_CMD_CH2(x) ((x) << 12)
+#define FW_PORT_CMD_CH3(x) ((x) << 8)
+#define FW_PORT_CMD_NCSICH(x) ((x) << 4)
+
+enum fw_port_type {
+	FW_PORT_TYPE_FIBER,
+	FW_PORT_TYPE_KX4,
+	FW_PORT_TYPE_BT_SGMII,
+	FW_PORT_TYPE_KX,
+	FW_PORT_TYPE_BT_XAUI,
+	FW_PORT_TYPE_KR,
+	FW_PORT_TYPE_CX4,
+	FW_PORT_TYPE_TWINAX,
+
+	FW_PORT_TYPE_NONE = FW_PORT_CMD_PTYPE_MASK
+};
+
+enum fw_port_module_type {
+	FW_PORT_MOD_TYPE_NA,
+	FW_PORT_MOD_TYPE_LR,
+	FW_PORT_MOD_TYPE_SR,
+	FW_PORT_MOD_TYPE_ER,
+
+	FW_PORT_MOD_TYPE_NONE = FW_PORT_CMD_MODTYPE_MASK
+};
+
+/* port stats */
+#define FW_NUM_PORT_STATS 50
+#define FW_NUM_PORT_TX_STATS 23
+#define FW_NUM_PORT_RX_STATS 27
+
+enum fw_port_stats_tx_index {
+	FW_STAT_TX_PORT_BYTES_IX,
+	FW_STAT_TX_PORT_FRAMES_IX,
+	FW_STAT_TX_PORT_BCAST_IX,
+	FW_STAT_TX_PORT_MCAST_IX,
+	FW_STAT_TX_PORT_UCAST_IX,
+	FW_STAT_TX_PORT_ERROR_IX,
+	FW_STAT_TX_PORT_64B_IX,
+	FW_STAT_TX_PORT_65B_127B_IX,
+	FW_STAT_TX_PORT_128B_255B_IX,
+	FW_STAT_TX_PORT_256B_511B_IX,
+	FW_STAT_TX_PORT_512B_1023B_IX,
+	FW_STAT_TX_PORT_1024B_1518B_IX,
+	FW_STAT_TX_PORT_1519B_MAX_IX,
+	FW_STAT_TX_PORT_DROP_IX,
+	FW_STAT_TX_PORT_PAUSE_IX,
+	FW_STAT_TX_PORT_PPP0_IX,
+	FW_STAT_TX_PORT_PPP1_IX,
+	FW_STAT_TX_PORT_PPP2_IX,
+	FW_STAT_TX_PORT_PPP3_IX,
+	FW_STAT_TX_PORT_PPP4_IX,
+	FW_STAT_TX_PORT_PPP5_IX,
+	FW_STAT_TX_PORT_PPP6_IX,
+	FW_STAT_TX_PORT_PPP7_IX
+};
+
+enum fw_port_stat_rx_index {
+	FW_STAT_RX_PORT_BYTES_IX,
+	FW_STAT_RX_PORT_FRAMES_IX,
+	FW_STAT_RX_PORT_BCAST_IX,
+	FW_STAT_RX_PORT_MCAST_IX,
+	FW_STAT_RX_PORT_UCAST_IX,
+	FW_STAT_RX_PORT_MTU_ERROR_IX,
+	FW_STAT_RX_PORT_MTU_CRC_ERROR_IX,
+	FW_STAT_RX_PORT_CRC_ERROR_IX,
+	FW_STAT_RX_PORT_LEN_ERROR_IX,
+	FW_STAT_RX_PORT_SYM_ERROR_IX,
+	FW_STAT_RX_PORT_64B_IX,
+	FW_STAT_RX_PORT_65B_127B_IX,
+	FW_STAT_RX_PORT_128B_255B_IX,
+	FW_STAT_RX_PORT_256B_511B_IX,
+	FW_STAT_RX_PORT_512B_1023B_IX,
+	FW_STAT_RX_PORT_1024B_1518B_IX,
+	FW_STAT_RX_PORT_1519B_MAX_IX,
+	FW_STAT_RX_PORT_PAUSE_IX,
+	FW_STAT_RX_PORT_PPP0_IX,
+	FW_STAT_RX_PORT_PPP1_IX,
+	FW_STAT_RX_PORT_PPP2_IX,
+	FW_STAT_RX_PORT_PPP3_IX,
+	FW_STAT_RX_PORT_PPP4_IX,
+	FW_STAT_RX_PORT_PPP5_IX,
+	FW_STAT_RX_PORT_PPP6_IX,
+	FW_STAT_RX_PORT_PPP7_IX,
+	FW_STAT_RX_PORT_LESS_64B_IX
+};
+
+struct fw_port_stats_cmd {
+	__be32 op_to_portid;
+	__be32 retval_len16;
+	union fw_port_stats {
+		struct fw_port_stats_ctl {
+			u8 nstats_bg_bm;
+			u8 tx_ix;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_port_stats_all {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 tx_bcast;
+			__be64 tx_mcast;
+			__be64 tx_ucast;
+			__be64 tx_error;
+			__be64 tx_64b;
+			__be64 tx_65b_127b;
+			__be64 tx_128b_255b;
+			__be64 tx_256b_511b;
+			__be64 tx_512b_1023b;
+			__be64 tx_1024b_1518b;
+			__be64 tx_1519b_max;
+			__be64 tx_drop;
+			__be64 tx_pause;
+			__be64 tx_ppp0;
+			__be64 tx_ppp1;
+			__be64 tx_ppp2;
+			__be64 tx_ppp3;
+			__be64 tx_ppp4;
+			__be64 tx_ppp5;
+			__be64 tx_ppp6;
+			__be64 tx_ppp7;
+			__be64 rx_bytes;
+			__be64 rx_frames;
+			__be64 rx_bcast;
+			__be64 rx_mcast;
+			__be64 rx_ucast;
+			__be64 rx_mtu_error;
+			__be64 rx_mtu_crc_error;
+			__be64 rx_crc_error;
+			__be64 rx_len_error;
+			__be64 rx_sym_error;
+			__be64 rx_64b;
+			__be64 rx_65b_127b;
+			__be64 rx_128b_255b;
+			__be64 rx_256b_511b;
+			__be64 rx_512b_1023b;
+			__be64 rx_1024b_1518b;
+			__be64 rx_1519b_max;
+			__be64 rx_pause;
+			__be64 rx_ppp0;
+			__be64 rx_ppp1;
+			__be64 rx_ppp2;
+			__be64 rx_ppp3;
+			__be64 rx_ppp4;
+			__be64 rx_ppp5;
+			__be64 rx_ppp6;
+			__be64 rx_ppp7;
+			__be64 rx_less_64b;
+			__be64 rx_bg_drop;
+			__be64 rx_bg_trunc;
+		} all;
+	} u;
+};
+
+#define FW_PORT_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_STATS_CMD_TX(x) ((x) << 7)
+#define FW_PORT_STATS_CMD_IX(x) ((x) << 0)
+
+/* port loopback stats */
+#define FW_NUM_LB_STATS 16
+enum fw_port_lb_stats_index {
+	FW_STAT_LB_PORT_BYTES_IX,
+	FW_STAT_LB_PORT_FRAMES_IX,
+	FW_STAT_LB_PORT_BCAST_IX,
+	FW_STAT_LB_PORT_MCAST_IX,
+	FW_STAT_LB_PORT_UCAST_IX,
+	FW_STAT_LB_PORT_ERROR_IX,
+	FW_STAT_LB_PORT_64B_IX,
+	FW_STAT_LB_PORT_65B_127B_IX,
+	FW_STAT_LB_PORT_128B_255B_IX,
+	FW_STAT_LB_PORT_256B_511B_IX,
+	FW_STAT_LB_PORT_512B_1023B_IX,
+	FW_STAT_LB_PORT_1024B_1518B_IX,
+	FW_STAT_LB_PORT_1519B_MAX_IX,
+	FW_STAT_LB_PORT_DROP_FRAMES_IX
+};
+
+struct fw_port_lb_stats_cmd {
+	__be32 op_to_lbport;
+	__be32 retval_len16;
+	union fw_port_lb_stats {
+		struct fw_port_lb_stats_ctl {
+			u8 nstats_bg_bm;
+			u8 ix_pkd;
+			__be16 r6;
+			__be32 r7;
+			__be64 stat0;
+			__be64 stat1;
+			__be64 stat2;
+			__be64 stat3;
+			__be64 stat4;
+			__be64 stat5;
+		} ctl;
+		struct fw_port_lb_stats_all {
+			__be64 tx_bytes;
+			__be64 tx_frames;
+			__be64 tx_bcast;
+			__be64 tx_mcast;
+			__be64 tx_ucast;
+			__be64 tx_error;
+			__be64 tx_64b;
+			__be64 tx_65b_127b;
+			__be64 tx_128b_255b;
+			__be64 tx_256b_511b;
+			__be64 tx_512b_1023b;
+			__be64 tx_1024b_1518b;
+			__be64 tx_1519b_max;
+			__be64 rx_lb_drop;
+			__be64 rx_lb_trunc;
+		} all;
+	} u;
+};
+
+#define FW_PORT_LB_STATS_CMD_LBPORT(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_NSTATS(x) ((x) << 4)
+#define FW_PORT_LB_STATS_CMD_BG_BM(x) ((x) << 0)
+#define FW_PORT_LB_STATS_CMD_IX(x) ((x) << 0)
+
+struct fw_rss_ind_tbl_cmd {
+	__be32 op_to_viid;
+#define FW_RSS_IND_TBL_CMD_VIID(x) ((x) << 0)
+	__be32 retval_len16;
+	__be16 niqid;
+	__be16 startidx;
+	__be32 r3;
+	__be32 iq0_to_iq2;
+#define FW_RSS_IND_TBL_CMD_IQ0(x) ((x) << 20)
+#define FW_RSS_IND_TBL_CMD_IQ1(x) ((x) << 10)
+#define FW_RSS_IND_TBL_CMD_IQ2(x) ((x) << 0)
+	__be32 iq3_to_iq5;
+	__be32 iq6_to_iq8;
+	__be32 iq9_to_iq11;
+	__be32 iq12_to_iq14;
+	__be32 iq15_to_iq17;
+	__be32 iq18_to_iq20;
+	__be32 iq21_to_iq23;
+	__be32 iq24_to_iq26;
+	__be32 iq27_to_iq29;
+	__be32 iq30_iq31;
+	__be32 r15_lo;
+};
+
+struct fw_rss_glb_config_cmd {
+	__be32 op_to_write;
+	__be32 retval_len16;
+	union fw_rss_glb_config {
+		struct fw_rss_glb_config_manual {
+			__be32 mode_pkd;
+			__be32 r3;
+			__be64 r4;
+			__be64 r5;
+		} manual;
+		struct fw_rss_glb_config_basicvirtual {
+			__be32 mode_pkd;
+			__be32 synmapen_to_hashtoeplitz;
+#define FW_RSS_GLB_CONFIG_CMD_SYNMAPEN      (1U << 8)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV6 (1U << 7)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV6 (1U << 6)
+#define FW_RSS_GLB_CONFIG_CMD_SYN4TUPENIPV4 (1U << 5)
+#define FW_RSS_GLB_CONFIG_CMD_SYN2TUPENIPV4 (1U << 4)
+#define FW_RSS_GLB_CONFIG_CMD_OFDMAPEN      (1U << 3)
+#define FW_RSS_GLB_CONFIG_CMD_TNLMAPEN      (1U << 2)
+#define FW_RSS_GLB_CONFIG_CMD_TNLALLLKP     (1U << 1)
+#define FW_RSS_GLB_CONFIG_CMD_HASHTOEPLITZ  (1U << 0)
+			__be64 r8;
+			__be64 r9;
+		} basicvirtual;
+	} u;
+};
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE(x)	((x) << 28)
+
+#define FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL	0
+#define FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL	1
+
+struct fw_rss_vi_config_cmd {
+	__be32 op_to_viid;
+#define FW_RSS_VI_CONFIG_CMD_VIID(x) ((x) << 0)
+	__be32 retval_len16;
+	union fw_rss_vi_config {
+		struct fw_rss_vi_config_manual {
+			__be64 r3;
+			__be64 r4;
+			__be64 r5;
+		} manual;
+		struct fw_rss_vi_config_basicvirtual {
+			__be32 r6;
+			__be32 defaultq_to_ip4udpen;
+#define FW_RSS_VI_CONFIG_CMD_DEFAULTQ(x)  ((x) << 16)
+#define FW_RSS_VI_CONFIG_CMD_IP6FOURTUPEN (1U << 4)
+#define FW_RSS_VI_CONFIG_CMD_IP6TWOTUPEN  (1U << 3)
+#define FW_RSS_VI_CONFIG_CMD_IP4FOURTUPEN (1U << 2)
+#define FW_RSS_VI_CONFIG_CMD_IP4TWOTUPEN  (1U << 1)
+#define FW_RSS_VI_CONFIG_CMD_IP4UDPEN     (1U << 0)
+			__be64 r9;
+			__be64 r10;
+		} basicvirtual;
+	} u;
+};
+
+enum fw_error_type {
+	FW_ERROR_TYPE_EXCEPTION		= 0x0,
+	FW_ERROR_TYPE_HWMODULE		= 0x1,
+	FW_ERROR_TYPE_WR		= 0x2,
+	FW_ERROR_TYPE_ACL		= 0x3,
+};
+
+struct fw_error_cmd {
+	__be32 op_to_type;
+	__be32 len16_pkd;
+	union fw_error {
+		struct fw_error_exception {
+			__be32 info[6];
+		} exception;
+		struct fw_error_hwmodule {
+			__be32 regaddr;
+			__be32 regval;
+		} hwmodule;
+		struct fw_error_wr {
+			__be16 cidx;
+			__be16 pfn_vfn;
+			__be32 eqid;
+			u8 wrhdr[16];
+		} wr;
+		struct fw_error_acl {
+			__be16 cidx;
+			__be16 pfn_vfn;
+			__be32 eqid;
+			__be16 mv_pkd;
+			u8 val[6];
+			__be64 r4;
+		} acl;
+	} u;
+};
+
+struct fw_debug_cmd {
+	__be32 op_type;
+#define FW_DEBUG_CMD_TYPE_GET(x) ((x) & 0xff)
+	__be32 len16_pkd;
+	union fw_debug {
+		struct fw_debug_assert {
+			__be32 fcid;
+			__be32 line;
+			__be32 x;
+			__be32 y;
+			u8 filename_0_7[8];
+			u8 filename_8_15[8];
+			__be64 r3;
+		} assert;
+		struct fw_debug_prt {
+			__be16 dprtstridx;
+			__be16 r3[3];
+			__be32 dprtstrparam0;
+			__be32 dprtstrparam1;
+			__be32 dprtstrparam2;
+			__be32 dprtstrparam3;
+		} prt;
+	} u;
+};
+
+struct fw_hdr {
+	u8 ver;
+	u8 reserved1;
+	__be16	len512;			/* bin length in units of 512 bytes */
+	__be32	fw_ver;			/* firmware version */
+	__be32	tp_microcode_ver;
+	u8 intfver_nic;
+	u8 intfver_vnic;
+	u8 intfver_ofld;
+	u8 intfver_ri;
+	u8 intfver_iscsipdu;
+	u8 intfver_iscsi;
+	u8 intfver_fcoe;
+	u8 reserved2;
+	__be32  reserved3[27];
+};
+
+#define FW_HDR_FW_VER_MAJOR_GET(x) (((x) >> 24) & 0xff)
+#define FW_HDR_FW_VER_MINOR_GET(x) (((x) >> 16) & 0xff)
+#define FW_HDR_FW_VER_MICRO_GET(x) (((x) >> 8) & 0xff)
+#define FW_HDR_FW_VER_BUILD_GET(x) (((x) >> 0) & 0xff)
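+
+/*
+ * Worked example (illustrative): for fw_ver = 0x01020304 the _GET
+ * macros above yield major 1, minor 2, micro 3 and build 4.
+ */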
+#endif /* _T4FW_INTERFACE_H_ */
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33..2f29c21 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@
 	/* TX */
 	struct e1000_tx_ring *tx_ring;      /* One per active queue */
 	unsigned int restart_queue;
-	unsigned long tx_queue_len;
 	u32 txd_cmd;
 	u32 tx_int_delay;
 	u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 4133034..47da5fc 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@
 		adapter->alloc_rx_buf(adapter, ring,
 		                      E1000_DESC_UNUSED(ring));
 	}
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
 	netif_carrier_off(netdev);
@@ -2315,19 +2312,15 @@
 			        E1000_CTRL_RFCE) ? "RX" : ((ctrl &
 			        E1000_CTRL_TFCE) ? "TX" : "None" )));
 
-			/* tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = false;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = false;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 11e02e1..12648a1 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -273,7 +273,6 @@
 
 	struct napi_struct napi;
 
-	unsigned long tx_queue_len;
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 02f7d20..167b1ae 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2292,8 +2292,6 @@
 	ew32(TCTL, tctl);
 
 	e1000e_config_collision_dist(hw);
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -2879,7 +2877,6 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 	adapter->link_speed = 0;
 	adapter->link_duplex = 0;
@@ -3612,21 +3609,15 @@
 					       "link gets many collisions.\n");
 			}
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = 0;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = 0;
-				netdev->tx_queue_len = 100;
 				adapter->tx_timeout_factor = 10;
 				break;
 			}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index fdd26c2..5175233 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -676,7 +676,7 @@
 		priv->rx_queue[i] = NULL;
 
 	for (i = 0; i < priv->num_tx_queues; i++) {
-		priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kmalloc(
+		priv->tx_queue[i] =  (struct gfar_priv_tx_q *)kzalloc(
 				sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
 		if (!priv->tx_queue[i]) {
 			err = -ENOMEM;
@@ -689,7 +689,7 @@
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
-		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
+		priv->rx_queue[i] = (struct gfar_priv_rx_q *)kzalloc(
 					sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
 		if (!priv->rx_queue[i]) {
 			err = -ENOMEM;
@@ -1120,10 +1120,10 @@
 	/* provided which set of benchmarks. */
 	printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
 	for (i = 0; i < priv->num_rx_queues; i++)
-		printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
+		printk(KERN_INFO "%s: RX BD ring size for Q[%d]: %d\n",
 			dev->name, i, priv->rx_queue[i]->rx_ring_size);
 	for(i = 0; i < priv->num_tx_queues; i++)
-		 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
+		 printk(KERN_INFO "%s: TX BD ring size for Q[%d]: %d\n",
 			dev->name, i, priv->tx_queue[i]->tx_ring_size);
 
 	return 0;
@@ -1638,13 +1638,13 @@
 	/* Go through all the buffer descriptors and free their data buffers */
 	for (i = 0; i < priv->num_tx_queues; i++) {
 		tx_queue = priv->tx_queue[i];
-		if(!tx_queue->tx_skbuff)
+		if(tx_queue->tx_skbuff)
 			free_skb_tx_queue(tx_queue);
 	}
 
 	for (i = 0; i < priv->num_rx_queues; i++) {
 		rx_queue = priv->rx_queue[i];
-		if(!rx_queue->rx_skbuff)
+		if(rx_queue->rx_skbuff)
 			free_skb_rx_queue(rx_queue);
 	}
 
@@ -2393,6 +2393,7 @@
 	 * as many bytes as needed to align the data properly
 	 */
 	skb_reserve(skb, alignamount);
+	GFAR_CB(skb)->alignamount = alignamount;
 
 	return skb;
 }
@@ -2533,13 +2534,13 @@
 				newskb = skb;
 			else if (skb) {
 				/*
-				 * We need to reset ->data to what it
+				 * We need to un-reserve() the skb to where it
 				 * was before gfar_new_skb() re-aligned
 				 * it to an RXBUF_ALIGNMENT boundary
 				 * before we put the skb back on the
 				 * recycle list.
 				 */
-				skb->data = skb->head + NET_SKB_PAD;
+				skb_reserve(skb, -GFAR_CB(skb)->alignamount);
 				__skb_queue_head(&priv->rx_recycle, skb);
 			}
 		} else {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc4..17d25e7 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@
 	u16	vlctl;	/* VLAN control word */
 };
 
+struct gianfar_skb_cb {
+	int alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
 struct rmon_mib
 {
 	u32	tr64;	/* 0x.680 - Transmit and Receive 64-byte Frame Counter */
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886..be8d010 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@
  *  igb_enable_mng_pass_thru - Enable processing of ARP's
  *  @hw: pointer to the HW structure
  *
- *  Verifies the hardware needs to allow ARPs to be processed by the host.
+ *  Verifies the hardware needs to leave the interface enabled so that frames
+ *  can be directed to and from the management interface.
  **/
 bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
 {
@@ -1380,8 +1381,7 @@
 
 	manc = rd32(E1000_MANC);
 
-	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
-	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
 		goto out;
 
 	if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 4f69b6d..7d288ccc 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -268,7 +268,6 @@
 
 	/* TX */
 	struct igb_ring *tx_ring[16];
-	unsigned long tx_queue_len;
 	u32 tx_timeout_count;
 
 	/* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 78cc742..2745e17 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1086,9 +1086,6 @@
 		struct igb_ring *ring = adapter->rx_ring[i];
 		igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
 	}
-
-
-	adapter->tx_queue_len = netdev->tx_queue_len;
 }
 
 /**
@@ -1194,7 +1191,6 @@
 	del_timer_sync(&adapter->watchdog_timer);
 	del_timer_sync(&adapter->phy_info_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
@@ -3092,17 +3088,13 @@
 			       ((ctrl & E1000_CTRL_RFCE) ?  "RX" :
 			       ((ctrl & E1000_CTRL_TFCE) ?  "TX" : "None")));
 
-			/* tweak tx_queue_len according to speed/duplex and
-			 * adjust the timeout factor */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 14;
 				break;
 			case SPEED_100:
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
@@ -3960,7 +3952,7 @@
 	struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
 	struct e1000_hw *hw = &adapter->hw;
 	struct pci_dev *pdev = adapter->pdev;
-	u32 rnbc, reg;
+	u32 reg, mpc;
 	u16 phy_tmp;
 	int i;
 	u64 bytes, packets;
@@ -4018,7 +4010,9 @@
 	adapter->stats.symerrs += rd32(E1000_SYMERRS);
 	adapter->stats.sec += rd32(E1000_SEC);
 
-	adapter->stats.mpc += rd32(E1000_MPC);
+	mpc = rd32(E1000_MPC);
+	adapter->stats.mpc += mpc;
+	net_stats->rx_fifo_errors += mpc;
 	adapter->stats.scc += rd32(E1000_SCC);
 	adapter->stats.ecol += rd32(E1000_ECOL);
 	adapter->stats.mcc += rd32(E1000_MCC);
@@ -4033,9 +4027,7 @@
 	adapter->stats.gptc += rd32(E1000_GPTC);
 	adapter->stats.gotc += rd32(E1000_GOTCL);
 	rd32(E1000_GOTCH); /* clear GOTCL */
-	rnbc = rd32(E1000_RNBC);
-	adapter->stats.rnbc += rnbc;
-	net_stats->rx_fifo_errors += rnbc;
+	adapter->stats.rnbc += rd32(E1000_RNBC);
 	adapter->stats.ruc += rd32(E1000_RUC);
 	adapter->stats.rfc += rd32(E1000_RFC);
 	adapter->stats.rjc += rd32(E1000_RJC);
@@ -5107,7 +5099,7 @@
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 
-	if (vlan_tag)
+	if (vlan_tag && adapter->vlgrp)
 		vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
 		                 vlan_tag, skb);
 	else
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b2..debeee2 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@
 	struct igbvf_ring *tx_ring /* One per active queue */
 	____cacheline_aligned_in_smp;
 
-	unsigned long tx_queue_len;
 	unsigned int restart_queue;
 	u32 txd_cmd;
 
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index ea8abf5..8688550 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@
 
 	/* enable Report Status bit */
 	adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
-	adapter->tx_queue_len = adapter->netdev->tx_queue_len;
 }
 
 /**
@@ -1524,7 +1522,6 @@
 
 	del_timer_sync(&adapter->watchdog_timer);
 
-	netdev->tx_queue_len = adapter->tx_queue_len;
 	netif_carrier_off(netdev);
 
 	/* record the stats before reset*/
@@ -1857,21 +1854,15 @@
 			                          &adapter->link_duplex);
 			igbvf_print_link_info(adapter);
 
-			/*
-			 * tweak tx_queue_len according to speed/duplex
-			 * and adjust the timeout factor
-			 */
-			netdev->tx_queue_len = adapter->tx_queue_len;
+			/* adjust timeout factor according to speed/duplex */
 			adapter->tx_timeout_factor = 1;
 			switch (adapter->link_speed) {
 			case SPEED_10:
 				txb2b = 0;
-				netdev->tx_queue_len = 10;
 				adapter->tx_timeout_factor = 16;
 				break;
 			case SPEED_100:
 				txb2b = 0;
-				netdev->tx_queue_len = 100;
 				/* maybe add some timeout factor ? */
 				break;
 			}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee..79c35ae 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@
 #define IXGBE_MAX_FDIR_INDICES 64
 #ifdef IXGBE_FCOE
 #define IXGBE_MAX_FCOE_INDICES  8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
 #endif /* IXGBE_FCOE */
 struct ixgbe_ring_feature {
 	int indices;
 	int mask;
 } ____cacheline_internodealigned_in_smp;
 
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
 
 #define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
                               ? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a44..1959ef7 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@
 		if (ixgbe_link_test(adapter, &data[4]))
 			eth_test->flags |= ETH_TEST_FL_FAILED;
 
+		if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+			int i;
+			for (i = 0; i < adapter->num_vfs; i++) {
+				if (adapter->vfinfo[i].clear_to_send) {
+					netdev_warn(netdev, "%s",
+						    "offline diagnostic is not "
+						    "supported when VFs are "
+						    "present\n");
+					data[0] = 1;
+					data[1] = 1;
+					data[2] = 1;
+					data[3] = 1;
+					eth_test->flags |= ETH_TEST_FL_FAILED;
+					clear_bit(__IXGBE_TESTING,
+						  &adapter->state);
+					goto skip_ol_tests;
+				}
+			}
+		}
+
 		if (if_running)
 			/* indicate we're in test mode */
 			dev_close(netdev);
@@ -1908,6 +1928,7 @@
 
 		clear_bit(__IXGBE_TESTING, &adapter->state);
 	}
+skip_ol_tests:
 	msleep_interruptible(4 * 1000);
 }
 
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 700cfc0..9276d59 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@
 		addr = sg_dma_address(sg);
 		len = sg_dma_len(sg);
 		while (len) {
+			/* max number of buffers allowed in one DDP context */
+			if (j >= IXGBE_BUFFCNT_MAX) {
+				netif_err(adapter, drv, adapter->netdev,
+					  "xid=%x:%d,%d,%d:addr=%llx "
+					  "not enough descriptors\n",
+					  xid, i, j, dmacount, (u64)addr);
+				goto out_noddp_free;
+			}
+
 			/* get the offset of length of current buffer */
 			thisoff = addr & ((dma_addr_t)bufflen - 1);
 			thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@
 			len -= thislen;
 			addr += thislen;
 			j++;
-			/* max number of buffers allowed in one DDP context */
-			if (j > IXGBE_BUFFCNT_MAX) {
-				DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
-					"not enough descriptors\n",
-					xid, i, j, dmacount, (u64)addr);
-				goto out_noddp_free;
-			}
 		}
 	}
 	/* only the last buffer may have non-full bufflen */
 	lastsize = thisoff + thislen;
 
 	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
-	fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
 	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
 	fcbuff |= (IXGBE_FCBUFF_VALID);
 
@@ -520,6 +522,9 @@
 	/* Enable L2 eth type filter for FCoE */
 	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
 			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+	/* Enable L2 eth type filter for FIP */
+	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
 	if (adapter->ring_feature[RING_F_FCOE].indices) {
 		/* Use multiple rx queues for FCoE by redirection table */
 		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@
 		}
 		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
 		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+		fcoe_i = f->mask;
+		fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+				IXGBE_ETQS_QUEUE_EN |
+				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	} else  {
 		/* Use single rx queue for FCoE */
 		fcoe_i = f->mask;
@@ -539,6 +550,12 @@
 				IXGBE_ETQS_QUEUE_EN |
 				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 	}
+	/* send FIP frames to the first FCoE queue */
+	fcoe_i = f->mask;
+	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+			IXGBE_ETQS_QUEUE_EN |
+			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
 
 	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
 			IXGBE_FCRXCTRL_FCOELLI |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 6effa2c..7216db2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3036,6 +3036,14 @@
 	while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
 		msleep(1);
 	ixgbe_down(adapter);
+	/*
+	 * If SR-IOV is enabled then wait a bit before bringing the adapter
+	 * back up to give the VFs time to respond to the reset.  The
+	 * two-second wait is based upon the watchdog timer cycle in
+	 * the VF driver.
+	 */
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		msleep(2000);
 	ixgbe_up(adapter);
 	clear_bit(__IXGBE_RESETTING, &adapter->state);
 }
@@ -3216,13 +3224,15 @@
 
 	/* disable receive for all VFs and wait one second */
 	if (adapter->num_vfs) {
-		for (i = 0 ; i < adapter->num_vfs; i++)
-			adapter->vfinfo[i].clear_to_send = 0;
-
 		/* ping all the active vfs to let them know we are going down */
 		ixgbe_ping_all_vfs(adapter);
+
 		/* Disable all VFTE/VFRE TX/RX */
 		ixgbe_disable_tx_rx(adapter);
+
+		/* Mark all the VFs as inactive */
+		for (i = 0 ; i < adapter->num_vfs; i++)
+			adapter->vfinfo[i].clear_to_send = 0;
 	}
 
 	/* disable receives */
@@ -5618,7 +5628,8 @@
 
 #ifdef IXGBE_FCOE
 	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
+	    ((skb->protocol == htons(ETH_P_FCOE)) ||
+	     (skb->protocol == htons(ETH_P_FIP)))) {
 		txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
 		txq += adapter->ring_feature[RING_F_FCOE].mask;
 		return txq;
@@ -5665,18 +5676,25 @@
 
 	tx_ring = adapter->tx_ring[skb->queue_mapping];
 
-	if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
-	    (skb->protocol == htons(ETH_P_FCOE))) {
-		tx_flags |= IXGBE_TX_FLAGS_FCOE;
 #ifdef IXGBE_FCOE
+	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-		tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
-			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
-		tx_flags |= ((adapter->fcoe.up << 13)
-			      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		/* for FCoE with DCB, we force the priority to what
+		 * was specified by the switch */
+		if ((skb->protocol == htons(ETH_P_FCOE)) ||
+		    (skb->protocol == htons(ETH_P_FIP))) {
+			tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+				      << IXGBE_TX_FLAGS_VLAN_SHIFT);
+			tx_flags |= ((adapter->fcoe.up << 13)
+				     << IXGBE_TX_FLAGS_VLAN_SHIFT);
+		}
 #endif
-#endif
+		/* flag for FCoE offloads */
+		if (skb->protocol == htons(ETH_P_FCOE))
+			tx_flags |= IXGBE_TX_FLAGS_FCOE;
 	}
+#endif
+
 	/* four things can cause us to need a context descriptor */
 	if (skb_is_gso(skb) ||
 	    (skb->ip_summed == CHECKSUM_PARTIAL) ||
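In the DCB branch above, the 3-bit 802.1p priority sits at bits 13-15 of the VLAN tag, which is why the code clears IXGBE_TX_FLAGS_VLAN_PRIO_MASK and ORs in `up << 13` before applying the VLAN shift. A small check with illustrative mask and shift values; the real IXGBE_TX_FLAGS_* constants are not shown in this hunk:

#include <stdio.h>
#include <stdint.h>

#define VLAN_PRIO_MASK	0xe000u	/* bits 13-15 of the VLAN TCI */
#define VLAN_SHIFT	16	/* assumed position of the TCI in tx_flags */

static uint32_t force_priority(uint32_t tx_flags, unsigned int up)
{
	tx_flags &= ~((uint32_t)VLAN_PRIO_MASK << VLAN_SHIFT);
	tx_flags |= ((uint32_t)(up << 13) << VLAN_SHIFT);
	return tx_flags;
}

int main(void)
{
	/* 0x7fffffff: every bit kept except the priority field, now 3 */
	printf("0x%08x\n", force_priority(0xffffffffu, 3));
	return 0;
}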
@@ -6031,7 +6049,6 @@
 	indices += min_t(unsigned int, num_possible_cpus(),
 			 IXGBE_MAX_FCOE_INDICES);
 #endif
-	indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
 	netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
 	if (!netdev) {
 		err = -ENOMEM;
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index c574d0a..aed4ed6 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
 #define IXGBE_ETQF_FILTER_BCN            1
 #define IXGBE_ETQF_FILTER_FCOE           2
 #define IXGBE_ETQF_FILTER_1588           3
+#define IXGBE_ETQF_FILTER_FIP            4
 /* VLAN Control Bit Masks */
 #define IXGBE_VLNCTRL_VET       0x0000FFFF  /* bits 0-15 */
 #define IXGBE_VLNCTRL_CFI       0x10000000  /* bit 28 */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index 6ced5ef..65cb133 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2921,9 +2921,10 @@
 	struct ixgbevf_tx_buffer *tx_buffer_info;
 	unsigned int len;
 	unsigned int total = skb->len;
-	unsigned int offset = 0, size, count = 0, i;
+	unsigned int offset = 0, size, count = 0;
 	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
 	unsigned int f;
+	int i;
 
 	i = tx_ring->next_to_use;
 
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 3487695..0977964 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -6318,7 +6318,7 @@
 	int len;
 
 	if (eeprom->magic != EEPROM_MAGIC)
-		return 1;
+		return -EINVAL;
 
 	len = (eeprom->offset + eeprom->len + 1) / 2;
 	for (i = eeprom->offset / 2; i < len; i++)
diff --git a/drivers/net/mlx4/main.c b/drivers/net/mlx4/main.c
index 8f6e816..b402a95 100644
--- a/drivers/net/mlx4/main.c
+++ b/drivers/net/mlx4/main.c
@@ -1023,6 +1023,7 @@
 	info->port_attr.attr.mode = S_IRUGO | S_IWUSR;
 	info->port_attr.show      = show_port_type;
 	info->port_attr.store     = set_port_type;
+	sysfs_attr_init(&info->port_attr.attr);
 
 	err = device_create_file(&dev->pdev->dev, &info->port_attr);
 	if (err) {
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 33ae5e1..174ac8e 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
 
 #define _NETXEN_NIC_LINUX_MAJOR 4
 #define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 72
-#define NETXEN_NIC_LINUX_VERSIONID  "4.0.72"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID  "4.0.73"
 
 #define NETXEN_VERSION_CODE(a, b, c)	(((a) << 24) + ((b) << 16) + (c))
 #define _major(v)	(((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5f..f26e547 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@
 		}
 		sds_ring->desc_head = (struct status_desc *)addr;
 
-		sds_ring->crb_sts_consumer =
-			netxen_get_ioaddr(adapter,
-			recv_crb_registers[port].crb_sts_consumer[ring]);
+		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+			sds_ring->crb_sts_consumer =
+				netxen_get_ioaddr(adapter,
+				recv_crb_registers[port].crb_sts_consumer[ring]);
 
-		sds_ring->crb_intr_mask =
-			netxen_get_ioaddr(adapter,
-			recv_crb_registers[port].sw_int_mask[ring]);
+			sds_ring->crb_intr_mask =
+				netxen_get_ioaddr(adapter,
+				recv_crb_registers[port].sw_int_mask[ring]);
+		}
 	}
 
 
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 439f3e8..ecb6eed 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -891,7 +891,7 @@
 	if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
 		bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
 						+ NX_UNI_BIOS_VERSION_OFF));
-		return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+		return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
 							(bios_ver >> 24);
 	} else
 		return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
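The one-character fix above (`<< 24` becomes `<< 16`) matters because the expression reassembles the bytes of a 32-bit dword; with `<< 24` the third byte is simply lost. A worked check in plain C, assuming the same u32 truncation the driver relies on:

#include <stdio.h>
#include <stdint.h>

static uint32_t bios_version(uint32_t v)
{
	/* fixed form: keeps all four bytes, e.g. 0xAABBCCDD -> 0xCCDDBBAA */
	return (v << 16) + ((v >> 8) & 0xff00) + (v >> 24);
}

static uint32_t bios_version_buggy(uint32_t v)
{
	/* old form: 0xAABBCCDD -> 0xDD00BBAA, byte 0xCC is lost */
	return (v << 24) + ((v >> 8) & 0xff00) + (v >> 24);
}

int main(void)
{
	printf("0x%08x 0x%08x\n",
	       bios_version(0xAABBCCDDu), bios_version_buggy(0xAABBCCDDu));
	return 0;
}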
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index f1daa9a..9e82061 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@
 static int
 netxen_setup_pci_map(struct netxen_adapter *adapter)
 {
-	void __iomem *mem_ptr0 = NULL;
-	void __iomem *mem_ptr1 = NULL;
-	void __iomem *mem_ptr2 = NULL;
 	void __iomem *db_ptr = NULL;
 
 	resource_size_t mem_base, db_base;
-	unsigned long mem_len, db_len = 0, pci_len0 = 0;
+	unsigned long mem_len, db_len = 0;
 
 	struct pci_dev *pdev = adapter->pdev;
 	int pci_func = adapter->ahw.pci_func;
+	struct netxen_hardware_context *ahw = &adapter->ahw;
 
 	int err = 0;
 
@@ -630,24 +628,40 @@
 
 	/* 128 Meg of memory */
 	if (mem_len == NETXEN_PCI_128MB_SIZE) {
-		mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
-		mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+
+		ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+		ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
 				SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
 				THIRD_PAGE_GROUP_SIZE);
-		pci_len0 = FIRST_PAGE_GROUP_SIZE;
+		if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+						ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
+		ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
 	} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
-		mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
-		mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+
+		ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+		ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
 			SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+		if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+			err = -EIO;
+			goto err_out;
+		}
+
 	} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
 
-		mem_ptr0 = pci_ioremap_bar(pdev, 0);
-		if (mem_ptr0 == NULL) {
+		ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+		if (ahw->pci_base0 == NULL) {
 			dev_err(&pdev->dev, "failed to map PCI bar 0\n");
 			return -EIO;
 		}
-		pci_len0 = mem_len;
+		ahw->pci_len0 = mem_len;
 	} else {
 		return -EIO;
 	}
@@ -656,11 +670,6 @@
 
 	dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
 
-	adapter->ahw.pci_base0 = mem_ptr0;
-	adapter->ahw.pci_len0 = pci_len0;
-	adapter->ahw.pci_base1 = mem_ptr1;
-	adapter->ahw.pci_base2 = mem_ptr2;
-
 	if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
 		adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
 			NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@ -1253,8 +1262,8 @@
 	int pci_func_id = PCI_FUNC(pdev->devfn);
 	uint8_t revision_id;
 
-	if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
-		pr_warning("%s: chip revisions between 0x%x-0x%x"
+	if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+		pr_warning("%s: chip revisions between 0x%x-0x%x "
 				"will not be enabled.\n",
 				module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
 		return -ENODEV;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2..1028fcb 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
 	PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
 	PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@
 	PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
 	PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
 	PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
-	PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
+	PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
 	PCMCIA_DEVICE_CIS_PROD_ID12("PMX   ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/pcmcia/smc91c92_cs.c b/drivers/net/pcmcia/smc91c92_cs.c
index f45c626..ad22676 100644
--- a/drivers/net/pcmcia/smc91c92_cs.c
+++ b/drivers/net/pcmcia/smc91c92_cs.c
@@ -493,13 +493,14 @@
 {
 	struct net_device *dev = priv;
 	cisparse_t parse;
+	u8 *buf;
 
 	if (pcmcia_parse_tuple(tuple, &parse))
 		return -EINVAL;
 
-	if ((parse.version_1.ns > 3) &&
-	    (cvt_ascii_address(dev,
-			       (parse.version_1.str + parse.version_1.ofs[3]))))
+	buf = parse.version_1.str + parse.version_1.ofs[3];
+
+	if ((parse.version_1.ns > 3) && (cvt_ascii_address(dev, buf) == 0))
 		return 0;
 
 	return -EINVAL;
@@ -528,7 +529,7 @@
     len = pcmcia_get_tuple(link, 0x81, &buf);
     if (buf && len >= 13) {
 	    buf[12] = '\0';
-	    if (cvt_ascii_address(dev, buf))
+	    if (cvt_ascii_address(dev, buf) == 0)
 		    rc = 0;
     }
     kfree(buf);
@@ -910,7 +911,7 @@
 
     if (i != 0) {
 	printk(KERN_NOTICE "smc91c92_cs: Unable to find hardware address.\n");
-	goto config_undo;
+	goto config_failed;
     }
 
     smc->duplex = 0;
@@ -998,6 +999,7 @@
     unregister_netdev(dev);
 config_failed:
     smc91c92_release(link);
+    free_netdev(dev);
     return -ENODEV;
 } /* smc91c92_config */
 
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 64cd250..340da39 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -187,8 +187,13 @@
 
 MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
 
-static int rx_copybreak = 200;
-static int use_dac = -1;
+/*
+ * we set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see the note in
+ * rtl8169_open())
+ */
+static int rx_copybreak = 16383;
+static int use_dac;
 static struct {
 	u32 msg_enable;
 } debug = { -1 };
@@ -513,8 +518,7 @@
 module_param(rx_copybreak, int, 0);
 MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
 module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
-" Unsafe on 32 bit PCI slot.");
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
 module_param_named(debug, debug.msg_enable, int, 0);
 MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
 MODULE_LICENSE("GPL");
@@ -2837,8 +2841,8 @@
 	spin_lock_irq(&tp->lock);
 
 	RTL_W8(Cfg9346, Cfg9346_Unlock);
-	RTL_W32(MAC0, low);
 	RTL_W32(MAC4, high);
+	RTL_W32(MAC0, low);
 	RTL_W8(Cfg9346, Cfg9346_Lock);
 
 	spin_unlock_irq(&tp->lock);
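This hunk (and the matching MAR0 reorder later in this file) writes the high half of the address before the low one. The diff itself doesn't say why; a plausible reading is that the hardware commits the address on the low-dword write, so high-then-low never exposes a half-updated address. A sketch of that convention with a stand-in register file:

#include <stdint.h>
#include <stdio.h>

static uint32_t regs[2]; /* [0] = low dword, [1] = high word; stand-in MMIO */

static void set_mac(uint64_t mac)
{
	/* write the high part first; the (assumed) hardware latches on the
	 * low-dword write, so the address becomes visible in one piece */
	regs[1] = (uint32_t)(mac >> 32);
	regs[0] = (uint32_t)mac;
}

int main(void)
{
	set_mac(0x0011223344556677ULL);
	printf("%08x %08x\n", regs[1], regs[0]);
	return 0;
}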
@@ -2990,7 +2994,6 @@
 	void __iomem *ioaddr;
 	unsigned int i;
 	int rc;
-	int this_use_dac = use_dac;
 
 	if (netif_msg_drv(&debug)) {
 		printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3056,17 +3059,8 @@
 
 	tp->cp_cmd = PCIMulRW | RxChkSum;
 
-	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
-	if (!tp->pcie_cap)
-		netif_info(tp, probe, dev, "no PCI Express capability\n");
-
-	if (this_use_dac < 0)
-		this_use_dac = tp->pcie_cap != 0;
-
 	if ((sizeof(dma_addr_t) > 4) &&
-	    this_use_dac &&
-	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
-		netif_info(tp, probe, dev, "using 64-bit DMA\n");
+	    !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
 		tp->cp_cmd |= PCIDAC;
 		dev->features |= NETIF_F_HIGHDMA;
 	} else {
@@ -3085,6 +3079,10 @@
 		goto err_out_free_res_4;
 	}
 
+	tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+	if (!tp->pcie_cap)
+		netif_info(tp, probe, dev, "no PCI Express capability\n");
+
 	RTL_W16(IntrMask, 0x0000);
 
 	/* Soft reset the chip. */
@@ -3254,9 +3252,13 @@
 }
 
 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
-				  struct net_device *dev)
+				  unsigned int mtu)
 {
-	unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+	if (max_frame != 16383)
+		printk(KERN_WARNING PFX "WARNING! Changing of MTU on this "
+			"NIC may lead to frame reception errors!\n");
 
 	tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
 }
@@ -3269,7 +3271,17 @@
 
 	pm_runtime_get_sync(&pdev->dev);
 
-	rtl8169_set_rxbufsize(tp, dev);
+	/*
+	 * Note that we use a magic value here; it's weird, I know.
+	 * It's done because some subset of rtl8169 hardware suffers from
+	 * a problem in which frames received that are longer than
+	 * the size set in the RxMaxSize register return garbage sizes
+	 * when received.  To avoid this we need to turn off size
+	 * filtering, which is done by setting a value of 16383 in the
+	 * RxMaxSize register and allocating 16k frames to handle the
+	 * largest possible rx value; that's what the magic math below does.
+	 */
+	rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
 
 	/*
 	 * Rx and Tx descriptors need 256-byte alignment.
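Given the comment above, the MTU passed in is chosen so that adding the VLAN Ethernet header and FCS lands exactly on 16383, keeping the new warning in rtl8169_set_rxbufsize quiet. A quick arithmetic check with the standard header sizes:

#include <stdio.h>

#define VLAN_ETH_HLEN 18 /* 14-byte Ethernet header + 4-byte VLAN tag */
#define ETH_FCS_LEN    4

int main(void)
{
	unsigned int mtu = 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN; /* 16361 */
	unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;

	/* max_frame comes back to exactly 16383 */
	printf("%u %u\n", mtu, max_frame);
	return 0;
}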
@@ -3929,7 +3941,7 @@
 
 	rtl8169_down(dev);
 
-	rtl8169_set_rxbufsize(tp, dev);
+	rtl8169_set_rxbufsize(tp, dev->mtu);
 
 	ret = rtl8169_init_ring(dev);
 	if (ret < 0)
@@ -4808,8 +4820,8 @@
 		mc_filter[1] = swab32(data);
 	}
 
-	RTL_W32(MAR0 + 0, mc_filter[0]);
 	RTL_W32(MAR0 + 4, mc_filter[1]);
+	RTL_W32(MAR0 + 0, mc_filter[0]);
 
 	RTL_W32(RxConfig, tmp);
 
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index ed999d3..37f6a00 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -592,8 +592,10 @@
 	/* Setup... */
 	len = skb->len;
 	if (len < ETH_ZLEN) {
-		if (skb_padto(skb, ETH_ZLEN))
+		if (skb_padto(skb, ETH_ZLEN)) {
+			spin_unlock_irqrestore(&sp->tx_lock, flags);
 			return NETDEV_TX_OK;
+		}
 		len = ETH_ZLEN;
 	}
 
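The sgiseeq fix above is the classic lock leak on an early return: every exit taken after the spinlock is acquired must pass through the matching unlock. A userspace sketch of the shape, with a pthread mutex standing in for sp->tx_lock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t tx_lock = PTHREAD_MUTEX_INITIALIZER;

static int pad_fails(void) { return 1; } /* stand-in for skb_padto() failing */

static int xmit(void)
{
	pthread_mutex_lock(&tx_lock);

	if (pad_fails()) {
		/* the bug was returning here without this unlock */
		pthread_mutex_unlock(&tx_lock);
		return 0;
	}

	/* ... queue the frame ... */
	pthread_mutex_unlock(&tx_lock);
	return 0;
}

int main(void)
{
	printf("%d\n", xmit());
	return 0;
}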
diff --git a/drivers/net/stmmac/Kconfig b/drivers/net/stmmac/Kconfig
index fb28764..eb63d44 100644
--- a/drivers/net/stmmac/Kconfig
+++ b/drivers/net/stmmac/Kconfig
@@ -2,6 +2,7 @@
 	tristate "STMicroelectronics 10/100/1000 Ethernet driver"
 	select MII
 	select PHYLIB
+	select CRC32
 	depends on NETDEVICES && CPU_SUBTYPE_ST40
 	help
 	  This is the driver for the Ethernet IPs built around a
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index b79d908..7063f56 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@
 
 			if ( !(rdes0 & 0x8000) ||
 				((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+				struct sk_buff *new_skb = NULL;
+
 				skb = rxptr->rx_skb_ptr;
 
 				/* Good packet, send to upper layer */
 				/* Short packets use a new SKB */
-				if ( (rxlen < RX_COPY_SIZE) &&
-					( (skb = dev_alloc_skb(rxlen + 2) )
-					!= NULL) ) {
+				if ((rxlen < RX_COPY_SIZE) &&
+				    (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+					skb = new_skb;
 					/* size less than COPY_SIZE, allocate a rxlen SKB */
 					skb_reserve(skb, 2); /* 16byte align */
 					memcpy(skb_put(skb, rxlen),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 078903f..616f8c9 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -812,7 +812,7 @@
 
 	case FLOW_CNTL_TX_RX:
 		MII_REG_BITS_ON(ADVERTISE_PAUSE_CAP, MII_ADVERTISE, vptr->mac_regs);
-		MII_REG_BITS_ON(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
+		MII_REG_BITS_OFF(ADVERTISE_PAUSE_ASYM, MII_ADVERTISE, vptr->mac_regs);
 		break;
 
 	case FLOW_CNTL_DISABLE:
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index 4e30197..6b1cb70 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -94,6 +94,8 @@
 	{ USB_DEVICE(0x04bb, 0x093f) },
 	/* AVM FRITZ!WLAN USB Stick N */
 	{ USB_DEVICE(0x057C, 0x8401) },
+	/* NEC WL300NU-G */
+	{ USB_DEVICE(0x0409, 0x0249) },
 	/* AVM FRITZ!WLAN USB Stick N 2.4 */
 	{ USB_DEVICE(0x057C, 0x8402), .driver_info = AR9170_REQ_FW1_ONLY },
 
@@ -416,7 +418,7 @@
 	spin_unlock_irqrestore(&aru->common.cmdlock, flags);
 
 	usb_fill_int_urb(urb, aru->udev,
-			 usb_sndbulkpipe(aru->udev, AR9170_EP_CMD),
+			 usb_sndintpipe(aru->udev, AR9170_EP_CMD),
 			 aru->common.cmdbuf, plen + 4,
 			 ar9170_usb_tx_urb_complete, NULL, 1);
 
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index 3949133..3297fc7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -2047,16 +2047,14 @@
 				   tx_resp->failure_frame);
 
 		freed = iwl_tx_queue_reclaim(priv, txq_id, index);
-		if (qc && likely(sta_id != IWL_INVALID_STATION))
-			priv->stations[sta_id].tid[tid].tfds_in_queue -= freed;
+		iwl_free_tfds_in_queue(priv, sta_id, tid, freed);
 
 		if (priv->mac80211_registered &&
 		    (iwl_queue_space(&txq->q) > txq->q.low_mark))
 			iwl_wake_queue(priv, txq_id);
 	}
 
-	if (qc && likely(sta_id != IWL_INVALID_STATION))
-		iwl_txq_check_empty(priv, sta_id, tid, txq_id);
+	iwl_txq_check_empty(priv, sta_id, tid, txq_id);
 
 	if (iwl_check_bits(status, TX_ABORT_REQUIRED_MSK))
 		IWL_ERR(priv, "TODO:  Implement Tx ABORT REQUIRED!!!\n");
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 0a376f7..f43a45d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -1236,7 +1236,15 @@
 	/* Ack/clear/reset pending uCode interrupts.
 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
 	 */
-	iwl_write32(priv, CSR_INT, priv->_agn.inta);
+	/* There is a hardware bug in the interrupt mask function: some
+	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
+	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
+	 * ICT interrupt handling mechanism has another bug that might cause
+	 * these unmasked interrupts to fail to be detected. We work around
+	 * the hardware bugs here by ACKing all the possible interrupts so
+	 * that interrupt coalescing can still be achieved.
+	 */
+	iwl_write32(priv, CSR_INT, priv->_agn.inta | ~priv->inta_mask);
 
 	inta = priv->_agn.inta;
 
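Concretely, the write above ACKs not just the latched bits in inta but every source outside the enabled mask, so interrupts that fire despite being masked get cleared too. A bit-level check with illustrative values:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t inta_mask = 0x0000ffffu; /* illustrative enabled sources */
	uint32_t inta      = 0x00000003u; /* what actually latched */

	/* ack the latched bits plus everything outside the enabled mask */
	uint32_t ack = inta | ~inta_mask;

	printf("0x%08x\n", ack); /* 0xffff0003 */
	return 0;
}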
@@ -2611,7 +2619,7 @@
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
 
 	/*
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 4995134..64f150b 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -1946,7 +1946,7 @@
 {
 	int i;
 
-	for (i = 0; i < IWL_RATE_COUNT; i++) {
+	for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
 		rates[i].bitrate = iwl3945_rates[i].ieee * 5;
 		rates[i].hw_value = i; /* Rate scaling will work on indexes */
 		rates[i].hw_value_short = i;
@@ -3950,7 +3950,7 @@
 		BIT(NL80211_IFTYPE_STATION) |
 		BIT(NL80211_IFTYPE_ADHOC);
 
-	hw->wiphy->flags |= WIPHY_FLAG_STRICT_REGULATORY |
+	hw->wiphy->flags |= WIPHY_FLAG_CUSTOM_REGULATORY |
 			    WIPHY_FLAG_DISABLE_BEACON_HINTS;
 
 	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX_3945;
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
index 4396dcc..82ebe14 100644
--- a/drivers/net/wireless/libertas/cfg.c
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -172,6 +172,8 @@
 	if (ret < 0)
 		lbs_pr_err("cannot register wiphy device\n");
 
+	priv->wiphy_registered = true;
+
 	ret = register_netdev(priv->dev);
 	if (ret)
 		lbs_pr_err("cannot register network device\n");
@@ -190,9 +192,11 @@
 	if (!wdev)
 		return;
 
-	if (wdev->wiphy) {
+	if (priv->wiphy_registered)
 		wiphy_unregister(wdev->wiphy);
+
+	if (wdev->wiphy)
 		wiphy_free(wdev->wiphy);
-	}
+
 	kfree(wdev);
 }
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index 058d172..a54880e 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -36,6 +36,7 @@
 
 	/* CFG80211 */
 	struct wireless_dev *wdev;
+	bool wiphy_registered;
 
 	/* Mesh */
 	struct net_device *mesh_dev; /* Virtual device */
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 6599fd1..1b5d0ae 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -3851,6 +3851,7 @@
 MODULE_FIRMWARE("mwl8k/fmimage_8366.fw");
 
 static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
+	{ PCI_VDEVICE(MARVELL, 0x2a0a), .driver_data = MWL8363, },
 	{ PCI_VDEVICE(MARVELL, 0x2a0c), .driver_data = MWL8363, },
 	{ PCI_VDEVICE(MARVELL, 0x2a24), .driver_data = MWL8363, },
 	{ PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c
index b3c4fbd..e3cfc00 100644
--- a/drivers/net/wireless/p54/p54usb.c
+++ b/drivers/net/wireless/p54/p54usb.c
@@ -35,6 +35,7 @@
 static struct usb_device_id p54u_table[] __devinitdata = {
 	/* Version 1 devices (pci chip + net2280) */
 	{USB_DEVICE(0x0506, 0x0a11)},	/* 3COM 3CRWE254G72 */
+	{USB_DEVICE(0x06b9, 0x0120)},	/* Thomson SpeedTouch 120g */
 	{USB_DEVICE(0x0707, 0xee06)},	/* SMC 2862W-G */
 	{USB_DEVICE(0x07aa, 0x001c)},	/* Corega CG-WLUSB2GT */
 	{USB_DEVICE(0x083a, 0x4501)},	/* Accton 802.11g WN4501 USB */
diff --git a/drivers/net/wireless/rt2x00/rt2500usb.c b/drivers/net/wireless/rt2x00/rt2500usb.c
index 9b04964..13444b6 100644
--- a/drivers/net/wireless/rt2x00/rt2500usb.c
+++ b/drivers/net/wireless/rt2x00/rt2500usb.c
@@ -1643,6 +1643,11 @@
 	unsigned int i;
 
 	/*
+	 * Disable powersaving as default.
+	 */
+	rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
+
+	/*
 	 * Initialize all hw fields.
 	 */
 	rt2x00dev->hw->flags =
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
index 18d4d8e..326fce7 100644
--- a/drivers/net/wireless/rt2x00/rt2800lib.c
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -812,9 +812,9 @@
 	rt2800_rfcsr_write(rt2x00dev, 24,
 			      rt2x00dev->calibration[conf_is_ht40(conf)]);
 
-	rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
+	rt2800_rfcsr_read(rt2x00dev, 7, &rfcsr);
 	rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
-	rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
+	rt2800_rfcsr_write(rt2x00dev, 7, rfcsr);
 }
 
 static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 3d102dd..0b51857 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o
 obj-$(CONFIG_X86_VISWS) += setup-irq.o
 obj-$(CONFIG_MN10300) += setup-bus.o
+obj-$(CONFIG_MICROBLAZE) += setup-bus.o
 
 #
 # ACPI Related PCI FW Functions
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index de29645..9976685 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -655,8 +655,8 @@
 		goto legacy_io_err;
 
 	/* Allocated above after the legacy_io struct */
-	sysfs_bin_attr_init(b->legacy_mem);
 	b->legacy_mem = b->legacy_io + 1;
+	sysfs_bin_attr_init(b->legacy_mem);
 	b->legacy_mem->attr.name = "legacy_mem";
 	b->legacy_mem->size = 1024*1024;
 	b->legacy_mem->attr.mode = S_IRUSR | S_IWUSR;
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index a04f21c..f5da626 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -133,6 +133,7 @@
 		sockets[i].socket.map_size = 0x1000;
 		sockets[i].socket.irq_mask = 0;
 		sockets[i].socket.pci_irq  = dev->irq;
+		sockets[i].socket.cb_dev  = dev;
 		sockets[i].socket.owner = THIS_MODULE;
 
 		sockets[i].number = i;
diff --git a/drivers/pcmcia/i82365.h b/drivers/pcmcia/i82365.h
index 849ef1b..3f84d7a 100644
--- a/drivers/pcmcia/i82365.h
+++ b/drivers/pcmcia/i82365.h
@@ -95,6 +95,7 @@
 #define I365_CSC_DETECT	0x08
 #define I365_CSC_ANY	0x0F
 #define I365_CSC_GPI	0x10
+#define I365_CSC_IRQ_MASK	0xF0
 
 /* Flags for I365_ADDRWIN */
 #define I365_ENA_IO(map)	(0x40 << (map))
diff --git a/drivers/pcmcia/pcmcia_resource.c b/drivers/pcmcia/pcmcia_resource.c
index b2df041..c4612c5 100644
--- a/drivers/pcmcia/pcmcia_resource.c
+++ b/drivers/pcmcia/pcmcia_resource.c
@@ -256,6 +256,7 @@
 {
 	struct pcmcia_socket *s;
 	config_t *c;
+	int ret;
 
 	s = p_dev->socket;
 
@@ -264,13 +265,13 @@
 
 	if (!(s->state & SOCKET_PRESENT)) {
 		dev_dbg(&s->dev, "No card present\n");
-		mutex_unlock(&s->ops_mutex);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto unlock;
 	}
 	if (!(c->state & CONFIG_LOCKED)) {
 		dev_dbg(&s->dev, "Configuration isnt't locked\n");
-		mutex_unlock(&s->ops_mutex);
-		return -EACCES;
+		ret = -EACCES;
+		goto unlock;
 	}
 
 	if (mod->Attributes & CONF_IRQ_CHANGE_VALID) {
@@ -286,7 +287,8 @@
 
 	if (mod->Attributes & CONF_VCC_CHANGE_VALID) {
 		dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	/* We only allow changing Vpp1 and Vpp2 to the same value */
@@ -294,21 +296,21 @@
 	    (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
 		if (mod->Vpp1 != mod->Vpp2) {
 			dev_dbg(&s->dev, "Vpp1 and Vpp2 must be the same\n");
-			mutex_unlock(&s->ops_mutex);
-			return -EINVAL;
+			ret = -EINVAL;
+			goto unlock;
 		}
 		s->socket.Vpp = mod->Vpp1;
 		if (s->ops->set_socket(s, &s->socket)) {
-			mutex_unlock(&s->ops_mutex);
 			dev_printk(KERN_WARNING, &s->dev,
 				   "Unable to set VPP\n");
-			return -EIO;
+			ret = -EIO;
+			goto unlock;
 		}
 	} else if ((mod->Attributes & CONF_VPP1_CHANGE_VALID) ||
 		   (mod->Attributes & CONF_VPP2_CHANGE_VALID)) {
 		dev_dbg(&s->dev, "changing Vcc is not allowed at this time\n");
-		mutex_unlock(&s->ops_mutex);
-		return -EINVAL;
+		ret = -EINVAL;
+		goto unlock;
 	}
 
 	if (mod->Attributes & CONF_IO_CHANGE_WIDTH) {
@@ -332,9 +334,11 @@
 			s->ops->set_io_map(s, &io_on);
 		}
 	}
+	ret = 0;
+unlock:
 	mutex_unlock(&s->ops_mutex);
 
-	return 0;
+	return ret;
 } /* modify_configuration */
 EXPORT_SYMBOL(pcmcia_modify_configuration);
 
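The rework above funnels every error path in pcmcia_modify_configuration through a single unlock label, which is also what fixes the CONF_VCC_CHANGE_VALID path that previously returned with ops_mutex still held. A compact sketch of the idiom, with errno-style codes and a pthread mutex standing in for ops_mutex:

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ops_mutex = PTHREAD_MUTEX_INITIALIZER;

static int modify(int card_present, int locked)
{
	int ret;

	pthread_mutex_lock(&ops_mutex);

	if (!card_present) {
		ret = -ENODEV;
		goto unlock;
	}
	if (!locked) {
		ret = -EACCES;
		goto unlock;
	}

	/* ... apply the changes ... */
	ret = 0;
unlock:
	pthread_mutex_unlock(&ops_mutex); /* the one and only unlock */
	return ret;
}

int main(void)
{
	printf("%d %d\n", modify(0, 1), modify(1, 1));
	return 0;
}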
@@ -752,14 +756,6 @@
 
 #ifdef CONFIG_PCMCIA_PROBE
 
-#ifdef IRQ_NOAUTOEN
-	/* if the underlying IRQ infrastructure allows for it, only allocate
-	 * the IRQ, but do not enable it
-	 */
-	if (!(req->Handler))
-		type |= IRQ_NOAUTOEN;
-#endif /* IRQ_NOAUTOEN */
-
 	if (s->irq.AssignedIRQ != 0) {
 		/* If the interrupt is already assigned, it must be the same */
 		irq = s->irq.AssignedIRQ;
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 7c20491..7ba57a5 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -671,6 +671,7 @@
 		socket[i].socket.map_size = 0x1000;
 		socket[i].socket.irq_mask = mask;
 		socket[i].socket.pci_irq  = dev->irq;
+		socket[i].socket.cb_dev = dev;
 		socket[i].socket.owner = THIS_MODULE;
 
 		socket[i].number = i;
diff --git a/drivers/pcmcia/ti113x.h b/drivers/pcmcia/ti113x.h
index aaa7022..9ffa97d 100644
--- a/drivers/pcmcia/ti113x.h
+++ b/drivers/pcmcia/ti113x.h
@@ -296,7 +296,7 @@
 	u8 new, reg = exca_readb(socket, I365_INTCTL);
 
 	new = reg & ~I365_INTR_ENA;
-	if (socket->cb_irq)
+	if (socket->dev->irq)
 		new |= I365_INTR_ENA;
 	if (new != reg)
 		exca_writeb(socket, I365_INTCTL, new);
@@ -316,14 +316,47 @@
 	return 0;
 }
 
+static void ti113x_use_isa_irq(struct yenta_socket *socket)
+{
+	int isa_irq = -1;
+	u8 intctl;
+	u32 isa_irq_mask = 0;
+
+	if (!isa_probe)
+		return;
+
+	/* get a free isa int */
+	isa_irq_mask = yenta_probe_irq(socket, isa_interrupts);
+	if (!isa_irq_mask)
+		return; /* no usable ISA irq found */
+
+	/* choose highest available */
+	for (; isa_irq_mask; isa_irq++)
+		isa_irq_mask >>= 1;
+	socket->cb_irq = isa_irq;
+
+	exca_writeb(socket, I365_CSCINT, (isa_irq << 4));
+
+	intctl = exca_readb(socket, I365_INTCTL);
+	intctl &= ~(I365_INTR_ENA | I365_IRQ_MASK);     /* CSC Enable */
+	exca_writeb(socket, I365_INTCTL, intctl);
+
+	dev_info(&socket->dev->dev,
+		"Yenta TI113x: using isa irq %d for CardBus\n", isa_irq);
+}
+
+
 static int ti113x_override(struct yenta_socket *socket)
 {
 	u8 cardctl;
 
 	cardctl = config_readb(socket, TI113X_CARD_CONTROL);
 	cardctl &= ~(TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_IREQ | TI113X_CCR_PCI_CSC);
-	if (socket->cb_irq)
+	if (socket->dev->irq)
 		cardctl |= TI113X_CCR_PCI_IRQ_ENA | TI113X_CCR_PCI_CSC | TI113X_CCR_PCI_IREQ;
+	else
+		ti113x_use_isa_irq(socket);
+
 	config_writeb(socket, TI113X_CARD_CONTROL, cardctl);
 
 	return ti_override(socket);
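The IRQ pick in ti113x_use_isa_irq above is a compact highest-set-bit scan: starting at -1 and shifting until the mask empties leaves the index of the top set bit. A worked check (0x0800 is IRQ 11; twelve shifts from -1 gives 11):

#include <stdio.h>
#include <stdint.h>

static int highest_irq(uint32_t mask)
{
	int irq = -1;

	for (; mask; irq++)
		mask >>= 1;
	return irq; /* -1 when no bit was set */
}

int main(void)
{
	printf("%d %d %d\n",
	       highest_irq(0x0800), highest_irq(0x0ef8), highest_irq(0));
	return 0;
}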
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index c9fcbdc..aaccdb9 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -105,6 +105,7 @@
 	char name[24];
 	int csc_irq;
 	int io_irq;
+	spinlock_t lock;
 } vrc4171_socket_t;
 
 static vrc4171_socket_t vrc4171_sockets[CARD_MAX_SLOTS];
@@ -327,7 +328,7 @@
 	slot = sock->sock;
 	socket = &vrc4171_sockets[slot];
 
-	spin_lock_irq(&sock->lock);
+	spin_lock_irq(&socket->lock);
 
 	voltage = set_Vcc_value(state->Vcc);
 	exca_write_byte(slot, CARD_VOLTAGE_SELECT, voltage);
@@ -370,7 +371,7 @@
 		cscint |= I365_CSC_DETECT;
         exca_write_byte(slot, I365_CSCINT, cscint);
 
-	spin_unlock_irq(&sock->lock);
+	spin_unlock_irq(&socket->lock);
 
 	return 0;
 }
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 967c766..418988a 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -42,6 +42,18 @@
 MODULE_PARM_DESC(o2_speedup, "Use prefetch/burst for O2-bridges: 'on', 'off' "
 	"or 'default' (uses recommended behaviour for the detected bridge)");
 
+/*
+ * Only probe "regular" interrupts, don't
+ * touch dangerous spots like the mouse irq,
+ * because there are mice that apparently
+ * get really confused if they get fondled
+ * too intimately.
+ *
+ * Default to 11, 10, 9, 7, 6, 5, 4, 3.
+ */
+static u32 isa_interrupts = 0x0ef8;
+
+
 #define debug(x, s, args...) dev_dbg(&s->dev->dev, x, ##args)
 
 /* Don't ask.. */
@@ -54,6 +66,8 @@
  */
 #ifdef CONFIG_YENTA_TI
 static int yenta_probe_cb_irq(struct yenta_socket *socket);
+static unsigned int yenta_probe_irq(struct yenta_socket *socket,
+				u32 isa_irq_mask);
 #endif
 
 
@@ -329,8 +343,8 @@
 		/* ISA interrupt control? */
 		intr = exca_readb(socket, I365_INTCTL);
 		intr = (intr & ~0xf);
-		if (!socket->cb_irq) {
-			intr |= state->io_irq;
+		if (!socket->dev->irq) {
+			intr |= socket->cb_irq ? socket->cb_irq : state->io_irq;
 			bridge |= CB_BRIDGE_INTR;
 		}
 		exca_writeb(socket, I365_INTCTL, intr);
@@ -340,7 +354,7 @@
 		reg = exca_readb(socket, I365_INTCTL) & (I365_RING_ENA | I365_INTR_ENA);
 		reg |= (state->flags & SS_RESET) ? 0 : I365_PC_RESET;
 		reg |= (state->flags & SS_IOCARD) ? I365_PC_IOCARD : 0;
-		if (state->io_irq != socket->cb_irq) {
+		if (state->io_irq != socket->dev->irq) {
 			reg |= state->io_irq;
 			bridge |= CB_BRIDGE_INTR;
 		}
@@ -356,7 +370,9 @@
 			exca_writeb(socket, I365_POWER, reg);
 
 		/* CSC interrupt: no ISA irq for CSC */
-		reg = I365_CSC_DETECT;
+		reg = exca_readb(socket, I365_CSCINT);
+		reg &= I365_CSC_IRQ_MASK;
+		reg |= I365_CSC_DETECT;
 		if (state->flags & SS_IOCARD) {
 			if (state->csc_mask & SS_STSCHG)
 				reg |= I365_CSC_STSCHG;
@@ -896,22 +912,12 @@
 };
 
 
-/*
- * Only probe "regular" interrupts, don't
- * touch dangerous spots like the mouse irq,
- * because there are mice that apparently
- * get really confused if they get fondled
- * too intimately.
- *
- * Default to 11, 10, 9, 7, 6, 5, 4, 3.
- */
-static u32 isa_interrupts = 0x0ef8;
-
 static unsigned int yenta_probe_irq(struct yenta_socket *socket, u32 isa_irq_mask)
 {
 	int i;
 	unsigned long val;
 	u32 mask;
+	u8 reg;
 
 	/*
 	 * Probe for usable interrupts using the force
@@ -919,6 +925,7 @@
 	 */
 	cb_writel(socket, CB_SOCKET_EVENT, -1);
 	cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
+	reg = exca_readb(socket, I365_CSCINT);
 	exca_writeb(socket, I365_CSCINT, 0);
 	val = probe_irq_on() & isa_irq_mask;
 	for (i = 1; i < 16; i++) {
@@ -930,7 +937,7 @@
 		cb_writel(socket, CB_SOCKET_EVENT, -1);
 	}
 	cb_writel(socket, CB_SOCKET_MASK, 0);
-	exca_writeb(socket, I365_CSCINT, 0);
+	exca_writeb(socket, I365_CSCINT, reg);
 
 	mask = probe_irq_mask(val) & 0xffff;
 
@@ -967,6 +974,8 @@
 /* probes the PCI interrupt, use only on override functions */
 static int yenta_probe_cb_irq(struct yenta_socket *socket)
 {
+	u8 reg;
+
 	if (!socket->cb_irq)
 		return -1;
 
@@ -979,7 +988,8 @@
 	}
 
 	/* generate interrupt, wait */
-	exca_writeb(socket, I365_CSCINT, I365_CSC_STSCHG);
+	reg = exca_readb(socket, I365_CSCINT);
+	exca_writeb(socket, I365_CSCINT, reg | I365_CSC_STSCHG);
 	cb_writel(socket, CB_SOCKET_EVENT, -1);
 	cb_writel(socket, CB_SOCKET_MASK, CB_CSTSMASK);
 	cb_writel(socket, CB_SOCKET_FORCE, CB_FCARDSTS);
@@ -988,7 +998,7 @@
 
 	/* disable interrupts */
 	cb_writel(socket, CB_SOCKET_MASK, 0);
-	exca_writeb(socket, I365_CSCINT, 0);
+	exca_writeb(socket, I365_CSCINT, reg);
 	cb_writel(socket, CB_SOCKET_EVENT, -1);
 	exca_readb(socket, I365_CSC);
 
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 226b3e9..cbca40a 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -922,9 +922,13 @@
 
 static int __devinit acer_backlight_init(struct device *dev)
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 
-	bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = max_brightness;
+	bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops,
+				       &props);
 	if (IS_ERR(bd)) {
 		printk(ACER_ERR "Could not register Acer backlight device\n");
 		acer_backlight_device = NULL;
@@ -935,7 +939,6 @@
 
 	bd->props.power = FB_BLANK_UNBLANK;
 	bd->props.brightness = read_brightness(bd);
-	bd->props.max_brightness = max_brightness;
 	backlight_update_status(bd);
 	return 0;
 }
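Nearly every hunk from here to the end of drivers/platform/x86 applies the same recipe: zero a stack-allocated struct backlight_properties, fill in max_brightness, and pass it to the five-argument backlight_device_register() instead of poking bd->props after the fact. A condensed stub of the pattern; the struct layout and registration internals here are stand-ins, only the calling convention mirrors the hunks:

#include <stdio.h>
#include <string.h>

/* stand-in types: only max_brightness mirrors the hunks above */
struct backlight_properties { int max_brightness; int brightness; int power; };
struct backlight_device { struct backlight_properties props; };

static struct backlight_device *backlight_device_register(
		const char *name, void *parent, void *devdata,
		const void *ops, const struct backlight_properties *props)
{
	static struct backlight_device bd;

	bd.props = *props; /* the core copies the caller's properties */
	printf("registered %s\n", name);
	return &bd;
}

int main(void)
{
	struct backlight_properties props;
	struct backlight_device *bd;

	/* the recipe each hunk applies: zero, fill, pass at registration */
	memset(&props, 0, sizeof(props));
	props.max_brightness = 15;
	bd = backlight_device_register("example-bl", NULL, NULL, NULL, &props);
	printf("max_brightness=%d\n", bd->props.max_brightness);
	return 0;
}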
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index 791fcf3..db5f7db 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -639,12 +639,16 @@
 {
 	struct backlight_device *bd;
 	struct device *dev = &asus->platform_device->dev;
+	struct backlight_properties props;
 
 	if (!acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_GET, NULL) &&
 	    !acpi_check_handle(asus->handle, METHOD_BRIGHTNESS_SET, NULL) &&
 	    lcd_switch_handle) {
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = 15;
+
 		bd = backlight_device_register(ASUS_LAPTOP_FILE, dev,
-					       asus, &asusbl_ops);
+					       asus, &asusbl_ops, &props);
 		if (IS_ERR(bd)) {
 			pr_err("Could not register asus backlight device\n");
 			asus->backlight_device = NULL;
@@ -653,7 +657,6 @@
 
 		asus->backlight_device = bd;
 
-		bd->props.max_brightness = 15;
 		bd->props.power = FB_BLANK_UNBLANK;
 		bd->props.brightness = asus_read_brightness(bd);
 		backlight_update_status(bd);
diff --git a/drivers/platform/x86/asus_acpi.c b/drivers/platform/x86/asus_acpi.c
index 1381430..ee52035 100644
--- a/drivers/platform/x86/asus_acpi.c
+++ b/drivers/platform/x86/asus_acpi.c
@@ -1481,6 +1481,7 @@
 
 static int __init asus_acpi_init(void)
 {
+	struct backlight_properties props;
 	int result;
 
 	result = acpi_bus_register_driver(&asus_hotk_driver);
@@ -1507,15 +1508,17 @@
 		return -ENODEV;
 	}
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 15;
 	asus_backlight_device = backlight_device_register("asus", NULL, NULL,
-							  &asus_backlight_data);
+							  &asus_backlight_data,
+							  &props);
 	if (IS_ERR(asus_backlight_device)) {
 		printk(KERN_ERR "Could not register asus backlight device\n");
 		asus_backlight_device = NULL;
 		asus_acpi_exit();
 		return -ENODEV;
 	}
-	asus_backlight_device->props.max_brightness = 15;
 
 	return 0;
 }
diff --git a/drivers/platform/x86/classmate-laptop.c b/drivers/platform/x86/classmate-laptop.c
index 035a7dd..c696cf1 100644
--- a/drivers/platform/x86/classmate-laptop.c
+++ b/drivers/platform/x86/classmate-laptop.c
@@ -455,18 +455,22 @@
 		return -1;
 }
 
-static struct backlight_ops cmpc_bl_ops = {
+static const struct backlight_ops cmpc_bl_ops = {
 	.get_brightness = cmpc_bl_get_brightness,
 	.update_status = cmpc_bl_update_status
 };
 
 static int cmpc_bl_add(struct acpi_device *acpi)
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 
-	bd = backlight_device_register("cmpc_bl", &acpi->dev,
-				       acpi->handle, &cmpc_bl_ops);
-	bd->props.max_brightness = 7;
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 7;
+	bd = backlight_device_register("cmpc_bl", &acpi->dev, acpi->handle,
+				       &cmpc_bl_ops, &props);
+	if (IS_ERR(bd))
+		return PTR_ERR(bd);
 	dev_set_drvdata(&acpi->dev, bd);
 	return 0;
 }
diff --git a/drivers/platform/x86/compal-laptop.c b/drivers/platform/x86/compal-laptop.c
index 2740b40..71ff154 100644
--- a/drivers/platform/x86/compal-laptop.c
+++ b/drivers/platform/x86/compal-laptop.c
@@ -291,12 +291,15 @@
 	/* Register backlight stuff */
 
 	if (!acpi_video_backlight_support()) {
-		compalbl_device = backlight_device_register("compal-laptop", NULL, NULL,
-							    &compalbl_ops);
+		struct backlight_properties props;
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = COMPAL_LCD_LEVEL_MAX - 1;
+		compalbl_device = backlight_device_register("compal-laptop",
+							    NULL, NULL,
+							    &compalbl_ops,
+							    &props);
 		if (IS_ERR(compalbl_device))
 			return PTR_ERR(compalbl_device);
-
-		compalbl_device->props.max_brightness = COMPAL_LCD_LEVEL_MAX-1;
 	}
 
 	ret = platform_driver_register(&compal_driver);
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index ef61497..46435ac 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -559,10 +559,14 @@
 	release_buffer();
 
 	if (max_intensity) {
-		dell_backlight_device = backlight_device_register(
-			"dell_backlight",
-			&platform_device->dev, NULL,
-			&dell_ops);
+		struct backlight_properties props;
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = max_intensity;
+		dell_backlight_device = backlight_device_register("dell_backlight",
+								  &platform_device->dev,
+								  NULL,
+								  &dell_ops,
+								  &props);
 
 		if (IS_ERR(dell_backlight_device)) {
 			ret = PTR_ERR(dell_backlight_device);
@@ -570,7 +574,6 @@
 			goto fail_backlight;
 		}
 
-		dell_backlight_device->props.max_brightness = max_intensity;
 		dell_backlight_device->props.brightness =
 			dell_get_intensity(dell_backlight_device);
 		backlight_update_status(dell_backlight_device);
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index 9a844ca..3fdf21e 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -1131,18 +1131,20 @@
 
 static int eeepc_backlight_init(struct eeepc_laptop *eeepc)
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 15;
 	bd = backlight_device_register(EEEPC_LAPTOP_FILE,
-				       &eeepc->platform_device->dev,
-				       eeepc, &eeepcbl_ops);
+				       &eeepc->platform_device->dev, eeepc,
+				       &eeepcbl_ops, &props);
 	if (IS_ERR(bd)) {
 		pr_err("Could not register eeepc backlight device\n");
 		eeepc->backlight_device = NULL;
 		return PTR_ERR(bd);
 	}
 	eeepc->backlight_device = bd;
-	bd->props.max_brightness = 15;
 	bd->props.brightness = read_brightness(bd);
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
diff --git a/drivers/platform/x86/fujitsu-laptop.c b/drivers/platform/x86/fujitsu-laptop.c
index 5f3320d..c1074b3 100644
--- a/drivers/platform/x86/fujitsu-laptop.c
+++ b/drivers/platform/x86/fujitsu-laptop.c
@@ -1126,16 +1126,20 @@
 	/* Register backlight stuff */
 
 	if (!acpi_video_backlight_support()) {
-		fujitsu->bl_device =
-			backlight_device_register("fujitsu-laptop", NULL, NULL,
-						  &fujitsubl_ops);
+		struct backlight_properties props;
+
+		memset(&props, 0, sizeof(struct backlight_properties));
+		max_brightness = fujitsu->max_brightness;
+		props.max_brightness = max_brightness - 1;
+		fujitsu->bl_device = backlight_device_register("fujitsu-laptop",
+							       NULL, NULL,
+							       &fujitsubl_ops,
+							       &props);
 		if (IS_ERR(fujitsu->bl_device)) {
 			ret = PTR_ERR(fujitsu->bl_device);
 			fujitsu->bl_device = NULL;
 			goto fail_sysfs_group;
 		}
-		max_brightness = fujitsu->max_brightness;
-		fujitsu->bl_device->props.max_brightness = max_brightness - 1;
 		fujitsu->bl_device->props.brightness = fujitsu->brightness_level;
 	}
 
diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
index c2b05da..996223a 100644
--- a/drivers/platform/x86/msi-laptop.c
+++ b/drivers/platform/x86/msi-laptop.c
@@ -683,11 +683,14 @@
 		printk(KERN_INFO "MSI: Brightness ignored, must be controlled "
 		       "by ACPI video driver\n");
 	} else {
+		struct backlight_properties props;
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = MSI_LCD_LEVEL_MAX - 1;
 		msibl_device = backlight_device_register("msi-laptop-bl", NULL,
-							 NULL, &msibl_ops);
+							 NULL, &msibl_ops,
+							 &props);
 		if (IS_ERR(msibl_device))
 			return PTR_ERR(msibl_device);
-		msibl_device->props.max_brightness = MSI_LCD_LEVEL_MAX-1;
 	}
 
 	ret = platform_driver_register(&msipf_driver);
diff --git a/drivers/platform/x86/msi-wmi.c b/drivers/platform/x86/msi-wmi.c
index f5f70d4..367caaa 100644
--- a/drivers/platform/x86/msi-wmi.c
+++ b/drivers/platform/x86/msi-wmi.c
@@ -138,7 +138,7 @@
 	return msi_wmi_set_block(0, backlight_map[bright]);
 }
 
-static struct backlight_ops msi_backlight_ops = {
+static const struct backlight_ops msi_backlight_ops = {
 	.get_brightness	= bl_get,
 	.update_status	= bl_set_status,
 };
@@ -249,12 +249,17 @@
 		goto err_uninstall_notifier;
 
 	if (!acpi_video_backlight_support()) {
-		backlight = backlight_device_register(DRV_NAME,
-				NULL, NULL, &msi_backlight_ops);
-		if (IS_ERR(backlight))
+		struct backlight_properties props;
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
+		backlight = backlight_device_register(DRV_NAME, NULL, NULL,
+						      &msi_backlight_ops,
+						      &props);
+		if (IS_ERR(backlight)) {
+			err = PTR_ERR(backlight);
 			goto err_free_input;
+		}
 
-		backlight->props.max_brightness = ARRAY_SIZE(backlight_map) - 1;
 		err = bl_get(NULL);
 		if (err < 0)
 			goto err_free_backlight;
diff --git a/drivers/platform/x86/panasonic-laptop.c b/drivers/platform/x86/panasonic-laptop.c
index c9fc479..726f02a 100644
--- a/drivers/platform/x86/panasonic-laptop.c
+++ b/drivers/platform/x86/panasonic-laptop.c
@@ -352,7 +352,7 @@
 	return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
 }
 
-static struct backlight_ops pcc_backlight_ops = {
+static const struct backlight_ops pcc_backlight_ops = {
 	.get_brightness	= bl_get,
 	.update_status	= bl_set_status,
 };
@@ -600,6 +600,7 @@
 
 static int acpi_pcc_hotkey_add(struct acpi_device *device)
 {
+	struct backlight_properties props;
 	struct pcc_acpi *pcc;
 	int num_sifr, result;
 
@@ -637,24 +638,25 @@
 	if (result) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				  "Error installing keyinput handler\n"));
-		goto out_sinf;
+		goto out_hotkey;
 	}
 
-	/* initialize backlight */
-	pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
-						   &pcc_backlight_ops);
-	if (IS_ERR(pcc->backlight))
-		goto out_input;
-
 	if (!acpi_pcc_retrieve_biosdata(pcc, pcc->sinf)) {
 		ACPI_DEBUG_PRINT((ACPI_DB_ERROR,
 				 "Couldn't retrieve BIOS data\n"));
-		goto out_backlight;
+		goto out_input;
+	}
+	/* initialize backlight */
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = pcc->sinf[SINF_AC_MAX_BRIGHT];
+	pcc->backlight = backlight_device_register("panasonic", NULL, pcc,
+						   &pcc_backlight_ops, &props);
+	if (IS_ERR(pcc->backlight)) {
+		result = PTR_ERR(pcc->backlight);
+		goto out_sinf;
 	}
 
 	/* read the initial brightness setting from the hardware */
-	pcc->backlight->props.max_brightness =
-					pcc->sinf[SINF_AC_MAX_BRIGHT];
 	pcc->backlight->props.brightness = pcc->sinf[SINF_AC_CUR_BRIGHT];
 
 	/* read the initial sticky key mode from the hardware */
@@ -669,12 +671,12 @@
 
 out_backlight:
 	backlight_device_unregister(pcc->backlight);
+out_sinf:
+	kfree(pcc->sinf);
 out_input:
 	input_unregister_device(pcc->input_dev);
 	/* no need to input_free_device() since core input API refcount and
 	 * free()s the device */
-out_sinf:
-	kfree(pcc->sinf);
 out_hotkey:
 	kfree(pcc);
 
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 5a3d851..6553b91 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1291,9 +1291,13 @@
 		       "controlled by ACPI video driver\n");
 	} else if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "GBRT",
 						&handle))) {
+		struct backlight_properties props;
+		memset(&props, 0, sizeof(struct backlight_properties));
+		props.max_brightness = SONY_MAX_BRIGHTNESS - 1;
 		sony_backlight_device = backlight_device_register("sony", NULL,
 								  NULL,
-								  &sony_backlight_ops);
+								  &sony_backlight_ops,
+								  &props);
 
 		if (IS_ERR(sony_backlight_device)) {
 			printk(KERN_WARNING DRV_PFX "unable to register backlight device\n");
@@ -1302,8 +1306,6 @@
 			sony_backlight_device->props.brightness =
 			    sony_backlight_get_brightness
 			    (sony_backlight_device);
-			sony_backlight_device->props.max_brightness =
-			    SONY_MAX_BRIGHTNESS - 1;
 		}
 
 	}
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index c64e352..770b853 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -6170,6 +6170,7 @@
 
 static int __init brightness_init(struct ibm_init_struct *iibm)
 {
+	struct backlight_properties props;
 	int b;
 	unsigned long quirks;
 
@@ -6259,9 +6260,12 @@
 		printk(TPACPI_INFO
 		       "detected a 16-level brightness capable ThinkPad\n");
 
-	ibm_backlight_device = backlight_device_register(
-					TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
-					&ibm_backlight_data);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = (tp_features.bright_16levels) ? 15 : 7;
+	ibm_backlight_device = backlight_device_register(TPACPI_BACKLIGHT_DEV_NAME,
+							 NULL, NULL,
+							 &ibm_backlight_data,
+							 &props);
 	if (IS_ERR(ibm_backlight_device)) {
 		int rc = PTR_ERR(ibm_backlight_device);
 		ibm_backlight_device = NULL;
@@ -6280,8 +6284,6 @@
 			"or not on your ThinkPad\n", TPACPI_MAIL);
 	}
 
-	ibm_backlight_device->props.max_brightness =
-				(tp_features.bright_16levels)? 15 : 7;
 	ibm_backlight_device->props.brightness = b & TP_EC_BACKLIGHT_LVLMSK;
 	backlight_update_status(ibm_backlight_device);
 
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 789240d..def4841 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -924,6 +924,7 @@
 	u32 hci_result;
 	bool bt_present;
 	int ret = 0;
+	struct backlight_properties props;
 
 	if (acpi_disabled)
 		return -ENODEV;
@@ -974,10 +975,12 @@
 		}
 	}
 
+	props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 	toshiba_backlight_device = backlight_device_register("toshiba",
-						&toshiba_acpi.p_dev->dev,
-						NULL,
-						&toshiba_backlight_data);
+							     &toshiba_acpi.p_dev->dev,
+							     NULL,
+							     &toshiba_backlight_data,
+							     &props);
         if (IS_ERR(toshiba_backlight_device)) {
 		ret = PTR_ERR(toshiba_backlight_device);
 
@@ -986,7 +989,6 @@
 		toshiba_acpi_exit();
 		return ret;
 	}
-        toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
 
 	/* Register rfkill switch for Bluetooth */
 	if (hci_get_bt_present(&bt_present) == HCI_SUCCESS && bt_present) {
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index b3beab6..fc7ae05 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -704,6 +704,13 @@
 	return do_chp_configure(SCLP_CMDW_DECONFIGURE_CHPATH | chpid.id << 8);
 }
 
+int arch_get_memory_phys_device(unsigned long start_pfn)
+{
+	if (!rzm)
+		return 0;
+	return PFN_PHYS(start_pfn) / rzm;
+}
+
 struct chp_info_sccb {
 	struct sccb_header header;
 	u8 recognized[SCLP_CHP_INFO_MASK_SIZE];
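The new helper above maps a pfn to its storage increment by dividing the physical address by rzm, the increment size. A worked check with an illustrative 256 MiB increment (rzm's real value comes from the SCLP read-info data, not shown here):

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	unsigned long rzm = 256UL << 20;	/* illustrative 256 MiB increment */
	unsigned long start_pfn = 0x40000;	/* 1 GiB expressed in 4 KiB pages */

	/* PFN_PHYS(pfn) / rzm: which storage increment the pfn falls in */
	printf("%lu\n", (start_pfn << PAGE_SHIFT) / rzm); /* 4 */
	return 0;
}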
diff --git a/drivers/scsi/Kconfig b/drivers/scsi/Kconfig
index 9191d1e..75f2336 100644
--- a/drivers/scsi/Kconfig
+++ b/drivers/scsi/Kconfig
@@ -1,9 +1,15 @@
 menu "SCSI device support"
 
+config SCSI_MOD
+       tristate
+       default y if SCSI=n || SCSI=y
+       default m if SCSI=m
+
 config RAID_ATTRS
 	tristate "RAID Transport Class"
 	default n
 	depends on BLOCK
+	depends on SCSI_MOD
 	---help---
 	  Provides RAID
 
diff --git a/drivers/scsi/be2iscsi/be_cmds.c b/drivers/scsi/be2iscsi/be_cmds.c
index 6709857..cda6642 100644
--- a/drivers/scsi/be2iscsi/be_cmds.c
+++ b/drivers/scsi/be2iscsi/be_cmds.c
@@ -32,18 +32,11 @@
 unsigned int alloc_mcc_tag(struct beiscsi_hba *phba)
 {
 	unsigned int tag = 0;
-	unsigned int num = 0;
 
-mcc_tag_rdy:
 	if (phba->ctrl.mcc_tag_available) {
 		tag = phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index];
 		phba->ctrl.mcc_tag[phba->ctrl.mcc_alloc_index] = 0;
 		phba->ctrl.mcc_numtag[tag] = 0;
-	} else {
-		udelay(100);
-		num++;
-		if (num < mcc_timeout)
-			goto mcc_tag_rdy;
 	}
 	if (tag) {
 		phba->ctrl.mcc_tag_available--;
diff --git a/drivers/scsi/be2iscsi/be_iscsi.c b/drivers/scsi/be2iscsi/be_iscsi.c
index 29a3aaf..c3928cb 100644
--- a/drivers/scsi/be2iscsi/be_iscsi.c
+++ b/drivers/scsi/be2iscsi/be_iscsi.c
@@ -482,7 +482,7 @@
 	tag = mgmt_open_connection(phba, dst_addr, beiscsi_ep);
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1,
-			 "mgmt_invalidate_connection Failed for cid=%d \n",
+			 "mgmt_open_connection Failed for cid=%d \n",
 			 beiscsi_ep->ep_cid);
 	} else {
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
@@ -701,7 +701,7 @@
 	if (!tag) {
 		SE_DEBUG(DBG_LVL_1,
 			 "mgmt_invalidate_connection Failed for cid=%d \n",
-			 beiscsi_ep->ep_cid);
+			  beiscsi_ep->ep_cid);
 	} else {
 		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
 					 phba->ctrl.mcc_numtag[tag]);
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 7c22616..fcfb29e 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -58,6 +58,123 @@
 	return 0;
 }
 
+static int beiscsi_eh_abort(struct scsi_cmnd *sc)
+{
+	struct iscsi_cls_session *cls_session;
+	struct iscsi_task *aborted_task = (struct iscsi_task *)sc->SCp.ptr;
+	struct beiscsi_io_task *aborted_io_task;
+	struct iscsi_conn *conn;
+	struct beiscsi_conn *beiscsi_conn;
+	struct beiscsi_hba *phba;
+	struct iscsi_session *session;
+	struct invalidate_command_table *inv_tbl;
+	unsigned int cid, tag, num_invalidate;
+
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+
+	spin_lock_bh(&session->lock);
+	if (!aborted_task || !aborted_task->sc) {
+		/* we raced */
+		spin_unlock_bh(&session->lock);
+		return SUCCESS;
+	}
+
+	aborted_io_task = aborted_task->dd_data;
+	if (!aborted_io_task->scsi_cmnd) {
+		/* raced or invalid command */
+		spin_unlock_bh(&session->lock);
+		return SUCCESS;
+	}
+	spin_unlock_bh(&session->lock);
+	conn = aborted_task->conn;
+	beiscsi_conn = conn->dd_data;
+	phba = beiscsi_conn->phba;
+
+	/* invalidate iocb */
+	cid = beiscsi_conn->beiscsi_conn_cid;
+	inv_tbl = phba->inv_tbl;
+	memset(inv_tbl, 0x0, sizeof(*inv_tbl));
+	inv_tbl->cid = cid;
+	inv_tbl->icd = aborted_io_task->psgl_handle->sgl_index;
+	num_invalidate = 1;
+	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+	if (!tag) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_invalidate_icds could not be"
+			     " submitted\n");
+		return FAILED;
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
+	}
+
+	return iscsi_eh_abort(sc);
+}
+
+static int beiscsi_eh_device_reset(struct scsi_cmnd *sc)
+{
+	struct iscsi_task *abrt_task;
+	struct beiscsi_io_task *abrt_io_task;
+	struct iscsi_conn *conn;
+	struct beiscsi_conn *beiscsi_conn;
+	struct beiscsi_hba *phba;
+	struct iscsi_session *session;
+	struct iscsi_cls_session *cls_session;
+	struct invalidate_command_table *inv_tbl;
+	unsigned int cid, tag, i, num_invalidate;
+	int rc = FAILED;
+
+	/* invalidate iocbs */
+	cls_session = starget_to_session(scsi_target(sc->device));
+	session = cls_session->dd_data;
+	spin_lock_bh(&session->lock);
+	if (!session->leadconn || session->state != ISCSI_STATE_LOGGED_IN)
+		goto unlock;
+
+	conn = session->leadconn;
+	beiscsi_conn = conn->dd_data;
+	phba = beiscsi_conn->phba;
+	cid = beiscsi_conn->beiscsi_conn_cid;
+	inv_tbl = phba->inv_tbl;
+	memset(inv_tbl, 0x0, sizeof(*inv_tbl) * BE2_CMDS_PER_CXN);
+	num_invalidate = 0;
+	for (i = 0; i < conn->session->cmds_max; i++) {
+		abrt_task = conn->session->cmds[i];
+		abrt_io_task = abrt_task->dd_data;
+		if (!abrt_task->sc || abrt_task->state == ISCSI_TASK_FREE)
+			continue;
+
+		if (sc->device->lun != abrt_task->sc->device->lun)
+			continue;
+
+		inv_tbl->cid = cid;
+		inv_tbl->icd = abrt_io_task->psgl_handle->sgl_index;
+		num_invalidate++;
+		inv_tbl++;
+	}
+	spin_unlock_bh(&session->lock);
+	inv_tbl = phba->inv_tbl;
+
+	tag = mgmt_invalidate_icds(phba, inv_tbl, num_invalidate, cid);
+	if (!tag) {
+		shost_printk(KERN_WARNING, phba->shost,
+			     "mgmt_invalidate_icds could not be"
+			     " submitted\n");
+		return FAILED;
+	} else {
+		wait_event_interruptible(phba->ctrl.mcc_wait[tag],
+					 phba->ctrl.mcc_numtag[tag]);
+		free_mcc_tag(&phba->ctrl, tag);
+	}
+
+	return iscsi_eh_device_reset(sc);
+unlock:
+	spin_unlock_bh(&session->lock);
+	return rc;
+}
+
 /*------------------- PCI Driver operations and data ----------------- */
 static DEFINE_PCI_DEVICE_TABLE(beiscsi_pci_id_table) = {
 	{ PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
@@ -74,12 +191,12 @@
 	.name = "ServerEngines 10Gbe open-iscsi Initiator Driver",
 	.proc_name = DRV_NAME,
 	.queuecommand = iscsi_queuecommand,
-	.eh_abort_handler = iscsi_eh_abort,
 	.change_queue_depth = iscsi_change_queue_depth,
 	.slave_configure = beiscsi_slave_configure,
 	.target_alloc = iscsi_target_alloc,
-	.eh_device_reset_handler = iscsi_eh_device_reset,
-	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.eh_abort_handler = beiscsi_eh_abort,
+	.eh_device_reset_handler = beiscsi_eh_device_reset,
+	.eh_target_reset_handler = iscsi_eh_session_reset,
 	.sg_tablesize = BEISCSI_SGLIST_ELEMENTS,
 	.can_queue = BE2_IO_DEPTH,
 	.this_id = -1,
@@ -242,7 +359,7 @@
 				    + BE2_TMFS
 				    + BE2_NOPOUT_REQ));
 	phba->params.cxns_per_ctrl = phba->fw_config.iscsi_cid_count;
-	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count;;
+	phba->params.asyncpdus_per_ctrl = phba->fw_config.iscsi_cid_count * 2;
 	phba->params.icds_per_ctrl = phba->fw_config.iscsi_icd_count;
 	phba->params.num_sge_per_io = BE2_SGE;
 	phba->params.defpdu_hdr_sz = BE2_DEFPDU_HDR_SZ;
@@ -946,14 +1063,18 @@
 	case HWH_TYPE_IO:
 	case HWH_TYPE_IO_RD:
 		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) ==
-		    ISCSI_OP_NOOP_OUT) {
+		     ISCSI_OP_NOOP_OUT)
 			be_complete_nopin_resp(beiscsi_conn, task, psol);
-		} else
+		else
 			be_complete_io(beiscsi_conn, task, psol);
 		break;
 
 	case HWH_TYPE_LOGOUT:
-		be_complete_logout(beiscsi_conn, task, psol);
+		if ((task->hdr->opcode & ISCSI_OPCODE_MASK) == ISCSI_OP_LOGOUT)
+			be_complete_logout(beiscsi_conn, task, psol);
+		else
+			be_complete_tmf(beiscsi_conn, task, psol);
+
 		break;
 
 	case HWH_TYPE_LOGIN:
@@ -962,10 +1083,6 @@
 			 "- Solicited path \n");
 		break;
 
-	case HWH_TYPE_TMF:
-		be_complete_tmf(beiscsi_conn, task, psol);
-		break;
-
 	case HWH_TYPE_NOP:
 		be_complete_nopin_resp(beiscsi_conn, task, psol);
 		break;
@@ -2052,7 +2169,7 @@
 	num_cxn_wrb = (mem_descr_wrb->mem_array[idx].size) /
 		      ((sizeof(struct iscsi_wrb) *
 			phba->params.wrbs_per_cxn));
-	for (index = 0; index < phba->params.cxns_per_ctrl; index += 2) {
+	for (index = 0; index < phba->params.cxns_per_ctrl * 2; index += 2) {
 		pwrb_context = &phwi_ctrlr->wrb_context[index];
 		if (num_cxn_wrb) {
 			for (j = 0; j < phba->params.wrbs_per_cxn; j++) {
@@ -3073,14 +3190,18 @@
 		reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
 		SE_DEBUG(DBG_LVL_8, "reg =x%08x addr=%p \n", reg, addr);
 		iowrite32(reg, addr);
-		for (i = 0; i <= phba->num_cpus; i++) {
-			eq = &phwi_context->be_eq[i].q;
+		if (!phba->msix_enabled) {
+			eq = &phwi_context->be_eq[0].q;
 			SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
 			hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+		} else {
+			for (i = 0; i <= phba->num_cpus; i++) {
+				eq = &phwi_context->be_eq[i].q;
+				SE_DEBUG(DBG_LVL_8, "eq->id=%d \n", eq->id);
+				hwi_ring_eq_db(phba, eq->id, 0, 0, 1, 1);
+			}
 		}
-	} else
-		shost_printk(KERN_WARNING, phba->shost,
-			     "In hwi_enable_intr, Not Enabled \n");
+	}
 	return true;
 }
 
@@ -3476,19 +3597,13 @@
 
 static int beiscsi_mtask(struct iscsi_task *task)
 {
-	struct beiscsi_io_task *aborted_io_task, *io_task = task->dd_data;
+	struct beiscsi_io_task *io_task = task->dd_data;
 	struct iscsi_conn *conn = task->conn;
 	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct beiscsi_hba *phba = beiscsi_conn->phba;
-	struct iscsi_session *session;
 	struct iscsi_wrb *pwrb = NULL;
-	struct hwi_controller *phwi_ctrlr;
-	struct hwi_wrb_context *pwrb_context;
-	struct wrb_handle *pwrb_handle;
 	unsigned int doorbell = 0;
-	unsigned int i, cid;
-	struct iscsi_task *aborted_task;
-	unsigned int tag;
+	unsigned int cid;
 
 	cid = beiscsi_conn->beiscsi_conn_cid;
 	pwrb = io_task->pwrb_handle->pwrb;
@@ -3499,6 +3614,7 @@
 		      io_task->pwrb_handle->wrb_index);
 	AMAP_SET_BITS(struct amap_iscsi_wrb, sgl_icd_idx, pwrb,
 		      io_task->psgl_handle->sgl_index);
+
 	switch (task->hdr->opcode & ISCSI_OPCODE_MASK) {
 	case ISCSI_OP_LOGIN:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
@@ -3523,33 +3639,6 @@
 		hwi_write_buffer(pwrb, task);
 		break;
 	case ISCSI_OP_SCSI_TMFUNC:
-		session = conn->session;
-		i = ((struct iscsi_tm *)task->hdr)->rtt;
-		phwi_ctrlr = phba->phwi_ctrlr;
-		pwrb_context = &phwi_ctrlr->wrb_context[cid -
-					    phba->fw_config.iscsi_cid_start];
-		pwrb_handle = pwrb_context->pwrb_handle_basestd[be32_to_cpu(i)
-								>> 16];
-		aborted_task = pwrb_handle->pio_handle;
-		 if (!aborted_task)
-			return 0;
-
-		aborted_io_task = aborted_task->dd_data;
-		if (!aborted_io_task->scsi_cmnd)
-			return 0;
-
-		tag = mgmt_invalidate_icds(phba,
-				     aborted_io_task->psgl_handle->sgl_index,
-				     cid);
-		if (!tag) {
-			shost_printk(KERN_WARNING, phba->shost,
-				     "mgmt_invalidate_icds could not be"
-				     " submitted\n");
-		} else {
-			wait_event_interruptible(phba->ctrl.mcc_wait[tag],
-						 phba->ctrl.mcc_numtag[tag]);
-			free_mcc_tag(&phba->ctrl, tag);
-		}
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
 			      INI_TMF_CMD);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
@@ -3558,7 +3647,7 @@
 	case ISCSI_OP_LOGOUT:
 		AMAP_SET_BITS(struct amap_iscsi_wrb, dmsg, pwrb, 0);
 		AMAP_SET_BITS(struct amap_iscsi_wrb, type, pwrb,
-				HWH_TYPE_LOGOUT);
+			      HWH_TYPE_LOGOUT);
 		hwi_write_buffer(pwrb, task);
 		break;
 
@@ -3584,17 +3673,12 @@
 
 static int beiscsi_task_xmit(struct iscsi_task *task)
 {
-	struct iscsi_conn *conn = task->conn;
 	struct beiscsi_io_task *io_task = task->dd_data;
 	struct scsi_cmnd *sc = task->sc;
-	struct beiscsi_conn *beiscsi_conn = conn->dd_data;
 	struct scatterlist *sg;
 	int num_sg;
 	unsigned int  writedir = 0, xferlen = 0;
 
-	SE_DEBUG(DBG_LVL_4, "\n cid=%d In beiscsi_task_xmit task=%p conn=%p \t"
-		 "beiscsi_conn=%p \n", beiscsi_conn->beiscsi_conn_cid,
-		 task, conn, beiscsi_conn);
 	if (!sc)
 		return beiscsi_mtask(task);
 
@@ -3699,7 +3783,6 @@
 			" Failed in beiscsi_hba_alloc \n");
 		goto disable_pci;
 	}
-	SE_DEBUG(DBG_LVL_8, " phba = %p \n", phba);
 
 	switch (pcidev->device) {
 	case BE_DEVICE_ID1:
diff --git a/drivers/scsi/be2iscsi/be_main.h b/drivers/scsi/be2iscsi/be_main.h
index c53a80a..87ec212 100644
--- a/drivers/scsi/be2iscsi/be_main.h
+++ b/drivers/scsi/be2iscsi/be_main.h
@@ -257,6 +257,11 @@
 	unsigned int num_sge;
 };
 
+struct invalidate_command_table {
+	unsigned short icd;
+	unsigned short cid;
+} __packed;
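+
+/*
+ * This table is handed to the adapter by mgmt_invalidate_icds(), so the
+ * layout must match the firmware wire format exactly: two 16-bit fields,
+ * no padding.  A compile-time guard along these lines (a sketch, not part
+ * of this patch) would pin that down:
+ *
+ *	BUILD_BUG_ON(sizeof(struct invalidate_command_table) != 4);
+ */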
+
 struct beiscsi_hba {
 	struct hba_parameters params;
 	struct hwi_controller *phwi_ctrlr;
@@ -329,6 +334,8 @@
 	struct work_struct work_cqs;	/* The work being queued */
 	struct be_ctrl_info ctrl;
 	unsigned int generation;
+	struct invalidate_command_table inv_tbl[128];
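+	/*
+	 * inv_tbl holds one icd/cid pair per command being invalidated;
+	 * beiscsi_eh_device_reset() fills up to one entry per session
+	 * command, so this sizing assumes the per-connection command
+	 * depth (BE2_CMDS_PER_CXN) stays within 128.
+	 */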
+
 };
 
 struct beiscsi_session {
@@ -491,8 +498,6 @@
 	struct list_head data_busy_list;
 };
 
-#define BE_MIN_ASYNC_ENTRIES 128
-
 struct hwi_async_pdu_context {
 	struct {
 		struct be_bus_address pa_base;
@@ -533,7 +538,7 @@
 	 * This is a varying size list! Do not add anything
 	 * after this entry!!
 	 */
-	struct hwi_async_entry async_entry[BE_MIN_ASYNC_ENTRIES];
+	struct hwi_async_entry async_entry[BE2_MAX_SESSIONS * 2];
 };
 
 #define PDUCQE_CODE_MASK	0x0000003F
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 317bcd0..72617b6 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -145,14 +145,15 @@
 }
 
 unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
-				   unsigned int icd, unsigned int cid)
+				struct invalidate_command_table *inv_tbl,
+				unsigned int num_invalidate, unsigned int cid)
 {
 	struct be_dma_mem nonemb_cmd;
 	struct be_ctrl_info *ctrl = &phba->ctrl;
 	struct be_mcc_wrb *wrb;
 	struct be_sge *sge;
 	struct invalidate_commands_params_in *req;
-	unsigned int tag = 0;
+	unsigned int i, tag = 0;
 
 	spin_lock(&ctrl->mbox_lock);
 	tag = alloc_mcc_tag(phba);
@@ -183,9 +184,12 @@
 			sizeof(*req));
 	req->ref_handle = 0;
 	req->cleanup_type = CMD_ISCSI_COMMAND_INVALIDATE;
-	req->icd_count = 0;
-	req->table[req->icd_count].icd = icd;
-	req->table[req->icd_count].cid = cid;
+	for (i = 0; i < num_invalidate; i++) {
+		req->table[i].icd = inv_tbl->icd;
+		req->table[i].cid = inv_tbl->cid;
+		req->icd_count++;
+		inv_tbl++;
+	}
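+	/*
+	 * No bound is rechecked here: num_invalidate is trusted to fit
+	 * req->table[], which the callers in be_main.c keep within
+	 * BE2_CMDS_PER_CXN entries.
+	 */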
 	sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd.dma));
 	sge->pa_lo = cpu_to_le32(nonemb_cmd.dma & 0xFFFFFFFF);
 	sge->len = cpu_to_le32(nonemb_cmd.size);
diff --git a/drivers/scsi/be2iscsi/be_mgmt.h b/drivers/scsi/be2iscsi/be_mgmt.h
index ecead6a..3d316b8 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.h
+++ b/drivers/scsi/be2iscsi/be_mgmt.h
@@ -94,7 +94,8 @@
 				     unsigned short cid,
 				     unsigned int upload_flag);
 unsigned char mgmt_invalidate_icds(struct beiscsi_hba *phba,
-				   unsigned int icd, unsigned int cid);
+				struct invalidate_command_table *inv_tbl,
+				unsigned int num_invalidate, unsigned int cid);
 
 struct iscsi_invalidate_connection_params_in {
 	struct be_cmd_req_hdr hdr;
@@ -116,11 +117,6 @@
 	struct iscsi_invalidate_connection_params_out response;
 } __packed;
 
-struct invalidate_command_table {
-	unsigned short icd;
-	unsigned short cid;
-} __packed;
-
 struct invalidate_commands_params_in {
 	struct be_cmd_req_hdr hdr;
 	unsigned int ref_handle;
diff --git a/drivers/scsi/bfa/Makefile b/drivers/scsi/bfa/Makefile
index 1d60094..17e06ca 100644
--- a/drivers/scsi/bfa/Makefile
+++ b/drivers/scsi/bfa/Makefile
@@ -2,14 +2,14 @@
 
 bfa-y := bfad.o bfad_intr.o bfad_os.o bfad_im.o bfad_attr.o bfad_fwimg.o
 
-bfa-y += bfa_core.o bfa_ioc.o bfa_iocfc.o bfa_fcxp.o bfa_lps.o
-bfa-y += bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o 
+bfa-y += bfa_core.o bfa_ioc.o bfa_ioc_ct.o bfa_ioc_cb.o bfa_iocfc.o bfa_fcxp.o
+bfa-y += bfa_lps.o bfa_hw_cb.o bfa_hw_ct.o bfa_intr.o bfa_timer.o bfa_rport.o
 bfa-y += bfa_fcport.o bfa_port.o bfa_uf.o bfa_sgpg.o bfa_module.o bfa_ioim.o
 bfa-y += bfa_itnim.o bfa_fcpim.o bfa_tskim.o bfa_log.o bfa_log_module.o
 bfa-y += bfa_csdebug.o bfa_sm.o plog.o
 
-bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o 
+bfa-y += fcbuild.o fabric.o fcpim.o vfapi.o fcptm.o bfa_fcs.o bfa_fcs_port.o
 bfa-y += bfa_fcs_uf.o bfa_fcs_lport.o fab.o fdmi.o ms.o ns.o scn.o loop.o
 bfa-y += lport_api.o n2n.o rport.o rport_api.o rport_ftrs.o vport.o
 
-ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna
+ccflags-y := -I$(obj) -I$(obj)/include -I$(obj)/include/cna -DBFA_PERF_BUILD
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 44e2d11..0c08e18 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -385,6 +385,15 @@
 }
 
 /**
+ * Clear the saved firmware trace information of an IOC.
+ */
+void
+bfa_debug_fwsave_clear(struct bfa_s *bfa)
+{
+	bfa_ioc_debug_fwsave_clear(&bfa->ioc);
+}
+
+/**
  * 		Fetch firmware trace data.
  *
  * @param[in]		bfa			BFA instance
@@ -399,4 +408,14 @@
 {
 	return bfa_ioc_debug_fwtrc(&bfa->ioc, trcdata, trclen);
 }
+
+/**
+ * Reset hw semaphore & usage cnt regs and initialize.
+ */
+void
+bfa_chip_reset(struct bfa_s *bfa)
+{
+	bfa_ioc_ownership_reset(&bfa->ioc);
+	bfa_ioc_pll_init(&bfa->ioc);
+}
 #endif
diff --git a/drivers/scsi/bfa/bfa_fcport.c b/drivers/scsi/bfa/bfa_fcport.c
index aef648b..c589488 100644
--- a/drivers/scsi/bfa/bfa_fcport.c
+++ b/drivers/scsi/bfa/bfa_fcport.c
@@ -23,40 +23,33 @@
 #include <cs/bfa_plog.h>
 #include <aen/bfa_aen_port.h>
 
-BFA_TRC_FILE(HAL, PPORT);
-BFA_MODULE(pport);
-
-#define bfa_pport_callback(__pport, __event) do {			\
-	if ((__pport)->bfa->fcs) {      \
-		(__pport)->event_cbfn((__pport)->event_cbarg, (__event));      \
-	} else {							\
-		(__pport)->hcb_event = (__event);      \
-		bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe,	\
-		__bfa_cb_port_event, (__pport));      \
-	}								\
-} while (0)
+BFA_TRC_FILE(HAL, FCPORT);
+BFA_MODULE(fcport);
 
 /*
  * The port is considered disabled if the corresponding physical port or
  * IOC is disabled explicitly
  */
 #define BFA_PORT_IS_DISABLED(bfa) \
-	((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
+	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
 	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
 
 /*
  * forward declarations
  */
-static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
-static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
-static void     bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
-static void     bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
-static void     bfa_pport_set_wwns(struct bfa_pport_s *port);
-static void     __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
-static void     __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
-static void     __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
-static void     bfa_port_stats_timeout(void *cbarg);
-static void     bfa_port_stats_clr_timeout(void *cbarg);
+static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
+static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
+static void     bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
+static void     bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
+static void     bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
+static void     __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
+static void     bfa_fcport_callback(struct bfa_fcport_s *fcport,
+				enum bfa_pport_linkstate event);
+static void     bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
+				enum bfa_pport_linkstate event);
+static void     __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
+static void     bfa_fcport_stats_get_timeout(void *cbarg);
+static void     bfa_fcport_stats_clr_timeout(void *cbarg);
 
 /**
  *  bfa_pport_private
@@ -65,111 +58,114 @@
 /**
  * BFA port state machine events
  */
-enum bfa_pport_sm_event {
-	BFA_PPORT_SM_START = 1,	/*  start port state machine */
-	BFA_PPORT_SM_STOP = 2,	/*  stop port state machine */
-	BFA_PPORT_SM_ENABLE = 3,	/*  enable port */
-	BFA_PPORT_SM_DISABLE = 4,	/*  disable port state machine */
-	BFA_PPORT_SM_FWRSP = 5,	/*  firmware enable/disable rsp */
-	BFA_PPORT_SM_LINKUP = 6,	/*  firmware linkup event */
-	BFA_PPORT_SM_LINKDOWN = 7,	/*  firmware linkup down */
-	BFA_PPORT_SM_QRESUME = 8,	/*  CQ space available */
-	BFA_PPORT_SM_HWFAIL = 9,	/*  IOC h/w failure */
+enum bfa_fcport_sm_event {
+	BFA_FCPORT_SM_START = 1,	/*  start port state machine */
+	BFA_FCPORT_SM_STOP = 2,	/*  stop port state machine */
+	BFA_FCPORT_SM_ENABLE = 3,	/*  enable port */
+	BFA_FCPORT_SM_DISABLE = 4,	/*  disable port state machine */
+	BFA_FCPORT_SM_FWRSP = 5,	/*  firmware enable/disable rsp */
+	BFA_FCPORT_SM_LINKUP = 6,	/*  firmware linkup event */
+	BFA_FCPORT_SM_LINKDOWN = 7,	/*  firmware linkdown event */
+	BFA_FCPORT_SM_QRESUME = 8,	/*  CQ space available */
+	BFA_FCPORT_SM_HWFAIL = 9,	/*  IOC h/w failure */
 };
 
-static void     bfa_pport_sm_uninit(struct bfa_pport_s *pport,
-				    enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
-					    enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_enabling(struct bfa_pport_s *pport,
-				      enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
-				      enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_linkup(struct bfa_pport_s *pport,
-				    enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_disabling(struct bfa_pport_s *pport,
-				       enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
-					     enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_disabled(struct bfa_pport_s *pport,
-				      enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_stopped(struct bfa_pport_s *pport,
-				     enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
-				     enum bfa_pport_sm_event event);
-static void     bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
-				     enum bfa_pport_sm_event event);
+/**
+ * BFA port link notification state machine events
+ */
+
+enum bfa_fcport_ln_sm_event {
+	BFA_FCPORT_LN_SM_LINKUP         = 1,    /*  linkup event */
+	BFA_FCPORT_LN_SM_LINKDOWN       = 2,    /*  linkdown event */
+	BFA_FCPORT_LN_SM_NOTIFICATION   = 3     /*  done notification */
+};
+
+static void     bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+					enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+						enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+					 enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+					 enum bfa_fcport_sm_event event);
+static void     bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+					 enum bfa_fcport_sm_event event);
+
+static void     bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
+static void     bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+					 enum bfa_fcport_ln_sm_event event);
 
 static struct bfa_sm_table_s hal_pport_sm_table[] = {
-	{BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
-	{BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
-	{BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
-	{BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
-	{BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
-	{BFA_SM(bfa_pport_sm_disabling_qwait),
-	 BFA_PPORT_ST_DISABLING_QWAIT},
-	{BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
-	{BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
-	{BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
-	{BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
-	{BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_uninit), BFA_PPORT_ST_UNINIT},
+	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_enabling), BFA_PPORT_ST_ENABLING},
+	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
+	{BFA_SM(bfa_fcport_sm_linkup), BFA_PPORT_ST_LINKUP},
+	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
+	{BFA_SM(bfa_fcport_sm_disabling), BFA_PPORT_ST_DISABLING},
+	{BFA_SM(bfa_fcport_sm_disabled), BFA_PPORT_ST_DISABLED},
+	{BFA_SM(bfa_fcport_sm_stopped), BFA_PPORT_ST_STOPPED},
+	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
+	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
 };
 
 static void
-bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
+bfa_fcport_aen_post(struct bfa_fcport_s *fcport, enum bfa_port_aen_event event)
 {
 	union bfa_aen_data_u aen_data;
-	struct bfa_log_mod_s *logmod = pport->bfa->logm;
-	wwn_t           pwwn = pport->pwwn;
+	struct bfa_log_mod_s *logmod = fcport->bfa->logm;
+	wwn_t           pwwn = fcport->pwwn;
 	char            pwwn_ptr[BFA_STRING_32];
-	struct bfa_ioc_attr_s ioc_attr;
 
+	memset(&aen_data, 0, sizeof(aen_data));
 	wwn2str(pwwn_ptr, pwwn);
-	switch (event) {
-	case BFA_PORT_AEN_ONLINE:
-		bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
-		break;
-	case BFA_PORT_AEN_OFFLINE:
-		bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
-		break;
-	case BFA_PORT_AEN_ENABLE:
-		bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
-		break;
-	case BFA_PORT_AEN_DISABLE:
-		bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
-		break;
-	case BFA_PORT_AEN_DISCONNECT:
-		bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
-		break;
-	case BFA_PORT_AEN_QOS_NEG:
-		bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event), pwwn_ptr);
 
-	bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
-	aen_data.port.ioc_type = ioc_attr.ioc_type;
+	aen_data.port.ioc_type = bfa_get_type(fcport->bfa);
 	aen_data.port.pwwn = pwwn;
 }
 
 static void
-bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_START:
+	case BFA_FCPORT_SM_START:
 		/**
 		 * Start event after IOC is configured and BFA is started.
 		 */
-		if (bfa_pport_send_enable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_ENABLE:
 		/**
 		 * Port is persistently configured to be in enabled state. Do
 		 * not change state. Port enabling is done when START event is
@@ -177,389 +173,412 @@
 		 */
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DISABLE:
 		/**
 		 * If a port is persistently configured to be disabled, the
 		 * first event will be a port disable request.
 		 */
-		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
-			    enum bfa_pport_sm_event event)
+bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
+			    enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_QRESUME:
-		bfa_sm_set_state(pport, bfa_pport_sm_enabling);
-		bfa_pport_send_enable(pport);
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
+		bfa_fcport_send_enable(fcport);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_reqq_wcancel(&pport->reqq_wait);
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+	case BFA_FCPORT_SM_STOP:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_ENABLE:
 		/**
 		 * Already enable is in progress.
 		 */
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DISABLE:
 		/**
 		 * Just send disable request to firmware when room becomes
 		 * available in request queue.
 		 */
-		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
-		bfa_reqq_wcancel(&pport->reqq_wait);
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
-	case BFA_PPORT_SM_LINKUP:
-	case BFA_PPORT_SM_LINKDOWN:
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
 		/**
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_reqq_wcancel(&pport->reqq_wait);
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_reqq_wcancel(&fcport->reqq_wait);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
+		enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_FWRSP:
-	case BFA_PPORT_SM_LINKDOWN:
-		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
+	case BFA_FCPORT_SM_FWRSP:
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
 		break;
 
-	case BFA_PPORT_SM_LINKUP:
-		bfa_pport_update_linkinfo(pport);
-		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
 
-		bfa_assert(pport->event_cbfn);
-		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
+		bfa_assert(fcport->event_cbfn);
+		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_ENABLE:
 		/**
 		 * Already being enabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
-		if (bfa_pport_send_disable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
 
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_LINKUP:
-		bfa_pport_update_linkinfo(pport);
-		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
-		bfa_assert(pport->event_cbfn);
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+	case BFA_FCPORT_SM_LINKUP:
+		bfa_fcport_update_linkinfo(fcport);
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
+		bfa_assert(fcport->event_cbfn);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
-		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
+
+		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
+
+			bfa_trc(fcport->bfa, pevent->link_state.fcf.fipenabled);
+			bfa_trc(fcport->bfa, pevent->link_state.fcf.fipfailed);
+
+			if (pevent->link_state.fcf.fipfailed)
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovery Failed");
+			else
+				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
+					BFA_PL_EID_FIP_FCF_DISC, 0,
+					"FIP FCF Discovered");
+		}
+
+		bfa_fcport_callback(fcport, BFA_PPORT_LINKUP);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ONLINE);
 		/**
 		 * If QoS is enabled and it is not online,
 		 * Send a separate event.
 		 */
-		if ((pport->cfg.qos_enabled)
-		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);
+		if ((fcport->cfg.qos_enabled)
+		    && (bfa_os_ntohl(fcport->qos_attr.state) != BFA_QOS_ONLINE))
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_QOS_NEG);
 
 		break;
 
-	case BFA_PPORT_SM_LINKDOWN:
+	case BFA_FCPORT_SM_LINKDOWN:
 		/**
 		 * Possible to get link down event.
 		 */
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_ENABLE:
 		/**
 		 * Already enabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
-		if (bfa_pport_send_disable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
 
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_ENABLE:
+	case BFA_FCPORT_SM_ENABLE:
 		/**
 		 * Already enabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
-		if (bfa_pport_send_disable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
+	case BFA_FCPORT_SM_DISABLE:
+		if (bfa_fcport_send_disable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
 
-		bfa_pport_reset_linkinfo(pport);
-		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISABLE);
 		break;
 
-	case BFA_PPORT_SM_LINKDOWN:
-		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
-		bfa_pport_reset_linkinfo(pport);
-		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+	case BFA_FCPORT_SM_LINKDOWN:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
-		if (BFA_PORT_IS_DISABLED(pport->bfa))
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
 		else
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
-		bfa_pport_reset_linkinfo(pport);
-		if (BFA_PORT_IS_DISABLED(pport->bfa))
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_fcport_reset_linkinfo(fcport);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
 		else
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
-		bfa_pport_reset_linkinfo(pport);
-		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
-		if (BFA_PORT_IS_DISABLED(pport->bfa))
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
+		bfa_fcport_reset_linkinfo(fcport);
+		bfa_fcport_callback(fcport, BFA_PPORT_LINKDOWN);
+		if (BFA_PORT_IS_DISABLED(fcport->bfa))
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_OFFLINE);
 		else
-			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
+			bfa_fcport_aen_post(fcport, BFA_PORT_AEN_DISCONNECT);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
-			     enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
+			     enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_QRESUME:
-		bfa_sm_set_state(pport, bfa_pport_sm_disabling);
-		bfa_pport_send_disable(pport);
+	case BFA_FCPORT_SM_QRESUME:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
+		bfa_fcport_send_disable(fcport);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
-		bfa_reqq_wcancel(&pport->reqq_wait);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DISABLE:
 		/**
 		 * Already being disabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_LINKUP:
-	case BFA_PPORT_SM_LINKDOWN:
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
 		/**
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
-		bfa_reqq_wcancel(&pport->reqq_wait);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
+		bfa_reqq_wcancel(&fcport->reqq_wait);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_FWRSP:
-		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+	case BFA_FCPORT_SM_FWRSP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DISABLE:
 		/**
 		 * Already being disabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
-		if (bfa_pport_send_enable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
 
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		break;
 
-	case BFA_PPORT_SM_LINKUP:
-	case BFA_PPORT_SM_LINKDOWN:
+	case BFA_FCPORT_SM_LINKUP:
+	case BFA_FCPORT_SM_LINKDOWN:
 		/**
 		 * Possible to get link events when doing back-to-back
 		 * enable/disables.
 		 */
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_START:
+	case BFA_FCPORT_SM_START:
 		/**
 		 * Ignore start event for a port that is disabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_STOP:
-		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
+	case BFA_FCPORT_SM_STOP:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
-		if (bfa_pport_send_enable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+	case BFA_FCPORT_SM_ENABLE:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
 
-		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
+		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
 			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
-		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
+		bfa_fcport_aen_post(fcport, BFA_PORT_AEN_ENABLE);
 		break;
 
-	case BFA_PPORT_SM_DISABLE:
+	case BFA_FCPORT_SM_DISABLE:
 		/**
 		 * Already disabled.
 		 */
 		break;
 
-	case BFA_PPORT_SM_HWFAIL:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
+	case BFA_FCPORT_SM_HWFAIL:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
 		break;
 
 	default:
-		bfa_sm_fault(pport->bfa, event);
+		bfa_sm_fault(fcport->bfa, event);
 	}
 }
 
 static void
-bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_START:
-		if (bfa_pport_send_enable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
 		break;
 
 	default:
@@ -574,16 +593,17 @@
  * Port is enabled. IOC is down/failed.
  */
 static void
-bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_START:
-		if (bfa_pport_send_enable(pport))
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
+	case BFA_FCPORT_SM_START:
+		if (bfa_fcport_send_enable(fcport))
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
 		else
-			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
+			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling_qwait);
 		break;
 
 	default:
@@ -598,17 +618,18 @@
  * Port is disabled. IOC is down/failed.
  */
 static void
-bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
+bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
+			enum bfa_fcport_sm_event event)
 {
-	bfa_trc(pport->bfa, event);
+	bfa_trc(fcport->bfa, event);
 
 	switch (event) {
-	case BFA_PPORT_SM_START:
-		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
+	case BFA_FCPORT_SM_START:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
 		break;
 
-	case BFA_PPORT_SM_ENABLE:
-		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
+	case BFA_FCPORT_SM_ENABLE:
+		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
 		break;
 
 	default:
@@ -619,41 +640,226 @@
 	}
 }
 
+/**
+ * Link state is down
+ */
+static void
+bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
 
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for down notification
+ */
+static void
+bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for down notification and there is a pending up
+ */
+static void
+bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKUP);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is up
+ */
+static void
+bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification
+ */
+static void
+bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification and there is a pending down
+ */
+static void
+bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
+		enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKUP:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
+		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
+
+/**
+ * Link state is waiting for up notification and there are pending down and up
+ */
+static void
+bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
+			enum bfa_fcport_ln_sm_event event)
+{
+	bfa_trc(ln->fcport->bfa, event);
+
+	switch (event) {
+	case BFA_FCPORT_LN_SM_LINKDOWN:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
+		break;
+
+	case BFA_FCPORT_LN_SM_NOTIFICATION:
+		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
+		bfa_fcport_queue_cb(ln, BFA_PPORT_LINKDOWN);
+		break;
+
+	default:
+		bfa_sm_fault(ln->fcport->bfa, event);
+	}
+}
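+
+/*
+ * Summary of the link-notification state machine above (state / event ->
+ * next state; NOTIFICATION is the completion fed back from the queued
+ * callback):
+ *
+ *	dn           LINKUP       -> up_nf        (queue LINKUP cb)
+ *	dn_nf        LINKUP       -> dn_up_nf
+ *	dn_nf        NOTIFICATION -> dn
+ *	dn_up_nf     LINKDOWN     -> dn_nf
+ *	dn_up_nf     NOTIFICATION -> up_nf        (queue LINKUP cb)
+ *	up           LINKDOWN     -> dn_nf        (queue LINKDOWN cb)
+ *	up_nf        LINKDOWN     -> up_dn_nf
+ *	up_nf        NOTIFICATION -> up
+ *	up_dn_nf     LINKUP       -> up_dn_up_nf
+ *	up_dn_nf     NOTIFICATION -> dn_nf        (queue LINKDOWN cb)
+ *	up_dn_up_nf  LINKDOWN     -> up_dn_nf
+ *	up_dn_up_nf  NOTIFICATION -> dn_up_nf     (queue LINKDOWN cb)
+ */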
 
 /**
  *  bfa_pport_private
  */
 
 static void
-__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
+__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
 {
-	struct bfa_pport_s *pport = cbarg;
+	struct bfa_fcport_ln_s *ln = cbarg;
 
 	if (complete)
-		pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
+		ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
+	else
+		bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
 }
 
-#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
+static void
+bfa_fcport_callback(struct bfa_fcport_s *fcport, enum bfa_pport_linkstate event)
+{
+	if (fcport->bfa->fcs) {
+		fcport->event_cbfn(fcport->event_cbarg, event);
+		return;
+	}
+
+	switch (event) {
+	case BFA_PPORT_LINKUP:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
+		break;
+	case BFA_PPORT_LINKDOWN:
+		bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
+		break;
+	default:
+		bfa_assert(0);
+	}
+}
+
+static void
+bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_pport_linkstate event)
+{
+	ln->ln_event = event;
+	bfa_cb_queue(ln->fcport->bfa, &ln->ln_qe, __bfa_cb_fcport_event, ln);
+}
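+
+/*
+ * Event delivery is two-path: when FCS owns the port (fcport->bfa->fcs is
+ * set) bfa_fcport_callback() invokes the event callback synchronously;
+ * otherwise the event is fed through the ln state machine above and
+ * bfa_fcport_queue_cb() defers the callback via bfa_cb_queue().  The
+ * BFA_FCPORT_LN_SM_NOTIFICATION event feeds completion back into the
+ * state machine, so a new link event cannot overtake one still in flight.
+ */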
+
+#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
 							BFA_CACHELINE_SZ))
 
 static void
-bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
+bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
 		  u32 *dm_len)
 {
-	*dm_len += PPORT_STATS_DMA_SZ;
+	*dm_len += FCPORT_STATS_DMA_SZ;
 }
 
 static void
-bfa_pport_qresume(void *cbarg)
+bfa_fcport_qresume(void *cbarg)
 {
-	struct bfa_pport_s *port = cbarg;
+	struct bfa_fcport_s *fcport = cbarg;
 
-	bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
+	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
 }
 
 static void
-bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
+bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
 {
 	u8        *dm_kva;
 	u64        dm_pa;
@@ -661,12 +867,12 @@
 	dm_kva = bfa_meminfo_dma_virt(meminfo);
 	dm_pa = bfa_meminfo_dma_phys(meminfo);
 
-	pport->stats_kva = dm_kva;
-	pport->stats_pa = dm_pa;
-	pport->stats = (union bfa_pport_stats_u *)dm_kva;
+	fcport->stats_kva = dm_kva;
+	fcport->stats_pa = dm_pa;
+	fcport->stats = (union bfa_fcport_stats_u *)dm_kva;
 
-	dm_kva += PPORT_STATS_DMA_SZ;
-	dm_pa += PPORT_STATS_DMA_SZ;
+	dm_kva += FCPORT_STATS_DMA_SZ;
+	dm_pa += FCPORT_STATS_DMA_SZ;
 
 	bfa_meminfo_dma_virt(meminfo) = dm_kva;
 	bfa_meminfo_dma_phys(meminfo) = dm_pa;
@@ -676,18 +882,21 @@
  * Memory initialization.
  */
 static void
-bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
+bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
 		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
-	struct bfa_pport_cfg_s *port_cfg = &pport->cfg;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_pport_cfg_s *port_cfg = &fcport->cfg;
+	struct bfa_fcport_ln_s *ln = &fcport->ln;
 
-	bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
-	pport->bfa = bfa;
+	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
+	fcport->bfa = bfa;
+	ln->fcport = fcport;
 
-	bfa_pport_mem_claim(pport, meminfo);
+	bfa_fcport_mem_claim(fcport, meminfo);
 
-	bfa_sm_set_state(pport, bfa_pport_sm_uninit);
+	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
+	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
 
 	/**
 	 * initialize and set default configuration
@@ -699,30 +908,30 @@
 
 	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;
 
-	bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
+	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
 }
 
 static void
-bfa_pport_initdone(struct bfa_s *bfa)
+bfa_fcport_initdone(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	/**
 	 * Initialize port attributes from IOC hardware data.
 	 */
-	bfa_pport_set_wwns(pport);
-	if (pport->cfg.maxfrsize == 0)
-		pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
-	pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
-	pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
+	bfa_fcport_set_wwns(fcport);
+	if (fcport->cfg.maxfrsize == 0)
+		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
+	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
+	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);
 
-	bfa_assert(pport->cfg.maxfrsize);
-	bfa_assert(pport->cfg.rx_bbcredit);
-	bfa_assert(pport->speed_sup);
+	bfa_assert(fcport->cfg.maxfrsize);
+	bfa_assert(fcport->cfg.rx_bbcredit);
+	bfa_assert(fcport->speed_sup);
 }
 
 static void
-bfa_pport_detach(struct bfa_s *bfa)
+bfa_fcport_detach(struct bfa_s *bfa)
 {
 }
 
@@ -730,95 +939,97 @@
  * Called when IOC is ready.
  */
 static void
-bfa_pport_start(struct bfa_s *bfa)
+bfa_fcport_start(struct bfa_s *bfa)
 {
-	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
 }
 
 /**
  * Called before IOC is stopped.
  */
 static void
-bfa_pport_stop(struct bfa_s *bfa)
+bfa_fcport_stop(struct bfa_s *bfa)
 {
-	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
 }
 
 /**
  * Called when IOC failure is detected.
  */
 static void
-bfa_pport_iocdisable(struct bfa_s *bfa)
+bfa_fcport_iocdisable(struct bfa_s *bfa)
 {
-	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_HWFAIL);
 }
 
 static void
-bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
+bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
 {
-	struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;
+	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
 
-	pport->speed = pevent->link_state.speed;
-	pport->topology = pevent->link_state.topology;
+	fcport->speed = pevent->link_state.speed;
+	fcport->topology = pevent->link_state.topology;
 
-	if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
-		pport->myalpa = pevent->link_state.tl.loop_info.myalpa;
+	if (fcport->topology == BFA_PPORT_TOPOLOGY_LOOP)
+		fcport->myalpa =
+			pevent->link_state.tl.loop_info.myalpa;
 
 	/*
 	 * QoS Details
 	 */
-	bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
-	bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);
+	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
+	bfa_os_assign(fcport->qos_vc_attr, pevent->link_state.qos_vc_attr);
 
-	bfa_trc(pport->bfa, pport->speed);
-	bfa_trc(pport->bfa, pport->topology);
+	bfa_trc(fcport->bfa, fcport->speed);
+	bfa_trc(fcport->bfa, fcport->topology);
 }
 
 static void
-bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
+bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
 {
-	pport->speed = BFA_PPORT_SPEED_UNKNOWN;
-	pport->topology = BFA_PPORT_TOPOLOGY_NONE;
+	fcport->speed = BFA_PPORT_SPEED_UNKNOWN;
+	fcport->topology = BFA_PPORT_TOPOLOGY_NONE;
 }
 
 /**
  * Send port enable message to firmware.
  */
 static          bfa_boolean_t
-bfa_pport_send_enable(struct bfa_pport_s *port)
+bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
 {
-	struct bfi_pport_enable_req_s *m;
+	struct bfi_fcport_enable_req_s *m;
 
 	/**
 	 * Increment message tag before queue check, so that responses to old
 	 * requests are discarded.
 	 */
-	port->msgtag++;
+	fcport->msgtag++;
 
 	/**
 	 * check for room in queue to send request now
 	 */
-	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
 	if (!m) {
-		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
 		return BFA_FALSE;
 	}
 
-	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
-		    bfa_lpuid(port->bfa));
-	m->nwwn = port->nwwn;
-	m->pwwn = port->pwwn;
-	m->port_cfg = port->cfg;
-	m->msgtag = port->msgtag;
-	m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
-	bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
-	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
-	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
+				bfa_lpuid(fcport->bfa));
+	m->nwwn = fcport->nwwn;
+	m->pwwn = fcport->pwwn;
+	m->port_cfg = fcport->cfg;
+	m->msgtag = fcport->msgtag;
+	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
+	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
+	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);
 
 	/**
 	 * queue I/O message to firmware
 	 */
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 	return BFA_TRUE;
 }
 
@@ -826,74 +1037,226 @@
  * Send port disable message to firmware.
  */
 static          bfa_boolean_t
-bfa_pport_send_disable(struct bfa_pport_s *port)
+bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
 {
-	bfi_pport_disable_req_t *m;
+	struct bfi_fcport_req_s *m;
 
 	/**
 	 * Increment message tag before queue check, so that responses to old
 	 * requests are discarded.
 	 */
-	port->msgtag++;
+	fcport->msgtag++;
 
 	/**
 	 * check for room in queue to send request now
 	 */
-	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
 	if (!m) {
-		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+							&fcport->reqq_wait);
 		return BFA_FALSE;
 	}
 
-	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
-		    bfa_lpuid(port->bfa));
-	m->msgtag = port->msgtag;
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
+			bfa_lpuid(fcport->bfa));
+	m->msgtag = fcport->msgtag;
 
 	/**
 	 * queue I/O message to firmware
 	 */
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 
 	return BFA_TRUE;
 }
 
 static void
-bfa_pport_set_wwns(struct bfa_pport_s *port)
+bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
 {
-	port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
-	port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);
+	fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
+	fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
 
-	bfa_trc(port->bfa, port->pwwn);
-	bfa_trc(port->bfa, port->nwwn);
+	bfa_trc(fcport->bfa, fcport->pwwn);
+	bfa_trc(fcport->bfa, fcport->nwwn);
 }
 
 static void
-bfa_port_send_txcredit(void *port_cbarg)
+bfa_fcport_send_txcredit(void *port_cbarg)
 {
 
-	struct bfa_pport_s *port = port_cbarg;
-	struct bfi_pport_set_svc_params_req_s *m;
+	struct bfa_fcport_s *fcport = port_cbarg;
+	struct bfi_fcport_set_svc_params_req_s *m;
 
 	/**
 	 * check for room in queue to send request now
 	 */
-	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
+	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
 	if (!m) {
-		bfa_trc(port->bfa, port->cfg.tx_bbcredit);
+		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
 		return;
 	}
 
-	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
-		    bfa_lpuid(port->bfa));
-	m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);
+	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
+			bfa_lpuid(fcport->bfa));
+	m->tx_bbcredit = bfa_os_htons((u16) fcport->cfg.tx_bbcredit);
 
 	/**
 	 * queue I/O message to firmware
 	 */
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
 }
 
+static void
+bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
+	struct bfa_qos_stats_s *s)
+{
+	u32     *dip = (u32 *) d;
+	u32     *sip = (u32 *) s;
+	int             i;
 
+	/* Now swap the 32 bit fields */
+	for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
+		dip[i] = bfa_os_ntohl(sip[i]);
+}
+
+static void
+bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
+	struct bfa_fcoe_stats_s *s)
+{
+	u32     *dip = (u32 *) d;
+	u32     *sip = (u32 *) s;
+	int             i;
+
+	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
+		i = i + 2) {
+#ifdef __BIGENDIAN
+		dip[i] = bfa_os_ntohl(sip[i]);
+		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
+#else
+		dip[i] = bfa_os_ntohl(sip[i + 1]);
+		dip[i + 1] = bfa_os_ntohl(sip[i]);
+#endif
+	}
+}
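+
+/*
+ * The pairwise swap above byte-swaps 64-bit counters, assuming each FCoE
+ * counter arrives as a big-endian 64-bit value split into two 32-bit
+ * words.  Per counter it is equivalent to:
+ *
+ *	dst64[i] = be64_to_cpu(src64[i]);
+ *
+ * On big-endian hosts bfa_os_ntohl() is a no-op and the words copy
+ * straight through; on little-endian hosts the two words must also trade
+ * places, hence the #ifdef.
+ */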
+
+static void
+__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	if (complete) {
+		if (fcport->stats_status == BFA_STATUS_OK) {
+
+			/* Swap FC QoS or FCoE stats */
+			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc))
+				bfa_fcport_qos_stats_swap(
+					&fcport->stats_ret->fcqos,
+					&fcport->stats->fcqos);
+			else
+				bfa_fcport_fcoe_stats_swap(
+					&fcport->stats_ret->fcoe,
+					&fcport->stats->fcoe);
+		}
+		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+	} else {
+		fcport->stats_busy = BFA_FALSE;
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_get_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
+		fcport);
+}
+
+static void
+bfa_fcport_send_stats_get(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_get, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+				&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
+		bfa_lpuid(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
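
When the request queue is full, the sender parks a wait element whose
callback re-invokes the same function once a slot frees up. A standalone
sketch of that park-and-resume pattern; the names here are illustrative,
not the real bfa_reqq API:

#include <stdbool.h>
#include <stdio.h>

struct wait_elem {
	void (*qresume)(void *cbarg);
	void *cbarg;
	bool parked;
};

static bool queue_full = true;
static struct wait_elem waiter;

static void send_stats_get(void *cbarg)
{
	if (queue_full) {
		waiter.qresume = send_stats_get;	/* retry myself later */
		waiter.cbarg = cbarg;
		waiter.parked = true;
		return;
	}
	printf("request queued for %s\n", (const char *)cbarg);
}

int main(void)
{
	send_stats_get("fcport0");		/* parks: queue is full */
	queue_full = false;			/* a slot frees up */
	if (waiter.parked) {
		waiter.parked = false;
		waiter.qresume(waiter.cbarg);	/* resumed: now succeeds */
	}
	return 0;
}
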
+
+static void
+__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
+{
+	struct bfa_fcport_s *fcport = cbarg;
+
+	if (complete) {
+		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
+	} else {
+		fcport->stats_busy = BFA_FALSE;
+		fcport->stats_status = BFA_STATUS_OK;
+	}
+}
+
+static void
+bfa_fcport_stats_clr_timeout(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+
+	bfa_trc(fcport->bfa, fcport->stats_qfull);
+
+	if (fcport->stats_qfull) {
+		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
+		fcport->stats_qfull = BFA_FALSE;
+	}
+
+	fcport->stats_status = BFA_STATUS_ETIMER;
+	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+			__bfa_cb_fcport_stats_clr, fcport);
+}
+
+static void
+bfa_fcport_send_stats_clear(void *cbarg)
+{
+	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
+	struct bfi_fcport_req_s *msg;
+
+	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
+
+	if (!msg) {
+		fcport->stats_qfull = BFA_TRUE;
+		bfa_reqq_winit(&fcport->stats_reqq_wait,
+				bfa_fcport_send_stats_clear, fcport);
+		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
+				&fcport->stats_reqq_wait);
+		return;
+	}
+	fcport->stats_qfull = BFA_FALSE;
+
+	bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
+	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
+			bfa_lpuid(fcport->bfa));
+	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
+}
 
 /**
 *  bfa_fcport_public
@@ -903,32 +1266,32 @@
  * Firmware message handler.
  */
 void
-bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
+bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
-	union bfi_pport_i2h_msg_u i2hmsg;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	union bfi_fcport_i2h_msg_u i2hmsg;
 
 	i2hmsg.msg = msg;
-	pport->event_arg.i2hmsg = i2hmsg;
+	fcport->event_arg.i2hmsg = i2hmsg;
 
 	switch (msg->mhdr.msg_id) {
-	case BFI_PPORT_I2H_ENABLE_RSP:
-		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
-			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+	case BFI_FCPORT_I2H_ENABLE_RSP:
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 		break;
 
-	case BFI_PPORT_I2H_DISABLE_RSP:
-		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
-			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
+	case BFI_FCPORT_I2H_DISABLE_RSP:
+		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
 		break;
 
-	case BFI_PPORT_I2H_EVENT:
+	case BFI_FCPORT_I2H_EVENT:
 		switch (i2hmsg.event->link_state.linkstate) {
 		case BFA_PPORT_LINKUP:
-			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
 			break;
 		case BFA_PPORT_LINKDOWN:
-			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
+			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
 			break;
 		case BFA_PPORT_TRUNK_LINKDOWN:
 			/** todo: event notification */
@@ -936,42 +1299,40 @@
 		}
 		break;
 
-	case BFI_PPORT_I2H_GET_STATS_RSP:
-	case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
+	case BFI_FCPORT_I2H_STATS_GET_RSP:
 		/*
 		 * check for timer pop before processing the rsp
 		 */
-		if (pport->stats_busy == BFA_FALSE
-		    || pport->stats_status == BFA_STATUS_ETIMER)
+		if (fcport->stats_busy == BFA_FALSE ||
+			fcport->stats_status == BFA_STATUS_ETIMER)
 			break;
 
-		bfa_timer_stop(&pport->timer);
-		pport->stats_status = i2hmsg.getstats_rsp->status;
-		bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
-			     pport);
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
+		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+				__bfa_cb_fcport_stats_get, fcport);
 		break;
-	case BFI_PPORT_I2H_CLEAR_STATS_RSP:
-	case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
+
+	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
 		/*
 		 * check for timer pop before processing the rsp
 		 */
-		if (pport->stats_busy == BFA_FALSE
-		    || pport->stats_status == BFA_STATUS_ETIMER)
+		if (fcport->stats_busy == BFA_FALSE ||
+			fcport->stats_status == BFA_STATUS_ETIMER)
 			break;
 
-		bfa_timer_stop(&pport->timer);
-		pport->stats_status = BFA_STATUS_OK;
-		bfa_cb_queue(pport->bfa, &pport->hcb_qe,
-			     __bfa_cb_port_stats_clr, pport);
+		bfa_timer_stop(&fcport->timer);
+		fcport->stats_status = BFA_STATUS_OK;
+		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
+				__bfa_cb_fcport_stats_clr, fcport);
 		break;
 
 	default:
 		bfa_assert(0);
+		break;
 	}
 }
 
-
-
 /**
 *  bfa_fcport_api
  */
@@ -980,35 +1341,35 @@
  * Registered callback for port events.
  */
 void
-bfa_pport_event_register(struct bfa_s *bfa,
+bfa_fcport_event_register(struct bfa_s *bfa,
 			 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
 			 void *cbarg)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	pport->event_cbfn = cbfn;
-	pport->event_cbarg = cbarg;
+	fcport->event_cbfn = cbfn;
+	fcport->event_cbarg = cbarg;
 }
 
 bfa_status_t
-bfa_pport_enable(struct bfa_s *bfa)
+bfa_fcport_enable(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (pport->diag_busy)
+	if (fcport->diag_busy)
 		return BFA_STATUS_DIAG_BUSY;
 	else if (bfa_sm_cmp_state
-		 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
+		 (BFA_FCPORT_MOD(bfa), bfa_fcport_sm_disabling_qwait))
 		return BFA_STATUS_DEVBUSY;
 
-	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
 	return BFA_STATUS_OK;
 }
 
 bfa_status_t
-bfa_pport_disable(struct bfa_s *bfa)
+bfa_fcport_disable(struct bfa_s *bfa)
 {
-	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
+	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
 	return BFA_STATUS_OK;
 }
 
@@ -1016,18 +1377,18 @@
  * Configure port speed.
  */
 bfa_status_t
-bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, speed);
 
-	if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
-		bfa_trc(bfa, pport->speed_sup);
+	if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
+		bfa_trc(bfa, fcport->speed_sup);
 		return BFA_STATUS_UNSUPP_SPEED;
 	}
 
-	pport->cfg.speed = speed;
+	fcport->cfg.speed = speed;
 
 	return BFA_STATUS_OK;
 }
@@ -1036,23 +1397,23 @@
  * Get current speed.
  */
 enum bfa_pport_speed
-bfa_pport_get_speed(struct bfa_s *bfa)
+bfa_fcport_get_speed(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->speed;
+	return fcport->speed;
 }
 
 /**
  * Configure port topology.
  */
 bfa_status_t
-bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
+bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, topology);
-	bfa_trc(bfa, pport->cfg.topology);
+	bfa_trc(bfa, fcport->cfg.topology);
 
 	switch (topology) {
 	case BFA_PPORT_TOPOLOGY_P2P:
@@ -1064,7 +1425,7 @@
 		return BFA_STATUS_EINVAL;
 	}
 
-	pport->cfg.topology = topology;
+	fcport->cfg.topology = topology;
 	return BFA_STATUS_OK;
 }
 
@@ -1072,64 +1433,64 @@
  * Get current topology.
  */
 enum bfa_pport_topology
-bfa_pport_get_topology(struct bfa_s *bfa)
+bfa_fcport_get_topology(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->topology;
+	return fcport->topology;
 }
 
 bfa_status_t
-bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
+bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, alpa);
-	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
-	bfa_trc(bfa, pport->cfg.hardalpa);
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
 
-	pport->cfg.cfg_hardalpa = BFA_TRUE;
-	pport->cfg.hardalpa = alpa;
+	fcport->cfg.cfg_hardalpa = BFA_TRUE;
+	fcport->cfg.hardalpa = alpa;
 
 	return BFA_STATUS_OK;
 }
 
 bfa_status_t
-bfa_pport_clr_hardalpa(struct bfa_s *bfa)
+bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
-	bfa_trc(bfa, pport->cfg.hardalpa);
+	bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
+	bfa_trc(bfa, fcport->cfg.hardalpa);
 
-	pport->cfg.cfg_hardalpa = BFA_FALSE;
+	fcport->cfg.cfg_hardalpa = BFA_FALSE;
 	return BFA_STATUS_OK;
 }
 
 bfa_boolean_t
-bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
+bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	*alpa = port->cfg.hardalpa;
-	return port->cfg.cfg_hardalpa;
+	*alpa = fcport->cfg.hardalpa;
+	return fcport->cfg.cfg_hardalpa;
 }
 
 u8
-bfa_pport_get_myalpa(struct bfa_s *bfa)
+bfa_fcport_get_myalpa(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->myalpa;
+	return fcport->myalpa;
 }
 
 bfa_status_t
-bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
+bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, maxfrsize);
-	bfa_trc(bfa, pport->cfg.maxfrsize);
+	bfa_trc(bfa, fcport->cfg.maxfrsize);
 
 	/*
 	 * within range
@@ -1143,41 +1504,41 @@
 	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
 		return BFA_STATUS_INVLD_DFSZ;
 
-	pport->cfg.maxfrsize = maxfrsize;
+	fcport->cfg.maxfrsize = maxfrsize;
 	return BFA_STATUS_OK;
 }
 
 u16
-bfa_pport_get_maxfrsize(struct bfa_s *bfa)
+bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->cfg.maxfrsize;
+	return fcport->cfg.maxfrsize;
 }
 
 u32
-bfa_pport_mypid(struct bfa_s *bfa)
+bfa_fcport_mypid(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->mypid;
+	return fcport->mypid;
 }
 
 u8
-bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
+bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return port->cfg.rx_bbcredit;
+	return fcport->cfg.rx_bbcredit;
 }
 
 void
-bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
+bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	port->cfg.tx_bbcredit = (u8) tx_bbcredit;
-	bfa_port_send_txcredit(port);
+	fcport->cfg.tx_bbcredit = (u8) tx_bbcredit;
+	bfa_fcport_send_txcredit(fcport);
 }
 
 /**
@@ -1185,302 +1546,192 @@
  */
 
 wwn_t
-bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
+bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 	if (node)
-		return pport->nwwn;
+		return fcport->nwwn;
 	else
-		return pport->pwwn;
+		return fcport->pwwn;
 }
 
 void
-bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
+bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));
 
-	attr->nwwn = pport->nwwn;
-	attr->pwwn = pport->pwwn;
+	attr->nwwn = fcport->nwwn;
+	attr->pwwn = fcport->pwwn;
 
-	bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
+	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
 		      sizeof(struct bfa_pport_cfg_s));
 	/*
 	 * speed attributes
 	 */
-	attr->pport_cfg.speed = pport->cfg.speed;
-	attr->speed_supported = pport->speed_sup;
-	attr->speed = pport->speed;
+	attr->pport_cfg.speed = fcport->cfg.speed;
+	attr->speed_supported = fcport->speed_sup;
+	attr->speed = fcport->speed;
 	attr->cos_supported = FC_CLASS_3;
 
 	/*
 	 * topology attributes
 	 */
-	attr->pport_cfg.topology = pport->cfg.topology;
-	attr->topology = pport->topology;
+	attr->pport_cfg.topology = fcport->cfg.topology;
+	attr->topology = fcport->topology;
 
 	/*
 	 * beacon attributes
 	 */
-	attr->beacon = pport->beacon;
-	attr->link_e2e_beacon = pport->link_e2e_beacon;
-	attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);
+	attr->beacon = fcport->beacon;
+	attr->link_e2e_beacon = fcport->link_e2e_beacon;
+	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
 
 	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
 	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
-	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
-	if (bfa_ioc_is_disabled(&pport->bfa->ioc))
+	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, fcport->sm);
+	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
 		attr->port_state = BFA_PPORT_ST_IOCDIS;
-	else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
+	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
 		attr->port_state = BFA_PPORT_ST_FWMISMATCH;
 }
 
-static void
-bfa_port_stats_query(void *cbarg)
-{
-	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-	bfi_pport_get_stats_req_t *msg;
-
-	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
-
-	if (!msg) {
-		port->stats_qfull = BFA_TRUE;
-		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
-			       port);
-		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
-		return;
-	}
-	port->stats_qfull = BFA_FALSE;
-
-	bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
-	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
-		    bfa_lpuid(port->bfa));
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
-
-	return;
-}
-
-static void
-bfa_port_stats_clear(void *cbarg)
-{
-	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-	bfi_pport_clear_stats_req_t *msg;
-
-	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
-
-	if (!msg) {
-		port->stats_qfull = BFA_TRUE;
-		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
-			       port);
-		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
-		return;
-	}
-	port->stats_qfull = BFA_FALSE;
-
-	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
-	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
-		    bfa_lpuid(port->bfa));
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
-	return;
-}
-
-static void
-bfa_port_qos_stats_clear(void *cbarg)
-{
-	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-	bfi_pport_clear_qos_stats_req_t *msg;
-
-	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
-
-	if (!msg) {
-		port->stats_qfull = BFA_TRUE;
-		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
-			       port);
-		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
-		return;
-	}
-	port->stats_qfull = BFA_FALSE;
-
-	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
-	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
-		    bfa_lpuid(port->bfa));
-	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
-	return;
-}
-
-static void
-bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
-{
-	u32       *dip = (u32 *) d;
-	u32       *sip = (u32 *) s;
-	int             i;
-
-	/*
-	 * Do 64 bit fields swap first
-	 */
-	for (i = 0;
-	     i <
-	     ((sizeof(union bfa_pport_stats_u) -
-	       sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
-#ifdef __BIGENDIAN
-		dip[i] = bfa_os_ntohl(sip[i]);
-		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
-#else
-		dip[i] = bfa_os_ntohl(sip[i + 1]);
-		dip[i + 1] = bfa_os_ntohl(sip[i]);
-#endif
-	}
-
-	/*
-	 * Now swap the 32 bit fields
-	 */
-	for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
-		dip[i] = bfa_os_ntohl(sip[i]);
-}
-
-static void
-__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_pport_s *port = cbarg;
-
-	if (complete) {
-		port->stats_cbfn(port->stats_cbarg, port->stats_status);
-	} else {
-		port->stats_busy = BFA_FALSE;
-		port->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_port_stats_clr_timeout(void *cbarg)
-{
-	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-
-	bfa_trc(port->bfa, port->stats_qfull);
-
-	if (port->stats_qfull) {
-		bfa_reqq_wcancel(&port->stats_reqq_wait);
-		port->stats_qfull = BFA_FALSE;
-	}
-
-	port->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
-}
-
-static void
-__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
-{
-	struct bfa_pport_s *port = cbarg;
-
-	if (complete) {
-		if (port->stats_status == BFA_STATUS_OK)
-			bfa_pport_stats_swap(port->stats_ret, port->stats);
-		port->stats_cbfn(port->stats_cbarg, port->stats_status);
-	} else {
-		port->stats_busy = BFA_FALSE;
-		port->stats_status = BFA_STATUS_OK;
-	}
-}
-
-static void
-bfa_port_stats_timeout(void *cbarg)
-{
-	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
-
-	bfa_trc(port->bfa, port->stats_qfull);
-
-	if (port->stats_qfull) {
-		bfa_reqq_wcancel(&port->stats_reqq_wait);
-		port->stats_qfull = BFA_FALSE;
-	}
-
-	port->stats_status = BFA_STATUS_ETIMER;
-	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
-}
-
-#define BFA_PORT_STATS_TOV	1000
+#define BFA_FCPORT_STATS_TOV	1000
 
 /**
- * Fetch port attributes.
+ * Fetch port statistics (FCQoS or FCoE).
  */
 bfa_status_t
-bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
+bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
 		    bfa_cb_pport_t cbfn, void *cbarg)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (port->stats_busy) {
-		bfa_trc(bfa, port->stats_busy);
+	if (fcport->stats_busy) {
+		bfa_trc(bfa, fcport->stats_busy);
 		return BFA_STATUS_DEVBUSY;
 	}
 
-	port->stats_busy = BFA_TRUE;
-	port->stats_ret = stats;
-	port->stats_cbfn = cbfn;
-	port->stats_cbarg = cbarg;
+	fcport->stats_busy  = BFA_TRUE;
+	fcport->stats_ret   = stats;
+	fcport->stats_cbfn  = cbfn;
+	fcport->stats_cbarg = cbarg;
 
-	bfa_port_stats_query(port);
+	bfa_fcport_send_stats_get(fcport);
 
-	bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
-			BFA_PORT_STATS_TOV);
+	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
+		fcport, BFA_FCPORT_STATS_TOV);
 	return BFA_STATUS_OK;
 }
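
Only one statistics request may be outstanding at a time: stats_busy
rejects concurrent callers and a timer (BFA_FCPORT_STATS_TOV) bounds the
wait for the firmware response. A plain-C sketch of the busy guard; the
names stand in for the real bfa timer machinery:

#include <stdbool.h>
#include <stdio.h>

struct stats_ctx {
	bool busy;
};

static int stats_get(struct stats_ctx *ctx)
{
	if (ctx->busy)
		return -1;	/* analogue of BFA_STATUS_DEVBUSY */
	ctx->busy = true;
	/* ...send the request and arm a 1000 ms timeout... */
	return 0;
}

static void stats_done(struct stats_ctx *ctx)
{
	/* ...cancel the timer, hand results to the caller... */
	ctx->busy = false;
}

int main(void)
{
	struct stats_ctx ctx = { .busy = false };

	printf("%d\n", stats_get(&ctx));	/* 0: accepted */
	printf("%d\n", stats_get(&ctx));	/* -1: device busy */
	stats_done(&ctx);
	printf("%d\n", stats_get(&ctx));	/* 0: accepted again */
	return 0;
}
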
 
+/**
+ * Reset port statistics (FCQoS or FCoE).
+ */
 bfa_status_t
-bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	if (port->stats_busy) {
-		bfa_trc(bfa, port->stats_busy);
+	if (fcport->stats_busy) {
+		bfa_trc(bfa, fcport->stats_busy);
 		return BFA_STATUS_DEVBUSY;
 	}
 
-	port->stats_busy = BFA_TRUE;
-	port->stats_cbfn = cbfn;
-	port->stats_cbarg = cbarg;
+	fcport->stats_busy = BFA_TRUE;
+	fcport->stats_cbfn = cbfn;
+	fcport->stats_cbarg = cbarg;
 
-	bfa_port_stats_clear(port);
+	bfa_fcport_send_stats_clear(fcport);
 
-	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
-			BFA_PORT_STATS_TOV);
+	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
+			fcport, BFA_FCPORT_STATS_TOV);
 	return BFA_STATUS_OK;
 }
 
+/**
+ * Fetch FCQoS port statistics
+ */
 bfa_status_t
-bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
+bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_pport_t cbfn, void *cbarg)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCQoS port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FC mode */
+	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+/**
+ * Fetch FCoE port statistics
+ */
+bfa_status_t
+bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
+	bfa_cb_pport_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
+}
+
+/**
+ * Reset FCoE port statistics
+ */
+bfa_status_t
+bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
+{
+	/* Meaningful only for FCoE mode */
+	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
+
+	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
+}
+
+bfa_status_t
+bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
+{
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, bitmap);
-	bfa_trc(bfa, pport->cfg.trunked);
-	bfa_trc(bfa, pport->cfg.trunk_ports);
+	bfa_trc(bfa, fcport->cfg.trunked);
+	bfa_trc(bfa, fcport->cfg.trunk_ports);
 
 	if (!bitmap || (bitmap & (bitmap - 1)))
 		return BFA_STATUS_EINVAL;
 
-	pport->cfg.trunked = BFA_TRUE;
-	pport->cfg.trunk_ports = bitmap;
+	fcport->cfg.trunked = BFA_TRUE;
+	fcport->cfg.trunk_ports = bitmap;
 
 	return BFA_STATUS_OK;
 }
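
The bitmap test above rejects an empty mask and any mask with more than
one bit set: x & (x - 1) clears the lowest set bit, so the result is
zero exactly when x is a power of two. A standalone check:

#include <stdio.h>

static int one_bit_set(unsigned int x)
{
	return x && !(x & (x - 1));
}

int main(void)
{
	printf("%d %d %d %d\n",
	       one_bit_set(0),		/* 0: no port selected */
	       one_bit_set(4),		/* 1: single port */
	       one_bit_set(6),		/* 0: two ports */
	       one_bit_set(8));		/* 1: single port */
	return 0;
}
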
 
 void
-bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
+bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
-	qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
+	qos_attr->state = bfa_os_ntohl(fcport->qos_attr.state);
+	qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
 }
 
 void
-bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
+bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
 			  struct bfa_qos_vc_attr_s *qos_vc_attr)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
-	struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
 	u32        i = 0;
 
 	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
@@ -1503,119 +1754,89 @@
 }
 
 /**
- * Fetch QoS Stats.
- */
-bfa_status_t
-bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
-			bfa_cb_pport_t cbfn, void *cbarg)
-{
-	/*
-	 * QoS stats is embedded in port stats
-	 */
-	return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
-}
-
-bfa_status_t
-bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
-{
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
-
-	if (port->stats_busy) {
-		bfa_trc(bfa, port->stats_busy);
-		return BFA_STATUS_DEVBUSY;
-	}
-
-	port->stats_busy = BFA_TRUE;
-	port->stats_cbfn = cbfn;
-	port->stats_cbarg = cbarg;
-
-	bfa_port_qos_stats_clear(port);
-
-	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
-			BFA_PORT_STATS_TOV);
-	return BFA_STATUS_OK;
-}
-
-/**
  * Fetch port attributes.
  */
 bfa_status_t
-bfa_pport_trunk_disable(struct bfa_s *bfa)
+bfa_fcport_trunk_disable(struct bfa_s *bfa)
 {
 	return BFA_STATUS_OK;
 }
 
 bfa_boolean_t
-bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
+bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	*bitmap = port->cfg.trunk_ports;
-	return port->cfg.trunked;
+	*bitmap = fcport->cfg.trunk_ports;
+	return fcport->cfg.trunked;
 }
 
 bfa_boolean_t
-bfa_pport_is_disabled(struct bfa_s *bfa)
+bfa_fcport_is_disabled(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
+	return bfa_sm_to_state(hal_pport_sm_table, fcport->sm) ==
 		BFA_PPORT_ST_DISABLED;
 
 }
 
 bfa_boolean_t
-bfa_pport_is_ratelim(struct bfa_s *bfa)
+bfa_fcport_is_ratelim(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
+	return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
 
 }
 
 void
-bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
+bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
+	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
 
 	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, pport->cfg.qos_enabled);
+	bfa_trc(bfa, fcport->cfg.qos_enabled);
 
-	pport->cfg.qos_enabled = on_off;
+	bfa_trc(bfa, ioc_type);
+
+	if (ioc_type == BFA_IOC_TYPE_FC)
+		fcport->cfg.qos_enabled = on_off;
 }
 
 void
-bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
+bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, on_off);
-	bfa_trc(bfa, pport->cfg.ratelimit);
+	bfa_trc(bfa, fcport->cfg.ratelimit);
 
-	pport->cfg.ratelimit = on_off;
-	if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
-		pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
+	fcport->cfg.ratelimit = on_off;
+	if (fcport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
+		fcport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
 }
 
 /**
  * Configure default minimum ratelim speed
  */
 bfa_status_t
-bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
+bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, speed);
 
 	/*
 	 * Auto and speeds greater than the supported speed, are invalid
 	 */
-	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
-		bfa_trc(bfa, pport->speed_sup);
+	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
+		bfa_trc(bfa, fcport->speed_sup);
 		return BFA_STATUS_UNSUPP_SPEED;
 	}
 
-	pport->cfg.trl_def_speed = speed;
+	fcport->cfg.trl_def_speed = speed;
 
 	return BFA_STATUS_OK;
 }
@@ -1624,45 +1845,45 @@
  * Get default minimum ratelim speed
  */
 enum bfa_pport_speed
-bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
+bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
-	bfa_trc(bfa, pport->cfg.trl_def_speed);
-	return pport->cfg.trl_def_speed;
+	bfa_trc(bfa, fcport->cfg.trl_def_speed);
+	return fcport->cfg.trl_def_speed;
 
 }
 
 void
-bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
+bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, status);
-	bfa_trc(bfa, pport->diag_busy);
+	bfa_trc(bfa, fcport->diag_busy);
 
-	pport->diag_busy = status;
+	fcport->diag_busy = status;
 }
 
 void
-bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
+bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
 		 bfa_boolean_t link_e2e_beacon)
 {
-	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
+	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
 
 	bfa_trc(bfa, beacon);
 	bfa_trc(bfa, link_e2e_beacon);
-	bfa_trc(bfa, pport->beacon);
-	bfa_trc(bfa, pport->link_e2e_beacon);
+	bfa_trc(bfa, fcport->beacon);
+	bfa_trc(bfa, fcport->link_e2e_beacon);
 
-	pport->beacon = beacon;
-	pport->link_e2e_beacon = link_e2e_beacon;
+	fcport->beacon = beacon;
+	fcport->link_e2e_beacon = link_e2e_beacon;
 }
 
 bfa_boolean_t
-bfa_pport_is_linkup(struct bfa_s *bfa)
+bfa_fcport_is_linkup(struct bfa_s *bfa)
 {
-	return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
+	return bfa_sm_cmp_state(BFA_FCPORT_MOD(bfa), bfa_fcport_sm_linkup);
 }
 
 
diff --git a/drivers/scsi/bfa/bfa_fcs.c b/drivers/scsi/bfa/bfa_fcs.c
index 7cb39a3..3516172 100644
--- a/drivers/scsi/bfa/bfa_fcs.c
+++ b/drivers/scsi/bfa/bfa_fcs.c
@@ -36,6 +36,7 @@
  * FCS sub-modules
  */
 struct bfa_fcs_mod_s {
+	void		(*attach) (struct bfa_fcs_s *fcs);
 	void            (*modinit) (struct bfa_fcs_s *fcs);
 	void            (*modexit) (struct bfa_fcs_s *fcs);
 };
@@ -43,12 +44,10 @@
 #define BFA_FCS_MODULE(_mod) { _mod ## _modinit, _mod ## _modexit }
 
 static struct bfa_fcs_mod_s fcs_modules[] = {
-	BFA_FCS_MODULE(bfa_fcs_pport),
-	BFA_FCS_MODULE(bfa_fcs_uf),
-	BFA_FCS_MODULE(bfa_fcs_fabric),
-	BFA_FCS_MODULE(bfa_fcs_vport),
-	BFA_FCS_MODULE(bfa_fcs_rport),
-	BFA_FCS_MODULE(bfa_fcs_fcpim),
+	{ bfa_fcs_pport_attach, NULL, NULL },
+	{ bfa_fcs_uf_attach, NULL, NULL },
+	{ bfa_fcs_fabric_attach, bfa_fcs_fabric_modinit,
+	 bfa_fcs_fabric_modexit },
 };
 
 /**
@@ -71,16 +70,10 @@
  */
 
 /**
- * 		FCS instance initialization.
- *
- * 	param[in]		fcs		FCS instance
- * 	param[in]		bfa		BFA instance
- * 	param[in]		bfad		BFA driver instance
- *
- * 	return None
+ * FCS attach -- called once to initialize data structures at driver
+ * attach time.
  */
 void
-bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 			bfa_boolean_t min_cfg)
 {
 	int             i;
@@ -95,7 +88,24 @@
 
 	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
 		mod = &fcs_modules[i];
-		mod->modinit(fcs);
+		if (mod->attach)
+			mod->attach(fcs);
+	}
+}
+
+/**
+ * FCS initialization -- called once after bfa initialization is complete.
+ */
+void
+bfa_fcs_init(struct bfa_fcs_s *fcs)
+{
+	int             i;
+	struct bfa_fcs_mod_s  *mod;
+
+	for (i = 0; i < sizeof(fcs_modules) / sizeof(fcs_modules[0]); i++) {
+		mod = &fcs_modules[i];
+		if (mod->modinit)
+			mod->modinit(fcs);
 	}
 }
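
Each entry's attach and modinit hooks are optional and NULL entries are
skipped, which is what lets pport and uf register only an attach step.
A self-contained sketch of the two-phase table walk, with made-up
module names:

#include <stdio.h>

struct mod_ops {
	void (*attach)(void);
	void (*modinit)(void);
};

static void fab_attach(void) { puts("fabric attach"); }
static void fab_init(void)   { puts("fabric init"); }
static void uf_attach(void)  { puts("uf attach"); }

static const struct mod_ops mods[] = {
	{ fab_attach, fab_init },
	{ uf_attach, NULL },		/* no init phase */
};

int main(void)
{
	size_t i, n = sizeof(mods) / sizeof(mods[0]);

	for (i = 0; i < n; i++)		/* driver attach time */
		if (mods[i].attach)
			mods[i].attach();
	for (i = 0; i < n; i++)		/* after bfa init completes */
		if (mods[i].modinit)
			mods[i].modinit();
	return 0;
}
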
 
@@ -127,6 +137,23 @@
 }
 
 /**
+ * FCS FDMI driver parameter initialization.
+ *
+ * 	param[in]		fcs		FCS instance
+ * 	param[in]		fdmi_enable	TRUE/FALSE
+ *
+ * 	return None
+ */
+void
+bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable)
+{
+	fcs->fdmi_enabled = fdmi_enable;
+}
+
+/**
  * 		FCS instance cleanup and exit.
  *
  * 	param[in]		fcs			FCS instance
@@ -143,10 +170,12 @@
 	nmods = sizeof(fcs_modules) / sizeof(fcs_modules[0]);
 
 	for (i = 0; i < nmods; i++) {
-		bfa_wc_up(&fcs->wc);
 
 		mod = &fcs_modules[i];
-		mod->modexit(fcs);
+		if (mod->modexit) {
+			bfa_wc_up(&fcs->wc);
+			mod->modexit(fcs);
+		}
 	}
 
 	bfa_wc_wait(&fcs->wc);
diff --git a/drivers/scsi/bfa/bfa_fcs_lport.c b/drivers/scsi/bfa/bfa_fcs_lport.c
index c7ab257..7c1251c 100644
--- a/drivers/scsi/bfa/bfa_fcs_lport.c
+++ b/drivers/scsi/bfa/bfa_fcs_lport.c
@@ -114,7 +114,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -136,7 +136,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -176,7 +176,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -214,7 +214,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -234,7 +234,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -263,30 +263,8 @@
 
 	bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
 
-	switch (event) {
-	case BFA_LPORT_AEN_ONLINE:
-		bfa_log(logmod, BFA_AEN_LPORT_ONLINE, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_OFFLINE:
-		bfa_log(logmod, BFA_AEN_LPORT_OFFLINE, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_NEW:
-		bfa_log(logmod, BFA_AEN_LPORT_NEW, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_DELETE:
-		bfa_log(logmod, BFA_AEN_LPORT_DELETE, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_DISCONNECT:
-		bfa_log(logmod, BFA_AEN_LPORT_DISCONNECT, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
+		role_str[role/2]);
 
 	aen_data.lport.vf_id = port->fabric->vf_id;
 	aen_data.lport.roles = role;
@@ -873,36 +851,46 @@
 }
 
 /**
- * Logical port initialization of base or virtual port.
- * Called by fabric for base port or by vport for virtual ports.
+ * Attach time initialization of logical ports.
  */
 void
-bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
-		   u16 vf_id, struct bfa_port_cfg_s *port_cfg,
-		   struct bfa_fcs_vport_s *vport)
+bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+		uint16_t vf_id, struct bfa_fcs_vport_s *vport)
 {
 	lport->fcs = fcs;
 	lport->fabric = bfa_fcs_vf_lookup(fcs, vf_id);
-	bfa_os_assign(lport->port_cfg, *port_cfg);
 	lport->vport = vport;
 	lport->lp_tag = (vport) ? bfa_lps_get_tag(vport->lps) :
 			 bfa_lps_get_tag(lport->fabric->lps);
 
 	INIT_LIST_HEAD(&lport->rport_q);
 	lport->num_rports = 0;
+}
 
-	lport->bfad_port =
-		bfa_fcb_port_new(fcs->bfad, lport, lport->port_cfg.roles,
+/**
+ * Logical port initialization of base or virtual port.
+ * Called by fabric for base port or by vport for virtual ports.
+ */
+
+void
+bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
+		struct bfa_port_cfg_s *port_cfg)
+{
+	struct bfa_fcs_vport_s *vport = lport->vport;
+
+	bfa_os_assign(lport->port_cfg, *port_cfg);
+
+	lport->bfad_port = bfa_fcb_port_new(lport->fcs->bfad, lport,
+				lport->port_cfg.roles,
 				lport->fabric->vf_drv,
 				vport ? vport->vport_drv : NULL);
+
 	bfa_fcs_port_aen_post(lport, BFA_LPORT_AEN_NEW);
 
 	bfa_sm_set_state(lport, bfa_fcs_port_sm_uninit);
 	bfa_sm_send_event(lport, BFA_FCS_PORT_SM_CREATE);
 }
 
-
-
 /**
  *  fcs_lport_api
  */
@@ -921,13 +909,20 @@
 	if (port->fabric) {
 		port_attr->port_type = bfa_fcs_fabric_port_type(port->fabric);
 		port_attr->loopback = bfa_fcs_fabric_is_loopback(port->fabric);
+		port_attr->authfail =
+				bfa_fcs_fabric_is_auth_failed(port->fabric);
 		port_attr->fabric_name = bfa_fcs_port_get_fabric_name(port);
 		memcpy(port_attr->fabric_ip_addr,
 		       bfa_fcs_port_get_fabric_ipaddr(port),
 		       BFA_FCS_FABRIC_IPADDR_SZ);
 
-		if (port->vport != NULL)
+		if (port->vport != NULL) {
 			port_attr->port_type = BFA_PPORT_TYPE_VPORT;
+			port_attr->fpma_mac =
+				bfa_lps_get_lp_mac(port->vport->lps);
+		} else {
+			port_attr->fpma_mac =
+				bfa_lps_get_lp_mac(port->fabric->lps);
+		}
 
 	} else {
 		port_attr->port_type = BFA_PPORT_TYPE_UNKNOWN;
diff --git a/drivers/scsi/bfa/bfa_fcs_port.c b/drivers/scsi/bfa/bfa_fcs_port.c
index 9c4b24e..3c27788 100644
--- a/drivers/scsi/bfa/bfa_fcs_port.c
+++ b/drivers/scsi/bfa/bfa_fcs_port.c
@@ -55,14 +55,7 @@
 }
 
 void
-bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_pport_attach(struct bfa_fcs_s *fcs)
 {
-	bfa_pport_event_register(fcs->bfa, bfa_fcs_pport_event_handler,
-				     fcs);
-}
-
-void
-bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
+	bfa_fcport_event_register(fcs->bfa, bfa_fcs_pport_event_handler, fcs);
 }
diff --git a/drivers/scsi/bfa/bfa_fcs_uf.c b/drivers/scsi/bfa/bfa_fcs_uf.c
index ad01db6..3d57d48 100644
--- a/drivers/scsi/bfa/bfa_fcs_uf.c
+++ b/drivers/scsi/bfa/bfa_fcs_uf.c
@@ -93,13 +93,7 @@
 }
 
 void
-bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_uf_attach(struct bfa_fcs_s *fcs)
 {
 	bfa_uf_recv_register(fcs->bfa, bfa_fcs_uf_recv, fcs);
 }
-
-void
-bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
diff --git a/drivers/scsi/bfa/bfa_hw_cb.c b/drivers/scsi/bfa/bfa_hw_cb.c
index ede1438..871a4e2 100644
--- a/drivers/scsi/bfa/bfa_hw_cb.c
+++ b/drivers/scsi/bfa/bfa_hw_cb.c
@@ -53,6 +53,18 @@
 }
 
 void
+bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+}
+
+static void
+bfa_hwcb_reqq_ack_msix(struct bfa_s *bfa, int reqq)
+{
+	bfa_reg_write(bfa->iocfc.bfa_regs.intr_status,
+		__HFN_INT_CPE_Q0 << CPE_Q_NUM(bfa_ioc_pcifn(&bfa->ioc), reqq));
+}
+
+void
 bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq)
 {
 }
@@ -136,6 +148,7 @@
 void
 bfa_hwcb_isr_mode_set(struct bfa_s *bfa, bfa_boolean_t msix)
 {
+	bfa->iocfc.hwif.hw_reqq_ack = bfa_hwcb_reqq_ack_msix;
 	bfa->iocfc.hwif.hw_rspq_ack = bfa_hwcb_rspq_ack_msix;
 }
 
diff --git a/drivers/scsi/bfa/bfa_hw_ct.c b/drivers/scsi/bfa/bfa_hw_ct.c
index 51ae574..76ceb9a 100644
--- a/drivers/scsi/bfa/bfa_hw_ct.c
+++ b/drivers/scsi/bfa/bfa_hw_ct.c
@@ -85,6 +85,15 @@
 }
 
 void
+bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq)
+{
+	u32 r32;
+
+	r32 = bfa_reg_read(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq]);
+	bfa_reg_write(bfa->iocfc.bfa_regs.cpe_q_ctrl[reqq], r32);
+}
+
+void
 bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq)
 {
 	u32	r32;
diff --git a/drivers/scsi/bfa/bfa_intr.c b/drivers/scsi/bfa/bfa_intr.c
index b36540e..0eba3f9 100644
--- a/drivers/scsi/bfa/bfa_intr.c
+++ b/drivers/scsi/bfa/bfa_intr.c
@@ -15,7 +15,7 @@
  * General Public License for more details.
  */
 #include <bfa.h>
-#include <bfi/bfi_cbreg.h>
+#include <bfi/bfi_ctreg.h>
 #include <bfa_port_priv.h>
 #include <bfa_intr_priv.h>
 #include <cs/bfa_debug.h>
@@ -34,6 +34,26 @@
 	bfa_ioc_mbox_isr(&bfa->ioc);
 }
 
+static void
+bfa_reqq_resume(struct bfa_s *bfa, int qid)
+{
+	struct list_head *waitq, *qe, *qen;
+	struct bfa_reqq_wait_s *wqe;
+
+	waitq = bfa_reqq(bfa, qid);
+	list_for_each_safe(qe, qen, waitq) {
+		/**
+		 * Callback only as long as there is room in request queue
+		 */
+		if (bfa_reqq_full(bfa, qid))
+			break;
+
+		list_del(qe);
+		wqe = (struct bfa_reqq_wait_s *) qe;
+		wqe->qresume(wqe->cbarg);
+	}
+}
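
The wait element is unlinked before its qresume callback runs, so the
walk must capture the next pointer up front; that is exactly what
list_for_each_safe() provides. A minimal singly-linked version of the
same idea:

#include <stdio.h>
#include <stdlib.h>

struct node {
	int id;
	struct node *next;
};

static void resume_all(struct node **head)
{
	struct node *qe = *head, *qen;

	while (qe) {
		qen = qe->next;		/* save next before unlinking */
		printf("resuming %d\n", qe->id);
		free(qe);		/* qe is gone; qen is still valid */
		qe = qen;
	}
	*head = NULL;
}

int main(void)
{
	struct node *b = malloc(sizeof(*b));
	struct node *a = malloc(sizeof(*a));

	b->id = 2; b->next = NULL;
	a->id = 1; a->next = b;
	resume_all(&a);
	return 0;
}
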
+
 void
 bfa_msix_all(struct bfa_s *bfa, int vec)
 {
@@ -96,7 +116,8 @@
 
 	bfa_msix_install(bfa);
 	intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
-		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS);
+		       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
+		       __HFN_INT_LL_HALT);
 
 	if (pci_func == 0)
 		intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
@@ -127,23 +148,18 @@
 void
 bfa_msix_reqq(struct bfa_s *bfa, int qid)
 {
-	struct list_head 		*waitq, *qe, *qen;
-	struct bfa_reqq_wait_s	*wqe;
+	struct list_head *waitq;
 
 	qid &= (BFI_IOC_MAX_CQS - 1);
 
-	waitq = bfa_reqq(bfa, qid);
-	list_for_each_safe(qe, qen, waitq) {
-		/**
-		 * Callback only as long as there is room in request queue
-		 */
-		if (bfa_reqq_full(bfa, qid))
-			break;
+	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
 
-		list_del(qe);
-		wqe = (struct bfa_reqq_wait_s *) qe;
-		wqe->qresume(wqe->cbarg);
-	}
+	/**
+	 * Resume any pending requests in the corresponding reqq.
+	 */
+	waitq = bfa_reqq(bfa, qid);
+	if (!list_empty(waitq))
+		bfa_reqq_resume(bfa, qid);
 }
 
 void
@@ -157,26 +173,27 @@
 }
 
 void
-bfa_msix_rspq(struct bfa_s *bfa, int rsp_qid)
+bfa_msix_rspq(struct bfa_s *bfa, int qid)
 {
-	struct bfi_msg_s      *m;
-	u32        pi, ci;
+	struct bfi_msg_s *m;
+	u32 pi, ci;
+	struct list_head *waitq;
 
-	bfa_trc_fp(bfa, rsp_qid);
+	bfa_trc_fp(bfa, qid);
 
-	rsp_qid &= (BFI_IOC_MAX_CQS - 1);
+	qid &= (BFI_IOC_MAX_CQS - 1);
 
-	bfa->iocfc.hwif.hw_rspq_ack(bfa, rsp_qid);
+	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);
 
-	ci = bfa_rspq_ci(bfa, rsp_qid);
-	pi = bfa_rspq_pi(bfa, rsp_qid);
+	ci = bfa_rspq_ci(bfa, qid);
+	pi = bfa_rspq_pi(bfa, qid);
 
 	bfa_trc_fp(bfa, ci);
 	bfa_trc_fp(bfa, pi);
 
 	if (bfa->rme_process) {
 		while (ci != pi) {
-			m = bfa_rspq_elem(bfa, rsp_qid, ci);
+			m = bfa_rspq_elem(bfa, qid, ci);
 			bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);
 
 			bfa_isrs[m->mhdr.msg_class] (bfa, m);
@@ -188,25 +205,59 @@
 	/**
 	 * update CI
 	 */
-	bfa_rspq_ci(bfa, rsp_qid) = pi;
-	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[rsp_qid], pi);
+	bfa_rspq_ci(bfa, qid) = pi;
+	bfa_reg_write(bfa->iocfc.bfa_regs.rme_q_ci[qid], pi);
 	bfa_os_mmiowb();
+
+	/**
+	 * Resume any pending requests in the corresponding reqq.
+	 */
+	waitq = bfa_reqq(bfa, qid);
+	if (!list_empty(waitq))
+		bfa_reqq_resume(bfa, qid);
 }
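
The response queue is a ring: the firmware advances the producer index
(pi), the driver consumes from the consumer index (ci), and both wrap
modulo the queue depth. A user-space sketch of the consumer loop,
assuming a power-of-two depth as the hardware uses:

#include <stdio.h>

#define QDEPTH 8			/* must be a power of two */

int main(void)
{
	int msgs[QDEPTH] = { 10, 11, 12, 13, 14 };
	unsigned int ci = 0, pi = 5;	/* firmware posted five messages */

	while (ci != pi) {
		printf("dispatch msg %d\n", msgs[ci]);
		ci = (ci + 1) & (QDEPTH - 1);	/* wrap like the hardware */
	}
	/* ci would now be written back so the slots can be reused */
	return 0;
}
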
 
 void
 bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
 {
-	u32 intr;
+	u32 intr, curr_value;
 
 	intr = bfa_reg_read(bfa->iocfc.bfa_regs.intr_status);
 
 	if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
 		bfa_msix_lpu(bfa);
 
-	if (intr & (__HFN_INT_ERR_EMC |
-		    __HFN_INT_ERR_LPU0 | __HFN_INT_ERR_LPU1 |
-		    __HFN_INT_ERR_PSS))
+	intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
+		__HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);
+
+	if (intr) {
+		if (intr & __HFN_INT_LL_HALT) {
+			/**
+			 * If LL_HALT bit is set then FW Init Halt LL Port
+			 * Register needs to be cleared as well so Interrupt
+			 * Status Register will be cleared.
+			 */
+			curr_value = bfa_reg_read(bfa->ioc.ioc_regs.ll_halt);
+			curr_value &= ~__FW_INIT_HALT_P;
+			bfa_reg_write(bfa->ioc.ioc_regs.ll_halt, curr_value);
+		}
+
+		if (intr & __HFN_INT_ERR_PSS) {
+			/**
+			 * ERR_PSS bit needs to be cleared as well in case
+			 * interrupts are shared so the driver's interrupt
+			 * handler is still called even though it is already
+			 * masked out.
+			 */
+			curr_value = bfa_reg_read(
+				bfa->ioc.ioc_regs.pss_err_status_reg);
+			curr_value &= __PSS_ERR_STATUS_SET;
+			bfa_reg_write(bfa->ioc.ioc_regs.pss_err_status_reg,
+				curr_value);
+		}
+
+		bfa_reg_write(bfa->iocfc.bfa_regs.intr_status, intr);
 		bfa_msix_errint(bfa, intr);
+	}
 }
 
 void
diff --git a/drivers/scsi/bfa/bfa_ioc.c b/drivers/scsi/bfa/bfa_ioc.c
index 397d7e9..e038bc9 100644
--- a/drivers/scsi/bfa/bfa_ioc.c
+++ b/drivers/scsi/bfa/bfa_ioc.c
@@ -18,7 +18,7 @@
 #include <bfa.h>
 #include <bfa_ioc.h>
 #include <bfa_fwimg_priv.h>
-#include <bfa_trcmod_priv.h>
+#include <cna/bfa_cna_trcmod.h>
 #include <cs/bfa_debug.h>
 #include <bfi/bfi_ioc.h>
 #include <bfi/bfi_ctreg.h>
@@ -27,18 +27,17 @@
 #include <log/bfa_log_hal.h>
 #include <defs/bfa_defs_pci.h>
 
-BFA_TRC_FILE(HAL, IOC);
+BFA_TRC_FILE(CNA, IOC);
 
 /**
  * IOC local definitions
  */
 #define BFA_IOC_TOV		2000	/* msecs */
-#define BFA_IOC_HB_TOV		1000	/* msecs */
-#define BFA_IOC_HB_FAIL_MAX	4
-#define BFA_IOC_HWINIT_MAX	2
+#define BFA_IOC_HWSEM_TOV       500     /* msecs */
+#define BFA_IOC_HB_TOV          500     /* msecs */
+#define BFA_IOC_HWINIT_MAX      2
 #define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
-#define BFA_IOC_TOV_RECOVER	(BFA_IOC_HB_FAIL_MAX * BFA_IOC_HB_TOV \
-				+ BFA_IOC_TOV)
+#define BFA_IOC_TOV_RECOVER      BFA_IOC_HB_TOV
 
 #define bfa_ioc_timer_start(__ioc)					\
 	bfa_timer_begin((__ioc)->timer_mod, &(__ioc)->ioc_timer,	\
@@ -51,12 +50,25 @@
 	 (sizeof(struct bfa_trc_mod_s) -			\
 	  BFA_TRC_MAX * sizeof(struct bfa_trc_s)))
 #define BFA_DBG_FWTRC_OFF(_fn)	(BFI_IOC_TRC_OFF + BFA_DBG_FWTRC_LEN * (_fn))
-#define bfa_ioc_stats(_ioc, _stats)	((_ioc)->stats._stats++)
 
-#define BFA_FLASH_CHUNK_NO(off)         (off / BFI_FLASH_CHUNK_SZ_WORDS)
-#define BFA_FLASH_OFFSET_IN_CHUNK(off)  (off % BFI_FLASH_CHUNK_SZ_WORDS)
-#define BFA_FLASH_CHUNK_ADDR(chunkno)   (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
-bfa_boolean_t   bfa_auto_recover = BFA_FALSE;
+/**
+ * Asic specific macros : see bfa_hw_cb.c and bfa_hw_ct.c for details.
+ */
+
+#define bfa_ioc_firmware_lock(__ioc)                    \
+			((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
+#define bfa_ioc_firmware_unlock(__ioc)                  \
+			((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
+#define bfa_ioc_fwimg_get_chunk(__ioc, __off)           \
+			((__ioc)->ioc_hwif->ioc_fwimg_get_chunk(__ioc, __off))
+#define bfa_ioc_fwimg_get_size(__ioc)                   \
+			((__ioc)->ioc_hwif->ioc_fwimg_get_size(__ioc))
+#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
+#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
+#define bfa_ioc_notify_hbfail(__ioc)                    \
+			((__ioc)->ioc_hwif->ioc_notify_hbfail(__ioc))
+
+bfa_boolean_t   bfa_auto_recover = BFA_TRUE;
 
 /*
  * forward declarations
@@ -64,7 +76,6 @@
 static void     bfa_ioc_aen_post(struct bfa_ioc_s *bfa,
 				 enum bfa_ioc_aen_event event);
 static void     bfa_ioc_hw_sem_get(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_hw_sem_get_cancel(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_hwinit(struct bfa_ioc_s *ioc, bfa_boolean_t force);
 static void     bfa_ioc_timeout(void *ioc);
@@ -77,8 +88,6 @@
 static void     bfa_ioc_mbox_poll(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_mbox_hbfail(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_recover(struct bfa_ioc_s *ioc);
-static bfa_boolean_t bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc);
-static void     bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_disable_comp(struct bfa_ioc_s *ioc);
 static void     bfa_ioc_lpu_stop(struct bfa_ioc_s *ioc);
 
@@ -508,14 +517,19 @@
 	bfa_trc(ioc, event);
 
 	switch (event) {
-	case IOC_E_HWERROR:
 	case IOC_E_FWRSP_DISABLE:
 		bfa_ioc_timer_stop(ioc);
+		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
+		break;
+
+	case IOC_E_HWERROR:
+		bfa_ioc_timer_stop(ioc);
 		/*
 		 * !!! fall through !!!
 		 */
 
 	case IOC_E_TIMEOUT:
+		bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
 		bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
 		break;
 
@@ -608,15 +622,12 @@
 	 * Mark IOC as failed in hardware and stop firmware.
 	 */
 	bfa_ioc_lpu_stop(ioc);
-	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_HBFAIL);
+	bfa_reg_write(ioc->ioc_regs.ioc_fwstate, BFI_IOC_FAIL);
 
-	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
-		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
-		/*
-		 * Wait for halt to take effect
-		 */
-		bfa_reg_read(ioc->ioc_regs.ll_halt);
-	}
+	/**
+	 * Notify other functions on HB failure.
+	 */
+	bfa_ioc_notify_hbfail(ioc);
 
 	/**
 	 * Notify driver and common modules registered for notification.
@@ -672,6 +683,12 @@
 		 */
 		break;
 
+	case IOC_E_HWERROR:
+		/*
+		 * HB failure notification, ignore.
+		 */
+		break;
+
 	default:
 		bfa_sm_fault(ioc, event);
 	}
@@ -700,7 +717,7 @@
 	}
 }
 
-static void
+void
 bfa_ioc_sem_timeout(void *ioc_arg)
 {
 	struct bfa_ioc_s *ioc = (struct bfa_ioc_s *)ioc_arg;
@@ -708,26 +725,32 @@
 	bfa_ioc_hw_sem_get(ioc);
 }
 
-static void
-bfa_ioc_usage_sem_get(struct bfa_ioc_s *ioc)
+bfa_boolean_t
+bfa_ioc_sem_get(bfa_os_addr_t sem_reg)
 {
-	u32        r32;
-	int             cnt = 0;
-#define BFA_SEM_SPINCNT	1000
+	u32 r32;
+	int cnt = 0;
+#define BFA_SEM_SPINCNT 3000
 
-	do {
-		r32 = bfa_reg_read(ioc->ioc_regs.ioc_usage_sem_reg);
+	r32 = bfa_reg_read(sem_reg);
+
+	while (r32 && (cnt < BFA_SEM_SPINCNT)) {
 		cnt++;
-		if (cnt > BFA_SEM_SPINCNT)
-			break;
-	} while (r32 != 0);
+		bfa_os_udelay(2);
+		r32 = bfa_reg_read(sem_reg);
+	}
+
+	if (r32 == 0)
+		return BFA_TRUE;
+
 	bfa_assert(cnt < BFA_SEM_SPINCNT);
+	return BFA_FALSE;
 }
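
bfa_ioc_sem_get() polls the semaphore register with a bounded spin,
sleeping 2 microseconds between reads and giving up after
BFA_SEM_SPINCNT tries. A standalone sketch, with read_reg() and
delay_us() standing in for bfa_reg_read() and bfa_os_udelay():

#include <stdbool.h>
#include <stdio.h>

#define SPIN_LIMIT 3000

static unsigned int fake_sem = 1;	/* pretend it is held at first */

static unsigned int read_reg(void)
{
	static int polls;

	if (++polls == 5)		/* released after a few polls */
		fake_sem = 0;
	return fake_sem;
}

static void delay_us(unsigned int us) { (void)us; }

static bool sem_get(void)
{
	unsigned int r32 = read_reg();
	int cnt = 0;

	while (r32 && cnt < SPIN_LIMIT) {
		cnt++;
		delay_us(2);
		r32 = read_reg();
	}
	return r32 == 0;		/* true: semaphore acquired */
}

int main(void)
{
	printf("acquired: %d\n", sem_get());
	return 0;
}
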
 
-static void
-bfa_ioc_usage_sem_release(struct bfa_ioc_s *ioc)
+void
+bfa_ioc_sem_release(bfa_os_addr_t sem_reg)
 {
-	bfa_reg_write(ioc->ioc_regs.ioc_usage_sem_reg, 1);
+	bfa_reg_write(sem_reg, 1);
 }
 
 static void
@@ -737,7 +760,7 @@
 
 	/**
 	 * First read to the semaphore register will return 0, subsequent reads
-	 * will return 1. Semaphore is released by writing 0 to the register
+	 * will return 1. Semaphore is released by writing 1 to the register
 	 */
 	r32 = bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
 	if (r32 == 0) {
@@ -746,10 +769,10 @@
 	}
 
 	bfa_timer_begin(ioc->timer_mod, &ioc->sem_timer, bfa_ioc_sem_timeout,
-			ioc, BFA_IOC_TOV);
+			ioc, BFA_IOC_HWSEM_TOV);
 }
 
-static void
+void
 bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc)
 {
 	bfa_reg_write(ioc->ioc_regs.ioc_sem_reg, 1);
@@ -828,7 +851,7 @@
 /**
  * Get driver and firmware versions.
  */
-static void
+void
 bfa_ioc_fwver_get(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 {
 	u32        pgnum, pgoff;
@@ -847,24 +870,10 @@
 	}
 }
 
-static u32 *
-bfa_ioc_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
-{
-	if (ioc->ctdev)
-		return bfi_image_ct_get_chunk(off);
-	return bfi_image_cb_get_chunk(off);
-}
-
-static          u32
-bfa_ioc_fwimg_get_size(struct bfa_ioc_s *ioc)
-{
-return (ioc->ctdev) ? bfi_image_ct_size : bfi_image_cb_size;
-}
-
 /**
  * Returns TRUE if same.
  */
-static          bfa_boolean_t
+bfa_boolean_t
 bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc, struct bfi_ioc_image_hdr_s *fwhdr)
 {
 	struct bfi_ioc_image_hdr_s *drv_fwhdr;
@@ -921,95 +930,6 @@
 }
 
 /**
- * Return true if firmware of current driver matches the running firmware.
- */
-static          bfa_boolean_t
-bfa_ioc_firmware_lock(struct bfa_ioc_s *ioc)
-{
-	enum bfi_ioc_state ioc_fwstate;
-	u32        usecnt;
-	struct bfi_ioc_image_hdr_s fwhdr;
-
-	/**
-	 * Firmware match check is relevant only for CNA.
-	 */
-	if (!ioc->cna)
-		return BFA_TRUE;
-
-	/**
-	 * If bios boot (flash based) -- do not increment usage count
-	 */
-	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
-		return BFA_TRUE;
-
-	bfa_ioc_usage_sem_get(ioc);
-	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
-
-	/**
-	 * If usage count is 0, always return TRUE.
-	 */
-	if (usecnt == 0) {
-		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
-		bfa_ioc_usage_sem_release(ioc);
-		bfa_trc(ioc, usecnt);
-		return BFA_TRUE;
-	}
-
-	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
-	bfa_trc(ioc, ioc_fwstate);
-
-	/**
-	 * Use count cannot be non-zero and chip in uninitialized state.
-	 */
-	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
-
-	/**
-	 * Check if another driver with a different firmware is active
-	 */
-	bfa_ioc_fwver_get(ioc, &fwhdr);
-	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
-		bfa_ioc_usage_sem_release(ioc);
-		bfa_trc(ioc, usecnt);
-		return BFA_FALSE;
-	}
-
-	/**
-	 * Same firmware version. Increment the reference count.
-	 */
-	usecnt++;
-	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
-	bfa_ioc_usage_sem_release(ioc);
-	bfa_trc(ioc, usecnt);
-	return BFA_TRUE;
-}
-
-static void
-bfa_ioc_firmware_unlock(struct bfa_ioc_s *ioc)
-{
-	u32        usecnt;
-
-	/**
-	 * Firmware lock is relevant only for CNA.
-	 * If bios boot (flash based) -- do not decrement usage count
-	 */
-	if (!ioc->cna || (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ))
-		return;
-
-	/**
-	 * decrement usage count
-	 */
-	bfa_ioc_usage_sem_get(ioc);
-	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
-	bfa_assert(usecnt > 0);
-
-	usecnt--;
-	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
-	bfa_trc(ioc, usecnt);
-
-	bfa_ioc_usage_sem_release(ioc);
-}
-
-/**
  * Conditionally flush any pending message from firmware at start.
  */
 static void
@@ -1152,33 +1072,27 @@
 static void
 bfa_ioc_hb_check(void *cbarg)
 {
-	struct bfa_ioc_s *ioc = cbarg;
-	u32        hb_count;
+	struct bfa_ioc_s  *ioc = cbarg;
+	u32     hb_count;
 
 	hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
 	if (ioc->hb_count == hb_count) {
-		ioc->hb_fail++;
-	} else {
-		ioc->hb_count = hb_count;
-		ioc->hb_fail = 0;
-	}
-
-	if (ioc->hb_fail >= BFA_IOC_HB_FAIL_MAX) {
-		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE, hb_count);
-		ioc->hb_fail = 0;
+		bfa_log(ioc->logm, BFA_LOG_HAL_HEARTBEAT_FAILURE,
+			hb_count);
 		bfa_ioc_recover(ioc);
 		return;
+	} else {
+		ioc->hb_count = hb_count;
 	}
 
 	bfa_ioc_mbox_poll(ioc);
-	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
-			BFA_IOC_HB_TOV);
+	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check,
+			ioc, BFA_IOC_HB_TOV);
 }
 
 static void
 bfa_ioc_hb_monitor(struct bfa_ioc_s *ioc)
 {
-	ioc->hb_fail = 0;
 	ioc->hb_count = bfa_reg_read(ioc->ioc_regs.heartbeat);
 	bfa_timer_begin(ioc->timer_mod, &ioc->ioc_timer, bfa_ioc_hb_check, ioc,
 			BFA_IOC_HB_TOV);
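
The reworked heartbeat samples a counter that the firmware increments;
if two consecutive samples match, recovery starts immediately rather
than after BFA_IOC_HB_FAIL_MAX misses as before. A sketch of that
comparison:

#include <stdio.h>

static unsigned int hb_reg;	/* stands in for the heartbeat register */

static int hb_check(unsigned int *last)
{
	unsigned int now = hb_reg;

	if (now == *last)
		return -1;	/* stalled: trigger recovery */
	*last = now;
	return 0;		/* alive: re-arm the timer */
}

int main(void)
{
	unsigned int last = hb_reg;

	hb_reg++;				/* firmware made progress */
	printf("%d\n", hb_check(&last));	/* 0 */
	printf("%d\n", hb_check(&last));	/* -1: no progress */
	return 0;
}
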
@@ -1191,112 +1105,6 @@
 }
 
 /**
- * Host to LPU mailbox message addresses
- */
-static struct {
-	u32        hfn_mbox, lpu_mbox, hfn_pgn;
-} iocreg_fnreg[] = {
-	{
-	HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0}, {
-	HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1}, {
-	HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2}, {
-	HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3}
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 0
- */
-static struct {
-	u32        hfn, lpu;
-} iocreg_mbcmd_p0[] = {
-	{
-	HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT}, {
-	HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT}, {
-	HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT}, {
-	HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT}
-};
-
-/**
- * Host <-> LPU mailbox command/status registers - port 1
- */
-static struct {
-	u32        hfn, lpu;
-} iocreg_mbcmd_p1[] = {
-	{
-	HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT}, {
-	HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT}, {
-	HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT}, {
-	HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT}
-};
-
-/**
- * Shared IRQ handling in INTX mode
- */
-static struct {
-	u32        isr, msk;
-} iocreg_shirq_next[] = {
-	{
-	HOSTFN1_INT_STATUS, HOSTFN1_INT_MSK}, {
-	HOSTFN2_INT_STATUS, HOSTFN2_INT_MSK}, {
-	HOSTFN3_INT_STATUS, HOSTFN3_INT_MSK}, {
-HOSTFN0_INT_STATUS, HOSTFN0_INT_MSK},};
-
-static void
-bfa_ioc_reg_init(struct bfa_ioc_s *ioc)
-{
-	bfa_os_addr_t   rb;
-	int             pcifn = bfa_ioc_pcifn(ioc);
-
-	rb = bfa_ioc_bar0(ioc);
-
-	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
-	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
-	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
-
-	if (ioc->port_id == 0) {
-		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
-		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
-		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
-		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
-		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
-	} else {
-		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
-		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
-		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
-		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
-		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
-	}
-
-	/**
-	 * Shared IRQ handling in INTX mode
-	 */
-	ioc->ioc_regs.shirq_isr_next = rb + iocreg_shirq_next[pcifn].isr;
-	ioc->ioc_regs.shirq_msk_next = rb + iocreg_shirq_next[pcifn].msk;
-
-	/*
-	 * PSS control registers
-	 */
-	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
-	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
-	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
-
-	/*
-	 * IOC semaphore registers and serialization
-	 */
-	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
-	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
-	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
-
-	/**
-	 * sram memory access
-	 */
-	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
-	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
-	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT)
-		ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
-}
-
-/**
  *      Initiate a full firmware download.
  */
 static void
@@ -1321,9 +1129,6 @@
 	if (bfa_ioc_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
 		boot_type = BFI_BOOT_TYPE_FLASH;
 	fwimg = bfa_ioc_fwimg_get_chunk(ioc, chunkno);
-	fwimg[BFI_BOOT_TYPE_OFF / sizeof(u32)] = bfa_os_swap32(boot_type);
-	fwimg[BFI_BOOT_PARAM_OFF / sizeof(u32)] =
-		bfa_os_swap32(boot_param);
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	pgoff = bfa_ioc_smem_pgoff(ioc, loff);
@@ -1332,17 +1137,17 @@
 
 	for (i = 0; i < bfa_ioc_fwimg_get_size(ioc); i++) {
 
-		if (BFA_FLASH_CHUNK_NO(i) != chunkno) {
-			chunkno = BFA_FLASH_CHUNK_NO(i);
+		if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
+			chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
 			fwimg = bfa_ioc_fwimg_get_chunk(ioc,
-					BFA_FLASH_CHUNK_ADDR(chunkno));
+					BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
 		}
 
 		/**
 		 * write smem
 		 */
 		bfa_mem_write(ioc->ioc_regs.smem_page_start, loff,
-			      fwimg[BFA_FLASH_OFFSET_IN_CHUNK(i)]);
+			      fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)]);
 
 		loff += sizeof(u32);
 
@@ -1358,6 +1163,14 @@
 
 	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
 		      bfa_ioc_smem_pgnum(ioc, 0));
+
+	/*
+	 * Set boot type and boot param at the end.
+	 */
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_TYPE_OFF,
+			bfa_os_swap32(boot_type));
+	bfa_mem_write(ioc->ioc_regs.smem_page_start, BFI_BOOT_PARAM_OFF,
+			bfa_os_swap32(boot_param));
 }
 
 static void
@@ -1440,168 +1253,10 @@
 }
 
 /**
- * Initialize IOC to port mapping.
- */
-
-#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
-static void
-bfa_ioc_map_port(struct bfa_ioc_s *ioc)
-{
-	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
-	u32        r32;
-
-	/**
-	 * For crossbow, port id is same as pci function.
-	 */
-	if (ioc->pcidev.device_id != BFA_PCI_DEVICE_ID_CT) {
-		ioc->port_id = bfa_ioc_pcifn(ioc);
-		return;
-	}
-
-	/**
-	 * For catapult, base port id on personality register and IOC type
-	 */
-	r32 = bfa_reg_read(rb + FNC_PERS_REG);
-	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
-	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
-
-	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
-	bfa_trc(ioc, ioc->port_id);
-}
-
-
-
-/**
  *  bfa_ioc_public
  */
 
 /**
-* Set interrupt mode for a function: INTX or MSIX
- */
-void
-bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
-{
-	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
-	u32        r32, mode;
-
-	r32 = bfa_reg_read(rb + FNC_PERS_REG);
-	bfa_trc(ioc, r32);
-
-	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
-		__F0_INTX_STATUS;
-
-	/**
-	 * If already in desired mode, do not change anything
-	 */
-	if (!msix && mode)
-		return;
-
-	if (msix)
-		mode = __F0_INTX_STATUS_MSIX;
-	else
-		mode = __F0_INTX_STATUS_INTA;
-
-	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
-	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
-	bfa_trc(ioc, r32);
-
-	bfa_reg_write(rb + FNC_PERS_REG, r32);
-}
-
-bfa_status_t
-bfa_ioc_pll_init(struct bfa_ioc_s *ioc)
-{
-	bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
-	u32        pll_sclk, pll_fclk, r32;
-
-	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
-		pll_sclk =
-			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
-			__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(0U) |
-			__APP_PLL_312_JITLMT0_1(3U) |
-			__APP_PLL_312_CNTLMT0_1(1U);
-		pll_fclk =
-			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
-			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(0U) |
-			__APP_PLL_425_JITLMT0_1(3U) |
-			__APP_PLL_425_CNTLMT0_1(1U);
-
-		/**
-		 * 	For catapult, choose operational mode FC/FCoE
-		 */
-		if (ioc->fcmode) {
-			bfa_reg_write((rb + OP_MODE), 0);
-			bfa_reg_write((rb + ETH_MAC_SER_REG),
-				      __APP_EMS_CMLCKSEL | __APP_EMS_REFCKBUFEN2
-				      | __APP_EMS_CHANNEL_SEL);
-		} else {
-			ioc->pllinit = BFA_TRUE;
-			bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
-			bfa_reg_write((rb + ETH_MAC_SER_REG),
-				      __APP_EMS_REFCKBUFEN1);
-		}
-	} else {
-		pll_sclk =
-			__APP_PLL_312_ENABLE | __APP_PLL_312_LRESETN |
-			__APP_PLL_312_P0_1(3U) | __APP_PLL_312_JITLMT0_1(3U) |
-			__APP_PLL_312_CNTLMT0_1(3U);
-		pll_fclk =
-			__APP_PLL_425_ENABLE | __APP_PLL_425_LRESETN |
-			__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
-			__APP_PLL_425_JITLMT0_1(3U) |
-			__APP_PLL_425_CNTLMT0_1(3U);
-	}
-
-	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
-	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
-
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-		      __APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-		      __APP_PLL_312_BYPASS | __APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-		      __APP_PLL_425_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-		      __APP_PLL_425_BYPASS | __APP_PLL_425_LOGIC_SOFT_RESET);
-	bfa_os_udelay(2);
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-		      __APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-		      __APP_PLL_425_LOGIC_SOFT_RESET);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
-		      pll_sclk | __APP_PLL_312_LOGIC_SOFT_RESET);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
-		      pll_fclk | __APP_PLL_425_LOGIC_SOFT_RESET);
-
-	/**
-	 * Wait for PLLs to lock.
-	 */
-	bfa_os_udelay(2000);
-	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
-	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
-
-	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
-	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
-
-	if (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT) {
-		bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
-		bfa_os_udelay(1000);
-		r32 = bfa_reg_read((rb + MBIST_STAT_REG));
-		bfa_trc(ioc, r32);
-	}
-
-	return BFA_STATUS_OK;
-}
-
-/**
  * Interface used by diag module to do firmware boot with memory test
  * as the entry vector.
  */
@@ -1642,7 +1297,7 @@
 void
 bfa_ioc_auto_recover(bfa_boolean_t auto_recover)
 {
-	bfa_auto_recover = BFA_FALSE;
+	bfa_auto_recover = auto_recover;
 }
 
 
@@ -1764,6 +1419,14 @@
 	ioc->ctdev = (ioc->pcidev.device_id == BFA_PCI_DEVICE_ID_CT);
 	ioc->cna = ioc->ctdev && !ioc->fcmode;
 
+	/**
+	 * Set asic specific interfaces. See bfa_ioc_cb.c and bfa_ioc_ct.c
+	 */
+	if (ioc->ctdev)
+		bfa_ioc_set_ct_hwif(ioc);
+	else
+		bfa_ioc_set_cb_hwif(ioc);
+
 	bfa_ioc_map_port(ioc);
 	bfa_ioc_reg_init(ioc);
 }
@@ -1830,7 +1493,6 @@
 void
 bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave)
 {
-	bfa_assert(ioc->auto_recover);
 	ioc->dbg_fwsave = dbg_fwsave;
 	ioc->dbg_fwsave_len = bfa_ioc_debug_trcsz(ioc->auto_recover);
 }
@@ -1973,7 +1635,7 @@
 	 ((__sm) == BFI_IOC_INITING) ||		\
 	 ((__sm) == BFI_IOC_HWINIT) ||		\
 	 ((__sm) == BFI_IOC_DISABLED) ||	\
-	 ((__sm) == BFI_IOC_HBFAIL) ||		\
+	 ((__sm) == BFI_IOC_FAIL) ||		\
 	 ((__sm) == BFI_IOC_CFG_DISABLED))
 
 /**
@@ -2017,46 +1679,28 @@
 			 struct bfa_adapter_attr_s *ad_attr)
 {
 	struct bfi_ioc_attr_s *ioc_attr;
-	char            model[BFA_ADAPTER_MODEL_NAME_LEN];
 
 	ioc_attr = ioc->attr;
-	bfa_os_memcpy((void *)&ad_attr->serial_num,
-		      (void *)ioc_attr->brcd_serialnum,
-		      BFA_ADAPTER_SERIAL_NUM_LEN);
 
-	bfa_os_memcpy(&ad_attr->fw_ver, ioc_attr->fw_version, BFA_VERSION_LEN);
-	bfa_os_memcpy(&ad_attr->optrom_ver, ioc_attr->optrom_version,
-		      BFA_VERSION_LEN);
-	bfa_os_memcpy(&ad_attr->manufacturer, BFA_MFG_NAME,
-		      BFA_ADAPTER_MFG_NAME_LEN);
+	bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
+	bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
+	bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
+	bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
 	bfa_os_memcpy(&ad_attr->vpd, &ioc_attr->vpd,
 		      sizeof(struct bfa_mfg_vpd_s));
 
-	ad_attr->nports = BFI_ADAPTER_GETP(NPORTS, ioc_attr->adapter_prop);
-	ad_attr->max_speed = BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
+	ad_attr->nports = bfa_ioc_get_nports(ioc);
+	ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
 
-	/**
-	 * model name
-	 */
-	if (BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop) == 10) {
-		strcpy(model, "BR-10?0");
-		model[5] = '0' + ad_attr->nports;
-	} else {
-		strcpy(model, "Brocade-??5");
-		model[8] =
-			'0' + BFI_ADAPTER_GETP(SPEED, ioc_attr->adapter_prop);
-		model[9] = '0' + ad_attr->nports;
-	}
+	bfa_ioc_get_adapter_model(ioc, ad_attr->model);
+	/* For now, model descr uses same model string */
+	bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
 
 	if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
 		ad_attr->prototype = 1;
 	else
 		ad_attr->prototype = 0;
 
-	bfa_os_memcpy(&ad_attr->model, model, BFA_ADAPTER_MODEL_NAME_LEN);
-	bfa_os_memcpy(&ad_attr->model_descr, &ad_attr->model,
-		      BFA_ADAPTER_MODEL_NAME_LEN);
-
 	ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
 	ad_attr->mac = bfa_ioc_get_mac(ioc);
 
@@ -2064,41 +1708,122 @@
 	ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
 	ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
 	ad_attr->asic_rev = ioc_attr->asic_rev;
-	ad_attr->hw_ver[0] = 'R';
-	ad_attr->hw_ver[1] = 'e';
-	ad_attr->hw_ver[2] = 'v';
-	ad_attr->hw_ver[3] = '-';
-	ad_attr->hw_ver[4] = ioc_attr->asic_rev;
-	ad_attr->hw_ver[5] = '\0';
+
+	bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
 
 	ad_attr->cna_capable = ioc->cna;
 }
 
+enum bfa_ioc_type_e
+bfa_ioc_get_type(struct bfa_ioc_s *ioc)
+{
+	if (!ioc->ctdev || ioc->fcmode)
+		return BFA_IOC_TYPE_FC;
+	else if (ioc->ioc_mc == BFI_MC_IOCFC)
+		return BFA_IOC_TYPE_FCoE;
+	else if (ioc->ioc_mc == BFI_MC_LL)
+		return BFA_IOC_TYPE_LL;
+	else {
+		bfa_assert(ioc->ioc_mc == BFI_MC_LL);
+		return BFA_IOC_TYPE_LL;
+	}
+}
+
+void
+bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num)
+{
+	bfa_os_memset((void *)serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
+	bfa_os_memcpy((void *)serial_num,
+			(void *)ioc->attr->brcd_serialnum,
+			BFA_ADAPTER_SERIAL_NUM_LEN);
+}
+
+void
+bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver)
+{
+	bfa_os_memset((void *)fw_ver, 0, BFA_VERSION_LEN);
+	bfa_os_memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev)
+{
+	bfa_assert(chip_rev);
+
+	bfa_os_memset((void *)chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
+
+	chip_rev[0] = 'R';
+	chip_rev[1] = 'e';
+	chip_rev[2] = 'v';
+	chip_rev[3] = '-';
+	chip_rev[4] = ioc->attr->asic_rev;
+	chip_rev[5] = '\0';
+}
+
+void
+bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver)
+{
+	bfa_os_memset((void *)optrom_ver, 0, BFA_VERSION_LEN);
+	bfa_os_memcpy(optrom_ver, ioc->attr->optrom_version,
+		BFA_VERSION_LEN);
+}
+
+void
+bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc, char *manufacturer)
+{
+	bfa_os_memset((void *)manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
+	bfa_os_memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
+}
+
+void
+bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model)
+{
+	struct bfi_ioc_attr_s   *ioc_attr;
+	u8              nports;
+	u8              max_speed;
+
+	bfa_assert(model);
+	bfa_os_memset((void *)model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
+
+	ioc_attr = ioc->attr;
+
+	nports = bfa_ioc_get_nports(ioc);
+	max_speed = bfa_ioc_speed_sup(ioc);
+
+	/**
+	 * model name
+	 */
+	if (max_speed == 10) {
+		strcpy(model, "BR-10?0");
+		model[5] = '0' + nports;
+	} else {
+		strcpy(model, "Brocade-??5");
+		model[8] = '0' + max_speed;
+		model[9] = '0' + nports;
+	}
+}
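The model-name logic above writes a fixed template with strcpy() and then patches single digits in place. A minimal standalone illustration with sample values (2 ports, 8 Gbps); nothing here is taken from the real adapter properties:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char model[16];
	unsigned char nports = 2, max_speed = 8;	/* sample values only */

	if (max_speed == 10) {
		strcpy(model, "BR-10?0");
		model[5] = '0' + nports;	/* e.g. BR-1020 */
	} else {
		strcpy(model, "Brocade-??5");
		model[8] = '0' + max_speed;	/* speed digit */
		model[9] = '0' + nports;	/* e.g. Brocade-825 */
	}
	printf("%s\n", model);
	return 0;
}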
+
+enum bfa_ioc_state
+bfa_ioc_get_state(struct bfa_ioc_s *ioc)
+{
+	return bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+}
+
 void
 bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr)
 {
 	bfa_os_memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr_s));
 
-	ioc_attr->state = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
+	ioc_attr->state = bfa_ioc_get_state(ioc);
 	ioc_attr->port_id = ioc->port_id;
 
-	if (!ioc->ctdev)
-		ioc_attr->ioc_type = BFA_IOC_TYPE_FC;
-	else if (ioc->ioc_mc == BFI_MC_IOCFC)
-		ioc_attr->ioc_type = BFA_IOC_TYPE_FCoE;
-	else if (ioc->ioc_mc == BFI_MC_LL)
-		ioc_attr->ioc_type = BFA_IOC_TYPE_LL;
+	ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
 
 	bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
 
 	ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
 	ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
-	ioc_attr->pci_attr.chip_rev[0] = 'R';
-	ioc_attr->pci_attr.chip_rev[1] = 'e';
-	ioc_attr->pci_attr.chip_rev[2] = 'v';
-	ioc_attr->pci_attr.chip_rev[3] = '-';
-	ioc_attr->pci_attr.chip_rev[4] = ioc_attr->adapter_attr.asic_rev;
-	ioc_attr->pci_attr.chip_rev[5] = '\0';
+	bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
 }
 
 /**
@@ -2195,29 +1920,6 @@
 }
 
 /**
- * Return true if interrupt should be claimed.
- */
-bfa_boolean_t
-bfa_ioc_intx_claim(struct bfa_ioc_s *ioc)
-{
-	u32        isr, msk;
-
-	/**
-	 * Always claim if not catapult.
-	 */
-	if (!ioc->ctdev)
-		return BFA_TRUE;
-
-	/**
-	 * FALSE if next device is claiming interrupt.
-	 * TRUE if next device is not interrupting or not present.
-	 */
-	msk = bfa_reg_read(ioc->ioc_regs.shirq_msk_next);
-	isr = bfa_reg_read(ioc->ioc_regs.shirq_isr_next);
-	return !(isr & ~msk);
-}
-
-/**
  * Send AEN notification
  */
 static void
@@ -2226,32 +1928,14 @@
 	union bfa_aen_data_u aen_data;
 	struct bfa_log_mod_s *logmod = ioc->logm;
 	s32         inst_num = 0;
-	struct bfa_ioc_attr_s ioc_attr;
+	enum bfa_ioc_type_e ioc_type;
 
-	switch (event) {
-	case BFA_IOC_AEN_HBGOOD:
-		bfa_log(logmod, BFA_AEN_IOC_HBGOOD, inst_num);
-		break;
-	case BFA_IOC_AEN_HBFAIL:
-		bfa_log(logmod, BFA_AEN_IOC_HBFAIL, inst_num);
-		break;
-	case BFA_IOC_AEN_ENABLE:
-		bfa_log(logmod, BFA_AEN_IOC_ENABLE, inst_num);
-		break;
-	case BFA_IOC_AEN_DISABLE:
-		bfa_log(logmod, BFA_AEN_IOC_DISABLE, inst_num);
-		break;
-	case BFA_IOC_AEN_FWMISMATCH:
-		bfa_log(logmod, BFA_AEN_IOC_FWMISMATCH, inst_num);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_IOC, event), inst_num);
 
 	memset(&aen_data.ioc.pwwn, 0, sizeof(aen_data.ioc.pwwn));
 	memset(&aen_data.ioc.mac, 0, sizeof(aen_data.ioc.mac));
-	bfa_ioc_get_attr(ioc, &ioc_attr);
-	switch (ioc_attr.ioc_type) {
+	ioc_type = bfa_ioc_get_type(ioc);
+	switch (ioc_type) {
 	case BFA_IOC_TYPE_FC:
 		aen_data.ioc.pwwn = bfa_ioc_get_pwwn(ioc);
 		break;
@@ -2263,10 +1947,10 @@
 		aen_data.ioc.mac = bfa_ioc_get_mac(ioc);
 		break;
 	default:
-		bfa_assert(ioc_attr.ioc_type == BFA_IOC_TYPE_FC);
+		bfa_assert(ioc_type == BFA_IOC_TYPE_FC);
 		break;
 	}
-	aen_data.ioc.ioc_type = ioc_attr.ioc_type;
+	aen_data.ioc.ioc_type = ioc_type;
 }
 
 /**
@@ -2290,6 +1974,15 @@
 }
 
 /**
+ * Clear saved firmware trace
+ */
+void
+bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc)
+{
+	ioc->dbg_fwsave_once = BFA_TRUE;
+}
+
+/**
  * Retrieve saved firmware trace from a prior IOC failure.
  */
 bfa_status_t
@@ -2304,6 +1997,13 @@
 
 	pgnum = bfa_ioc_smem_pgnum(ioc, loff);
 	loff = bfa_ioc_smem_pgoff(ioc, loff);
+
+	/*
+	 *  Hold semaphore to serialize pll init and fwtrc.
+	 */
+	if (BFA_FALSE == bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg))
+		return BFA_STATUS_FAILED;
+
 	bfa_reg_write(ioc->ioc_regs.host_page_num_fn, pgnum);
 
 	tlen = *trclen;
@@ -2329,6 +2029,12 @@
 	}
 	bfa_reg_write(ioc->ioc_regs.host_page_num_fn,
 		      bfa_ioc_smem_pgnum(ioc, 0));
+
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
 	bfa_trc(ioc, pgnum);
 
 	*trclen = tlen * sizeof(u32);
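The hunk above brackets the shared-memory access with the new init semaphore so a concurrent PLL init cannot interleave with the trace read. A minimal sketch of the try/bail/release shape, with stand-in names for the driver's semaphore calls:

#include <stdbool.h>
#include <stdio.h>

static bool sem_get(void)     { return true; }	/* stands in for bfa_ioc_sem_get */
static void sem_release(void) { }		/* stands in for bfa_ioc_sem_release */

static int read_fwtrc(void)
{
	if (!sem_get())
		return -1;	/* pll init owns the chip right now */

	/* ... page in and copy the trace words ... */

	sem_release();		/* released on every successful path */
	return 0;
}

int main(void)
{
	printf("fwtrc: %d\n", read_fwtrc());
	return 0;
}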
diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
index 7c30f05..d080440 100644
--- a/drivers/scsi/bfa/bfa_ioc.h
+++ b/drivers/scsi/bfa/bfa_ioc.h
@@ -74,15 +74,18 @@
 	bfa_os_addr_t   lpu_mbox_cmd;
 	bfa_os_addr_t   lpu_mbox;
 	bfa_os_addr_t   pss_ctl_reg;
+	bfa_os_addr_t   pss_err_status_reg;
 	bfa_os_addr_t   app_pll_fast_ctl_reg;
 	bfa_os_addr_t   app_pll_slow_ctl_reg;
 	bfa_os_addr_t   ioc_sem_reg;
 	bfa_os_addr_t   ioc_usage_sem_reg;
+	bfa_os_addr_t   ioc_init_sem_reg;
 	bfa_os_addr_t   ioc_usage_reg;
 	bfa_os_addr_t   host_page_num_fn;
 	bfa_os_addr_t   heartbeat;
 	bfa_os_addr_t   ioc_fwstate;
 	bfa_os_addr_t   ll_halt;
+	bfa_os_addr_t   err_set;
 	bfa_os_addr_t   shirq_isr_next;
 	bfa_os_addr_t   shirq_msk_next;
 	bfa_os_addr_t   smem_page_start;
@@ -154,7 +157,6 @@
 	struct bfa_timer_s 	ioc_timer;
 	struct bfa_timer_s 	sem_timer;
 	u32		hb_count;
-	u32		hb_fail;
 	u32		retry_count;
 	struct list_head		hb_notify_q;
 	void			*dbg_fwsave;
@@ -177,6 +179,22 @@
 	struct bfi_ioc_attr_s	*attr;
 	struct bfa_ioc_cbfn_s	*cbfn;
 	struct bfa_ioc_mbox_mod_s mbox_mod;
+	struct bfa_ioc_hwif_s   *ioc_hwif;
+};
+
+struct bfa_ioc_hwif_s {
+	bfa_status_t    (*ioc_pll_init) (struct bfa_ioc_s *ioc);
+	bfa_boolean_t   (*ioc_firmware_lock)    (struct bfa_ioc_s *ioc);
+	void            (*ioc_firmware_unlock)  (struct bfa_ioc_s *ioc);
+	u32 *   	(*ioc_fwimg_get_chunk)  (struct bfa_ioc_s *ioc,
+						u32 off);
+	u32		(*ioc_fwimg_get_size)   (struct bfa_ioc_s *ioc);
+	void		(*ioc_reg_init) (struct bfa_ioc_s *ioc);
+	void		(*ioc_map_port) (struct bfa_ioc_s *ioc);
+	void		(*ioc_isr_mode_set)     (struct bfa_ioc_s *ioc,
+						bfa_boolean_t msix);
+	void            (*ioc_notify_hbfail)    (struct bfa_ioc_s *ioc);
+	void            (*ioc_ownership_reset)  (struct bfa_ioc_s *ioc);
 };
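The new bfa_ioc_hwif_s ops table is what lets the crossbow and catapult code move into bfa_ioc_cb.c and bfa_ioc_ct.c while the core keeps a single call site. A standalone model of the dispatch pattern; everything here except the pattern itself is illustrative:

#include <stdio.h>

struct hwif {
	void (*reg_init)(void);
};

static void cb_reg_init(void) { printf("crossbow reg init\n"); }
static void ct_reg_init(void) { printf("catapult reg init\n"); }

static const struct hwif hwif_cb = { cb_reg_init };
static const struct hwif hwif_ct = { ct_reg_init };

int main(void)
{
	int is_catapult = 1;	/* would come from the PCI device id */
	const struct hwif *ops = is_catapult ? &hwif_ct : &hwif_cb;

	ops->reg_init();	/* same call site, asic-specific body */
	return 0;
}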
 
 #define bfa_ioc_pcifn(__ioc)		((__ioc)->pcidev.pci_func)
@@ -191,6 +209,15 @@
 #define bfa_ioc_rx_bbcredit(__ioc)	((__ioc)->attr->rx_bbcredit)
 #define bfa_ioc_speed_sup(__ioc)	\
 	BFI_ADAPTER_GETP(SPEED, (__ioc)->attr->adapter_prop)
+#define bfa_ioc_get_nports(__ioc)       \
+	BFI_ADAPTER_GETP(NPORTS, (__ioc)->attr->adapter_prop)
+
+#define bfa_ioc_stats(_ioc, _stats)     ((_ioc)->stats._stats++)
+#define BFA_IOC_FWIMG_MINSZ     (16 * 1024)
+
+#define BFA_IOC_FLASH_CHUNK_NO(off)             (off / BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_OFFSET_IN_CHUNK(off)      (off % BFI_FLASH_CHUNK_SZ_WORDS)
+#define BFA_IOC_FLASH_CHUNK_ADDR(chunkno)  (chunkno * BFI_FLASH_CHUNK_SZ_WORDS)
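The renamed flash-chunk macros are plain index arithmetic: a linear word offset splits into a chunk number and an offset inside that chunk. A standalone sketch, assuming a chunk size of 256 words and adding the argument parentheses the kernel macros leave out:

#include <stdio.h>

#define FLASH_CHUNK_SZ_WORDS	256	/* assumed; not the BFI value */

#define CHUNK_NO(off)		((off) / FLASH_CHUNK_SZ_WORDS)
#define OFFSET_IN_CHUNK(off)	((off) % FLASH_CHUNK_SZ_WORDS)
#define CHUNK_ADDR(chunkno)	((chunkno) * FLASH_CHUNK_SZ_WORDS)

int main(void)
{
	unsigned int off = 1000;	/* word index into the fw image */

	printf("chunk %u, word %u, chunk base %u\n",
	       CHUNK_NO(off), OFFSET_IN_CHUNK(off),
	       CHUNK_ADDR(CHUNK_NO(off)));
	return 0;
}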
 
 /**
  * IOC mailbox interface
@@ -207,6 +234,14 @@
 /**
  * IOC interfaces
  */
+#define bfa_ioc_pll_init(__ioc) ((__ioc)->ioc_hwif->ioc_pll_init(__ioc))
+#define bfa_ioc_isr_mode_set(__ioc, __msix)                     \
+			((__ioc)->ioc_hwif->ioc_isr_mode_set(__ioc, __msix))
+#define bfa_ioc_ownership_reset(__ioc)                          \
+			((__ioc)->ioc_hwif->ioc_ownership_reset(__ioc))
+
+void bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc);
+void bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc);
 void bfa_ioc_attach(struct bfa_ioc_s *ioc, void *bfa,
 		struct bfa_ioc_cbfn_s *cbfn, struct bfa_timer_mod_s *timer_mod,
 		struct bfa_trc_mod_s *trcmod,
@@ -223,13 +258,21 @@
 void bfa_ioc_boot(struct bfa_ioc_s *ioc, u32 boot_type, u32 boot_param);
 void bfa_ioc_isr(struct bfa_ioc_s *ioc, struct bfi_mbmsg_s *msg);
 void bfa_ioc_error_isr(struct bfa_ioc_s *ioc);
-void bfa_ioc_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t intx);
-bfa_status_t bfa_ioc_pll_init(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_operational(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_is_disabled(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_fw_mismatch(struct bfa_ioc_s *ioc);
 bfa_boolean_t bfa_ioc_adapter_is_disabled(struct bfa_ioc_s *ioc);
 void bfa_ioc_cfg_complete(struct bfa_ioc_s *ioc);
+enum bfa_ioc_type_e bfa_ioc_get_type(struct bfa_ioc_s *ioc);
+void bfa_ioc_get_adapter_serial_num(struct bfa_ioc_s *ioc, char *serial_num);
+void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc_s *ioc, char *fw_ver);
+void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc_s *ioc, char *optrom_ver);
+void bfa_ioc_get_adapter_model(struct bfa_ioc_s *ioc, char *model);
+void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc_s *ioc,
+	char *manufacturer);
+void bfa_ioc_get_pci_chip_rev(struct bfa_ioc_s *ioc, char *chip_rev);
+enum bfa_ioc_state bfa_ioc_get_state(struct bfa_ioc_s *ioc);
+
 void bfa_ioc_get_attr(struct bfa_ioc_s *ioc, struct bfa_ioc_attr_s *ioc_attr);
 void bfa_ioc_get_adapter_attr(struct bfa_ioc_s *ioc,
 		struct bfa_adapter_attr_s *ad_attr);
@@ -237,6 +280,7 @@
 void bfa_ioc_debug_memclaim(struct bfa_ioc_s *ioc, void *dbg_fwsave);
 bfa_status_t bfa_ioc_debug_fwsave(struct bfa_ioc_s *ioc, void *trcdata,
 		int *trclen);
+void bfa_ioc_debug_fwsave_clear(struct bfa_ioc_s *ioc);
 bfa_status_t bfa_ioc_debug_fwtrc(struct bfa_ioc_s *ioc, void *trcdata,
 				 int *trclen);
 u32 bfa_ioc_smem_pgnum(struct bfa_ioc_s *ioc, u32 fmaddr);
@@ -245,6 +289,13 @@
 bfa_boolean_t bfa_ioc_get_fcmode(struct bfa_ioc_s *ioc);
 void bfa_ioc_hbfail_register(struct bfa_ioc_s *ioc,
 	struct bfa_ioc_hbfail_notify_s *notify);
+bfa_boolean_t bfa_ioc_sem_get(bfa_os_addr_t sem_reg);
+void bfa_ioc_sem_release(bfa_os_addr_t sem_reg);
+void bfa_ioc_hw_sem_release(struct bfa_ioc_s *ioc);
+void bfa_ioc_fwver_get(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *fwhdr);
+bfa_boolean_t bfa_ioc_fwver_cmp(struct bfa_ioc_s *ioc,
+			struct bfi_ioc_image_hdr_s *fwhdr);
 
 /*
  * bfa mfg wwn API functions
diff --git a/drivers/scsi/bfa/bfa_ioc_cb.c b/drivers/scsi/bfa/bfa_ioc_cb.c
new file mode 100644
index 0000000..3ce8531
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_cb.c
@@ -0,0 +1,274 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_ioc.h>
+#include <bfa_fwimg_priv.h>
+#include <cna/bfa_cna_trcmod.h>
+#include <cs/bfa_debug.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_cbreg.h>
+#include <log/bfa_log_hal.h>
+#include <defs/bfa_defs_pci.h>
+
+BFA_TRC_FILE(CNA, IOC_CB);
+
+/*
+ * forward declarations
+ */
+static bfa_status_t bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc);
+static u32  *bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off);
+static u32 bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc);
+
+struct bfa_ioc_hwif_s hwif_cb = {
+	bfa_ioc_cb_pll_init,
+	bfa_ioc_cb_firmware_lock,
+	bfa_ioc_cb_firmware_unlock,
+	bfa_ioc_cb_fwimg_get_chunk,
+	bfa_ioc_cb_fwimg_get_size,
+	bfa_ioc_cb_reg_init,
+	bfa_ioc_cb_map_port,
+	bfa_ioc_cb_isr_mode_set,
+	bfa_ioc_cb_notify_hbfail,
+	bfa_ioc_cb_ownership_reset,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_cb_hwif(struct bfa_ioc_s *ioc)
+{
+	ioc->ioc_hwif = &hwif_cb;
+}
+
+static u32 *
+bfa_ioc_cb_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
+{
+	return bfi_image_cb_get_chunk(off);
+}
+
+static u32
+bfa_ioc_cb_fwimg_get_size(struct bfa_ioc_s *ioc)
+{
+	return bfi_image_cb_size;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_cb_firmware_lock(struct bfa_ioc_s *ioc)
+{
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioc_cb_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+}
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_cb_notify_hbfail(struct bfa_ioc_s *ioc)
+{
+	bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
+	bfa_reg_read(ioc->ioc_regs.err_set);
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd[] = {
+	{ HOSTFN0_LPU0_CMD_STAT, LPU0_HOSTFN0_CMD_STAT },
+	{ HOSTFN1_LPU1_CMD_STAT, LPU1_HOSTFN1_CMD_STAT }
+};
+
+static void
+bfa_ioc_cb_reg_init(struct bfa_ioc_s *ioc)
+{
+	bfa_os_addr_t	rb;
+	int		pcifn = bfa_ioc_pcifn(ioc);
+
+	rb = bfa_ioc_bar0(ioc);
+
+	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+	if (ioc->port_id == 0) {
+		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+	} else {
+		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+	}
+
+	/**
+	 * Host <-> LPU mailbox command/status registers
+	 */
+	ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd[pcifn].hfn;
+	ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd[pcifn].lpu;
+
+	/*
+	 * PSS control registers
+	 */
+	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_400_CTL_REG);
+	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_212_CTL_REG);
+
+	/*
+	 * IOC semaphore registers and serialization
+	 */
+	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+
+	/**
+	 * sram memory access
+	 */
+	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CB;
+
+	/*
+	 * err set reg : for notification of hb failure
+	 */
+	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+static void
+bfa_ioc_cb_map_port(struct bfa_ioc_s *ioc)
+{
+	/**
+	 * For crossbow, port id is same as pci function.
+	 */
+	ioc->port_id = bfa_ioc_pcifn(ioc);
+	bfa_trc(ioc, ioc->port_id);
+}
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_cb_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+}
+
+static bfa_status_t
+bfa_ioc_cb_pll_init(struct bfa_ioc_s *ioc)
+{
+	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	u32	pll_sclk, pll_fclk;
+
+	/*
+	 *  Hold semaphore so that nobody can access the chip during init.
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+	pll_sclk = __APP_PLL_212_ENABLE | __APP_PLL_212_LRESETN |
+			__APP_PLL_212_P0_1(3U) |
+			__APP_PLL_212_JITLMT0_1(3U) |
+			__APP_PLL_212_CNTLMT0_1(3U);
+	pll_fclk = __APP_PLL_400_ENABLE | __APP_PLL_400_LRESETN |
+			__APP_PLL_400_RSEL200500 | __APP_PLL_400_P0_1(3U) |
+			__APP_PLL_400_JITLMT0_1(3U) |
+			__APP_PLL_400_CNTLMT0_1(3U);
+
+	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
+	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
+
+	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+			__APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+			__APP_PLL_212_BYPASS |
+			__APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+			__APP_PLL_400_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+			__APP_PLL_400_BYPASS |
+			__APP_PLL_400_LOGIC_SOFT_RESET);
+	bfa_os_udelay(2);
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+			__APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+			__APP_PLL_400_LOGIC_SOFT_RESET);
+
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg,
+			pll_sclk | __APP_PLL_212_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg,
+			pll_fclk | __APP_PLL_400_LOGIC_SOFT_RESET);
+
+	/**
+	 * Wait for PLLs to lock.
+	 */
+	bfa_os_udelay(2000);
+	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk);
+
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_cb_ownership_reset(struct bfa_ioc_s *ioc)
+{
+
+	/*
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
+	 */
+	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_hw_sem_release(ioc);
+}
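The read-then-release sequence in bfa_ioc_cb_ownership_reset() only makes sense under the register semantics its comment describes: a read acquires the semaphore, and a write of 1 flips its state. A toy model inferred from that comment alone, not from a hardware spec:

#include <assert.h>
#include <stdint.h>

static uint32_t sem_locked;	/* 0 = free, 1 = held */

static uint32_t sem_read(void)	/* a read returns the old state and locks */
{
	uint32_t was = sem_locked;

	sem_locked = 1;
	return was;
}

static void sem_write_one(void)	/* a write of 1 flips the state */
{
	sem_locked = !sem_locked;
}

int main(void)
{
	/* ownership reset: the read guarantees "held", so the write
	 * that follows can only clear, never accidentally lock */
	sem_read();
	sem_write_one();
	assert(sem_locked == 0);
	return 0;
}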
diff --git a/drivers/scsi/bfa/bfa_ioc_ct.c b/drivers/scsi/bfa/bfa_ioc_ct.c
new file mode 100644
index 0000000..20b58ad
--- /dev/null
+++ b/drivers/scsi/bfa/bfa_ioc_ct.c
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+
+#include <bfa.h>
+#include <bfa_ioc.h>
+#include <bfa_fwimg_priv.h>
+#include <cna/bfa_cna_trcmod.h>
+#include <cs/bfa_debug.h>
+#include <bfi/bfi_ioc.h>
+#include <bfi/bfi_ctreg.h>
+#include <log/bfa_log_hal.h>
+#include <defs/bfa_defs_pci.h>
+
+BFA_TRC_FILE(CNA, IOC_CT);
+
+/*
+ * forward declarations
+ */
+static bfa_status_t bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc);
+static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
+static u32* bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc,
+					u32 off);
+static u32 bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
+static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
+static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);
+
+struct bfa_ioc_hwif_s hwif_ct = {
+	bfa_ioc_ct_pll_init,
+	bfa_ioc_ct_firmware_lock,
+	bfa_ioc_ct_firmware_unlock,
+	bfa_ioc_ct_fwimg_get_chunk,
+	bfa_ioc_ct_fwimg_get_size,
+	bfa_ioc_ct_reg_init,
+	bfa_ioc_ct_map_port,
+	bfa_ioc_ct_isr_mode_set,
+	bfa_ioc_ct_notify_hbfail,
+	bfa_ioc_ct_ownership_reset,
+};
+
+/**
+ * Called from bfa_ioc_attach() to map asic specific calls.
+ */
+void
+bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
+{
+	ioc->ioc_hwif = &hwif_ct;
+}
+
+static u32*
+bfa_ioc_ct_fwimg_get_chunk(struct bfa_ioc_s *ioc, u32 off)
+{
+	return bfi_image_ct_get_chunk(off);
+}
+
+static u32
+bfa_ioc_ct_fwimg_get_size(struct bfa_ioc_s *ioc)
+{
+	return bfi_image_ct_size;
+}
+
+/**
+ * Return true if firmware of current driver matches the running firmware.
+ */
+static bfa_boolean_t
+bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
+{
+	enum bfi_ioc_state ioc_fwstate;
+	u32 usecnt;
+	struct bfi_ioc_image_hdr_s fwhdr;
+
+	/**
+	 * Firmware match check is relevant only for CNA.
+	 */
+	if (!ioc->cna)
+		return BFA_TRUE;
+
+	/**
+	 * If bios boot (flash based) -- do not increment usage count
+	 */
+	if (bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+		return BFA_TRUE;
+
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+
+	/**
+	 * If usage count is 0, always return TRUE.
+	 */
+	if (usecnt == 0) {
+		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
+		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_trc(ioc, usecnt);
+		return BFA_TRUE;
+	}
+
+	ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
+	bfa_trc(ioc, ioc_fwstate);
+
+	/**
+	 * Use count cannot be non-zero and chip in uninitialized state.
+	 */
+	bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);
+
+	/**
+	 * Check if another driver with a different firmware is active
+	 */
+	bfa_ioc_fwver_get(ioc, &fwhdr);
+	if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
+		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_trc(ioc, usecnt);
+		return BFA_FALSE;
+	}
+
+	/**
+	 * Same firmware version. Increment the reference count.
+	 */
+	usecnt++;
+	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	bfa_trc(ioc, usecnt);
+	return BFA_TRUE;
+}
+
+static void
+bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
+{
+	u32 usecnt;
+
+	/**
+	 * Firmware lock is relevant only for CNA.
+	 * If bios boot (flash based) -- do not decrement usage count
+	 */
+	if (!ioc->cna || bfa_ioc_ct_fwimg_get_size(ioc) < BFA_IOC_FWIMG_MINSZ)
+		return;
+
+	/**
+	 * decrement usage count
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+	usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
+	bfa_assert(usecnt > 0);
+
+	usecnt--;
+	bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
+	bfa_trc(ioc, usecnt);
+
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+}
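On a CNA the FC and Ethernet functions share one firmware image, and the lock/unlock pair above arbitrates it with a use count behind a hardware semaphore. A userspace sketch of the same protocol, with a pthread mutex standing in for HOST_SEM1 and hypothetical names throughout:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t usage_sem = PTHREAD_MUTEX_INITIALIZER;
static unsigned int usecnt;		/* models the fw use-count register */

static int fw_lock(int ver_matches)
{
	int ok = 1;

	pthread_mutex_lock(&usage_sem);
	if (usecnt == 0)
		usecnt = 1;		/* first function boots the fw */
	else if (ver_matches)
		usecnt++;		/* join the running firmware */
	else
		ok = 0;			/* mismatch: caller must recover */
	pthread_mutex_unlock(&usage_sem);
	return ok;
}

static void fw_unlock(void)
{
	pthread_mutex_lock(&usage_sem);
	usecnt--;
	pthread_mutex_unlock(&usage_sem);
}

int main(void)
{
	printf("%d %d\n", fw_lock(1), fw_lock(1));	/* prints: 1 1 */
	fw_unlock();
	fw_unlock();
	return 0;
}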
+
+/**
+ * Notify other functions on HB failure.
+ */
+static void
+bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
+{
+	if (ioc->cna) {
+		bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
+		/* Wait for halt to take effect */
+		bfa_reg_read(ioc->ioc_regs.ll_halt);
+	} else {
+		bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
+		bfa_reg_read(ioc->ioc_regs.err_set);
+	}
+}
+
+/**
+ * Host to LPU mailbox message addresses
+ */
+static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
+	{ HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
+	{ HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
+	{ HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
+	{ HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 0
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
+	{ HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
+	{ HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
+	{ HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
+	{ HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+/**
+ * Host <-> LPU mailbox command/status registers - port 1
+ */
+static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
+	{ HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
+	{ HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
+	{ HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
+	{ HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
+};
+
+static void
+bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
+{
+	bfa_os_addr_t	rb;
+	int		pcifn = bfa_ioc_pcifn(ioc);
+
+	rb = bfa_ioc_bar0(ioc);
+
+	ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
+	ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
+	ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;
+
+	if (ioc->port_id == 0) {
+		ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
+		ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
+		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
+		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
+	} else {
+		ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
+		ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
+		ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
+		ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
+		ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
+	}
+
+	/*
+	 * PSS control registers
+	 */
+	ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
+	ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
+	ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
+	ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);
+
+	/*
+	 * IOC semaphore registers and serialization
+	 */
+	ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
+	ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
+	ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
+	ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);
+
+	/**
+	 * sram memory access
+	 */
+	ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
+	ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;
+
+	/*
+	 * err set reg : for notification of hb failure in fcmode
+	 */
+	ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
+}
+
+/**
+ * Initialize IOC to port mapping.
+ */
+
+#define FNC_PERS_FN_SHIFT(__fn)	((__fn) * 8)
+static void
+bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
+{
+	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	u32	r32;
+
+	/**
+	 * For catapult, base port id on personality register and IOC type
+	 */
+	r32 = bfa_reg_read(rb + FNC_PERS_REG);
+	r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
+	ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;
+
+	bfa_trc(ioc, bfa_ioc_pcifn(ioc));
+	bfa_trc(ioc, ioc->port_id);
+}
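Catapult packs one byte of personality per PCI function into FNC_PERS_REG, and the port id is a subfield of that byte. A toy decode; the shift matches the code above, but the mask and subfield position are assumed values, not the real __F0_PORT_MAP_* constants:

#include <stdint.h>
#include <stdio.h>

#define FN_SHIFT(fn)	((fn) * 8)
#define PORT_MAP_MK	0x30	/* assumed subfield mask */
#define PORT_MAP_SH	4

int main(void)
{
	uint32_t r32 = 0x10302010;	/* sample register contents */
	int fn;

	for (fn = 0; fn < 4; fn++) {
		uint32_t b = r32 >> FN_SHIFT(fn);

		printf("fn %d -> port %u\n", fn,
		       (b & PORT_MAP_MK) >> PORT_MAP_SH);
	}
	return 0;
}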
+
+/**
+ * Set interrupt mode for a function: INTX or MSIX
+ */
+static void
+bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
+{
+	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	u32	r32, mode;
+
+	r32 = bfa_reg_read(rb + FNC_PERS_REG);
+	bfa_trc(ioc, r32);
+
+	mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
+		__F0_INTX_STATUS;
+
+	/**
+	 * If already in desired mode, do not change anything
+	 */
+	if (!msix && mode)
+		return;
+
+	if (msix)
+		mode = __F0_INTX_STATUS_MSIX;
+	else
+		mode = __F0_INTX_STATUS_INTA;
+
+	r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+	r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
+	bfa_trc(ioc, r32);
+
+	bfa_reg_write(rb + FNC_PERS_REG, r32);
+}
+
+static bfa_status_t
+bfa_ioc_ct_pll_init(struct bfa_ioc_s *ioc)
+{
+	bfa_os_addr_t	rb = ioc->pcidev.pci_bar_kva;
+	u32	pll_sclk, pll_fclk, r32;
+
+	/*
+	 *  Hold semaphore so that nobody can access the chip during init.
+	 */
+	bfa_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
+
+	pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
+		__APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
+		__APP_PLL_312_JITLMT0_1(3U) |
+		__APP_PLL_312_CNTLMT0_1(1U);
+	pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
+		__APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
+		__APP_PLL_425_JITLMT0_1(3U) |
+		__APP_PLL_425_CNTLMT0_1(1U);
+
+	/**
+	 *	For catapult, choose operational mode FC/FCoE
+	 */
+	if (ioc->fcmode) {
+		bfa_reg_write((rb + OP_MODE), 0);
+		bfa_reg_write((rb + ETH_MAC_SER_REG),
+				__APP_EMS_CMLCKSEL |
+				__APP_EMS_REFCKBUFEN2 |
+				__APP_EMS_CHANNEL_SEL);
+	} else {
+		ioc->pllinit = BFA_TRUE;
+		bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
+		bfa_reg_write((rb + ETH_MAC_SER_REG),
+				 __APP_EMS_REFCKBUFEN1);
+	}
+
+	bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
+	bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
+
+	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
+
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+		__APP_PLL_312_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+		__APP_PLL_425_LOGIC_SOFT_RESET);
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+		__APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+		__APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
+
+	/**
+	 * Wait for PLLs to lock.
+	 */
+	bfa_reg_read(rb + HOSTFN0_INT_MSK);
+	bfa_os_udelay(2000);
+	bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
+	bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
+
+	bfa_reg_write(ioc->ioc_regs.app_pll_slow_ctl_reg, pll_sclk |
+		__APP_PLL_312_ENABLE);
+	bfa_reg_write(ioc->ioc_regs.app_pll_fast_ctl_reg, pll_fclk |
+		__APP_PLL_425_ENABLE);
+
+	bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
+	bfa_os_udelay(1000);
+	r32 = bfa_reg_read((rb + MBIST_STAT_REG));
+	bfa_trc(ioc, r32);
+	/*
+	 *  release semaphore.
+	 */
+	bfa_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
+
+	return BFA_STATUS_OK;
+}
+
+/**
+ * Cleanup hw semaphore and usecnt registers
+ */
+static void
+bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
+{
+
+	if (ioc->cna) {
+		bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
+		bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
+		bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
+	}
+
+	/*
+	 * Read the hw sem reg to make sure that it is locked
+	 * before we clear it. If it is not locked, writing 1
+	 * will lock it instead of clearing it.
+	 */
+	bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
+	bfa_ioc_hw_sem_release(ioc);
+}
diff --git a/drivers/scsi/bfa/bfa_iocfc.c b/drivers/scsi/bfa/bfa_iocfc.c
index d7ab792..a76de26 100644
--- a/drivers/scsi/bfa/bfa_iocfc.c
+++ b/drivers/scsi/bfa/bfa_iocfc.c
@@ -172,6 +172,7 @@
 	 */
 	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT) {
 		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
+		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
 		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
@@ -180,6 +181,7 @@
 		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
 	} else {
 		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
+		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
 		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
 		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
 		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
@@ -336,8 +338,10 @@
 			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
 		else
 			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
-	} else
-		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+	} else {
+		if (bfa->iocfc.cfgdone)
+			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
+	}
 }
 
 static void
@@ -619,8 +623,6 @@
 
 	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod,
 		bfa->trcmod, bfa->aen, bfa->logm);
-	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
-	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
 
 	/**
 	 * Choose FC (ssid: 0x1C) v/s FCoE (ssid: 0x14) mode.
@@ -628,6 +630,9 @@
 	if (0)
 		bfa_ioc_set_fcmode(&bfa->ioc);
 
+	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
+	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);
+
 	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
 	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
 	bfa_timer_init(&bfa->timer_mod);
@@ -654,7 +659,6 @@
 {
 	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
 	bfa_ioc_enable(&bfa->ioc);
-	bfa_msix_install(bfa);
 }
 
 /**
@@ -797,6 +801,11 @@
 		return BFA_STATUS_DEVBUSY;
 	}
 
+	if (!bfa_iocfc_is_operational(bfa)) {
+		bfa_trc(bfa, 0);
+		return BFA_STATUS_IOC_NON_OP;
+	}
+
 	iocfc->stats_busy = BFA_TRUE;
 	iocfc->stats_ret = stats;
 	iocfc->stats_cbfn = cbfn;
@@ -817,6 +826,11 @@
 		return BFA_STATUS_DEVBUSY;
 	}
 
+	if (!bfa_iocfc_is_operational(bfa)) {
+		bfa_trc(bfa, 0);
+		return BFA_STATUS_IOC_NON_OP;
+	}
+
 	iocfc->stats_busy = BFA_TRUE;
 	iocfc->stats_cbfn = cbfn;
 	iocfc->stats_cbarg = cbarg;
diff --git a/drivers/scsi/bfa/bfa_iocfc.h b/drivers/scsi/bfa/bfa_iocfc.h
index ce9a830..fbb4bdc 100644
--- a/drivers/scsi/bfa/bfa_iocfc.h
+++ b/drivers/scsi/bfa/bfa_iocfc.h
@@ -54,6 +54,7 @@
  */
 struct bfa_hwif_s {
 	void (*hw_reginit)(struct bfa_s *bfa);
+	void (*hw_reqq_ack)(struct bfa_s *bfa, int reqq);
 	void (*hw_rspq_ack)(struct bfa_s *bfa, int rspq);
 	void (*hw_msix_init)(struct bfa_s *bfa, int nvecs);
 	void (*hw_msix_install)(struct bfa_s *bfa);
@@ -143,6 +144,7 @@
 void bfa_msix_lpu_err(struct bfa_s *bfa, int vec);
 
 void bfa_hwcb_reginit(struct bfa_s *bfa);
+void bfa_hwcb_reqq_ack(struct bfa_s *bfa, int reqq);
 void bfa_hwcb_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwcb_msix_init(struct bfa_s *bfa, int nvecs);
 void bfa_hwcb_msix_install(struct bfa_s *bfa);
@@ -151,6 +153,7 @@
 void bfa_hwcb_msix_getvecs(struct bfa_s *bfa, u32 *vecmap,
 			u32 *nvecs, u32 *maxvec);
 void bfa_hwct_reginit(struct bfa_s *bfa);
+void bfa_hwct_reqq_ack(struct bfa_s *bfa, int reqq);
 void bfa_hwct_rspq_ack(struct bfa_s *bfa, int rspq);
 void bfa_hwct_msix_init(struct bfa_s *bfa, int nvecs);
 void bfa_hwct_msix_install(struct bfa_s *bfa);
diff --git a/drivers/scsi/bfa/bfa_ioim.c b/drivers/scsi/bfa/bfa_ioim.c
index f81d359..5b107ab 100644
--- a/drivers/scsi/bfa/bfa_ioim.c
+++ b/drivers/scsi/bfa/bfa_ioim.c
@@ -149,7 +149,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
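This bfa_assert(0) to bfa_sm_fault() substitution repeats in every default arm of the state machines in this file and in bfa_itnim.c, bfa_lps.c and bfa_rport.c below; the point is that the offending event value reaches the trace instead of vanishing into a bare assertion. A sketch with a hypothetical stand-in macro:

#include <stdio.h>

/* Stand-in for bfa_sm_fault(): unlike a bare assert(0), it records
 * which event hit the default arm (the real macro also asserts). */
#define sm_fault(event) \
	fprintf(stderr, "SM fault: unexpected event %d at %s:%d\n", \
		(event), __FILE__, __LINE__)

int main(void)
{
	int event = 42;

	switch (event) {
	case 0:
		break;
	default:
		sm_fault(event);
	}
	return 0;
}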
 
@@ -194,7 +194,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -259,7 +259,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -317,7 +317,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -377,7 +377,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -419,7 +419,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -467,7 +467,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -516,7 +516,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -544,7 +544,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -577,7 +577,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
@@ -605,7 +605,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ioim->bfa, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_itnim.c b/drivers/scsi/bfa/bfa_itnim.c
index eabf7d3..a914ff2 100644
--- a/drivers/scsi/bfa/bfa_itnim.c
+++ b/drivers/scsi/bfa/bfa_itnim.c
@@ -144,7 +144,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -175,7 +175,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -212,7 +212,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -247,7 +247,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -275,7 +275,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -317,7 +317,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -348,7 +348,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -385,7 +385,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -413,7 +413,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -442,7 +442,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -470,7 +470,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -502,7 +502,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -538,7 +538,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -559,7 +559,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
@@ -583,7 +583,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->bfa, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_lps.c b/drivers/scsi/bfa/bfa_lps.c
index 9844b45..ad06f61 100644
--- a/drivers/scsi/bfa/bfa_lps.c
+++ b/drivers/scsi/bfa/bfa_lps.c
@@ -18,6 +18,7 @@
 #include <bfa.h>
 #include <bfi/bfi_lps.h>
 #include <cs/bfa_debug.h>
+#include <defs/bfa_defs_pci.h>
 
 BFA_TRC_FILE(HAL, LPS);
 BFA_MODULE(lps);
@@ -25,6 +26,12 @@
 #define BFA_LPS_MIN_LPORTS	(1)
 #define BFA_LPS_MAX_LPORTS	(256)
 
+/*
+ * Maximum Vports supported per physical port or vf.
+ */
+#define BFA_LPS_MAX_VPORTS_SUPP_CB  255
+#define BFA_LPS_MAX_VPORTS_SUPP_CT  190
+
 /**
  * forward declarations
  */
@@ -49,7 +56,7 @@
 static void bfa_lps_send_logout(struct bfa_lps_s *lps);
 static void bfa_lps_login_comp(struct bfa_lps_s *lps);
 static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
-
+static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
 
 /**
  *  lps_pvt BFA LPS private functions
@@ -62,6 +69,7 @@
 	BFA_LPS_SM_RESUME	= 4,	/* space present in reqq queue	*/
 	BFA_LPS_SM_DELETE	= 5,	/* lps delete from user		*/
 	BFA_LPS_SM_OFFLINE	= 6,	/* Link is offline		*/
+	BFA_LPS_SM_RX_CVL       = 7,	/* Rx clear virtual link        */
 };
 
 static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
@@ -91,6 +99,12 @@
 			bfa_sm_set_state(lps, bfa_lps_sm_login);
 			bfa_lps_send_login(lps);
 		}
+		if (lps->fdisc)
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGIN, 0, "FDISC Request");
+		else
+			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGIN, 0, "FLOGI Request");
 		break;
 
 	case BFA_LPS_SM_LOGOUT:
@@ -101,6 +115,7 @@
 		bfa_lps_free(lps);
 		break;
 
+	case BFA_LPS_SM_RX_CVL:
 	case BFA_LPS_SM_OFFLINE:
 		break;
 
@@ -112,7 +127,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -127,10 +142,25 @@
 
 	switch (event) {
 	case BFA_LPS_SM_FWRSP:
-		if (lps->status == BFA_STATUS_OK)
+		if (lps->status == BFA_STATUS_OK) {
 			bfa_sm_set_state(lps, bfa_lps_sm_online);
-		else
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FDISC Accept");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
+		} else {
 			bfa_sm_set_state(lps, bfa_lps_sm_init);
+			if (lps->fdisc)
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0,
+				"FDISC Fail (RJT or timeout)");
+			else
+				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+				BFA_PL_EID_LOGIN, 0,
+				"FLOGI Fail (RJT or timeout)");
+		}
 		bfa_lps_login_comp(lps);
 		break;
 
@@ -139,7 +169,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -162,8 +192,16 @@
 		bfa_reqq_wcancel(&lps->wqe);
 		break;
 
+	case BFA_LPS_SM_RX_CVL:
+		/*
+		 * Login was not even sent out; so when getting out
+		 * of this state, it will appear like a login retry
+		 * after Clear virtual link
+		 */
+		break;
+
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -185,6 +223,17 @@
 			bfa_sm_set_state(lps, bfa_lps_sm_logout);
 			bfa_lps_send_logout(lps);
 		}
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_LOGO, 0, "Logout");
+		break;
+
+	case BFA_LPS_SM_RX_CVL:
+		bfa_sm_set_state(lps, bfa_lps_sm_init);
+
+		/* Let the vport module know about this event */
+		bfa_lps_cvl_event(lps);
+		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
+			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
 		break;
 
 	case BFA_LPS_SM_OFFLINE:
@@ -193,7 +242,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -217,7 +266,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -242,7 +291,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(lps->bfa, event);
 	}
 }
 
@@ -396,6 +445,20 @@
 }
 
 /**
+ * Firmware received a Clear virtual link request (for FCoE)
+ */
+static void
+bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
+{
+	struct bfa_lps_mod_s    *mod = BFA_LPS_MOD(bfa);
+	struct bfa_lps_s        *lps;
+
+	lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
+
+	bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
+}
+
+/**
  * Space is available in request queue, resume queueing request to firmware.
  */
 static void
@@ -531,7 +594,48 @@
 		bfa_cb_lps_flogo_comp(lps->bfa->bfad, lps->uarg);
 }
 
+/**
+ * Clear virtual link completion handler for non-fcs
+ */
+static void
+bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
+{
+	struct bfa_lps_s *lps   = arg;
 
+	if (!complete)
+		return;
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
+
+/**
+ * Received Clear virtual link event -- direct call for fcs,
+ * queue for others
+ */
+static void
+bfa_lps_cvl_event(struct bfa_lps_s *lps)
+{
+	if (!lps->bfa->fcs) {
+		bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
+				lps);
+		return;
+	}
+
+	/* Clear virtual link to base port will result in link down */
+	if (lps->fdisc)
+		bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
+}
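bfa_lps_cvl_event() calls the notification back synchronously when the caller is FCS but defers through the callback queue otherwise, presumably because only the FCS context is safe to re-enter directly. A standalone model of that split; the one-slot queue and all names are illustrative:

#include <stdio.h>

typedef void (*cb_fn)(void *arg);

static cb_fn pending_cb;	/* one-slot stand-in for the cb queue */
static void *pending_arg;

static void cvl_notify(void *arg) { printf("CVL for %s\n", (char *)arg); }

static void post_cvl(int caller_is_fcs, void *arg)
{
	if (caller_is_fcs) {
		cvl_notify(arg);	/* safe to call back synchronously */
	} else {
		pending_cb = cvl_notify;	/* defer to a later context */
		pending_arg = arg;
	}
}

int main(void)
{
	post_cvl(1, "fcs port");
	post_cvl(0, "other port");
	if (pending_cb)
		pending_cb(pending_arg);	/* drained later by the core */
	return 0;
}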
+
+u32
+bfa_lps_get_max_vport(struct bfa_s *bfa)
+{
+	if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
+		return BFA_LPS_MAX_VPORTS_SUPP_CT;
+	else
+		return BFA_LPS_MAX_VPORTS_SUPP_CB;
+}
 
 /**
  *  lps_public BFA LPS public functions
@@ -752,6 +856,14 @@
 	return lps->lsrjt_expl;
 }
 
+/**
+ * Return fpma/spma MAC for lport
+ */
+struct mac_s
+bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
+{
+	return lps->lp_mac;
+}
 
 /**
  * LPS firmware message class handler.
@@ -773,6 +885,10 @@
 		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
 		break;
 
+	case BFI_LPS_H2I_CVL_EVENT:
+		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
+		break;
+
 	default:
 		bfa_trc(bfa, m->mhdr.msg_id);
 		bfa_assert(0);
diff --git a/drivers/scsi/bfa/bfa_module.c b/drivers/scsi/bfa/bfa_module.c
index 32eda8e..a7fcc80 100644
--- a/drivers/scsi/bfa/bfa_module.c
+++ b/drivers/scsi/bfa/bfa_module.c
@@ -24,7 +24,7 @@
  */
 struct bfa_module_s *hal_mods[] = {
 	&hal_mod_sgpg,
-	&hal_mod_pport,
+	&hal_mod_fcport,
 	&hal_mod_fcxp,
 	&hal_mod_lps,
 	&hal_mod_uf,
@@ -45,7 +45,7 @@
 	bfa_isr_unhandled,	/* BFI_MC_DIAG */
 	bfa_isr_unhandled,	/* BFI_MC_FLASH */
 	bfa_isr_unhandled,	/* BFI_MC_CEE */
-	bfa_pport_isr,		/* BFI_MC_PORT */
+	bfa_fcport_isr,		/* BFI_MC_FCPORT */
 	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
 	bfa_isr_unhandled,	/* BFI_MC_LL */
 	bfa_uf_isr,		/* BFI_MC_UF */
diff --git a/drivers/scsi/bfa/bfa_modules_priv.h b/drivers/scsi/bfa/bfa_modules_priv.h
index 96f7053..f554c2f 100644
--- a/drivers/scsi/bfa/bfa_modules_priv.h
+++ b/drivers/scsi/bfa/bfa_modules_priv.h
@@ -29,7 +29,7 @@
 
 
 struct bfa_modules_s {
-	struct bfa_pport_s	pport;	/*  physical port module	*/
+	struct bfa_fcport_s	fcport;	/*  fc port module	*/
 	struct bfa_fcxp_mod_s fcxp_mod; /*  fcxp module		*/
 	struct bfa_lps_mod_s lps_mod;   /*  lps module		*/
 	struct bfa_uf_mod_s uf_mod;	/*  unsolicited frame module	*/
diff --git a/drivers/scsi/bfa/bfa_port_priv.h b/drivers/scsi/bfa/bfa_port_priv.h
index 51f698a..40e256e 100644
--- a/drivers/scsi/bfa/bfa_port_priv.h
+++ b/drivers/scsi/bfa/bfa_port_priv.h
@@ -23,9 +23,19 @@
 #include "bfa_intr_priv.h"
 
 /**
- * BFA physical port data structure
+ * Link notification data structure
  */
-struct bfa_pport_s {
+struct bfa_fcport_ln_s {
+	struct bfa_fcport_s     *fcport;
+	bfa_sm_t                sm;
+	struct bfa_cb_qe_s      ln_qe;  /*  BFA callback queue elem for ln */
+	enum bfa_pport_linkstate ln_event; /*  ln event for callback */
+};
+
+/**
+ * BFA FC port data structure
+ */
+struct bfa_fcport_s {
 	struct bfa_s 		*bfa;	/*  parent BFA instance */
 	bfa_sm_t		sm;	/*  port state machine */
 	wwn_t			nwwn;	/*  node wwn of physical port */
@@ -36,6 +46,8 @@
 	enum bfa_pport_topology topology;	/*  current topology */
 	u8			myalpa;	/*  my ALPA in LOOP topology */
 	u8			rsvd[3];
+	u32             mypid:24;
+	u32             rsvd_b:8;
 	struct bfa_pport_cfg_s	cfg;	/*  current port configuration */
 	struct bfa_qos_attr_s  qos_attr;   /* QoS Attributes */
 	struct bfa_qos_vc_attr_s qos_vc_attr;  /*  VC info from ELP */
@@ -49,42 +61,31 @@
 	void			(*event_cbfn) (void *cbarg,
 						bfa_pport_event_t event);
 	union {
-		union bfi_pport_i2h_msg_u i2hmsg;
+		union bfi_fcport_i2h_msg_u i2hmsg;
 	} event_arg;
 	void			*bfad;	/*  BFA driver handle */
+	struct bfa_fcport_ln_s   ln; /* Link Notification */
 	struct bfa_cb_qe_s		hcb_qe;	/*  BFA callback queue elem */
-	enum bfa_pport_linkstate	hcb_event;
-					/*  link event for callback */
+	struct bfa_timer_s      timer;  /*  timer */
 	u32		msgtag;	/*  firmware msg tag for reply */
 	u8			*stats_kva;
 	u64		stats_pa;
-	union bfa_pport_stats_u *stats;	/*  pport stats */
-	u32		mypid:24;
-	u32		rsvd_b:8;
-	struct bfa_timer_s 	timer;	/*  timer */
-	union bfa_pport_stats_u 	*stats_ret;
-					/*  driver stats location */
-	bfa_status_t		stats_status;
-					/*  stats/statsclr status */
-	bfa_boolean_t   	stats_busy;
-					/*  outstanding stats/statsclr */
-	bfa_boolean_t   	stats_qfull;
-	bfa_boolean_t   	diag_busy;
-					/*  diag busy status */
-	bfa_boolean_t   	beacon;
-					/*  port beacon status */
-	bfa_boolean_t   	link_e2e_beacon;
-					/*  link beacon status */
-	bfa_cb_pport_t		stats_cbfn;
-					/*  driver callback function */
-	void			*stats_cbarg;
-					/* *!< user callback arg */
+	union bfa_fcport_stats_u *stats;
+	union bfa_fcport_stats_u *stats_ret; /*  driver stats location */
+	bfa_status_t            stats_status; /*  stats/statsclr status */
+	bfa_boolean_t           stats_busy; /*  outstanding stats/statsclr */
+	bfa_boolean_t           stats_qfull;
+	bfa_cb_pport_t          stats_cbfn; /*  driver callback function */
+	void                    *stats_cbarg; /*  user callback arg */
+	bfa_boolean_t           diag_busy; /*  diag busy status */
+	bfa_boolean_t           beacon; /*  port beacon status */
+	bfa_boolean_t           link_e2e_beacon; /*  link beacon status */
 };
 
-#define BFA_PORT_MOD(__bfa)	(&(__bfa)->modules.pport)
+#define BFA_FCPORT_MOD(__bfa)	(&(__bfa)->modules.fcport)
 
 /*
  * public functions
  */
-void	bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
+void	bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg);
 #endif /* __BFA_PORT_PRIV_H__ */
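
The struct reorganization above pulls link-notification state out into its own bfa_fcport_ln_s, with a back-pointer to the port, a dedicated callback queue element, and the pending link event, instead of parking hcb_event on the port itself. A compact userspace model of that split (types and names are illustrative, not the driver's):

#include <stdio.h>

/* Model of the split above: link-notification state rides in its own
 * sub-object with a back-pointer, so it can queue callbacks without
 * racing the port's general-purpose callback element. */
struct cb_qe { void (*fn)(void *arg); void *arg; };

struct fcport;

struct fcport_ln {
	struct fcport *fcport;	/* back-pointer, as in bfa_fcport_ln_s */
	struct cb_qe ln_qe;	/* dedicated elem for link events */
	int ln_event;		/* pending link state for the callback */
};

struct fcport {
	struct cb_qe hcb_qe;	/* general callback elem stays separate */
	struct fcport_ln ln;
};

static void ln_cb(void *arg)
{
	struct fcport_ln *ln = arg;

	printf("link event %d delivered\n", ln->ln_event);
}

int main(void)
{
	struct fcport p = { .ln = { .fcport = &p } };

	p.ln.ln_event = 1;		/* e.g. link up */
	p.ln.ln_qe = (struct cb_qe){ ln_cb, &p.ln };
	p.ln.ln_qe.fn(p.ln.ln_qe.arg);	/* queued elsewhere in reality */
	return 0;
}
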
diff --git a/drivers/scsi/bfa/bfa_priv.h b/drivers/scsi/bfa/bfa_priv.h
index 0747a6b..be80fc7 100644
--- a/drivers/scsi/bfa/bfa_priv.h
+++ b/drivers/scsi/bfa/bfa_priv.h
@@ -101,7 +101,7 @@
 extern struct bfa_module_s hal_mod_flash;
 extern struct bfa_module_s hal_mod_fcdiag;
 extern struct bfa_module_s hal_mod_sgpg;
-extern struct bfa_module_s hal_mod_pport;
+extern struct bfa_module_s hal_mod_fcport;
 extern struct bfa_module_s hal_mod_fcxp;
 extern struct bfa_module_s hal_mod_lps;
 extern struct bfa_module_s hal_mod_uf;
diff --git a/drivers/scsi/bfa/bfa_rport.c b/drivers/scsi/bfa/bfa_rport.c
index 3e1990a..7c509fa 100644
--- a/drivers/scsi/bfa/bfa_rport.c
+++ b/drivers/scsi/bfa/bfa_rport.c
@@ -114,7 +114,7 @@
 
 	default:
 		bfa_stats(rp, sm_un_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -146,7 +146,7 @@
 
 	default:
 		bfa_stats(rp, sm_cr_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -183,7 +183,7 @@
 
 	default:
 		bfa_stats(rp, sm_fwc_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -224,7 +224,7 @@
 
 	default:
 		bfa_stats(rp, sm_fwc_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -296,7 +296,7 @@
 
 	default:
 		bfa_stats(rp, sm_on_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -329,7 +329,7 @@
 
 	default:
 		bfa_stats(rp, sm_fwd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -359,7 +359,7 @@
 
 	default:
 		bfa_stats(rp, sm_fwd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -394,7 +394,7 @@
 
 	default:
 		bfa_stats(rp, sm_off_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -421,7 +421,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -446,7 +446,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -477,7 +477,7 @@
 
 	default:
 		bfa_stats(rp, sm_delp_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -512,7 +512,7 @@
 
 	default:
 		bfa_stats(rp, sm_offp_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
@@ -550,7 +550,7 @@
 
 	default:
 		bfa_stats(rp, sm_iocd_unexp);
-		bfa_assert(0);
+		bfa_sm_fault(rp->bfa, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfa_trcmod_priv.h b/drivers/scsi/bfa/bfa_trcmod_priv.h
index b3562dc..a7a8261 100644
--- a/drivers/scsi/bfa/bfa_trcmod_priv.h
+++ b/drivers/scsi/bfa/bfa_trcmod_priv.h
@@ -29,38 +29,36 @@
  * !!! needed between trace utility and driver version
  */
 enum {
-	BFA_TRC_HAL_IOC		= 1,
-	BFA_TRC_HAL_INTR	= 2,
-	BFA_TRC_HAL_FCXP	= 3,
-	BFA_TRC_HAL_UF		= 4,
-	BFA_TRC_HAL_DIAG	= 5,
-	BFA_TRC_HAL_RPORT	= 6,
-	BFA_TRC_HAL_FCPIM	= 7,
-	BFA_TRC_HAL_IOIM	= 8,
-	BFA_TRC_HAL_TSKIM	= 9,
-	BFA_TRC_HAL_ITNIM	= 10,
-	BFA_TRC_HAL_PPORT	= 11,
-	BFA_TRC_HAL_SGPG	= 12,
-	BFA_TRC_HAL_FLASH	= 13,
-	BFA_TRC_HAL_DEBUG	= 14,
-	BFA_TRC_HAL_WWN		= 15,
-	BFA_TRC_HAL_FLASH_RAW	= 16,
-	BFA_TRC_HAL_SBOOT	= 17,
-	BFA_TRC_HAL_SBOOT_IO	= 18,
-	BFA_TRC_HAL_SBOOT_INTR	= 19,
-	BFA_TRC_HAL_SBTEST	= 20,
-	BFA_TRC_HAL_IPFC	= 21,
-	BFA_TRC_HAL_IOCFC	= 22,
-	BFA_TRC_HAL_FCPTM	= 23,
-	BFA_TRC_HAL_IOTM	= 24,
-	BFA_TRC_HAL_TSKTM	= 25,
-	BFA_TRC_HAL_TIN		= 26,
-	BFA_TRC_HAL_LPS		= 27,
-	BFA_TRC_HAL_FCDIAG	= 28,
-	BFA_TRC_HAL_PBIND	= 29,
-	BFA_TRC_HAL_IOCFC_CT	= 30,
-	BFA_TRC_HAL_IOCFC_CB	= 31,
-	BFA_TRC_HAL_IOCFC_Q	= 32,
+	BFA_TRC_HAL_INTR	= 1,
+	BFA_TRC_HAL_FCXP	= 2,
+	BFA_TRC_HAL_UF		= 3,
+	BFA_TRC_HAL_RPORT	= 4,
+	BFA_TRC_HAL_FCPIM	= 5,
+	BFA_TRC_HAL_IOIM	= 6,
+	BFA_TRC_HAL_TSKIM	= 7,
+	BFA_TRC_HAL_ITNIM	= 8,
+	BFA_TRC_HAL_FCPORT	= 9,
+	BFA_TRC_HAL_SGPG	= 10,
+	BFA_TRC_HAL_FLASH	= 11,
+	BFA_TRC_HAL_DEBUG	= 12,
+	BFA_TRC_HAL_WWN		= 13,
+	BFA_TRC_HAL_FLASH_RAW	= 14,
+	BFA_TRC_HAL_SBOOT	= 15,
+	BFA_TRC_HAL_SBOOT_IO	= 16,
+	BFA_TRC_HAL_SBOOT_INTR	= 17,
+	BFA_TRC_HAL_SBTEST	= 18,
+	BFA_TRC_HAL_IPFC	= 19,
+	BFA_TRC_HAL_IOCFC	= 20,
+	BFA_TRC_HAL_FCPTM	= 21,
+	BFA_TRC_HAL_IOTM	= 22,
+	BFA_TRC_HAL_TSKTM	= 23,
+	BFA_TRC_HAL_TIN		= 24,
+	BFA_TRC_HAL_LPS		= 25,
+	BFA_TRC_HAL_FCDIAG	= 26,
+	BFA_TRC_HAL_PBIND	= 27,
+	BFA_TRC_HAL_IOCFC_CT	= 28,
+	BFA_TRC_HAL_IOCFC_CB	= 29,
+	BFA_TRC_HAL_IOCFC_Q	= 30,
 };
 
 #endif /* __BFA_TRCMOD_PRIV_H__ */
diff --git a/drivers/scsi/bfa/bfa_tskim.c b/drivers/scsi/bfa/bfa_tskim.c
index ff7a4dc..ad9aaae 100644
--- a/drivers/scsi/bfa/bfa_tskim.c
+++ b/drivers/scsi/bfa/bfa_tskim.c
@@ -110,7 +110,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -146,7 +146,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -178,7 +178,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -207,7 +207,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -242,7 +242,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -277,7 +277,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
@@ -303,7 +303,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(tskim->bfa, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/bfad.c b/drivers/scsi/bfa/bfad.c
index b52b773..6bff08e 100644
--- a/drivers/scsi/bfa/bfad.c
+++ b/drivers/scsi/bfa/bfad.c
@@ -20,6 +20,7 @@
  */
 
 #include <linux/module.h>
+#include <linux/kthread.h>
 #include "bfad_drv.h"
 #include "bfad_im.h"
 #include "bfad_tm.h"
@@ -53,6 +54,7 @@
 static int      ioc_auto_recover = BFA_TRUE;
 static int      ipfc_enable = BFA_FALSE;
 static int      ipfc_mtu = -1;
+static int	fdmi_enable = BFA_TRUE;
 int 		bfa_lun_queue_depth = BFAD_LUN_QUEUE_DEPTH;
 int      	bfa_linkup_delay = -1;
 
@@ -74,6 +76,7 @@
 module_param(ioc_auto_recover, int, S_IRUGO | S_IWUSR);
 module_param(ipfc_enable, int, S_IRUGO | S_IWUSR);
 module_param(ipfc_mtu, int, S_IRUGO | S_IWUSR);
+module_param(fdmi_enable, int, S_IRUGO | S_IWUSR);
 module_param(bfa_linkup_delay, int, S_IRUGO | S_IWUSR);
 
 /*
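
The new fdmi_enable parameter follows the driver's existing convention: a file-scope int plus module_param(..., int, S_IRUGO | S_IWUSR), which makes it settable at load time and root-writable under /sys/module/<module>/parameters/. A minimal self-contained module using the same pattern (module and parameter names are hypothetical):

#include <linux/module.h>

/* Defaults on, like fdmi_enable above. */
static int feature_enable = 1;
module_param(feature_enable, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(feature_enable, "Enable the optional feature (default: 1)");

static int __init demo_init(void)
{
	/* Set from "modprobe demo feature_enable=0" or later via
	 * /sys/module/demo/parameters/feature_enable (root-writable). */
	pr_info("demo: feature_enable=%d\n", feature_enable);
	return 0;
}

static void __exit demo_exit(void)
{
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
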
@@ -95,6 +98,8 @@
 
 	if (ipfc_enable)
 		bfad_ipfc_probe(bfad);
+
+	bfad->bfad_flags |= BFAD_FC4_PROBE_DONE;
 ext:
 	return rc;
 }
@@ -106,6 +111,7 @@
 	bfad_tm_probe_undo(bfad);
 	if (ipfc_enable)
 		bfad_ipfc_probe_undo(bfad);
+	bfad->bfad_flags &= ~BFAD_FC4_PROBE_DONE;
 }
 
 static void
@@ -173,9 +179,19 @@
 {
 	struct bfad_s  *bfad = drv;
 
-	if (init_status == BFA_STATUS_OK)
+	if (init_status == BFA_STATUS_OK) {
 		bfad->bfad_flags |= BFAD_HAL_INIT_DONE;
 
+		/* If the BFAD_HAL_INIT_FAIL flag is set,
+		 * wake up the kernel thread so it can start
+		 * the bfad operations now that HAL init is done.
+		 */
+		if ((bfad->bfad_flags & BFAD_HAL_INIT_FAIL)) {
+			bfad->bfad_flags &= ~BFAD_HAL_INIT_FAIL;
+			wake_up_process(bfad->bfad_tsk);
+		}
+	}
+
 	complete(&bfad->comp);
 }
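
Two things happen in the success path above: the parked worker thread is woken if the first HAL init attempt had failed, and the synchronous waiter blocked on bfad->comp is always released. A userspace model of that wake-on-init handshake, with a condition variable standing in for wake_up_process() (names illustrative):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool init_done;

static void *worker(void *arg)
{
	pthread_mutex_lock(&lock);
	while (!init_done)		/* parked until init completes */
		pthread_cond_wait(&cond, &lock);
	pthread_mutex_unlock(&lock);
	printf("worker: starting driver ops after init\n");
	return NULL;
}

static void init_done_cb(void)		/* models the callback above */
{
	pthread_mutex_lock(&lock);
	init_done = true;
	pthread_cond_signal(&cond);	/* models wake_up_process() */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);
	init_done_cb();
	pthread_join(&tid, NULL);
	return 0;
}
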
 
@@ -648,7 +664,7 @@
 
 	sprintf(symname, "%s-%d", BFAD_DRIVER_NAME, bfad->inst_no);
 	memcpy(port_cfg.sym_name.symname, symname, strlen(symname));
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 	port_cfg.nwwn = attr.nwwn;
 	port_cfg.pwwn = attr.pwwn;
 
@@ -661,7 +677,6 @@
 	bfa_status_t    rc;
 	unsigned long   flags;
 	struct bfa_fcs_driver_info_s driver_info;
-	int             i;
 
 	bfad->cfg_data.rport_del_timeout = rport_del_timeout;
 	bfad->cfg_data.lun_queue_depth = bfa_lun_queue_depth;
@@ -681,12 +696,7 @@
 	bfa_init_log(&bfad->bfa, bfad->logmod);
 	bfa_init_trc(&bfad->bfa, bfad->trcmod);
 	bfa_init_aen(&bfad->bfa, bfad->aen);
-	INIT_LIST_HEAD(&bfad->file_q);
-	INIT_LIST_HEAD(&bfad->file_free_q);
-	for (i = 0; i < BFAD_AEN_MAX_APPS; i++) {
-		bfa_q_qe_init(&bfad->file_buf[i].qe);
-		list_add_tail(&bfad->file_buf[i].qe, &bfad->file_free_q);
-	}
+	memset(bfad->file_map, 0, sizeof(bfad->file_map));
 	bfa_init_plog(&bfad->bfa, &bfad->plog_buf);
 	bfa_plog_init(&bfad->plog_buf);
 	bfa_plog_str(&bfad->plog_buf, BFA_PL_MID_DRVR, BFA_PL_EID_DRIVER_START,
@@ -746,8 +756,16 @@
 	bfa_fcs_log_init(&bfad->bfa_fcs, bfad->logmod);
 	bfa_fcs_trc_init(&bfad->bfa_fcs, bfad->trcmod);
 	bfa_fcs_aen_init(&bfad->bfa_fcs, bfad->aen);
-	bfa_fcs_init(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+	bfa_fcs_attach(&bfad->bfa_fcs, &bfad->bfa, bfad, BFA_FALSE);
+
+	/* Do FCS init only when HAL init is done */
+	if ((bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+		bfa_fcs_init(&bfad->bfa_fcs);
+		bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+	}
+
 	bfa_fcs_driver_info_init(&bfad->bfa_fcs, &driver_info);
+	bfa_fcs_set_fdmi_param(&bfad->bfa_fcs, fdmi_enable);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	bfad->bfad_flags |= BFAD_DRV_INIT_DONE;
@@ -763,12 +781,21 @@
 void
 bfad_drv_uninit(struct bfad_s *bfad)
 {
+	unsigned long   flags;
+
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	init_completion(&bfad->comp);
+	bfa_stop(&bfad->bfa);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+	wait_for_completion(&bfad->comp);
+
 	del_timer_sync(&bfad->hal_tmo);
 	bfa_isr_disable(&bfad->bfa);
 	bfa_detach(&bfad->bfa);
 	bfad_remove_intr(bfad);
-	bfa_assert(list_empty(&bfad->file_q));
 	bfad_hal_mem_release(bfad);
+
+	bfad->bfad_flags &= ~BFAD_DRV_INIT_DONE;
 }
 
 void
@@ -859,6 +886,86 @@
 		bfa_log_set_level_all(&bfad->log_data, log_level);
 }
 
+bfa_status_t
+bfad_start_ops(struct bfad_s *bfad)
+{
+	int retval;
+
+	/* PPORT FCS config */
+	bfad_fcs_port_cfg(bfad);
+
+	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+	if (retval != BFA_STATUS_OK)
+		goto out_cfg_pport_failure;
+
+	/* BFAD level FC4 (IM/TM/IPFC) specific resource allocation */
+	retval = bfad_fc4_probe(bfad);
+	if (retval != BFA_STATUS_OK) {
+		printk(KERN_WARNING "bfad_fc4_probe failed\n");
+		goto out_fc4_probe_failure;
+	}
+
+	bfad_drv_start(bfad);
+
+	/*
+	 * If bfa_linkup_delay is left at its default of -1, try to retrieve
+	 * the value using bfad_os_get_linkup_delay(); else use the
+	 * module param value passed in as the bfa_linkup_delay.
+	 */
+	if (bfa_linkup_delay < 0) {
+
+		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
+		bfad_os_rport_online_wait(bfad);
+		bfa_linkup_delay = -1;
+
+	} else {
+		bfad_os_rport_online_wait(bfad);
+	}
+
+	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
+
+	return BFA_STATUS_OK;
+
+out_fc4_probe_failure:
+	bfad_fc4_probe_undo(bfad);
+	bfad_uncfg_pport(bfad);
+out_cfg_pport_failure:
+	return BFA_STATUS_FAILED;
+}
+
+int
+bfad_worker(void *ptr)
+{
+	struct bfad_s *bfad;
+	unsigned long   flags;
+
+	bfad = (struct bfad_s *)ptr;
+
+	while (!kthread_should_stop()) {
+
+		/* Check whether FCS init was already done from
+		 * bfad_drv_init; if not, do FCS init here and set the flag.
+		 */
+		if (!(bfad->bfad_flags & BFAD_FCS_INIT_DONE)) {
+			spin_lock_irqsave(&bfad->bfad_lock, flags);
+			bfa_fcs_init(&bfad->bfa_fcs);
+			bfad->bfad_flags |= BFAD_FCS_INIT_DONE;
+			spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		}
+
+		/* Start the bfad operations after HAL init done */
+		bfad_start_ops(bfad);
+
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		bfad->bfad_tsk = NULL;
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
+		break;
+	}
+
+	return 0;
+}
+
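
bfad_worker() is a one-shot kthread: the kthread_should_stop() loop runs its body once and breaks, and the thread clears bfad->bfad_tsk under the lock so the remove path knows there is nothing left to kthread_stop(). A minimal module showing the same one-shot shape (names hypothetical):

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/err.h>

static struct task_struct *tsk;

/* One-shot worker: do the deferred start-up work once, then fall out
 * of the loop, exactly as bfad_worker() does above. */
static int oneshot_worker(void *arg)
{
	while (!kthread_should_stop()) {
		pr_info("oneshot: running deferred start-up work\n");
		break;		/* body runs once */
	}
	return 0;
}

static int __init oneshot_init(void)
{
	tsk = kthread_create(oneshot_worker, NULL, "oneshot_worker");
	if (IS_ERR(tsk))
		return PTR_ERR(tsk);
	wake_up_process(tsk);	/* kthread_create() leaves it sleeping */
	return 0;
}

static void __exit oneshot_exit(void)
{
	/* Nothing to stop here: the worker exited on its own. A real
	 * driver must synchronize, as bfad does by clearing bfad_tsk
	 * under bfad_lock and only stopping the thread when it is set. */
}

module_init(oneshot_init);
module_exit(oneshot_exit);
MODULE_LICENSE("GPL");
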
  /*
  *  PCI driver entries
   */
@@ -871,7 +978,6 @@
 {
 	struct bfad_s  *bfad;
 	int             error = -ENODEV, retval;
-	char            buf[16];
 
 	/*
 	 * For single port cards - only claim function 0
@@ -902,8 +1008,7 @@
 	bfa_trc(bfad, bfad_inst);
 
 	bfad->logmod = &bfad->log_data;
-	sprintf(buf, "%d", bfad_inst);
-	bfa_log_init(bfad->logmod, buf, bfa_os_printf);
+	bfa_log_init(bfad->logmod, (char *)pci_name(pdev), bfa_os_printf);
 
 	bfad_drv_log_level_set(bfad);
 
@@ -933,57 +1038,39 @@
 	bfad->ref_count = 0;
 	bfad->pport.bfad = bfad;
 
+	bfad->bfad_tsk = kthread_create(bfad_worker, (void *) bfad, "%s",
+					"bfad_worker");
+	if (IS_ERR(bfad->bfad_tsk)) {
+		printk(KERN_INFO "bfad[%d]: Kernel thread creation failed!\n",
+			bfad->inst_no);
+		goto out_kthread_create_failure;
+	}
+
 	retval = bfad_drv_init(bfad);
 	if (retval != BFA_STATUS_OK)
 		goto out_drv_init_failure;
 	if (!(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
+		bfad->bfad_flags |= BFAD_HAL_INIT_FAIL;
 		printk(KERN_WARNING "bfad%d: hal init failed\n", bfad->inst_no);
 		goto ok;
 	}
 
-	/*
-	 * PPORT FCS config
-	 */
-	bfad_fcs_port_cfg(bfad);
-
-	retval = bfad_cfg_pport(bfad, BFA_PORT_ROLE_FCP_IM);
+	retval = bfad_start_ops(bfad);
 	if (retval != BFA_STATUS_OK)
-		goto out_cfg_pport_failure;
+		goto out_start_ops_failure;
 
-	/*
-	 * BFAD level FC4 (IM/TM/IPFC) specific resource allocation
-	 */
-	retval = bfad_fc4_probe(bfad);
-	if (retval != BFA_STATUS_OK) {
-		printk(KERN_WARNING "bfad_fc4_probe failed\n");
-		goto out_fc4_probe_failure;
-	}
+	kthread_stop(bfad->bfad_tsk);
+	bfad->bfad_tsk = NULL;
 
-	bfad_drv_start(bfad);
-
-	/*
-	 * If bfa_linkup_delay is set to -1 default; try to retrive the
-	 * value using the bfad_os_get_linkup_delay(); else use the
-	 * passed in module param value as the bfa_linkup_delay.
-	 */
-	if (bfa_linkup_delay < 0) {
-		bfa_linkup_delay = bfad_os_get_linkup_delay(bfad);
-		bfad_os_rport_online_wait(bfad);
-		bfa_linkup_delay = -1;
-	} else {
-		bfad_os_rport_online_wait(bfad);
-	}
-
-	bfa_log(bfad->logmod, BFA_LOG_LINUX_DEVICE_CLAIMED, bfad->pci_name);
 ok:
 	return 0;
 
-out_fc4_probe_failure:
-	bfad_fc4_probe_undo(bfad);
-	bfad_uncfg_pport(bfad);
-out_cfg_pport_failure:
+out_start_ops_failure:
 	bfad_drv_uninit(bfad);
 out_drv_init_failure:
+	kthread_stop(bfad->bfad_tsk);
+out_kthread_create_failure:
 	mutex_lock(&bfad_mutex);
 	bfad_inst--;
 	list_del(&bfad->list_entry);
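
Note how the error ladder gains a new rung: the kthread is now the first resource acquired, so out_drv_init_failure falls through into kthread_stop() while out_kthread_create_failure skips it. A standalone model of that reverse-order unwind (the "resources" are illustrative; label names mirror the hunk):

#include <stdio.h>

static int acquire(const char *what, int ok)
{
	printf("%-10s %s\n", what, ok ? "acquired" : "FAILED");
	return ok;
}

/* Each label undoes only the steps that succeeded before the
 * failure point, in reverse order of acquisition. */
static int probe(int fail_at)
{
	if (!acquire("kthread", fail_at != 1))
		goto out_kthread_create_failure;
	if (!acquire("drv_init", fail_at != 2))
		goto out_drv_init_failure;
	if (!acquire("start_ops", fail_at != 3))
		goto out_start_ops_failure;
	return 0;

out_start_ops_failure:
	printf("undo drv_init\n");
out_drv_init_failure:
	printf("undo kthread\n");	/* fall-through: reverse order */
out_kthread_create_failure:
	return -1;
}

int main(void)
{
	return probe(3) ? 1 : 0;	/* last step fails: full unwind */
}
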
@@ -1008,6 +1095,11 @@
 
 	bfa_trc(bfad, bfad->inst_no);
 
+	spin_lock_irqsave(&bfad->bfad_lock, flags);
+	if (bfad->bfad_tsk != NULL)
+		kthread_stop(bfad->bfad_tsk);
+	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+
 	if ((bfad->bfad_flags & BFAD_DRV_INIT_DONE)
 	    && !(bfad->bfad_flags & BFAD_HAL_INIT_DONE)) {
 
@@ -1024,13 +1116,25 @@
 		goto remove_sysfs;
 	}
 
-	if (bfad->bfad_flags & BFAD_HAL_START_DONE)
+	if (bfad->bfad_flags & BFAD_HAL_START_DONE) {
 		bfad_drv_stop(bfad);
+	} else if (bfad->bfad_flags & BFAD_DRV_INIT_DONE) {
+		/* Invoke bfa_stop() before bfa_detach()
+		 * when HAL and DRV init succeeded
+		 * but HAL start did not occur.
+		 */
+		spin_lock_irqsave(&bfad->bfad_lock, flags);
+		init_completion(&bfad->comp);
+		bfa_stop(&bfad->bfa);
+		spin_unlock_irqrestore(&bfad->bfad_lock, flags);
+		wait_for_completion(&bfad->comp);
+	}
 
 	bfad_remove_intr(bfad);
-
 	del_timer_sync(&bfad->hal_tmo);
-	bfad_fc4_probe_undo(bfad);
+
+	if (bfad->bfad_flags & BFAD_FC4_PROBE_DONE)
+		bfad_fc4_probe_undo(bfad);
 
 	if (bfad->bfad_flags & BFAD_CFG_PPORT_DONE)
 		bfad_uncfg_pport(bfad);
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 9129ae3..d97f691 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -141,7 +141,7 @@
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 
 	switch (attr.port_type) {
 	case BFA_PPORT_TYPE_NPORT:
@@ -173,7 +173,7 @@
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 
 	switch (attr.port_state) {
 	case BFA_PPORT_ST_LINKDOWN:
@@ -229,8 +229,10 @@
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfa_pport_attr_s attr;
+	unsigned long   flags;
 
-	bfa_pport_get_attr(&bfad->bfa, &attr);
+	spin_lock_irqsave(shost->host_lock, flags);
+	bfa_fcport_get_attr(&bfad->bfa, &attr);
 	switch (attr.speed) {
 	case BFA_PPORT_SPEED_8GBPS:
 		fc_host_speed(shost) = FC_PORTSPEED_8GBIT;
@@ -248,6 +250,7 @@
 		fc_host_speed(shost) = FC_PORTSPEED_UNKNOWN;
 		break;
 	}
+	spin_unlock_irqrestore(shost->host_lock, flags);
 }
 
 /**
@@ -285,7 +288,7 @@
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
 	memset(hstats, 0, sizeof(struct fc_host_statistics));
-	rc = bfa_pport_get_stats(&bfad->bfa,
+	rc = bfa_port_get_stats(BFA_FCPORT(&bfad->bfa),
 				     (union bfa_pport_stats_u *) hstats,
 				     bfad_hcb_comp, &fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
@@ -312,7 +315,8 @@
 
 	init_completion(&fcomp.comp);
 	spin_lock_irqsave(&bfad->bfad_lock, flags);
-	rc = bfa_pport_clear_stats(&bfad->bfa, bfad_hcb_comp, &fcomp);
+	rc = bfa_port_clear_stats(BFA_FCPORT(&bfad->bfa), bfad_hcb_comp,
+		&fcomp);
 	spin_unlock_irqrestore(&bfad->bfad_lock, flags);
 
 	if (rc != BFA_STATUS_OK)
@@ -421,12 +425,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.serial_num);
+	bfa_get_adapter_serial_num(&bfad->bfa, serial_num);
+	return snprintf(buf, PAGE_SIZE, "%s\n", serial_num);
 }
 
 static ssize_t
@@ -437,11 +439,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.model);
+	bfa_get_adapter_model(&bfad->bfa, model);
+	return snprintf(buf, PAGE_SIZE, "%s\n", model);
 }
 
 static ssize_t
@@ -452,12 +453,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char model_descr[BFA_ADAPTER_MODEL_DESCR_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.model_descr);
+	bfa_get_adapter_model(&bfad->bfa, model_descr);
+	return snprintf(buf, PAGE_SIZE, "%s\n", model_descr);
 }
 
 static ssize_t
@@ -482,14 +481,13 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char fw_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-
+	bfa_get_adapter_model(&bfad->bfa, model);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
 	return snprintf(buf, PAGE_SIZE, "Brocade %s FV%s DV%s\n",
-			ioc_attr.adapter_attr.model,
-			ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+		model, fw_ver, BFAD_DRIVER_VERSION);
 }
 
 static ssize_t
@@ -500,11 +498,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char hw_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.hw_ver);
+	bfa_get_pci_chip_rev(&bfad->bfa, hw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", hw_ver);
 }
 
 static ssize_t
@@ -522,12 +519,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char optrom_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n",
-			ioc_attr.adapter_attr.optrom_ver);
+	bfa_get_adapter_optrom_ver(&bfad->bfa, optrom_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", optrom_ver);
 }
 
 static ssize_t
@@ -538,11 +533,10 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
+	char fw_ver[BFA_VERSION_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%s\n", ioc_attr.adapter_attr.fw_ver);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
+	return snprintf(buf, PAGE_SIZE, "%s\n", fw_ver);
 }
 
 static ssize_t
@@ -553,11 +547,9 @@
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
 	struct bfad_s         *bfad = im_port->bfad;
-	struct bfa_ioc_attr_s  ioc_attr;
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
-	return snprintf(buf, PAGE_SIZE, "%d\n", ioc_attr.adapter_attr.nports);
+	return snprintf(buf, PAGE_SIZE, "%d\n",
+		bfa_get_nports(&bfad->bfa));
 }
 
 static ssize_t
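
Each sysfs handler above now asks the IOC layer for exactly one string, written into a fixed stack buffer sized by the matching BFA_*_LEN constant, rather than copying a whole bfa_ioc_attr_s just to print one field. The shape, modeled in userspace (the constant value and the getter are stand-ins):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

#define MODEL_NAME_LEN 16	/* stands in for BFA_ADAPTER_MODEL_NAME_LEN */
#define PAGE_SIZE 4096

/* Stand-in for bfa_get_adapter_model(): fill a caller-provided,
 * fixed-size buffer instead of returning a whole attribute struct. */
static void get_adapter_model(char *model)
{
	strncpy(model, "Brocade-425", MODEL_NAME_LEN - 1);
	model[MODEL_NAME_LEN - 1] = '\0';
}

/* Shape of a sysfs show routine: one getter, one snprintf. */
static ssize_t model_show(char *buf)
{
	char model[MODEL_NAME_LEN];

	get_adapter_model(model);
	return snprintf(buf, PAGE_SIZE, "%s\n", model);
}

int main(void)
{
	char page[PAGE_SIZE];

	model_show(page);
	fputs(page, stdout);
	return 0;
}
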
diff --git a/drivers/scsi/bfa/bfad_attr.h b/drivers/scsi/bfa/bfad_attr.h
index 4d3312d..bf01020 100644
--- a/drivers/scsi/bfa/bfad_attr.h
+++ b/drivers/scsi/bfa/bfad_attr.h
@@ -17,9 +17,6 @@
 
 #ifndef __BFAD_ATTR_H__
 #define __BFAD_ATTR_H__
-/**
- *  bfad_attr.h VMware driver configuration interface module.
- */
 
 /**
  *  FC_transport_template FC transport template
@@ -52,12 +49,6 @@
 void
 bfad_im_get_host_port_id(struct Scsi_Host *shost);
 
-/**
- * FC transport template entry, issue a LIP.
- */
-int
-bfad_im_issue_fc_host_lip(struct Scsi_Host *shost);
-
 struct Scsi_Host*
 bfad_os_starget_to_shost(struct scsi_target *starget);
 
diff --git a/drivers/scsi/bfa/bfad_drv.h b/drivers/scsi/bfa/bfad_drv.h
index 172c81e..107848c 100644
--- a/drivers/scsi/bfa/bfad_drv.h
+++ b/drivers/scsi/bfa/bfad_drv.h
@@ -46,7 +46,7 @@
 #ifdef BFA_DRIVER_VERSION
 #define BFAD_DRIVER_VERSION    BFA_DRIVER_VERSION
 #else
-#define BFAD_DRIVER_VERSION    "2.0.0.0"
+#define BFAD_DRIVER_VERSION    "2.1.2.1"
 #endif
 
 
@@ -62,7 +62,9 @@
 #define BFAD_HAL_START_DONE			0x00000010
 #define BFAD_PORT_ONLINE			0x00000020
 #define BFAD_RPORT_ONLINE			0x00000040
-
+#define BFAD_FCS_INIT_DONE                      0x00000080
+#define BFAD_HAL_INIT_FAIL                      0x00000100
+#define BFAD_FC4_PROBE_DONE                     0x00000200
 #define BFAD_PORT_DELETE			0x00000001
 
 /*
@@ -137,12 +139,16 @@
 	u32        binding_method;
 };
 
-#define BFAD_AEN_MAX_APPS 8
-struct bfad_aen_file_s {
-	struct list_head  qe;
-	struct bfad_s *bfad;
-	s32 ri;
-	s32 app_id;
+union bfad_tmp_buf {
+	/* From struct bfa_adapter_attr_s */
+	char            manufacturer[BFA_ADAPTER_MFG_NAME_LEN];
+	char            serial_num[BFA_ADAPTER_SERIAL_NUM_LEN];
+	char            model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char            fw_ver[BFA_VERSION_LEN];
+	char            optrom_ver[BFA_VERSION_LEN];
+
+	/* From struct bfa_ioc_pci_attr_s */
+	u8         chip_rev[BFA_IOC_CHIP_REV_LEN];  /*  chip revision */
 };
 
 /*
@@ -168,6 +174,7 @@
 	u32        inst_no;	/* BFAD instance number */
 	u32        bfad_flags;
 	spinlock_t      bfad_lock;
+	struct task_struct *bfad_tsk;
 	struct bfad_cfg_param_s cfg_data;
 	struct bfad_msix_s msix_tab[MAX_MSIX_ENTRY];
 	int             nvec;
@@ -183,18 +190,12 @@
 	struct bfa_log_mod_s  *logmod;
 	struct bfa_aen_s      *aen;
 	struct bfa_aen_s       aen_buf;
-	struct bfad_aen_file_s file_buf[BFAD_AEN_MAX_APPS];
-	struct list_head         file_q;
-	struct list_head         file_free_q;
+	void		*file_map[BFA_AEN_MAX_APP];
 	struct bfa_plog_s      plog_buf;
 	int             ref_count;
 	bfa_boolean_t	ipfc_enabled;
+	union bfad_tmp_buf tmp_buf;
 	struct fc_host_statistics link_stats;
-
-	struct kobject *bfa_kobj;
-	struct kobject *ioc_kobj;
-	struct kobject *pport_kobj;
-	struct kobject *lport_kobj;
 };
 
 /*
@@ -258,6 +259,7 @@
 			       struct bfa_port_cfg_s *port_cfg);
 bfa_status_t    bfad_cfg_pport(struct bfad_s *bfad, enum bfa_port_role role);
 bfa_status_t    bfad_drv_init(struct bfad_s *bfad);
+bfa_status_t	bfad_start_ops(struct bfad_s *bfad);
 void            bfad_drv_start(struct bfad_s *bfad);
 void            bfad_uncfg_pport(struct bfad_s *bfad);
 void            bfad_drv_stop(struct bfad_s *bfad);
@@ -279,6 +281,7 @@
 void		bfad_drv_log_level_set(struct bfad_s *bfad);
 bfa_status_t	bfad_fc4_module_init(void);
 void		bfad_fc4_module_exit(void);
+int		bfad_worker(void *ptr);
 
 void bfad_pci_remove(struct pci_dev *pdev);
 int bfad_pci_probe(struct pci_dev *pdev, const struct pci_device_id *pid);
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index f788c2a..f9fc67a 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -43,11 +43,11 @@
 	struct bfad_s         *bfad = drv;
 	struct bfad_itnim_data_s *itnim_data;
 	struct bfad_itnim_s *itnim;
+	u8         host_status = DID_OK;
 
 	switch (io_status) {
 	case BFI_IOIM_STS_OK:
 		bfa_trc(bfad, scsi_status);
-		cmnd->result = ScsiResult(DID_OK, scsi_status);
 		scsi_set_resid(cmnd, 0);
 
 		if (sns_len > 0) {
@@ -56,8 +56,18 @@
 				sns_len = SCSI_SENSE_BUFFERSIZE;
 			memcpy(cmnd->sense_buffer, sns_info, sns_len);
 		}
-		if (residue > 0)
+		if (residue > 0) {
+			bfa_trc(bfad, residue);
 			scsi_set_resid(cmnd, residue);
+			if (!sns_len && (scsi_status == SAM_STAT_GOOD) &&
+				(scsi_bufflen(cmnd) - residue) <
+					cmnd->underflow) {
+				bfa_trc(bfad, 0);
+				host_status = DID_ERROR;
+			}
+		}
+		cmnd->result = ScsiResult(host_status, scsi_status);
+
 		break;
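
Worked example of the new underflow check: for scsi_bufflen() of 4096, residue 2048, and cmnd->underflow 3072, only 2048 bytes moved, which is below the 3072-byte minimum, so host_status becomes DID_ERROR even though the target returned SAM_STAT_GOOD. The same predicate as standalone C (struct fields flattened into parameters):

#include <stdio.h>
#include <stdbool.h>

/* Same predicate as the hunk above: good SCSI status, no sense data,
 * but fewer bytes moved than the midlayer's stated minimum. */
static bool short_transfer(unsigned bufflen, unsigned residue,
			   unsigned underflow, int scsi_status, int sns_len)
{
	return !sns_len && scsi_status == 0 /* SAM_STAT_GOOD */ &&
	       residue > 0 && (bufflen - residue) < underflow;
}

int main(void)
{
	/* 4096 requested, 2048 left over: only 2048 transferred,
	 * below the 3072-byte underflow limit, hence DID_ERROR. */
	printf("short: %d\n", short_transfer(4096, 2048, 3072, 0, 0));
	/* Full transfer (residue 0) is never flagged. */
	printf("short: %d\n", short_transfer(4096, 0, 3072, 0, 0));
	return 0;
}
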
 
 	case BFI_IOIM_STS_ABORTED:
@@ -167,17 +177,15 @@
 	static char     bfa_buf[256];
 	struct bfad_im_port_s *im_port =
 			(struct bfad_im_port_s *) shost->hostdata[0];
-	struct bfa_ioc_attr_s  ioc_attr;
 	struct bfad_s         *bfad = im_port->bfad;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
 
-	memset(&ioc_attr, 0, sizeof(ioc_attr));
-	bfa_get_attr(&bfad->bfa, &ioc_attr);
+	bfa_get_adapter_model(&bfad->bfa, model);
 
 	memset(bfa_buf, 0, sizeof(bfa_buf));
 	snprintf(bfa_buf, sizeof(bfa_buf),
-		 "Brocade FC/FCOE Adapter, " "model: %s hwpath: %s driver: %s",
-		 ioc_attr.adapter_attr.model, bfad->pci_name,
-		 BFAD_DRIVER_VERSION);
+		"Brocade FC/FCOE Adapter, model: %s hwpath: %s driver: %s",
+		model, bfad->pci_name, BFAD_DRIVER_VERSION);
 	return bfa_buf;
 }
 
@@ -501,16 +509,6 @@
 }
 
 /**
- * Path TOV processing begin notification -- dummy for linux
- */
-void
-bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim)
-{
-}
-
-
-
-/**
  * Allocate a Scsi_Host for a port.
  */
 int
@@ -931,10 +929,9 @@
 	struct Scsi_Host *host = im_port->shost;
 	struct bfad_s         *bfad = im_port->bfad;
 	struct bfad_port_s    *port = im_port->port;
-	union attr {
-		struct bfa_pport_attr_s pattr;
-		struct bfa_ioc_attr_s  ioc_attr;
-	} attr;
+	struct bfa_pport_attr_s pattr;
+	char model[BFA_ADAPTER_MODEL_NAME_LEN];
+	char fw_ver[BFA_VERSION_LEN];
 
 	fc_host_node_name(host) =
 		bfa_os_htonll((bfa_fcs_port_get_nwwn(port->fcs_port)));
@@ -954,20 +951,18 @@
 	/* For fibre channel services type 0x20 */
 	fc_host_supported_fc4s(host)[7] = 1;
 
-	memset(&attr.ioc_attr, 0, sizeof(attr.ioc_attr));
-	bfa_get_attr(&bfad->bfa, &attr.ioc_attr);
+	bfa_get_adapter_model(&bfad->bfa, model);
+	bfa_get_adapter_fw_ver(&bfad->bfa, fw_ver);
 	sprintf(fc_host_symbolic_name(host), "Brocade %s FV%s DV%s",
-		attr.ioc_attr.adapter_attr.model,
-		attr.ioc_attr.adapter_attr.fw_ver, BFAD_DRIVER_VERSION);
+		model, fw_ver, BFAD_DRIVER_VERSION);
 
 	fc_host_supported_speeds(host) = 0;
 	fc_host_supported_speeds(host) |=
 		FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | FC_PORTSPEED_2GBIT |
 		FC_PORTSPEED_1GBIT;
 
-	memset(&attr.pattr, 0, sizeof(attr.pattr));
-	bfa_pport_get_attr(&bfad->bfa, &attr.pattr);
-	fc_host_maxframe_size(host) = attr.pattr.pport_cfg.maxfrsize;
+	bfa_fcport_get_attr(&bfad->bfa, &pattr);
+	fc_host_maxframe_size(host) = pattr.pport_cfg.maxfrsize;
 }
 
 static void
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 189a5b2..85ab2da 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -23,7 +23,6 @@
 
 #define FCPI_NAME " fcpim"
 
-void bfad_flags_set(struct bfad_s *bfad, u32 flags);
 bfa_status_t bfad_im_module_init(void);
 void bfad_im_module_exit(void);
 bfa_status_t bfad_im_probe(struct bfad_s *bfad);
@@ -126,7 +125,6 @@
 void bfad_os_destroy_workq(struct bfad_im_s *im);
 void bfad_os_itnim_process(struct bfad_itnim_s *itnim_drv);
 void bfad_os_fc_host_init(struct bfad_im_port_s *im_port);
-void bfad_os_init_work(struct bfad_im_port_s *im_port);
 void bfad_os_scsi_host_free(struct bfad_s *bfad,
 				 struct bfad_im_port_s *im_port);
 void bfad_os_ramp_up_qdepth(struct bfad_itnim_s *itnim,
@@ -136,9 +134,6 @@
 int bfad_os_scsi_add_host(struct Scsi_Host *shost,
 		struct bfad_im_port_s *im_port, struct bfad_s *bfad);
 
-/*
- * scsi_host_template entries
- */
 void bfad_im_itnim_unmap(struct bfad_im_port_s  *im_port,
 			 struct bfad_itnim_s *itnim);
 
diff --git a/drivers/scsi/bfa/bfad_intr.c b/drivers/scsi/bfa/bfad_intr.c
index 7de8832..2b7dbec 100644
--- a/drivers/scsi/bfa/bfad_intr.c
+++ b/drivers/scsi/bfa/bfad_intr.c
@@ -23,8 +23,10 @@
 /**
  *  bfa_isr BFA driver interrupt functions
  */
-static int msix_disable;
-module_param(msix_disable, int, S_IRUGO | S_IWUSR);
+static int msix_disable_cb;
+static int msix_disable_ct;
+module_param(msix_disable_cb, int, S_IRUGO | S_IWUSR);
+module_param(msix_disable_ct, int, S_IRUGO | S_IWUSR);
 /**
  * Line based interrupt handler.
  */
@@ -141,6 +143,7 @@
 	int error = 0;
 	u32 mask = 0, i, num_bit = 0, max_bit = 0;
 	struct msix_entry msix_entries[MAX_MSIX_ENTRY];
+	struct pci_dev *pdev = bfad->pcidev;
 
 	/* Call BFA to get the msix map for this PCI function.  */
 	bfa_msix_getvecs(&bfad->bfa, &mask, &num_bit, &max_bit);
@@ -148,7 +151,9 @@
 	/* Set up the msix entry table */
 	bfad_init_msix_entry(bfad, msix_entries, mask, max_bit);
 
-	if (!msix_disable) {
+	if ((pdev->device == BFA_PCI_DEVICE_ID_CT && !msix_disable_ct) ||
+	    (pdev->device != BFA_PCI_DEVICE_ID_CT && !msix_disable_cb)) {
+
 		error = pci_enable_msix(bfad->pcidev, msix_entries, bfad->nvec);
 		if (error) {
 			/*
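
The single msix_disable knob becomes one per ASIC family, and the probe path keys the choice off pdev->device: CT (FCoE) parts consult msix_disable_ct, everything else msix_disable_cb, with the existing INTx fallback unchanged when pci_enable_msix() fails. The decision itself, modeled standalone (the device-ID values are illustrative, not the driver's constants):

#include <stdio.h>
#include <stdbool.h>

#define DEVICE_ID_CT 0x0014	/* illustrative stand-in for
				 * BFA_PCI_DEVICE_ID_CT */

static int msix_disable_cb;	/* knob for the FC (CB) family */
static int msix_disable_ct;	/* knob for the FCoE (CT) family */

/* Same decision as the hunk above: pick the per-family knob by
 * PCI device ID before attempting MSI-X. */
static bool want_msix(unsigned short device)
{
	return (device == DEVICE_ID_CT && !msix_disable_ct) ||
	       (device != DEVICE_ID_CT && !msix_disable_cb);
}

int main(void)
{
	msix_disable_ct = 1;	/* e.g. loaded with msix_disable_ct=1 */
	printf("CT wants MSI-X: %d\n", want_msix(DEVICE_ID_CT));
	printf("CB wants MSI-X: %d\n", want_msix(0x0013)); /* non-CT id */
	return 0;
}
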
diff --git a/drivers/scsi/bfa/fabric.c b/drivers/scsi/bfa/fabric.c
index a4b5dd4..8166e97 100644
--- a/drivers/scsi/bfa/fabric.c
+++ b/drivers/scsi/bfa/fabric.c
@@ -37,7 +37,7 @@
 #define BFA_FCS_FABRIC_CLEANUP_DELAY	(10000)	/* Milliseconds */
 
 #define bfa_fcs_fabric_set_opertype(__fabric) do {             \
-	if (bfa_pport_get_topology((__fabric)->fcs->bfa)       \
+	if (bfa_fcport_get_topology((__fabric)->fcs->bfa)       \
 				== BFA_PPORT_TOPOLOGY_P2P)     \
 		(__fabric)->oper_type = BFA_PPORT_TYPE_NPORT;  \
 	else                                                   \
@@ -136,8 +136,7 @@
 	case BFA_FCS_FABRIC_SM_CREATE:
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_created);
 		bfa_fcs_fabric_init(fabric);
-		bfa_fcs_lport_init(&fabric->bport, fabric->fcs, FC_VF_ID_NULL,
-				   &fabric->bport.port_cfg, NULL);
+		bfa_fcs_lport_init(&fabric->bport, &fabric->bport.port_cfg);
 		break;
 
 	case BFA_FCS_FABRIC_SM_LINK_UP:
@@ -161,7 +160,7 @@
 
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_START:
-		if (bfa_pport_is_linkup(fabric->fcs->bfa)) {
+		if (bfa_fcport_is_linkup(fabric->fcs->bfa)) {
 			bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_flogi);
 			bfa_fcs_fabric_login(fabric);
 		} else
@@ -225,7 +224,7 @@
 	switch (event) {
 	case BFA_FCS_FABRIC_SM_CONT_OP:
 
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		fabric->fab_type = BFA_FCS_FABRIC_SWITCHED;
 
 		if (fabric->auth_reqd && fabric->is_auth) {
@@ -252,7 +251,7 @@
 
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		fabric->fab_type = BFA_FCS_FABRIC_N2N;
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		bfa_fcs_fabric_notify_online(fabric);
 		bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_nofabric);
 		break;
@@ -419,7 +418,7 @@
 
 	case BFA_FCS_FABRIC_SM_NO_FABRIC:
 		bfa_trc(fabric->fcs, fabric->bb_credit);
-		bfa_pport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
+		bfa_fcport_set_tx_bbcredit(fabric->fcs->bfa, fabric->bb_credit);
 		break;
 
 	default:
@@ -563,17 +562,15 @@
 bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric)
 {
 	struct bfa_port_cfg_s *port_cfg = &fabric->bport.port_cfg;
-	struct bfa_adapter_attr_s adapter_attr;
+	char            model[BFA_ADAPTER_MODEL_NAME_LEN] = {0};
 	struct bfa_fcs_driver_info_s *driver_info = &fabric->fcs->driver_info;
 
-	bfa_os_memset((void *)&adapter_attr, 0,
-		      sizeof(struct bfa_adapter_attr_s));
-	bfa_ioc_get_adapter_attr(&fabric->fcs->bfa->ioc, &adapter_attr);
+	bfa_ioc_get_adapter_model(&fabric->fcs->bfa->ioc, model);
 
 	/*
 	 * Model name/number
 	 */
-	strncpy((char *)&port_cfg->sym_name, adapter_attr.model,
+	strncpy((char *)&port_cfg->sym_name, model,
 		BFA_FCS_PORT_SYMBNAME_MODEL_SZ);
 	strncat((char *)&port_cfg->sym_name, BFA_FCS_PORT_SYMBNAME_SEPARATOR,
 		sizeof(BFA_FCS_PORT_SYMBNAME_SEPARATOR));
@@ -719,10 +716,10 @@
 	struct bfa_port_cfg_s *pcfg = &fabric->bport.port_cfg;
 	u8         alpa = 0;
 
-	if (bfa_pport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
-		alpa = bfa_pport_get_myalpa(bfa);
+	if (bfa_fcport_get_topology(bfa) == BFA_PPORT_TOPOLOGY_LOOP)
+		alpa = bfa_fcport_get_myalpa(bfa);
 
-	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_pport_get_maxfrsize(bfa),
+	bfa_lps_flogi(fabric->lps, fabric, alpa, bfa_fcport_get_maxfrsize(bfa),
 		      pcfg->pwwn, pcfg->nwwn, fabric->auth_reqd);
 
 	fabric->stats.flogi_sent++;
@@ -814,10 +811,10 @@
  */
 
 /**
- *   Module initialization
+ *   Attach time initialization
  */
 void
-bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs)
 {
 	struct bfa_fcs_fabric_s *fabric;
 
@@ -841,7 +838,13 @@
 	bfa_wc_up(&fabric->wc);	/* For the base port */
 
 	bfa_sm_set_state(fabric, bfa_fcs_fabric_sm_uninit);
-	bfa_sm_send_event(fabric, BFA_FCS_FABRIC_SM_CREATE);
+	bfa_fcs_lport_attach(&fabric->bport, fabric->fcs, FC_VF_ID_NULL, NULL);
+}
+
+void
+bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs)
+{
+	bfa_sm_send_event(&fcs->fabric, BFA_FCS_FABRIC_SM_CREATE);
 	bfa_trc(fcs, 0);
 }
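
bfa_fcs_fabric_modinit() is split in two: memory-only setup (initial state, wait counter, base-lport attach) moves into bfa_fcs_fabric_attach(), and modinit now just fires BFA_FCS_FABRIC_SM_CREATE. That is what lets the bfad worker defer FCS init until HAL init has actually completed. A tiny model of the attach/init split (names illustrative):

#include <stdio.h>

/* attach() wires up memory-only state; init() kicks the state
 * machine with CREATE once hardware init is known to be done. */
enum state { UNINIT, CREATED };

struct fabric { enum state st; int wired; };

static void fabric_attach(struct fabric *f)
{
	f->wired = 1;		/* safe before hardware is up */
	f->st = UNINIT;
}

static void fabric_modinit(struct fabric *f)
{
	f->st = CREATED;	/* send CREATE only after HW init */
	printf("fabric created (wired=%d)\n", f->wired);
}

int main(void)
{
	struct fabric f;

	fabric_attach(&f);	/* at driver attach */
	fabric_modinit(&f);	/* later, after HAL init completes */
	return 0;
}
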
 
@@ -890,6 +893,12 @@
 	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_loopback);
 }
 
+bfa_boolean_t
+bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric)
+{
+	return bfa_sm_cmp_state(fabric, bfa_fcs_fabric_sm_auth_failed);
+}
+
 enum bfa_pport_type
 bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric)
 {
@@ -1165,8 +1174,8 @@
 	reqlen = fc_flogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp),
 				    bfa_os_hton3b(FC_FABRIC_PORT),
 				    n2n_port->reply_oxid, pcfg->pwwn,
-				    pcfg->nwwn, bfa_pport_get_maxfrsize(bfa),
-				    bfa_pport_get_rx_bbcredit(bfa));
+				    pcfg->nwwn, bfa_fcport_get_maxfrsize(bfa),
+				    bfa_fcport_get_rx_bbcredit(bfa));
 
 	bfa_fcxp_send(fcxp, NULL, fabric->vf_id, bfa_lps_get_tag(fabric->lps),
 			BFA_FALSE, FC_CLASS_3, reqlen, &fchs,
@@ -1224,14 +1233,8 @@
 	wwn2str(pwwn_ptr, pwwn);
 	wwn2str(fwwn_ptr, fwwn);
 
-	switch (event) {
-	case BFA_PORT_AEN_FABRIC_NAME_CHANGE:
-		bfa_log(logmod, BFA_AEN_PORT_FABRIC_NAME_CHANGE, pwwn_ptr,
-			fwwn_ptr);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_PORT, event),
+		pwwn_ptr, fwwn_ptr);
 
 	aen_data.port.pwwn = pwwn;
 	aen_data.port.fwwn = fwwn;
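
The per-event switch collapses into one bfa_log() call whose message ID is built from the AEN category plus the event code via BFA_LOG_CREATE_ID(); this presumes the event values line up with the per-category log IDs. The real macro lives in the bfa log headers; one plausible encoding, purely as a sketch:

#include <stdio.h>

/* Hypothetical model of BFA_LOG_CREATE_ID(): the point is composing
 * (category, event) into one message ID instead of a per-event switch. */
#define LOG_CREATE_ID(cat, evt)	(((cat) << 16) | (evt))

enum { AEN_CAT_PORT = 1 };
enum { PORT_AEN_ONLINE = 1, PORT_AEN_OFFLINE = 2 };

static void log_msg(int msg_id)
{
	printf("log id=0x%x (cat=%d evt=%d)\n",
	       msg_id, msg_id >> 16, msg_id & 0xffff);
}

int main(void)
{
	/* One call handles every event in the category. */
	log_msg(LOG_CREATE_ID(AEN_CAT_PORT, PORT_AEN_ONLINE));
	log_msg(LOG_CREATE_ID(AEN_CAT_PORT, PORT_AEN_OFFLINE));
	return 0;
}
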
diff --git a/drivers/scsi/bfa/fcbuild.h b/drivers/scsi/bfa/fcbuild.h
index 8fa7f27..981d98d 100644
--- a/drivers/scsi/bfa/fcbuild.h
+++ b/drivers/scsi/bfa/fcbuild.h
@@ -72,6 +72,9 @@
 	case RPSC_OP_SPEED_8G:
 		return BFA_PPORT_SPEED_8GBPS;
 
+	case RPSC_OP_SPEED_10G:
+		return BFA_PPORT_SPEED_10GBPS;
+
 	default:
 		return BFA_PPORT_SPEED_UNKNOWN;
 	}
@@ -97,6 +100,9 @@
 	case BFA_PPORT_SPEED_8GBPS:
 		return RPSC_OP_SPEED_8G;
 
+	case BFA_PPORT_SPEED_10GBPS:
+		return RPSC_OP_SPEED_10G;
+
 	default:
 		return RPSC_OP_SPEED_NOT_EST;
 	}
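
The two converters above are inverses for every speed both sides know, so 10G had to be added to both directions at once to keep RPSC reporting consistent. A quick standalone check of that round-trip property (enum values illustrative):

#include <assert.h>
#include <stdio.h>

/* Illustrative stand-ins for the RPSC and BFA speed enums. */
enum rpsc { RPSC_4G, RPSC_8G, RPSC_10G, RPSC_UNK };
enum bfa  { BFA_4G, BFA_8G, BFA_10G, BFA_UNK };

static enum bfa to_bfa(enum rpsc s)
{
	switch (s) {
	case RPSC_4G:  return BFA_4G;
	case RPSC_8G:  return BFA_8G;
	case RPSC_10G: return BFA_10G;	/* the newly added case */
	default:       return BFA_UNK;
	}
}

static enum rpsc to_rpsc(enum bfa s)
{
	switch (s) {
	case BFA_4G:  return RPSC_4G;
	case BFA_8G:  return RPSC_8G;
	case BFA_10G: return RPSC_10G;	/* and its mirror */
	default:      return RPSC_UNK;
	}
}

int main(void)
{
	for (enum rpsc s = RPSC_4G; s <= RPSC_10G; s++)
		assert(to_rpsc(to_bfa(s)) == s);	/* round trip holds */
	printf("speed mapping round-trips\n");
	return 0;
}
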
diff --git a/drivers/scsi/bfa/fcpim.c b/drivers/scsi/bfa/fcpim.c
index 1f3c06e..8ae4a2cf 100644
--- a/drivers/scsi/bfa/fcpim.c
+++ b/drivers/scsi/bfa/fcpim.c
@@ -126,7 +126,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 
 }
@@ -161,7 +161,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -205,7 +205,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -240,7 +240,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -270,7 +270,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -298,7 +298,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -321,7 +321,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -354,7 +354,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(itnim->fcs, event);
 	}
 }
 
@@ -385,19 +385,8 @@
 	wwn2str(lpwwn_ptr, lpwwn);
 	wwn2str(rpwwn_ptr, rpwwn);
 
-	switch (event) {
-	case BFA_ITNIM_AEN_ONLINE:
-		bfa_log(logmod, BFA_AEN_ITNIM_ONLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
-	case BFA_ITNIM_AEN_OFFLINE:
-		bfa_log(logmod, BFA_AEN_ITNIM_OFFLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
-	case BFA_ITNIM_AEN_DISCONNECT:
-		bfa_log(logmod, BFA_AEN_ITNIM_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_ITNIM, event),
+		rpwwn_ptr, lpwwn_ptr);
 
 	aen_data.itnim.vf_id = rport->port->fabric->vf_id;
 	aen_data.itnim.ppwwn =
@@ -689,7 +678,6 @@
 	struct bfa_fcs_itnim_s *itnim = (struct bfa_fcs_itnim_s *)cb_arg;
 
 	bfa_trc(itnim->fcs, itnim->rport->pwwn);
-	bfa_fcb_itnim_tov_begin(itnim->itnim_drv);
 }
 
 /**
@@ -822,22 +810,3 @@
 bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim)
 {
 }
-
-/**
- *   Module initialization
- */
-void
-bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- *   Module cleanup
- */
-void
-bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
-
diff --git a/drivers/scsi/bfa/fcs_fabric.h b/drivers/scsi/bfa/fcs_fabric.h
index eee9608..244c3f0 100644
--- a/drivers/scsi/bfa/fcs_fabric.h
+++ b/drivers/scsi/bfa/fcs_fabric.h
@@ -29,6 +29,7 @@
 /*
  * fcs friend functions: only between fcs modules
  */
+void		bfa_fcs_fabric_attach(struct bfa_fcs_s *fcs);
 void            bfa_fcs_fabric_modinit(struct bfa_fcs_s *fcs);
 void            bfa_fcs_fabric_modexit(struct bfa_fcs_s *fcs);
 void            bfa_fcs_fabric_modsusp(struct bfa_fcs_s *fcs);
@@ -46,6 +47,7 @@
 			struct fchs_s *fchs, u16 len);
 u16        bfa_fcs_fabric_vport_count(struct bfa_fcs_fabric_s *fabric);
 bfa_boolean_t   bfa_fcs_fabric_is_loopback(struct bfa_fcs_fabric_s *fabric);
+bfa_boolean_t	bfa_fcs_fabric_is_auth_failed(struct bfa_fcs_fabric_s *fabric);
 enum bfa_pport_type bfa_fcs_fabric_port_type(struct bfa_fcs_fabric_s *fabric);
 void     	bfa_fcs_fabric_psymb_init(struct bfa_fcs_fabric_s *fabric);
 void bfa_fcs_fabric_port_delete_comp(struct bfa_fcs_fabric_s *fabric);
diff --git a/drivers/scsi/bfa/fcs_fcpim.h b/drivers/scsi/bfa/fcs_fcpim.h
index 61e9e26..11e6e7b 100644
--- a/drivers/scsi/bfa/fcs_fcpim.h
+++ b/drivers/scsi/bfa/fcs_fcpim.h
@@ -34,11 +34,6 @@
 void bfa_fcs_itnim_pause(struct bfa_fcs_itnim_s *itnim);
 void bfa_fcs_itnim_resume(struct bfa_fcs_itnim_s *itnim);
 
-/*
- * Modudle init/cleanup routines.
- */
-void bfa_fcs_fcpim_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_fcpim_modexit(struct bfa_fcs_s *fcs);
 void bfa_fcs_fcpim_uf_recv(struct bfa_fcs_itnim_s *itnim, struct fchs_s *fchs,
 			u16 len);
 #endif /* __FCS_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/fcs_lport.h b/drivers/scsi/bfa/fcs_lport.h
index ae744ba..a6508c8 100644
--- a/drivers/scsi/bfa/fcs_lport.h
+++ b/drivers/scsi/bfa/fcs_lport.h
@@ -84,9 +84,10 @@
  * Following routines will be called by Fabric to indicate port
  * online/offline to vport.
  */
-void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
-			u16 vf_id, struct bfa_port_cfg_s *port_cfg,
-			struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_attach(struct bfa_fcs_port_s *lport, struct bfa_fcs_s *fcs,
+			uint16_t vf_id, struct bfa_fcs_vport_s *vport);
+void bfa_fcs_lport_init(struct bfa_fcs_port_s *lport,
+			struct bfa_port_cfg_s *port_cfg);
 void bfa_fcs_port_online(struct bfa_fcs_port_s *port);
 void bfa_fcs_port_offline(struct bfa_fcs_port_s *port);
 void bfa_fcs_port_delete(struct bfa_fcs_port_s *port);
diff --git a/drivers/scsi/bfa/fcs_port.h b/drivers/scsi/bfa/fcs_port.h
index abb6519..408c06a 100644
--- a/drivers/scsi/bfa/fcs_port.h
+++ b/drivers/scsi/bfa/fcs_port.h
@@ -26,7 +26,6 @@
 /*
  * fcs friend functions: only between fcs modules
  */
-void bfa_fcs_pport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_pport_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_pport_attach(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/fcs_rport.h b/drivers/scsi/bfa/fcs_rport.h
index f601e9d..9c8d1d2 100644
--- a/drivers/scsi/bfa/fcs_rport.h
+++ b/drivers/scsi/bfa/fcs_rport.h
@@ -24,9 +24,6 @@
 
 #include <fcs/bfa_fcs_rport.h>
 
-void bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs);
-
 void bfa_fcs_rport_uf_recv(struct bfa_fcs_rport_s *rport, struct fchs_s *fchs,
 			u16 len);
 void bfa_fcs_rport_scn(struct bfa_fcs_rport_s *rport);
diff --git a/drivers/scsi/bfa/fcs_uf.h b/drivers/scsi/bfa/fcs_uf.h
index 96f1bdc..f591072 100644
--- a/drivers/scsi/bfa/fcs_uf.h
+++ b/drivers/scsi/bfa/fcs_uf.h
@@ -26,7 +26,6 @@
 /*
  * fcs friend functions: only between fcs modules
  */
-void bfa_fcs_uf_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_uf_modexit(struct bfa_fcs_s *fcs);
+void bfa_fcs_uf_attach(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_UF_H__ */
diff --git a/drivers/scsi/bfa/fcs_vport.h b/drivers/scsi/bfa/fcs_vport.h
index 9e80b6a..13c32ebf 100644
--- a/drivers/scsi/bfa/fcs_vport.h
+++ b/drivers/scsi/bfa/fcs_vport.h
@@ -22,18 +22,10 @@
 #include <fcs/bfa_fcs_vport.h>
 #include <defs/bfa_defs_pci.h>
 
-/*
- * Modudle init/cleanup routines.
- */
-
-void bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs);
-void bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs);
-
 void bfa_fcs_vport_cleanup(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_online(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_offline(struct bfa_fcs_vport_s *vport);
 void bfa_fcs_vport_delete_comp(struct bfa_fcs_vport_s *vport);
-u32 bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs);
 
 #endif /* __FCS_VPORT_H__ */
 
diff --git a/drivers/scsi/bfa/fdmi.c b/drivers/scsi/bfa/fdmi.c
index df2a1e5..8f17076 100644
--- a/drivers/scsi/bfa/fdmi.c
+++ b/drivers/scsi/bfa/fdmi.c
@@ -116,6 +116,9 @@
 			enum port_fdmi_event event);
 static void     bfa_fcs_port_fdmi_sm_online(struct bfa_fcs_port_fdmi_s *fdmi,
 			enum port_fdmi_event event);
+static void	bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+			enum port_fdmi_event event);
+
 /**
  * 		Start in offline state - awaiting MS to send start.
  */
@@ -155,7 +158,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -180,7 +183,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -227,7 +230,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -255,7 +258,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -283,7 +286,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -328,7 +331,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -356,7 +359,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -384,7 +387,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -428,7 +431,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -456,7 +459,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
@@ -475,10 +478,24 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(port->fcs, event);
 	}
 }
 
+/**
+ *  FDMI disabled state.
+ */
+static void
+bfa_fcs_port_fdmi_sm_disabled(struct bfa_fcs_port_fdmi_s *fdmi,
+				enum port_fdmi_event event)
+{
+	struct bfa_fcs_port_s *port = fdmi->ms->port;
+
+	bfa_trc(port->fcs, port->port_cfg.pwwn);
+	bfa_trc(port->fcs, event);
+
+	/* No-op state: FDMI can only be enabled at driver init. */
+}
 
 /**
 *   RHBA : Register HBA Attributes.
@@ -1097,36 +1114,23 @@
 {
 	struct bfa_fcs_port_s *port = fdmi->ms->port;
 	struct bfa_fcs_driver_info_s *driver_info = &port->fcs->driver_info;
-	struct bfa_adapter_attr_s adapter_attr;
 
 	bfa_os_memset(hba_attr, 0, sizeof(struct bfa_fcs_fdmi_hba_attr_s));
-	bfa_os_memset(&adapter_attr, 0, sizeof(struct bfa_adapter_attr_s));
 
-	bfa_ioc_get_adapter_attr(&port->fcs->bfa->ioc, &adapter_attr);
-
-	strncpy(hba_attr->manufacturer, adapter_attr.manufacturer,
-		sizeof(adapter_attr.manufacturer));
-
-	strncpy(hba_attr->serial_num, adapter_attr.serial_num,
-		sizeof(adapter_attr.serial_num));
-
-	strncpy(hba_attr->model, adapter_attr.model, sizeof(hba_attr->model));
-
-	strncpy(hba_attr->model_desc, adapter_attr.model_descr,
-		sizeof(hba_attr->model_desc));
-
-	strncpy(hba_attr->hw_version, adapter_attr.hw_ver,
-		sizeof(hba_attr->hw_version));
+	bfa_ioc_get_adapter_manufacturer(&port->fcs->bfa->ioc,
+		hba_attr->manufacturer);
+	bfa_ioc_get_adapter_serial_num(&port->fcs->bfa->ioc,
+						hba_attr->serial_num);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model);
+	bfa_ioc_get_adapter_model(&port->fcs->bfa->ioc, hba_attr->model_desc);
+	bfa_ioc_get_pci_chip_rev(&port->fcs->bfa->ioc, hba_attr->hw_version);
+	bfa_ioc_get_adapter_optrom_ver(&port->fcs->bfa->ioc,
+		hba_attr->option_rom_ver);
+	bfa_ioc_get_adapter_fw_ver(&port->fcs->bfa->ioc, hba_attr->fw_version);
 
 	strncpy(hba_attr->driver_version, (char *)driver_info->version,
 		sizeof(hba_attr->driver_version));
 
-	strncpy(hba_attr->option_rom_ver, adapter_attr.optrom_ver,
-		sizeof(hba_attr->option_rom_ver));
-
-	strncpy(hba_attr->fw_version, adapter_attr.fw_ver,
-		sizeof(hba_attr->fw_version));
-
 	strncpy(hba_attr->os_name, driver_info->host_os_name,
 		sizeof(hba_attr->os_name));
 
@@ -1158,7 +1162,7 @@
 	/*
 	 * get pport attributes from hal
 	 */
-	bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
 
 	/*
 	 * get FC4 type Bitmask
@@ -1201,7 +1205,10 @@
 	struct bfa_fcs_port_fdmi_s *fdmi = &ms->fdmi;
 
 	fdmi->ms = ms;
-	bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+	if (ms->port->fcs->fdmi_enabled)
+		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_offline);
+	else
+		bfa_sm_set_state(fdmi, bfa_fcs_port_fdmi_sm_disabled);
 }
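
With fdmi_enabled clear, the FDMI state machine is parked in the new disabled state at attach time, so every subsequent event lands in one harmless handler; there is deliberately no transition out, matching the comment that FDMI can only be enabled at driver init. A minimal model of parking a feature's state machine (names illustrative):

#include <stdio.h>

enum fdmi_state { FDMI_OFFLINE, FDMI_DISABLED };

struct fdmi { enum fdmi_state st; };

/* Initial state chosen once, at attach time, from the enable flag. */
static void fdmi_attach(struct fdmi *f, int enabled)
{
	f->st = enabled ? FDMI_OFFLINE : FDMI_DISABLED;
}

static void fdmi_event(struct fdmi *f)
{
	if (f->st == FDMI_DISABLED)
		return;		/* no-op: only enabled at init */
	printf("fdmi: handling event\n");
}

int main(void)
{
	struct fdmi f;

	fdmi_attach(&f, 0);	/* fdmi_enable=0 at module load */
	fdmi_event(&f);		/* silently ignored */
	return 0;
}
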
 
 void
diff --git a/drivers/scsi/bfa/include/aen/bfa_aen.h b/drivers/scsi/bfa/include/aen/bfa_aen.h
index d9cbc2a..6abbab0 100644
--- a/drivers/scsi/bfa/include/aen/bfa_aen.h
+++ b/drivers/scsi/bfa/include/aen/bfa_aen.h
@@ -18,21 +18,24 @@
 #define __BFA_AEN_H__
 
 #include "defs/bfa_defs_aen.h"
+#include "defs/bfa_defs_status.h"
+#include "cs/bfa_debug.h"
 
-#define BFA_AEN_MAX_ENTRY   512
+#define BFA_AEN_MAX_ENTRY	512
 
-extern s32 bfa_aen_max_cfg_entry;
+extern int bfa_aen_max_cfg_entry;
 struct bfa_aen_s {
 	void		*bfad;
-	s32		max_entry;
-	s32		write_index;
-	s32		read_index;
-	u32	bfad_num;
-	u32	seq_num;
+	int		max_entry;
+	int		write_index;
+	int		read_index;
+	int		bfad_num;
+	int		seq_num;
 	void		(*aen_cb_notify)(void *bfad);
 	void		(*gettimeofday)(struct bfa_timeval_s *tv);
-	struct bfa_trc_mod_s 	*trcmod;
-	struct bfa_aen_entry_s	list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
+	struct bfa_trc_mod_s *trcmod;
+	int		app_ri[BFA_AEN_MAX_APP]; /* For multiclient support */
+	struct bfa_aen_entry_s list[BFA_AEN_MAX_ENTRY]; /* Must be the last */
 };
 
 
@@ -45,48 +48,49 @@
 	bfa_aen_max_cfg_entry = max_entry;
 }
 
-static inline s32
+static inline int
 bfa_aen_get_max_cfg_entry(void)
 {
 	return bfa_aen_max_cfg_entry;
 }
 
-static inline s32
+static inline int
 bfa_aen_get_meminfo(void)
 {
 	return sizeof(struct bfa_aen_entry_s) * bfa_aen_get_max_cfg_entry();
 }
 
-static inline s32
+static inline int
 bfa_aen_get_wi(struct bfa_aen_s *aen)
 {
 	return aen->write_index;
 }
 
-static inline s32
+static inline int
 bfa_aen_get_ri(struct bfa_aen_s *aen)
 {
 	return aen->read_index;
 }
 
-static inline s32
-bfa_aen_fetch_count(struct bfa_aen_s *aen, s32 read_index)
+static inline int
+bfa_aen_fetch_count(struct bfa_aen_s *aen, enum bfa_aen_app  app_id)
 {
-	return ((aen->write_index + aen->max_entry) - read_index)
+	bfa_assert((app_id < BFA_AEN_MAX_APP) && (app_id >= bfa_aen_app_bcu));
+	return ((aen->write_index + aen->max_entry) - aen->app_ri[app_id])
 		% aen->max_entry;
 }
 
-s32 bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
-		void *bfad, u32 inst_id, void (*aen_cb_notify)(void *),
+int bfa_aen_init(struct bfa_aen_s *aen, struct bfa_trc_mod_s *trcmod,
+		void *bfad, int bfad_num, void (*aen_cb_notify)(void *),
 		void (*gettimeofday)(struct bfa_timeval_s *));
 
-s32 bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
+void bfa_aen_post(struct bfa_aen_s *aen, enum bfa_aen_category aen_category,
 		     int aen_type, union bfa_aen_data_u *aen_data);
 
-s32 bfa_aen_fetch(struct bfa_aen_s *aen, struct bfa_aen_entry_s *aen_entry,
-		      s32 entry_space, s32 rii, s32 *ri_arr,
-		      s32 ri_arr_cnt);
+bfa_status_t bfa_aen_fetch(struct bfa_aen_s *aen,
+			struct bfa_aen_entry_s *aen_entry,
+			int entry_req, enum bfa_aen_app app_id, int *entry_ret);
 
-s32 bfa_aen_get_inst(struct bfa_aen_s *aen);
+int bfa_aen_get_inst(struct bfa_aen_s *aen);
 
 #endif /* __BFA_AEN_H__ */
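
bfa_aen_fetch_count() now tracks a read index per application: the entries pending for app_id are ((write_index + max_entry) - app_ri[app_id]) % max_entry, which stays correct across wraparound because both indexes live in [0, max_entry). For max_entry 512, write_index 10, and a reader at 505, that is (10 + 512 - 505) % 512 = 17 pending entries. The same arithmetic standalone:

#include <stdio.h>

#define MAX_ENTRY 512

/* Same arithmetic as bfa_aen_fetch_count(): pending entries for one
 * reader of a shared ring, robust across write-index wraparound. */
static int fetch_count(int write_index, int read_index)
{
	return ((write_index + MAX_ENTRY) - read_index) % MAX_ENTRY;
}

int main(void)
{
	printf("%d\n", fetch_count(10, 505));	/* wrapped: 17 pending */
	printf("%d\n", fetch_count(10, 10));	/* caught up: 0 */
	return 0;
}
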
diff --git a/drivers/scsi/bfa/include/bfa.h b/drivers/scsi/bfa/include/bfa.h
index d4bc0d9..1f5966c 100644
--- a/drivers/scsi/bfa/include/bfa.h
+++ b/drivers/scsi/bfa/include/bfa.h
@@ -106,6 +106,26 @@
 	bfa_ioc_fetch_stats(&(__bfa)->ioc, __ioc_stats)
 #define bfa_ioc_clear_stats(__bfa)	\
 	bfa_ioc_clr_stats(&(__bfa)->ioc)
+#define bfa_get_nports(__bfa)   \
+	bfa_ioc_get_nports(&(__bfa)->ioc)
+#define bfa_get_adapter_manufacturer(__bfa, __manufacturer) \
+	bfa_ioc_get_adapter_manufacturer(&(__bfa)->ioc, __manufacturer)
+#define bfa_get_adapter_model(__bfa, __model)   \
+	bfa_ioc_get_adapter_model(&(__bfa)->ioc, __model)
+#define bfa_get_adapter_serial_num(__bfa, __serial_num) \
+	bfa_ioc_get_adapter_serial_num(&(__bfa)->ioc, __serial_num)
+#define bfa_get_adapter_fw_ver(__bfa, __fw_ver) \
+	bfa_ioc_get_adapter_fw_ver(&(__bfa)->ioc, __fw_ver)
+#define bfa_get_adapter_optrom_ver(__bfa, __optrom_ver) \
+	bfa_ioc_get_adapter_optrom_ver(&(__bfa)->ioc, __optrom_ver)
+#define bfa_get_pci_chip_rev(__bfa, __chip_rev) \
+	bfa_ioc_get_pci_chip_rev(&(__bfa)->ioc, __chip_rev)
+#define bfa_get_ioc_state(__bfa)    \
+	bfa_ioc_get_state(&(__bfa)->ioc)
+#define bfa_get_type(__bfa) \
+	bfa_ioc_get_type(&(__bfa)->ioc)
+#define bfa_get_mac(__bfa)  \
+	bfa_ioc_get_mac(&(__bfa)->ioc)
 
 /*
  * bfa API functions
@@ -161,6 +181,7 @@
 void bfa_iocfc_enable(struct bfa_s *bfa);
 void bfa_iocfc_disable(struct bfa_s *bfa);
 void bfa_ioc_auto_recover(bfa_boolean_t auto_recover);
+void bfa_chip_reset(struct bfa_s *bfa);
 void bfa_cb_ioc_disable(void *bfad);
 void bfa_timer_tick(struct bfa_s *bfa);
 #define bfa_timer_start(_bfa, _timer, _timercb, _arg, _timeout)	\
@@ -171,6 +192,7 @@
  */
 bfa_status_t bfa_debug_fwtrc(struct bfa_s *bfa, void *trcdata, int *trclen);
 bfa_status_t bfa_debug_fwsave(struct bfa_s *bfa, void *trcdata, int *trclen);
+void bfa_debug_fwsave_clear(struct bfa_s *bfa);
 
 #include "bfa_priv.h"
 
diff --git a/drivers/scsi/bfa/include/bfa_svc.h b/drivers/scsi/bfa/include/bfa_svc.h
index 268d956..1349b99 100644
--- a/drivers/scsi/bfa/include/bfa_svc.h
+++ b/drivers/scsi/bfa/include/bfa_svc.h
@@ -26,6 +26,7 @@
 #include <defs/bfa_defs_pport.h>
 #include <defs/bfa_defs_rport.h>
 #include <defs/bfa_defs_qos.h>
+#include <defs/bfa_defs_fcport.h>
 #include <cs/bfa_sm.h>
 #include <bfa.h>
 
@@ -35,7 +36,7 @@
 struct bfa_rport_info_s {
 	u16        max_frmsz;	/*  max rcv pdu size               */
 	u32        pid:24,		/*  remote port ID                 */
-			lp_tag:8;
+			lp_tag:8;	/*  lport tag               */
 	u32        local_pid:24,	/*  local port ID		    */
 			cisc:8;		/*  CIRO supported		    */
 	u8         fc_class;	/*  supported FC classes. enum fc_cos */
@@ -54,7 +55,7 @@
 	void                  *rport_drv; /*  fcs/driver rport object */
 	u16              fw_handle; /*  firmware rport handle */
 	u16              rport_tag; /*  BFA rport tag */
-	struct bfa_rport_info_s rport_info; /*  rport info from *fcs/driver */
+	struct bfa_rport_info_s rport_info; /*  rport info from fcs/driver */
 	struct bfa_reqq_wait_s reqq_wait; /*  to wait for room in reqq */
 	struct bfa_cb_qe_s    hcb_qe;	 /*  BFA callback qelem */
 	struct bfa_rport_hal_stats_s stats; /*  BFA rport statistics  */
@@ -101,7 +102,7 @@
 struct bfa_uf_s {
 	struct list_head	qe;		/*  queue element	  */
 	struct bfa_s	*bfa;		/*  bfa instance	  */
-	u16        uf_tag;		/*  identifying tag f/w messages */
+	u16        uf_tag;		/*  identifying tag for fw msgs */
 	u16        vf_id;
 	u16        src_rport_handle;
 	u16        rsvd;
@@ -127,7 +128,7 @@
 	u8		reqq;		/*  lport request queue	*/
 	u8		alpa;		/*  ALPA for loop topologies	*/
 	u32	lp_pid;		/*  lport port ID		*/
-	bfa_boolean_t	fdisc;		/*  send FDISC instead of FLOGI*/
+	bfa_boolean_t	fdisc;		/*  send FDISC instead of FLOGI  */
 	bfa_boolean_t	auth_en;	/*  enable authentication	*/
 	bfa_boolean_t	auth_req;	/*  authentication required	*/
 	bfa_boolean_t	npiv_en;	/*  NPIV is allowed by peer	*/
@@ -151,60 +152,69 @@
 	bfa_eproto_status_t	ext_status;
 };
 
+#define BFA_FCPORT(_bfa)        (&((_bfa)->modules.port))
+
 /*
  * bfa pport API functions
  */
-bfa_status_t bfa_pport_enable(struct bfa_s *bfa);
-bfa_status_t bfa_pport_disable(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_speed(struct bfa_s *bfa,
+bfa_status_t bfa_fcport_enable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_disable(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_speed(struct bfa_s *bfa,
 			enum bfa_pport_speed speed);
-enum bfa_pport_speed bfa_pport_get_speed(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_topology(struct bfa_s *bfa,
+enum bfa_pport_speed bfa_fcport_get_speed(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_topology(struct bfa_s *bfa,
 			enum bfa_pport_topology topo);
-enum bfa_pport_topology bfa_pport_get_topology(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
-bfa_boolean_t bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
-u8 bfa_pport_get_myalpa(struct bfa_s *bfa);
-bfa_status_t bfa_pport_clr_hardalpa(struct bfa_s *bfa);
-bfa_status_t bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
-u16 bfa_pport_get_maxfrsize(struct bfa_s *bfa);
-u32 bfa_pport_mypid(struct bfa_s *bfa);
-u8 bfa_pport_get_rx_bbcredit(struct bfa_s *bfa);
-bfa_status_t bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
-bfa_status_t bfa_pport_trunk_disable(struct bfa_s *bfa);
-bfa_boolean_t bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
-void bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
-wwn_t bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
-bfa_status_t bfa_pport_get_stats(struct bfa_s *bfa,
-			union bfa_pport_stats_u *stats,
-			bfa_cb_pport_t cbfn, void *cbarg);
-bfa_status_t bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
-			void *cbarg);
-void bfa_pport_event_register(struct bfa_s *bfa,
+enum bfa_pport_topology bfa_fcport_get_topology(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa);
+bfa_boolean_t bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa);
+u8 bfa_fcport_get_myalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_clr_hardalpa(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxsize);
+u16 bfa_fcport_get_maxfrsize(struct bfa_s *bfa);
+u32 bfa_fcport_mypid(struct bfa_s *bfa);
+u8 bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_trunk_enable(struct bfa_s *bfa, u8 bitmap);
+bfa_status_t bfa_fcport_trunk_disable(struct bfa_s *bfa);
+bfa_boolean_t bfa_fcport_trunk_query(struct bfa_s *bfa, u32 *bitmap);
+void bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr);
+wwn_t bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node);
+void bfa_fcport_event_register(struct bfa_s *bfa,
 			void (*event_cbfn) (void *cbarg,
 			bfa_pport_event_t event), void *event_cbarg);
-bfa_boolean_t bfa_pport_is_disabled(struct bfa_s *bfa);
-void bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
-void bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
-bfa_status_t bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa,
+bfa_boolean_t bfa_fcport_is_disabled(struct bfa_s *bfa);
+void bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off);
+void bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off);
+bfa_status_t bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa,
 			enum bfa_pport_speed speed);
-enum bfa_pport_speed bfa_pport_get_ratelim_speed(struct bfa_s *bfa);
+enum bfa_pport_speed bfa_fcport_get_ratelim_speed(struct bfa_s *bfa);
 
-void bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
-void bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status);
-void bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
+void bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit);
+void bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status);
+void bfa_fcport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
 			bfa_boolean_t link_e2e_beacon);
 void bfa_cb_pport_event(void *cbarg, bfa_pport_event_t event);
-void bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr);
-void bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
+void bfa_fcport_qos_get_attr(struct bfa_s *bfa,
+			struct bfa_qos_attr_s *qos_attr);
+void bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
 			struct bfa_qos_vc_attr_s *qos_vc_attr);
-bfa_status_t bfa_pport_get_qos_stats(struct bfa_s *bfa,
-			union bfa_pport_stats_u *stats,
+bfa_status_t bfa_fcport_get_qos_stats(struct bfa_s *bfa,
+			union bfa_fcport_stats_u *stats,
 			bfa_cb_pport_t cbfn, void *cbarg);
-bfa_status_t bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+bfa_status_t bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
 			void *cbarg);
-bfa_boolean_t     bfa_pport_is_ratelim(struct bfa_s *bfa);
-bfa_boolean_t	bfa_pport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_fcoe_stats(struct bfa_s *bfa,
+			union bfa_fcport_stats_u *stats,
+			bfa_cb_pport_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+			void *cbarg);
+
+bfa_boolean_t     bfa_fcport_is_ratelim(struct bfa_s *bfa);
+bfa_boolean_t	bfa_fcport_is_linkup(struct bfa_s *bfa);
+bfa_status_t bfa_fcport_get_stats(struct bfa_s *bfa,
+		union bfa_fcport_stats_u *stats,
+		bfa_cb_pport_t cbfn, void *cbarg);
+bfa_status_t bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn,
+		void *cbarg);
 
 /*
  * bfa rport API functions
@@ -293,6 +303,7 @@
  * bfa lport service api
  */
 
+u32	bfa_lps_get_max_vport(struct bfa_s *bfa);
 struct bfa_lps_s *bfa_lps_alloc(struct bfa_s *bfa);
 void bfa_lps_delete(struct bfa_lps_s *lps);
 void bfa_lps_discard(struct bfa_lps_s *lps);
@@ -315,10 +326,12 @@
 wwn_t bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps);
 u8 bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps);
 u8 bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps);
+mac_t bfa_lps_get_lp_mac(struct bfa_lps_s *lps);
 void bfa_cb_lps_flogi_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_flogo_comp(void *bfad, void *uarg);
 void bfa_cb_lps_fdisc_comp(void *bfad, void *uarg, bfa_status_t status);
 void bfa_cb_lps_fdisclogo_comp(void *bfad, void *uarg);
+void bfa_cb_lps_cvl_event(void *bfad, void *uarg);
 
 #endif /* __BFA_SVC_H__ */
 
diff --git a/drivers/scsi/bfa/include/bfa_timer.h b/drivers/scsi/bfa/include/bfa_timer.h
index e407103..f710874 100644
--- a/drivers/scsi/bfa/include/bfa_timer.h
+++ b/drivers/scsi/bfa/include/bfa_timer.h
@@ -41,7 +41,7 @@
 	struct list_head timer_q;
 };
 
-#define BFA_TIMER_FREQ 500 /**< specified in millisecs */
+#define BFA_TIMER_FREQ 200 /**< specified in millisecs */
 
 void bfa_timer_beat(struct bfa_timer_mod_s *mod);
 void bfa_timer_init(struct bfa_timer_mod_s *mod);
diff --git a/drivers/scsi/bfa/include/bfi/bfi.h b/drivers/scsi/bfa/include/bfi/bfi.h
index 7042c18..a550e80 100644
--- a/drivers/scsi/bfa/include/bfi/bfi.h
+++ b/drivers/scsi/bfa/include/bfi/bfi.h
@@ -143,8 +143,8 @@
 	BFI_MC_IOC		= 1,	/*  IO Controller (IOC)	    */
 	BFI_MC_DIAG		= 2,	/*  Diagnostic Msgs		    */
 	BFI_MC_FLASH		= 3,	/*  Flash message class	    */
-	BFI_MC_CEE		= 4,
-	BFI_MC_FC_PORT		= 5,	/*  FC port		   	    */
+	BFI_MC_CEE		= 4,	/*  CEE                     */
+	BFI_MC_FCPORT		= 5,	/*  FC port		   	    */
 	BFI_MC_IOCFC		= 6,	/*  FC - IO Controller (IOC)	    */
 	BFI_MC_LL		= 7,	/*  Link Layer		 	    */
 	BFI_MC_UF		= 8,	/*  Unsolicited frame receive	    */
diff --git a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
index b3bb52b..a51ee61 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_cbreg.h
@@ -177,7 +177,21 @@
 #define __PSS_LMEM_INIT_EN               0x00000100
 #define __PSS_LPU1_RESET                 0x00000002
 #define __PSS_LPU0_RESET                 0x00000001
-
+#define PSS_ERR_STATUS_REG		 0x00018810
+#define __PSS_LMEM1_CORR_ERR		 0x00000800
+#define __PSS_LMEM0_CORR_ERR             0x00000400
+#define __PSS_LMEM1_UNCORR_ERR           0x00000200
+#define __PSS_LMEM0_UNCORR_ERR           0x00000100
+#define __PSS_BAL_PERR                   0x00000080
+#define __PSS_DIP_IF_ERR                 0x00000040
+#define __PSS_IOH_IF_ERR                 0x00000020
+#define __PSS_TDS_IF_ERR                 0x00000010
+#define __PSS_RDS_IF_ERR                 0x00000008
+#define __PSS_SGM_IF_ERR                 0x00000004
+#define __PSS_LPU1_RAM_ERR               0x00000002
+#define __PSS_LPU0_RAM_ERR               0x00000001
+#define ERR_SET_REG			 0x00018818
+#define __PSS_ERR_STATUS_SET		 0x00000fff
 
 /*
  * These definitions are either in error/missing in spec. Its auto-generated
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
index d3caa58..57a8497 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ctreg.h
@@ -430,6 +430,31 @@
 #define __PSS_LMEM_INIT_EN               0x00000100
 #define __PSS_LPU1_RESET                 0x00000002
 #define __PSS_LPU0_RESET                 0x00000001
+#define PSS_ERR_STATUS_REG               0x00018810
+#define __PSS_LPU1_TCM_READ_ERR          0x00200000
+#define __PSS_LPU0_TCM_READ_ERR          0x00100000
+#define __PSS_LMEM5_CORR_ERR             0x00080000
+#define __PSS_LMEM4_CORR_ERR             0x00040000
+#define __PSS_LMEM3_CORR_ERR             0x00020000
+#define __PSS_LMEM2_CORR_ERR             0x00010000
+#define __PSS_LMEM1_CORR_ERR             0x00008000
+#define __PSS_LMEM0_CORR_ERR             0x00004000
+#define __PSS_LMEM5_UNCORR_ERR           0x00002000
+#define __PSS_LMEM4_UNCORR_ERR           0x00001000
+#define __PSS_LMEM3_UNCORR_ERR           0x00000800
+#define __PSS_LMEM2_UNCORR_ERR           0x00000400
+#define __PSS_LMEM1_UNCORR_ERR           0x00000200
+#define __PSS_LMEM0_UNCORR_ERR           0x00000100
+#define __PSS_BAL_PERR                   0x00000080
+#define __PSS_DIP_IF_ERR                 0x00000040
+#define __PSS_IOH_IF_ERR                 0x00000020
+#define __PSS_TDS_IF_ERR                 0x00000010
+#define __PSS_RDS_IF_ERR                 0x00000008
+#define __PSS_SGM_IF_ERR                 0x00000004
+#define __PSS_LPU1_RAM_ERR               0x00000002
+#define __PSS_LPU0_RAM_ERR               0x00000001
+#define ERR_SET_REG			 0x00018818
+#define __PSS_ERR_STATUS_SET		 0x003fffff
 #define HQM_QSET0_RXQ_DRBL_P0            0x00038000
 #define __RXQ0_ADD_VECTORS_P             0x80000000
 #define __RXQ0_STOP_P                    0x40000000
@@ -589,6 +614,7 @@
 #define __HFN_INT_MBOX_LPU1                0x00200000U
 #define __HFN_INT_MBOX1_LPU0               0x00400000U
 #define __HFN_INT_MBOX1_LPU1               0x00800000U
+#define __HFN_INT_LL_HALT		   0x01000000U
 #define __HFN_INT_CPE_MASK                 0x000000ffU
 #define __HFN_INT_RME_MASK                 0x0000ff00U
 
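The PSS_ERR_STATUS_REG bits added above for the Catapult register file decode as plain bitmasks against one 32-bit status register. A minimal, self-contained sketch of latching and reporting two of the bits; read_reg() is a hypothetical stand-in for the driver's readl() on the register mapping:

#include <stdint.h>
#include <stdio.h>

#define PSS_ERR_STATUS_REG	0x00018810
#define __PSS_LMEM0_CORR_ERR	0x00004000
#define __PSS_LPU0_RAM_ERR	0x00000001

/* Hypothetical accessor standing in for readl(rb + PSS_ERR_STATUS_REG). */
static uint32_t read_reg(uint32_t off)
{
	(void)off;
	return __PSS_LMEM0_CORR_ERR;	/* canned value for the sketch */
}

int main(void)
{
	uint32_t status = read_reg(PSS_ERR_STATUS_REG);

	if (status & __PSS_LMEM0_CORR_ERR)
		printf("LMEM0 correctable error latched\n");
	if (status & __PSS_LPU0_RAM_ERR)
		printf("LPU0 RAM error latched\n");
	return 0;
}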
diff --git a/drivers/scsi/bfa/include/bfi/bfi_ioc.h b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
index 96ef056..a0158aa 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_ioc.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_ioc.h
@@ -123,7 +123,7 @@
 	BFI_IOC_DISABLING 	 = 5,	/*  IOC is being disabled           */
 	BFI_IOC_DISABLED 	 = 6,	/*  IOC is disabled                 */
 	BFI_IOC_CFG_DISABLED = 7,	/*  IOC is being disabled;transient */
-	BFI_IOC_HBFAIL       = 8,	/*  IOC heart-beat failure          */
+	BFI_IOC_FAIL       = 8,		/*  IOC heart-beat failure          */
 	BFI_IOC_MEMTEST      = 9,	/*  IOC is doing memtest            */
 };
 
diff --git a/drivers/scsi/bfa/include/bfi/bfi_lps.h b/drivers/scsi/bfa/include/bfi/bfi_lps.h
index c59d47b..7ed31bb 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_lps.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_lps.h
@@ -30,6 +30,7 @@
 enum bfi_lps_i2h_msgs {
 	BFI_LPS_H2I_LOGIN_RSP	= BFA_I2HM(1),
 	BFI_LPS_H2I_LOGOUT_RSP	= BFA_I2HM(2),
+	BFI_LPS_H2I_CVL_EVENT   = BFA_I2HM(3),
 };
 
 struct bfi_lps_login_req_s {
@@ -77,6 +78,12 @@
 	u8		rsvd[2];
 };
 
+struct bfi_lps_cvl_event_s {
+	struct bfi_mhdr_s  mh;      /* common msg header      */
+	u8		lp_tag;
+	u8		rsvd[3];
+};
+
 union bfi_lps_h2i_msg_u {
 	struct bfi_mhdr_s		*msg;
 	struct bfi_lps_login_req_s	*login_req;
@@ -87,6 +94,7 @@
 	struct bfi_msg_s		*msg;
 	struct bfi_lps_login_rsp_s	*login_rsp;
 	struct bfi_lps_logout_rsp_s	*logout_rsp;
+	struct bfi_lps_cvl_event_s	*cvl_event;
 };
 
 #pragma pack()
diff --git a/drivers/scsi/bfa/include/bfi/bfi_pport.h b/drivers/scsi/bfa/include/bfi/bfi_pport.h
index c96d246..50dcf45 100644
--- a/drivers/scsi/bfa/include/bfi/bfi_pport.h
+++ b/drivers/scsi/bfa/include/bfi/bfi_pport.h
@@ -22,163 +22,97 @@
 
 #pragma pack(1)
 
-enum bfi_pport_h2i {
-	BFI_PPORT_H2I_ENABLE_REQ		= (1),
-	BFI_PPORT_H2I_DISABLE_REQ		= (2),
-	BFI_PPORT_H2I_GET_STATS_REQ		= (3),
-	BFI_PPORT_H2I_CLEAR_STATS_REQ	= (4),
-	BFI_PPORT_H2I_SET_SVC_PARAMS_REQ	= (5),
-	BFI_PPORT_H2I_ENABLE_RX_VF_TAG_REQ	= (6),
-	BFI_PPORT_H2I_ENABLE_TX_VF_TAG_REQ	= (7),
-	BFI_PPORT_H2I_GET_QOS_STATS_REQ		= (8),
-	BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ	= (9),
+enum bfi_fcport_h2i {
+	BFI_FCPORT_H2I_ENABLE_REQ		= (1),
+	BFI_FCPORT_H2I_DISABLE_REQ		= (2),
+	BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ	= (3),
+	BFI_FCPORT_H2I_STATS_GET_REQ		= (4),
+	BFI_FCPORT_H2I_STATS_CLEAR_REQ		= (5),
 };
 
-enum bfi_pport_i2h {
-	BFI_PPORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
-	BFI_PPORT_I2H_DISABLE_RSP		= BFA_I2HM(2),
-	BFI_PPORT_I2H_GET_STATS_RSP		= BFA_I2HM(3),
-	BFI_PPORT_I2H_CLEAR_STATS_RSP	= BFA_I2HM(4),
-	BFI_PPORT_I2H_SET_SVC_PARAMS_RSP	= BFA_I2HM(5),
-	BFI_PPORT_I2H_ENABLE_RX_VF_TAG_RSP	= BFA_I2HM(6),
-	BFI_PPORT_I2H_ENABLE_TX_VF_TAG_RSP	= BFA_I2HM(7),
-	BFI_PPORT_I2H_EVENT			= BFA_I2HM(8),
-	BFI_PPORT_I2H_GET_QOS_STATS_RSP		= BFA_I2HM(9),
-	BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP	= BFA_I2HM(10),
+enum bfi_fcport_i2h {
+	BFI_FCPORT_I2H_ENABLE_RSP		= BFA_I2HM(1),
+	BFI_FCPORT_I2H_DISABLE_RSP		= BFA_I2HM(2),
+	BFI_FCPORT_I2H_SET_SVC_PARAMS_RSP	= BFA_I2HM(3),
+	BFI_FCPORT_I2H_STATS_GET_RSP		= BFA_I2HM(4),
+	BFI_FCPORT_I2H_STATS_CLEAR_RSP		= BFA_I2HM(5),
+	BFI_FCPORT_I2H_EVENT			= BFA_I2HM(6),
 };
 
 /**
  * Generic REQ type
  */
-struct bfi_pport_generic_req_s {
+struct bfi_fcport_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header			    */
-	u32        msgtag;		/*  msgtag for reply		    */
+	u32	   msgtag;	/*  msgtag for reply		    */
 };
 
 /**
  * Generic RSP type
  */
-struct bfi_pport_generic_rsp_s {
+struct bfi_fcport_rsp_s {
 	struct bfi_mhdr_s  mh;		/*  common msg header		    */
-	u8         status;		/*  port enable status		    */
-	u8         rsvd[3];
-	u32        msgtag;		/*  msgtag for reply		    */
+	u8		   status;	/*  port enable status		    */
+	u8		   rsvd[3];
+	u32	   msgtag;	/*  msgtag for reply		    */
 };
 
 /**
- * BFI_PPORT_H2I_ENABLE_REQ
+ * BFI_FCPORT_H2I_ENABLE_REQ
  */
-struct bfi_pport_enable_req_s {
+struct bfi_fcport_enable_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header			    */
-	u32        rsvd1;
-	wwn_t           nwwn;		/*  node wwn of physical port	    */
-	wwn_t           pwwn;		/*  port wwn of physical port	    */
-	struct bfa_pport_cfg_s port_cfg;	/*  port configuration	    */
-	union bfi_addr_u  stats_dma_addr;	/*  DMA address for stats  */
-	u32        msgtag;		/*  msgtag for reply		    */
-	u32        rsvd2;
+	u32	   rsvd1;
+	wwn_t		   nwwn;	/*  node wwn of physical port	    */
+	wwn_t		   pwwn;	/*  port wwn of physical port	    */
+	struct bfa_pport_cfg_s port_cfg; /*  port configuration	    */
+	union bfi_addr_u   stats_dma_addr; /*  DMA address for stats	    */
+	u32	   msgtag;	/*  msgtag for reply		    */
+	u32	   rsvd2;
 };
 
 /**
- * BFI_PPORT_I2H_ENABLE_RSP
+ * BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ
  */
-#define bfi_pport_enable_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_DISABLE_REQ
- */
-#define bfi_pport_disable_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_DISABLE_RSP
- */
-#define bfi_pport_disable_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_GET_STATS_REQ
- */
-#define bfi_pport_get_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_GET_STATS_RSP
- */
-#define bfi_pport_get_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_STATS_REQ
- */
-#define bfi_pport_clear_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_I2H_CLEAR_STATS_RSP
- */
-#define bfi_pport_clear_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_GET_QOS_STATS_REQ
- */
-#define bfi_pport_get_qos_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_H2I_GET_QOS_STATS_RSP
- */
-#define bfi_pport_get_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ
- */
-#define bfi_pport_clear_qos_stats_req_t struct bfi_pport_generic_req_s
-
-/**
- * BFI_PPORT_H2I_CLEAR_QOS_STATS_RSP
- */
-#define bfi_pport_clear_qos_stats_rsp_t struct bfi_pport_generic_rsp_s
-
-/**
- * BFI_PPORT_H2I_SET_SVC_PARAMS_REQ
- */
-struct bfi_pport_set_svc_params_req_s {
+struct bfi_fcport_set_svc_params_req_s {
 	struct bfi_mhdr_s  mh;		/*  msg header */
-	u16        tx_bbcredit;	/*  Tx credits */
-	u16        rsvd;
+	u16	   tx_bbcredit;	/*  Tx credits */
+	u16	   rsvd;
 };
 
 /**
- * BFI_PPORT_I2H_SET_SVC_PARAMS_RSP
+ * BFI_FCPORT_I2H_EVENT
  */
-
-/**
- * BFI_PPORT_I2H_EVENT
- */
-struct bfi_pport_event_s {
+struct bfi_fcport_event_s {
 	struct bfi_mhdr_s 	mh;	/*  common msg header */
 	struct bfa_pport_link_s	link_state;
 };
 
-union bfi_pport_h2i_msg_u {
+/**
+ * fcport H2I message
+ */
+union bfi_fcport_h2i_msg_u {
 	struct bfi_mhdr_s			*mhdr;
-	struct bfi_pport_enable_req_s		*penable;
-	struct bfi_pport_generic_req_s		*pdisable;
-	struct bfi_pport_generic_req_s		*pgetstats;
-	struct bfi_pport_generic_req_s		*pclearstats;
-	struct bfi_pport_set_svc_params_req_s	*psetsvcparams;
-	struct bfi_pport_get_qos_stats_req_s	*pgetqosstats;
-	struct bfi_pport_generic_req_s		*pclearqosstats;
+	struct bfi_fcport_enable_req_s		*penable;
+	struct bfi_fcport_req_s			*pdisable;
+	struct bfi_fcport_set_svc_params_req_s	*psetsvcparams;
+	struct bfi_fcport_req_s			*pstatsget;
+	struct bfi_fcport_req_s			*pstatsclear;
 };
 
-union bfi_pport_i2h_msg_u {
+/**
+ * fcport I2H message
+ */
+union bfi_fcport_i2h_msg_u {
 	struct bfi_msg_s			*msg;
-	struct bfi_pport_generic_rsp_s		*enable_rsp;
-	struct bfi_pport_disable_rsp_s		*disable_rsp;
-	struct bfi_pport_generic_rsp_s		*getstats_rsp;
-	struct bfi_pport_clear_stats_rsp_s	*clearstats_rsp;
-	struct bfi_pport_set_svc_params_rsp_s	*setsvcparasm_rsp;
-	struct bfi_pport_get_qos_stats_rsp_s	*getqosstats_rsp;
-	struct bfi_pport_clear_qos_stats_rsp_s	*clearqosstats_rsp;
-	struct bfi_pport_event_s		*event;
+	struct bfi_fcport_rsp_s			*penable_rsp;
+	struct bfi_fcport_rsp_s			*pdisable_rsp;
+	struct bfi_fcport_rsp_s			*psetsvcparams_rsp;
+	struct bfi_fcport_rsp_s			*pstatsget_rsp;
+	struct bfi_fcport_rsp_s			*pstatsclear_rsp;
+	struct bfi_fcport_event_s		*event;
 };
 
 #pragma pack()
 
 #endif /* __BFI_PPORT_H__ */
-
diff --git a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
index 43ba706..a75a1f3 100644
--- a/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
+++ b/drivers/scsi/bfa/include/cna/bfa_cna_trcmod.h
@@ -31,6 +31,10 @@
 enum {
 	BFA_TRC_CNA_CEE		= 1,
 	BFA_TRC_CNA_PORT	= 2,
+	BFA_TRC_CNA_IOC     = 3,
+	BFA_TRC_CNA_DIAG    = 4,
+	BFA_TRC_CNA_IOC_CB  = 5,
+	BFA_TRC_CNA_IOC_CT  = 6,
 };
 
 #endif /* __BFA_CNA_TRCMOD_H__ */
diff --git a/drivers/scsi/bfa/include/cs/bfa_log.h b/drivers/scsi/bfa/include/cs/bfa_log.h
index 761cbe2..bc334e0 100644
--- a/drivers/scsi/bfa/include/cs/bfa_log.h
+++ b/drivers/scsi/bfa/include/cs/bfa_log.h
@@ -157,7 +157,7 @@
 
 
 struct bfa_log_mod_s {
-	char		instance_info[16];	/*  instance info */
+	char		instance_info[BFA_STRING_32];	/*  instance info */
 	int		log_level[BFA_LOG_MODULE_ID_MAX + 1];
 						/*  log level for modules */
 	bfa_log_cb_t	cbfn; 			/*  callback function */
diff --git a/drivers/scsi/bfa/include/cs/bfa_plog.h b/drivers/scsi/bfa/include/cs/bfa_plog.h
index 670f86e..f5bef63 100644
--- a/drivers/scsi/bfa/include/cs/bfa_plog.h
+++ b/drivers/scsi/bfa/include/cs/bfa_plog.h
@@ -80,7 +80,8 @@
 	BFA_PL_MID_HAL_FCXP 	= 4,
 	BFA_PL_MID_HAL_UF 	= 5,
 	BFA_PL_MID_FCS 		= 6,
-	BFA_PL_MID_MAX 		= 7
+	BFA_PL_MID_LPS		= 7,
+	BFA_PL_MID_MAX 		= 8
 };
 
 #define BFA_PL_MID_STRLEN    8
@@ -118,7 +119,11 @@
 	BFA_PL_EID_RSCN 		= 17,
 	BFA_PL_EID_DEBUG 		= 18,
 	BFA_PL_EID_MISC 		= 19,
-	BFA_PL_EID_MAX 			= 20
+	BFA_PL_EID_FIP_FCF_DISC		= 20,
+	BFA_PL_EID_FIP_FCF_CVL		= 21,
+	BFA_PL_EID_LOGIN		= 22,
+	BFA_PL_EID_LOGO			= 23,
+	BFA_PL_EID_MAX			= 24
 };
 
 #define BFA_PL_ENAME_STRLEN    	8
diff --git a/drivers/scsi/bfa/include/cs/bfa_sm.h b/drivers/scsi/bfa/include/cs/bfa_sm.h
index b0a92ba..11fba90 100644
--- a/drivers/scsi/bfa/include/cs/bfa_sm.h
+++ b/drivers/scsi/bfa/include/cs/bfa_sm.h
@@ -23,6 +23,14 @@
 #define __BFA_SM_H__
 
 typedef void (*bfa_sm_t)(void *sm, int event);
+/**
+ * oc - object class eg. bfa_ioc
+ * st - state, eg. reset
+ * otype - object type, eg. struct bfa_ioc_s
+ * etype - object type, eg. enum ioc_event
+ */
+#define bfa_sm_state_decl(oc, st, otype, etype)         \
+	static void oc ## _sm_ ## st(otype * fsm, etype event)
 
 #define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
 #define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))
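The new bfa_sm_state_decl() macro token-pastes a uniform forward declaration for one state-handler function, which the existing bfa_sm_set_state()/bfa_sm_send_event() macros then install and drive. A compilable sketch of the expansion; struct bfa_ioc_s is reduced to just the sm pointer here, and the ioc_event value is made up for illustration:

#include <stdio.h>

typedef void (*bfa_sm_t)(void *sm, int event);

#define bfa_sm_state_decl(oc, st, otype, etype)         \
	static void oc ## _sm_ ## st(otype * fsm, etype event)

#define bfa_sm_set_state(_sm, _state)	((_sm)->sm = (bfa_sm_t)(_state))
#define bfa_sm_send_event(_sm, _event)	((_sm)->sm((_sm), (_event)))

enum ioc_event { IOC_E_ENABLE = 1 };	/* hypothetical event value */

struct bfa_ioc_s {
	bfa_sm_t sm;			/* current state handler */
};

/* Expands to: static void bfa_ioc_sm_reset(struct bfa_ioc_s *fsm, ...) */
bfa_sm_state_decl(bfa_ioc, reset, struct bfa_ioc_s, enum ioc_event);

static void
bfa_ioc_sm_reset(struct bfa_ioc_s *fsm, enum ioc_event event)
{
	(void)fsm;
	printf("reset state got event %d\n", event);
}

int main(void)
{
	struct bfa_ioc_s ioc;

	/* The cast in bfa_sm_set_state() erases the handler's exact type,
	 * exactly as the kernel macro does. */
	bfa_sm_set_state(&ioc, bfa_ioc_sm_reset);
	bfa_sm_send_event(&ioc, IOC_E_ENABLE);
	return 0;
}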
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
index 4c81a61..3524469 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_aen.h
@@ -30,6 +30,16 @@
 #include <defs/bfa_defs_audit.h>
 #include <defs/bfa_defs_ethport.h>
 
+#define BFA_AEN_MAX_APP         5
+
+enum bfa_aen_app {
+	bfa_aen_app_bcu = 0,    /* No thread for bcu */
+	bfa_aen_app_hcm = 1,
+	bfa_aen_app_cim = 2,
+	bfa_aen_app_snia = 3,
+	bfa_aen_app_test = 4,   /* To be removed after unit test */
+};
+
 enum bfa_aen_category {
 	BFA_AEN_CAT_ADAPTER 	= 1,
 	BFA_AEN_CAT_PORT 	= 2,
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
index dd19c83..45df328 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_auth.h
@@ -23,6 +23,7 @@
 #define PRIVATE_KEY			19009
 #define KEY_LEN				32399
 #define BFA_AUTH_SECRET_STRING_LEN	256
+#define BFA_AUTH_FAIL_NO_PASSWORD	0xFE
 #define BFA_AUTH_FAIL_TIMEOUT		0xFF
 
 /**
@@ -41,6 +42,27 @@
 	BFA_AUTH_STATUS_UNKNOWN = 9,	/*  authentication status unknown */
 };
 
+enum bfa_auth_rej_code {
+	BFA_AUTH_RJT_CODE_AUTH_FAILURE   = 1, /* auth failure */
+	BFA_AUTH_RJT_CODE_LOGICAL_ERR    = 2, /* logical error */
+};
+
+/**
+ * Authentication reject codes
+ */
+enum bfa_auth_rej_code_exp {
+	BFA_AUTH_MECH_NOT_USABLE        = 1, /* auth. mechanism not usable */
+	BFA_AUTH_DH_GROUP_NOT_USABLE    = 2, /* DH Group not usable */
+	BFA_AUTH_HASH_FUNC_NOT_USABLE   = 3, /* hash function not usable */
+	BFA_AUTH_AUTH_XACT_STARTED      = 4, /* auth xact started */
+	BFA_AUTH_AUTH_FAILED            = 5, /* auth failed */
+	BFA_AUTH_INCORRECT_PLD          = 6, /* incorrect payload */
+	BFA_AUTH_INCORRECT_PROTO_MSG    = 7, /* incorrect proto msg */
+	BFA_AUTH_RESTART_AUTH_PROTO     = 8, /* restart auth protocol */
+	BFA_AUTH_AUTH_CONCAT_NOT_SUPP   = 9, /* auth concat not supported */
+	BFA_AUTH_PROTO_VER_NOT_SUPP     = 10, /* proto version not supported */
+};
+
 struct auth_proto_stats_s {
 	u32        auth_rjts;
 	u32        auth_negs;
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
index 520a22f..b0ac9ac 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_cee.h
@@ -28,10 +28,6 @@
 
 #define BFA_CEE_LLDP_MAX_STRING_LEN (128)
 
-
-/* FIXME: this is coming from the protocol spec. Can the host & apps share the
-   protocol .h files ?
- */
 #define BFA_CEE_LLDP_SYS_CAP_OTHER       0x0001
 #define BFA_CEE_LLDP_SYS_CAP_REPEATER    0x0002
 #define BFA_CEE_LLDP_SYS_CAP_MAC_BRIDGE  0x0004
@@ -94,9 +90,10 @@
 /* CEE status */
 /* Making this to tri-state for the benefit of port list command */
 enum bfa_cee_status_e {
-    CEE_PHY_DOWN = 0,
-    CEE_PHY_UP = 1,
-    CEE_UP = 2,
+	CEE_UP = 0,
+	CEE_PHY_UP = 1,
+	CEE_LOOPBACK = 2,
+	CEE_PHY_DOWN = 3,
 };
 
 /* CEE Query */
@@ -107,7 +104,8 @@
 	struct bfa_cee_dcbx_cfg_s dcbx_remote;
 	mac_t src_mac;
 	u8 link_speed;
-	u8 filler[3];
+	u8 nw_priority;
+	u8 filler[2];
 };
 
 
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
index 5704980..50382dd 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_driver.h
@@ -21,6 +21,7 @@
 /**
  * Driver statistics
  */
+struct bfa_driver_stats_s {
 	u16    tm_io_abort;
     u16    tm_io_abort_comp;
     u16    tm_lun_reset;
@@ -34,7 +35,7 @@
     u64    output_req;
     u64    input_words;
     u64    output_words;
-} bfa_driver_stats_t;
+};
 
 
 #endif /* __BFA_DEFS_DRIVER_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
index 79f9b3e..b4fa092 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ethport.h
@@ -19,6 +19,7 @@
 #define __BFA_DEFS_ETHPORT_H__
 
 #include <defs/bfa_defs_status.h>
+#include <defs/bfa_defs_port.h>
 #include <protocol/types.h>
 #include <cna/pstats/phyport_defs.h>
 #include <cna/pstats/ethport_defs.h>
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
new file mode 100644
index 0000000..a07ef4a
--- /dev/null
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_fcport.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
+ * All rights reserved
+ * www.brocade.com
+ *
+ *  bfa_defs_fcport.h
+ *
+ * Linux driver for Brocade Fibre Channel Host Bus Adapter.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License (GPL) Version 2 as
+ * published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ */
+#ifndef __BFA_DEFS_FCPORT_H__
+#define __BFA_DEFS_FCPORT_H__
+
+#include <defs/bfa_defs_types.h>
+#include <protocol/types.h>
+
+#pragma pack(1)
+
+/**
+ * FCoE statistics
+ */
+struct bfa_fcoe_stats_s {
+	u64	secs_reset;	/*  Seconds since stats reset	     */
+	u64	cee_linkups;	/*  CEE link up		     */
+	u64	cee_linkdns;	/*  CEE link down		     */
+	u64	fip_linkups;	/*  FIP link up		     */
+	u64	fip_linkdns;	/*  FIP link down		     */
+	u64	fip_fails;	/*  FIP failures		     */
+	u64	mac_invalids;	/*  Invalid mac assignments	     */
+	u64	vlan_req;	/*  Vlan requests		     */
+	u64	vlan_notify;	/*  Vlan notifications		     */
+	u64	vlan_err;	/*  Vlan notification errors	     */
+	u64	vlan_timeouts;	/*  Vlan request timeouts	     */
+	u64	vlan_invalids;	/*  Vlan invalids		     */
+	u64	disc_req;	/*  Discovery requests		     */
+	u64	disc_rsp;	/*  Discovery responses	     */
+	u64	disc_err;	/*  Discovery error frames	     */
+	u64	disc_unsol;	/*  Discovery unsolicited	     */
+	u64	disc_timeouts;	/*  Discovery timeouts		     */
+	u64	disc_fcf_unavail; /*  Discovery FCF not avail	     */
+	u64	linksvc_unsupp;	/*  FIP link service req unsupp.    */
+	u64	linksvc_err;	/*  FIP link service req errors     */
+	u64	logo_req;	/*  FIP logo			     */
+	u64	clrvlink_req;	/*  Clear virtual link requests     */
+	u64	op_unsupp;	/*  FIP operation unsupp.	     */
+	u64	untagged;	/*  FIP untagged frames	     */
+	u64	txf_ucast;	/*  Tx FCoE unicast frames	     */
+	u64	txf_ucast_vlan;	/*  Tx FCoE unicast vlan frames     */
+	u64	txf_ucast_octets; /*  Tx FCoE unicast octets	     */
+	u64	txf_mcast;	/*  Tx FCoE multicast frames	     */
+	u64	txf_mcast_vlan;	/*  Tx FCoE multicast vlan frames   */
+	u64	txf_mcast_octets; /*  Tx FCoE multicast octets	     */
+	u64	txf_bcast;	/*  Tx FCoE broadcast frames	     */
+	u64	txf_bcast_vlan;	/*  Tx FCoE broadcast vlan frames   */
+	u64	txf_bcast_octets; /*  Tx FCoE broadcast octets	     */
+	u64	txf_timeout;	/*  Tx timeouts		     */
+	u64	txf_parity_errors; /*  Transmit parity err	     */
+	u64	txf_fid_parity_errors; /*  Transmit FID parity err  */
+	u64	tx_pause;	/*  Tx pause frames		     */
+	u64	tx_zero_pause;	/*  Tx zero pause frames	     */
+	u64	tx_first_pause;	/*  Tx first pause frames	     */
+	u64	rx_pause;	/*  Rx pause frames		     */
+	u64	rx_zero_pause;	/*  Rx zero pause frames	     */
+	u64	rx_first_pause;	/*  Rx first pause frames	     */
+	u64	rxf_ucast_octets; /*  Rx unicast octets	     */
+	u64	rxf_ucast;	/*  Rx unicast frames		     */
+	u64	rxf_ucast_vlan;	/*  Rx unicast vlan frames	     */
+	u64	rxf_mcast_octets; /*  Rx multicast octets	     */
+	u64	rxf_mcast;	/*  Rx multicast frames	     */
+	u64	rxf_mcast_vlan;	/*  Rx multicast vlan frames	     */
+	u64	rxf_bcast_octets; /*  Rx broadcast octets	     */
+	u64	rxf_bcast;	/*  Rx broadcast frames	     */
+	u64	rxf_bcast_vlan;	/*  Rx broadcast vlan frames	     */
+};
+
+/**
+ * QoS or FCoE stats (fcport stats excluding physical FC port stats)
+ */
+union bfa_fcport_stats_u {
+	struct bfa_qos_stats_s	fcqos;
+	struct bfa_fcoe_stats_s	fcoe;
+};
+
+#pragma pack()
+
+#endif  /* __BFA_DEFS_FCPORT_H__ */
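The union at the end of the new header lets one stats buffer and one mailbox command serve both flavors: an FC port fills in the fcqos view, a CNA port the fcoe view. A small sketch, under the assumption that bfa_qos_stats_s (defined elsewhere, not in this patch) carries plain u64 counters like the FCoE side:

#include <stdio.h>
#include <string.h>

typedef unsigned long long u64;

/* Trimmed stand-ins: the bfa_qos_stats_s field is hypothetical; the
 * fcoe fields mirror the new header above. */
struct bfa_qos_stats_s { u64 flogi_sent; };		/* hypothetical */
struct bfa_fcoe_stats_s { u64 cee_linkups; u64 fip_fails; };

union bfa_fcport_stats_u {
	struct bfa_qos_stats_s	fcqos;
	struct bfa_fcoe_stats_s	fcoe;
};

int main(void)
{
	union bfa_fcport_stats_u stats;

	memset(&stats, 0, sizeof(stats));
	stats.fcoe.cee_linkups = 3;
	/* One buffer serves either view, depending on the port type. */
	printf("cee_linkups=%llu\n", stats.fcoe.cee_linkups);
	return 0;
}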
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
deleted file mode 100644
index 9ccf53b..0000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_common.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_DEFS_IM_COMMON_H__
-#define __BFA_DEFS_IM_COMMON_H__
-
-#define	BFA_ADAPTER_NAME_LEN	256
-#define BFA_ADAPTER_GUID_LEN    256
-#define RESERVED_VLAN_NAME      L"PORT VLAN"
-#define PASSTHRU_VLAN_NAME      L"PASSTHRU VLAN"
-
-	u64	tx_pkt_cnt;
-	u64	rx_pkt_cnt;
-	u32	duration;
-	u8		status;
-} bfa_im_stats_t, *pbfa_im_stats_t;
-
-#endif /* __BFA_DEFS_IM_COMMON_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h b/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
deleted file mode 100644
index a486a7e..0000000
--- a/drivers/scsi/bfa/include/defs/bfa_defs_im_team.h
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-#ifndef __BFA_DEFS_IM_TEAM_H__
-#define __BFA_DEFS_IM_TEAM_H__
-
-#include <protocol/types.h>
-
-#define	BFA_TEAM_MAX_PORTS	8
-#define	BFA_TEAM_NAME_LEN	256
-#define BFA_MAX_NUM_TEAMS	16
-#define BFA_TEAM_INVALID_DELAY -1
-
-	BFA_LACP_RATE_SLOW = 1,
-	BFA_LACP_RATE_FAST
-} bfa_im_lacp_rate_t;
-
-	BFA_TEAM_MODE_FAIL_OVER = 1,
-	BFA_TEAM_MODE_FAIL_BACK,
-	BFA_TEAM_MODE_LACP,
-	BFA_TEAM_MODE_NONE
-} bfa_im_team_mode_t;
-
-	BFA_XMIT_POLICY_L2 = 1,
-	BFA_XMIT_POLICY_L3_L4
-} bfa_im_xmit_policy_t;
-
-	bfa_im_team_mode_t     team_mode;
-	bfa_im_lacp_rate_t     lacp_rate;
-	bfa_im_xmit_policy_t   xmit_policy;
-	int   	          delay;
-	wchar_t    	  primary[BFA_ADAPTER_NAME_LEN];
-	wchar_t        	  preferred_primary[BFA_ADAPTER_NAME_LEN];
-	mac_t	          mac;
-	u16       	  num_ports;
-	u16          num_vlans;
-	u16 vlan_list[BFA_MAX_VLANS_PER_PORT];
-	wchar_t	 team_guid_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_GUID_LEN];
-	wchar_t	 ioc_name_list[BFA_TEAM_MAX_PORTS][BFA_ADAPTER_NAME_LEN];
-} bfa_im_team_attr_t;
-
-	wchar_t		             team_name[BFA_TEAM_NAME_LEN];
-	bfa_im_xmit_policy_t	 xmit_policy;
-	int                 	 delay;
-	wchar_t                	 primary[BFA_ADAPTER_NAME_LEN];
-	wchar_t               	 preferred_primary[BFA_ADAPTER_NAME_LEN];
-} bfa_im_team_edit_t, *pbfa_im_team_edit_t;
-
-	wchar_t					team_name[BFA_TEAM_NAME_LEN];
-	bfa_im_team_mode_t      team_mode;
-	mac_t	               	mac;
-} bfa_im_team_info_t;
-
-	bfa_im_team_info_t 		team_info[BFA_MAX_NUM_TEAMS];
-	u16 				num_teams;
-} bfa_im_team_list_t, *pbfa_im_team_list_t;
-
-#endif /* __BFA_DEFS_IM_TEAM_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
index b1d532d..8d8e6a9 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_ioc.h
@@ -126,6 +126,7 @@
 	struct bfa_ioc_driver_attr_s 	driver_attr;	/*  driver attr    */
 	struct bfa_ioc_pci_attr_s	pci_attr;
 	u8				port_id;	/*  port number    */
+	u8				rsvd[7];	/*!< 64bit align    */
 };
 
 /**
@@ -143,8 +144,8 @@
  * BFA IOC level event data, now just a place holder
  */
 struct bfa_ioc_aen_data_s {
-	enum bfa_ioc_type_e ioc_type;
 	wwn_t	pwwn;
+	s16 ioc_type;
 	mac_t	mac;
 };
 
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
index d76bcbd..c290fb1 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_iocfc.h
@@ -26,6 +26,8 @@
 
 #define BFA_IOCFC_INTR_DELAY	1125
 #define BFA_IOCFC_INTR_LATENCY	225
+#define BFA_IOCFCOE_INTR_DELAY  25
+#define BFA_IOCFCOE_INTR_LATENCY 5
 
 /**
  * Interrupt coalescing configuration.
@@ -50,7 +52,7 @@
 	u16        num_fcxp_reqs;	/*  unassisted FC exchanges	*/
 	u16        num_uf_bufs;	/*  unsolicited recv buffers	*/
 	u8		num_cqs;
-	u8		rsvd;
+	u8		rsvd[5];
 };
 
 struct bfa_iocfc_drvcfg_s {
@@ -224,18 +226,24 @@
 
 
 struct bfa_fw_fip_stats_s {
+    u32    vlan_req;           /*  vlan discovery requests             */
+    u32    vlan_notify;        /*  vlan notifications                  */
+    u32    vlan_err;           /*  vlan response error                 */
+    u32    vlan_timeouts;      /*  vlan discovery timeouts             */
+    u32    vlan_invalids;      /*  invalid vlan in discovery advert.   */
     u32    disc_req;           /*  Discovery solicit requests          */
     u32    disc_rsp;           /*  Discovery solicit response          */
     u32    disc_err;           /*  Discovery advt. parse errors        */
     u32    disc_unsol;         /*  Discovery unsolicited               */
     u32    disc_timeouts;      /*  Discovery timeouts                  */
+    u32    disc_fcf_unavail;   /*  Discovery FCF Not Avail.            */
     u32    linksvc_unsupp;     /*  Unsupported link service req        */
     u32    linksvc_err;        /*  Parse error in link service req     */
     u32    logo_req;           /*  Number of FIP logos received        */
     u32    clrvlink_req;       /*  Clear virtual link req              */
     u32    op_unsupp;          /*  Unsupported FIP operation           */
     u32    untagged;           /*  Untagged frames (ignored)           */
-    u32    rsvd;
+    u32	   invalid_version;    /*!< Invalid FIP version           */
 };
 
 
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
index 7359f82..0952a13 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_lport.h
@@ -59,8 +59,8 @@
  */
 struct bfa_lport_aen_data_s {
 	u16        vf_id;	/*  vf_id of this logical port */
-	u16        rsvd;
-	enum bfa_port_role roles;	/*  Logical port mode,IM/TM/IP etc */
+	s16         roles;  /*  Logical port mode, IM/TM/IP etc */
+	u32        rsvd;
 	wwn_t           ppwwn;	/*  WWN of its physical port */
 	wwn_t           lpwwn;	/*  WWN of this logical port */
 };
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
index 13fd4ab..c5bd9c3 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_mfg.h
@@ -22,7 +22,27 @@
 /**
  * Manufacturing block version
  */
-#define BFA_MFG_VERSION				1
+#define BFA_MFG_VERSION				2
+
+/**
+ * Manufacturing block encrypted version
+ */
+#define BFA_MFG_ENC_VER				2
+
+/**
+ * Manufacturing block version 1 length
+ */
+#define BFA_MFG_VER1_LEN			128
+
+/**
+ * Manufacturing block header length
+ */
+#define BFA_MFG_HDR_LEN				4
+
+/**
+ * Checksum size
+ */
+#define BFA_MFG_CHKSUM_SIZE			16
 
 /**
  * Manufacturing block format
@@ -30,29 +70,74 @@
 #define BFA_MFG_SERIALNUM_SIZE			11
 #define BFA_MFG_PARTNUM_SIZE			14
 #define BFA_MFG_SUPPLIER_ID_SIZE		10
-#define BFA_MFG_SUPPLIER_PARTNUM_SIZE	20
-#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE	20
-#define BFA_MFG_SUPPLIER_REVISION_SIZE	4
+#define BFA_MFG_SUPPLIER_PARTNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_SERIALNUM_SIZE		20
+#define BFA_MFG_SUPPLIER_REVISION_SIZE		4
 #define STRSZ(_n)	(((_n) + 4) & ~3)
 
 /**
+ * Manufacturing card type
+ */
+enum {
+	BFA_MFG_TYPE_CB_MAX  = 825,      /*  Crossbow card type max	*/
+	BFA_MFG_TYPE_FC8P2   = 825,      /*  8G 2port FC card		*/
+	BFA_MFG_TYPE_FC8P1   = 815,      /*  8G 1port FC card		*/
+	BFA_MFG_TYPE_FC4P2   = 425,      /*  4G 2port FC card		*/
+	BFA_MFG_TYPE_FC4P1   = 415,      /*  4G 1port FC card		*/
+	BFA_MFG_TYPE_CNA10P2 = 1020,     /*  10G 2port CNA card	*/
+	BFA_MFG_TYPE_CNA10P1 = 1010,     /*  10G 1port CNA card	*/
+};
+
+#pragma pack(1)
+
+/**
+ * Card type to port number conversion
+ */
+#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)
+
+
+/**
+ * All numerical fields are in big-endian format.
+ */
+struct bfa_mfg_block_s {
+};
+
+/**
  * VPD data length
  */
-#define BFA_MFG_VPD_LEN     256
+#define BFA_MFG_VPD_LEN		512
+
+#define BFA_MFG_VPD_PCI_HDR_OFF		137
+#define BFA_MFG_VPD_PCI_VER_MASK	0x07	/*  version mask 3 bits */
+#define BFA_MFG_VPD_PCI_VDR_MASK	0xf8	/*  vendor mask 5 bits */
+
+/**
+ * VPD vendor tag
+ */
+enum {
+	BFA_MFG_VPD_UNKNOWN	= 0,     /*  vendor unknown 		*/
+	BFA_MFG_VPD_IBM 	= 1,     /*  vendor IBM 		*/
+	BFA_MFG_VPD_HP  	= 2,     /*  vendor HP  		*/
+	BFA_MFG_VPD_DELL        = 3,	 /*  vendor DELL                */
+	BFA_MFG_VPD_PCI_IBM 	= 0x08,  /*  PCI VPD IBM     		*/
+	BFA_MFG_VPD_PCI_HP  	= 0x10,  /*  PCI VPD HP			*/
+	BFA_MFG_VPD_PCI_DELL    = 0x20,  /*  PCI VPD DELL           	*/
+	BFA_MFG_VPD_PCI_BRCD 	= 0xf8,  /*  PCI VPD Brocade 		*/
+};
 
 /**
  * All numerical fields are in big-endian format.
  */
 struct bfa_mfg_vpd_s {
-    u8     version;    /*  vpd data version */
-    u8     vpd_sig[3]; /*  characters 'V', 'P', 'D' */
-    u8     chksum;     /*  u8 checksum */
-    u8     vendor;     /*  vendor */
-    u8     len;        /*  vpd data length excluding header */
-    u8     rsv;
-    u8     data[BFA_MFG_VPD_LEN];  /*  vpd data */
+	u8		version;	/*  vpd data version */
+	u8		vpd_sig[3];	/*  characters 'V', 'P', 'D' */
+	u8		chksum;		/*  u8 checksum */
+	u8		vendor;		/*  vendor */
+	u8 	len;		/*  vpd data length excluding header */
+	u8 	rsv;
+	u8		data[BFA_MFG_VPD_LEN];	/*  vpd data */
 };
 
-#pragma pack(1)
+#pragma pack()
 
 #endif /* __BFA_DEFS_MFG_H__ */
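The card-type scheme above encodes the port count in the tens digit, which is all bfa_mfg_type2port_num() extracts. A quick self-check against the card types listed in the enum:

#include <assert.h>
#include <stdio.h>

#define bfa_mfg_type2port_num(card_type) (((card_type) / 10) % 10)

int main(void)
{
	/* 825 -> tens digit 2: the 8G 2-port FC card */
	assert(bfa_mfg_type2port_num(825) == 2);
	/* 1010 -> tens digit 1: the 10G 1-port CNA card */
	assert(bfa_mfg_type2port_num(1010) == 1);
	printf("ok\n");
	return 0;
}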
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_port.h b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
index de0696c..501bc97 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_port.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_port.h
@@ -185,6 +185,8 @@
 	wwn_t		fabric_name; /*  attached switch's nwwn */
 	u8		fabric_ip_addr[BFA_FCS_FABRIC_IPADDR_SZ]; /*  attached
 							* fabric's ip addr */
+	struct mac_s    fpma_mac;	/*  Lport's FPMA Mac address */
+	u16     authfail;		/*  auth failed state */
 };
 
 /**
@@ -232,14 +234,15 @@
 };
 
 struct bfa_port_aen_data_s {
-	enum bfa_ioc_type_e ioc_type;
-	wwn_t           pwwn;	      /*  WWN of the physical port */
-	wwn_t           fwwn;	      /*  WWN of the fabric port */
-	mac_t           mac;	      /*  MAC addres of the ethernet port,
-				       * applicable to CNA port only */
-	int             phy_port_num; /*! For SFP related events */
-	enum bfa_port_aen_sfp_pom level; /*  Only transitions will
-					  * be informed */
+	wwn_t           pwwn;         /*  WWN of the physical port */
+	wwn_t           fwwn;         /*  WWN of the fabric port */
+	s32         phy_port_num; /*! For SFP related events */
+	s16         ioc_type;
+	s16         level;        /*  Only transitions will
+					* be informed */
+	struct mac_s    mac;          /*  MAC address of the ethernet port,
+					* applicable to CNA port only */
+	s16         rsvd;
 };
 
 #endif /* __BFA_DEFS_PORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
index bf32041..26e5cc7 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_pport.h
@@ -232,7 +232,7 @@
 	u32        	pid;		/*  port ID */
 	enum bfa_pport_type 	port_type;	/*  current topology */
 	u32        	loopback;	/*  external loopback */
-	u32		rsvd1;
+	u32		authfail;	/* auth fail state */
 	u32		rsvd2;		/*  padding for 64 bit */
 };
 
@@ -240,73 +240,79 @@
  * 		FC Port statistics.
  */
 struct bfa_pport_fc_stats_s {
-	u64        secs_reset;	/*  seconds since stats is reset */
-	u64        tx_frames;	/*  transmitted frames */
-	u64        tx_words;	/*  transmitted words */
-	u64        rx_frames;	/*  received frames */
-	u64        rx_words;	/*  received words */
-	u64        lip_count;	/*  LIPs seen */
-	u64        nos_count;	/*  NOS count */
-	u64        error_frames;	/*  errored frames (sent?) */
-	u64        dropped_frames;	/*  dropped frames */
-	u64        link_failures;	/*  link failure count */
-	u64        loss_of_syncs;	/*  loss of sync count */
-	u64        loss_of_signals;/*  loss of signal count */
-	u64        primseq_errs;	/*  primitive sequence protocol */
-	u64        bad_os_count;	/*  invalid ordered set */
-	u64        err_enc_out;	/*  Encoding error outside frame */
-	u64        invalid_crcs;	/*  frames received with invalid CRC*/
-	u64	undersized_frm; /*  undersized frames */
-	u64	oversized_frm;	/*  oversized frames */
-	u64	bad_eof_frm;	/*  frames with bad EOF */
-	struct bfa_qos_stats_s	qos_stats;	/*  QoS statistics */
+	u64    secs_reset; /* Seconds since stats is reset     */
+	u64    tx_frames;  /* Tx frames                */
+	u64    tx_words;   /* Tx words                 */
+	u64    tx_lip;     /* TX LIP               */
+	u64    tx_nos;     /* Tx NOS               */
+	u64    tx_ols;     /* Tx OLS               */
+	u64    tx_lr;      /* Tx LR                */
+	u64    tx_lrr;     /* Tx LRR               */
+	u64    rx_frames;  /* Rx frames                */
+	u64    rx_words;   /* Rx words                 */
+	u64    lip_count;  /* Rx LIP                   */
+	u64    nos_count;  /* Rx NOS               */
+	u64    ols_count;  /* Rx OLS               */
+	u64    lr_count;   /* Rx LR                */
+	u64    lrr_count;  /* Rx LRR               */
+	u64    invalid_crcs;   /* Rx CRC err frames            */
+	u64    invalid_crc_gd_eof; /* Rx CRC err good EOF frames   */
+	u64    undersized_frm; /* Rx undersized frames         */
+	u64    oversized_frm;  /* Rx oversized frames          */
+	u64    bad_eof_frm;    /* Rx frames with bad EOF       */
+	u64    error_frames;   /* Errored frames           */
+	u64    dropped_frames; /* Dropped frames           */
+	u64    link_failures;  /* Link Failure (LF) count          */
+	u64    loss_of_syncs;  /* Loss of sync count           */
+	u64    loss_of_signals;/* Loss of signal count         */
+	u64    primseq_errs;   /* Primitive sequence protocol err. */
+	u64    bad_os_count;   /* Invalid ordered sets         */
+	u64    err_enc_out;    /* Encoding err nonframe_8b10b      */
+	u64    err_enc;    /* Encoding err frame_8b10b         */
 };
 
 /**
  * 		Eth Port statistics.
  */
 struct bfa_pport_eth_stats_s {
-	u64	secs_reset;	/*  seconds since stats is reset */
-	u64	frame_64;      /*  both rx and tx counter */
-	u64	frame_65_127;      /* both rx and tx counter */
-	u64	frame_128_255;     /* both rx and tx counter */
-	u64	frame_256_511;     /* both rx and tx counter */
-	u64	frame_512_1023;    /* both rx and tx counter */
-	u64	frame_1024_1518;   /* both rx and tx counter */
-	u64	frame_1519_1522;   /* both rx and tx counter */
-
-	u64	tx_bytes;
-	u64	tx_packets;
-	u64	tx_mcast_packets;
-	u64	tx_bcast_packets;
-	u64	tx_control_frame;
-	u64	tx_drop;
-	u64	tx_jabber;
-	u64	tx_fcs_error;
-	u64	tx_fragments;
-
-	u64	rx_bytes;
-	u64	rx_packets;
-	u64	rx_mcast_packets;
-	u64	rx_bcast_packets;
-	u64	rx_control_frames;
-	u64	rx_unknown_opcode;
-	u64	rx_drop;
-	u64	rx_jabber;
-	u64	rx_fcs_error;
-	u64	rx_alignment_error;
-	u64	rx_frame_length_error;
-	u64	rx_code_error;
-	u64	rx_fragments;
-
-	u64	rx_pause; /* BPC */
-	u64	rx_zero_pause; /*  BPC Pause cancellation */
-	u64	tx_pause;      /* BPC */
-	u64	tx_zero_pause; /*  BPC Pause cancellation */
-	u64	rx_fcoe_pause; /* BPC */
-	u64	rx_fcoe_zero_pause; /*  BPC Pause cancellation */
-	u64	tx_fcoe_pause;      /* BPC */
-	u64	tx_fcoe_zero_pause; /*  BPC Pause cancellation */
+	u64    secs_reset;   /* Seconds since stats is reset   */
+	u64    frame_64;     /* Frames 64 bytes            */
+	u64    frame_65_127;     /* Frames 65-127 bytes        */
+	u64    frame_128_255;    /* Frames 128-255 bytes       */
+	u64    frame_256_511;    /* Frames 256-511 bytes       */
+	u64    frame_512_1023;   /* Frames 512-1023 bytes          */
+	u64    frame_1024_1518;  /* Frames 1024-1518 bytes         */
+	u64    frame_1519_1522;  /* Frames 1519-1522 bytes         */
+	u64    tx_bytes;     /* Tx bytes               */
+	u64    tx_packets;   /* Tx packets             */
+	u64    tx_mcast_packets; /* Tx multicast packets       */
+	u64    tx_bcast_packets; /* Tx broadcast packets       */
+	u64    tx_control_frame; /* Tx control frame           */
+	u64    tx_drop;      /* Tx drops               */
+	u64    tx_jabber;    /* Tx jabber              */
+	u64    tx_fcs_error;     /* Tx FCS error           */
+	u64    tx_fragments;     /* Tx fragments           */
+	u64    rx_bytes;     /* Rx bytes               */
+	u64    rx_packets;   /* Rx packets             */
+	u64    rx_mcast_packets; /* Rx multicast packets       */
+	u64    rx_bcast_packets; /* Rx broadcast packets       */
+	u64    rx_control_frames; /* Rx control frames         */
+	u64    rx_unknown_opcode; /* Rx unknown opcode         */
+	u64    rx_drop;      /* Rx drops               */
+	u64    rx_jabber;    /* Rx jabber              */
+	u64    rx_fcs_error;     /* Rx FCS errors              */
+	u64    rx_alignment_error; /* Rx alignment errors          */
+	u64    rx_frame_length_error; /* Rx frame len errors       */
+	u64    rx_code_error;    /* Rx code errors             */
+	u64    rx_fragments;     /* Rx fragments           */
+	u64    rx_pause;     /* Rx pause               */
+	u64    rx_zero_pause;    /* Rx zero pause              */
+	u64    tx_pause;     /* Tx pause               */
+	u64    tx_zero_pause;    /* Tx zero pause              */
+	u64    rx_fcoe_pause;    /* Rx fcoe pause              */
+	u64    rx_fcoe_zero_pause; /* Rx FCoE zero pause       */
+	u64    tx_fcoe_pause;    /* Tx FCoE pause              */
+	u64    tx_fcoe_zero_pause; /* Tx FCoE zero pause       */
 };
 
 /**
@@ -333,8 +339,7 @@
 };
 
 /**
- *              Port RNID info.
- */
+ *              Port RNID info. */
 struct bfa_pport_rnid_s {
 	wwn_t             wwn;
 	u32          unittype;
@@ -347,6 +352,23 @@
 	u16          topologydiscoveryflags;
 };
 
+struct bfa_fcport_fcf_s {
+	wwn_t           name;           /* FCF name                 */
+	wwn_t           fabric_name;    /* Fabric Name              */
+	u8		fipenabled;     /* FIP enabled or not       */
+	u8		fipfailed;      /* FIP failed or not        */
+	u8         	resv[2];
+	u8         	pri;            /* FCF priority             */
+	u8         	version;        /* FIP version used         */
+	u8         	available;      /* Available  for  login    */
+	u8         	fka_disabled;   /* FKA is disabled          */
+	u8         	maxsz_verified; /* FCoE max size verified   */
+	u8         	fc_map[3];      /* FC map                   */
+	u16		vlan;           /* FCoE vlan tag/priority   */
+	u32        	fka_adv_per;    /* FIP  ka advert. period   */
+	struct mac_s	mac;            /* FCF mac                  */
+};
+
 /**
  * 		Link state information
  */
@@ -378,6 +400,7 @@
 			struct fc_alpabm_s     alpabm;	   /*  alpa bitmap */
 		} loop_info;
 	} tl;
+	struct bfa_fcport_fcf_s fcf;    /*!< FCF information (for FCoE) */
 };
 
 #endif /* __BFA_DEFS_PPORT_H__ */
diff --git a/drivers/scsi/bfa/include/defs/bfa_defs_status.h b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
index cdceaeb..4374494 100644
--- a/drivers/scsi/bfa/include/defs/bfa_defs_status.h
+++ b/drivers/scsi/bfa/include/defs/bfa_defs_status.h
@@ -180,8 +180,8 @@
 	BFA_STATUS_IM_ADAPT_ALREADY_IN_TEAM = 114, /*  Given adapter is part
 						    * of another team */
 	BFA_STATUS_IM_ADAPT_HAS_VLANS = 115, /*  Adapter has VLANs configured.
-					      * Delete all VLANs before
-					      * creating team */
+					      * Delete all VLANs to become
+					      * part of the team */
 	BFA_STATUS_IM_PVID_MISMATCH = 116, /*  Mismatching PVIDs configured
 					    * for adapters */
 	BFA_STATUS_IM_LINK_SPEED_MISMATCH = 117, /*  Mismatching link speeds
@@ -213,7 +213,7 @@
 					     * loaded */
 	BFA_STATUS_CARD_TYPE_MISMATCH = 131, /*  Card type mismatch */
 	BFA_STATUS_BAD_ASICBLK = 132, /*  Bad ASIC block */
-	BFA_STATUS_NO_DRIVER = 133, /*  Storage/Ethernet driver not loaded */
+	BFA_STATUS_NO_DRIVER = 133, /*  Brocade adapter/driver not installed or loaded */
 	BFA_STATUS_INVALID_MAC = 134, /*  Invalid mac address */
 	BFA_STATUS_IM_NO_VLAN = 135, /*  No VLANs configured on the adapter */
 	BFA_STATUS_IM_ETH_LB_FAILED = 136, /*  Ethernet loopback test failed */
@@ -228,8 +228,7 @@
 	BFA_STATUS_IM_GET_INETCFG_FAILED = 142, /*  Acquiring Network Subsytem
 						 * handle Failed. Please try
 						 * after some time */
-	BFA_STATUS_IM_NOT_BOUND = 143, /*  Brocade 10G Ethernet Service is not
-					* Enabled on this port */
+	BFA_STATUS_IM_NOT_BOUND = 143, /*  IM driver is not active */
 	BFA_STATUS_INSUFFICIENT_PERMS = 144, /*  User doesn't have sufficient
 					      * permissions to execute the BCU
 					      * application */
@@ -242,6 +241,14 @@
 					  * failed */
 	BFA_STATUS_IM_UNBIND_FAILED = 149, /* ! < IM Driver unbind operation
 					    * failed */
+	BFA_STATUS_IM_PORT_IN_TEAM = 150, /*  Port is already part of the
+					   * team */
+	BFA_STATUS_IM_VLAN_NOT_FOUND = 151, /*  VLAN ID doesn't exist */
+	BFA_STATUS_IM_TEAM_NOT_FOUND = 152, /*  Teaming configuration doesn't
+					     * exist */
+	BFA_STATUS_IM_TEAM_CFG_NOT_ALLOWED = 153, /*  Given settings are not
+						* allowed for the current
+						* Teaming mode */
 	BFA_STATUS_MAX_VAL		/*  Unknown error code */
 };
 #define bfa_status_t enum bfa_status
diff --git a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
index a6c70ae..52585d3 100644
--- a/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
+++ b/drivers/scsi/bfa/include/fcb/bfa_fcb_fcpim.h
@@ -70,7 +70,6 @@
  */
 void            bfa_fcb_itnim_offline(struct bfad_itnim_s *itnim_drv);
 
-void            bfa_fcb_itnim_tov_begin(struct bfad_itnim_s *itnim_drv);
 void            bfa_fcb_itnim_tov(struct bfad_itnim_s *itnim_drv);
 
 #endif /* __BFAD_FCB_FCPIM_H__ */
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs.h b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
index 627669c..f2fd35f 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs.h
@@ -49,6 +49,7 @@
 	struct bfa_trc_mod_s  *trcmod;	/*  tracing module */
 	struct bfa_aen_s      *aen;	/*  aen component */
 	bfa_boolean_t   vf_enabled;	/*  VF mode is enabled */
+	bfa_boolean_t   fdmi_enabled;   /*!< FDMI is enabled */
 	bfa_boolean_t min_cfg;		/* min cfg enabled/disabled */
 	u16        port_vfid;	/*  port default VF ID */
 	struct bfa_fcs_driver_info_s driver_info;
@@ -60,10 +61,12 @@
 /*
  * bfa fcs API functions
  */
-void bfa_fcs_init(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
+void bfa_fcs_attach(struct bfa_fcs_s *fcs, struct bfa_s *bfa, struct bfad_s *bfad,
 			bfa_boolean_t min_cfg);
+void bfa_fcs_init(struct bfa_fcs_s *fcs);
 void bfa_fcs_driver_info_init(struct bfa_fcs_s *fcs,
 			struct bfa_fcs_driver_info_s *driver_info);
+void bfa_fcs_set_fdmi_param(struct bfa_fcs_s *fcs, bfa_boolean_t fdmi_enable);
 void bfa_fcs_exit(struct bfa_fcs_s *fcs);
 void bfa_fcs_trc_init(struct bfa_fcs_s *fcs, struct bfa_trc_mod_s *trcmod);
 void bfa_fcs_log_init(struct bfa_fcs_s *fcs, struct bfa_log_mod_s *logmod);
diff --git a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
index 967ceb0..ceaefd3 100644
--- a/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
+++ b/drivers/scsi/bfa/include/fcs/bfa_fcs_lport.h
@@ -34,14 +34,6 @@
 struct bfa_fcs_fabric_s;
 
 /*
-* @todo : need to move to a global config file.
- * Maximum Vports supported per physical port or vf.
- */
-#define BFA_FCS_MAX_VPORTS_SUPP_CB  255
-#define BFA_FCS_MAX_VPORTS_SUPP_CT  191
-
-/*
-* @todo : need to move to a global config file.
  * Maximum Rports supported per port (physical/logical).
  */
 #define BFA_FCS_MAX_RPORTS_SUPP  256	/* @todo : tentative value */
diff --git a/drivers/scsi/bfa/include/log/bfa_log_hal.h b/drivers/scsi/bfa/include/log/bfa_log_hal.h
index 0412aea..5f8f5e3 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_hal.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_hal.h
@@ -27,4 +27,10 @@
 	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 3)
 #define BFA_LOG_HAL_SM_ASSERT \
 	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 4)
+#define BFA_LOG_HAL_DRIVER_ERROR \
+	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 5)
+#define BFA_LOG_HAL_DRIVER_CONFIG_ERROR \
+	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 6)
+#define BFA_LOG_HAL_MBOX_ERROR \
+	(((u32) BFA_LOG_HAL_ID << BFA_LOG_MODID_OFFSET) | 7)
 #endif
diff --git a/drivers/scsi/bfa/include/log/bfa_log_linux.h b/drivers/scsi/bfa/include/log/bfa_log_linux.h
index 317c054..bd451db 100644
--- a/drivers/scsi/bfa/include/log/bfa_log_linux.h
+++ b/drivers/scsi/bfa/include/log/bfa_log_linux.h
@@ -41,4 +41,20 @@
 		(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 10)
 #define BFA_LOG_LINUX_SCSI_ABORT_COMP \
 		(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 11)
+#define BFA_LOG_LINUX_DRIVER_CONFIG_ERROR \
+		(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 12)
+#define BFA_LOG_LINUX_BNA_STATE_MACHINE \
+		(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 13)
+#define BFA_LOG_LINUX_IOC_ERROR \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 14)
+#define BFA_LOG_LINUX_RESOURCE_ALLOC_ERROR \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 15)
+#define BFA_LOG_LINUX_RING_BUFFER_ERROR \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 16)
+#define BFA_LOG_LINUX_DRIVER_ERROR \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)
+#define BFA_LOG_LINUX_DRIVER_DIAG \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 18)
+#define BFA_LOG_LINUX_DRIVER_AEN \
+	(((u32) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 19)
 #endif
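Each of these message ids packs the module id above a per-module message number, so a handler can split an id back into its parts with a shift and a mask. A sketch; BFA_LOG_MODID_OFFSET and BFA_LOG_LINUX_ID are not part of this patch, so the values below are assumptions for illustration only:

#include <stdio.h>
#include <stdint.h>

/* Assumed for illustration: the real values live in cs/bfa_log.h. */
#define BFA_LOG_MODID_OFFSET	16
#define BFA_LOG_LINUX_ID	9

#define BFA_LOG_LINUX_DRIVER_ERROR \
	(((uint32_t) BFA_LOG_LINUX_ID << BFA_LOG_MODID_OFFSET) | 17)

int main(void)
{
	uint32_t msg_id = BFA_LOG_LINUX_DRIVER_ERROR;

	/* High bits carry the module id, low bits the message number. */
	printf("module %u, message %u\n",
	       msg_id >> BFA_LOG_MODID_OFFSET,
	       msg_id & ((1u << BFA_LOG_MODID_OFFSET) - 1));
	return 0;
}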
diff --git a/drivers/scsi/bfa/include/protocol/fc.h b/drivers/scsi/bfa/include/protocol/fc.h
index 14969ee..8d10380 100644
--- a/drivers/scsi/bfa/include/protocol/fc.h
+++ b/drivers/scsi/bfa/include/protocol/fc.h
@@ -50,6 +50,11 @@
 
 	u32        ro;		/* relative offset */
 };
+
+#define FC_SOF_LEN      4
+#define FC_EOF_LEN      4
+#define FC_CRC_LEN      4
+
 /*
  * Fibre Channel BB_E Header Structure
  */
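The three lengths added above cover the framing that surrounds an FC frame on the wire: the start-of-frame and end-of-frame delimiters plus the CRC, e.g.:

#include <stdio.h>

#define FC_SOF_LEN      4
#define FC_EOF_LEN      4
#define FC_CRC_LEN      4

int main(void)
{
	/* Per-frame overhead outside the FC header and payload. */
	printf("SOF+CRC+EOF overhead: %d bytes\n",
	       FC_SOF_LEN + FC_CRC_LEN + FC_EOF_LEN);
	return 0;
}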
diff --git a/drivers/scsi/bfa/include/protocol/pcifw.h b/drivers/scsi/bfa/include/protocol/pcifw.h
deleted file mode 100644
index 6830dc3..0000000
--- a/drivers/scsi/bfa/include/protocol/pcifw.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
- * All rights reserved
- * www.brocade.com
- *
- * Linux driver for Brocade Fibre Channel Host Bus Adapter.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License (GPL) Version 2 as
- * published by the Free Software Foundation
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
- * General Public License for more details.
- */
-
-/**
- *  pcifw.h PCI FW related headers
- */
-
-#ifndef __PCIFW_H__
-#define __PCIFW_H__
-
-#pragma pack(1)
-
-struct pnp_hdr_s{
-  u32	signature;	/* "$PnP" */
-  u8	rev;		/* Struct revision */
-  u8 	len;		/* Header structure len in multiples
-				 * of 16 bytes */
-  u16  off;		/* Offset to next header 00 if none */
-  u8	rsvd;		/* Reserved byte */
-  u8	cksum;		/* 8-bit checksum for this header */
-  u32	pnp_dev_id;	/* PnP Device Id */
-  u16  mfstr;		/* Pointer to manufacturer string */
-  u16	prstr;		/* Pointer to product string */
-  u8	devtype[3];	/* Device Type Code */
-  u8	devind;		/* Device Indicator */
-  u16	bcventr;	/* Bootstrap entry vector */
-  u16	rsvd2;		/* Reserved */
-  u16  sriv;		/* Static resource information vector */
-};
-
-struct pci_3_0_ds_s{
- u32	sig;   		/* Signature "PCIR" */
- u16	vendid;		/* Vendor ID */
- u16	devid;		/* Device ID */
- u16	devlistoff;	/* Device List Offset */
- u16	len;		/* PCI Data Structure Length */
- u8	rev;		/* PCI Data Structure Revision */
- u8	clcode[3];	/* Class Code */
- u16	imglen;		/* Code image length in multiples of
-				 * 512 bytes */
- u16	coderev;	/* Revision level of code/data */
- u8	codetype;	/* Code type 0x00 - BIOS */
- u8	indr;		/* Last image indicator */
- u16	mrtimglen;	/* Max Run Time Image Length */
- u16	cuoff;		/* Config Utility Code Header Offset */
- u16	dmtfclp;	/* DMTF CLP entry point offset */
-};
-
-struct pci_optrom_hdr_s{
- u16	sig;		/* Signature 0x55AA */
- u8	len;		/* Option ROM length in units of 512 bytes */
- u8	inivec[3];	/* Initialization vector */
- u8	rsvd[16];	/* Reserved field */
- u16	verptr;		/* Pointer to version string - private */
- u16	pcids;		/* Pointer to PCI data structure */
- u16	pnphdr;		/* Pointer to PnP expansion header */
-};
-
-#pragma pack()
-
-#endif
diff --git a/drivers/scsi/bfa/loop.c b/drivers/scsi/bfa/loop.c
index f7c7f4f3..f6342ef 100644
--- a/drivers/scsi/bfa/loop.c
+++ b/drivers/scsi/bfa/loop.c
@@ -162,7 +162,7 @@
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), alpa,
 			     bfa_fcs_port_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
-				 bfa_pport_get_maxfrsize(port->fcs->bfa));
+				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 			  FC_CLASS_3, len, &fchs,
diff --git a/drivers/scsi/bfa/lport_api.c b/drivers/scsi/bfa/lport_api.c
index 1e06792..d3907d1 100644
--- a/drivers/scsi/bfa/lport_api.c
+++ b/drivers/scsi/bfa/lport_api.c
@@ -156,7 +156,7 @@
 	/*
 	 * Get Physical port's current speed
 	 */
-	bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
 	pport_speed = pport_attr.speed;
 	bfa_trc(fcs, pport_speed);
 
@@ -235,7 +235,8 @@
 		port_info->port_wwn = bfa_fcs_port_get_pwwn(port);
 		port_info->node_wwn = bfa_fcs_port_get_nwwn(port);
 
-		port_info->max_vports_supp = bfa_fcs_vport_get_max(port->fcs);
+		port_info->max_vports_supp =
+			bfa_lps_get_max_vport(port->fcs->bfa);
 		port_info->num_vports_inuse =
 			bfa_fcs_fabric_vport_count(port->fabric);
 		port_info->max_rports_supp = BFA_FCS_MAX_RPORTS_SUPP;
diff --git a/drivers/scsi/bfa/ms.c b/drivers/scsi/bfa/ms.c
index c96b3ca..5e8c8de 100644
--- a/drivers/scsi/bfa/ms.c
+++ b/drivers/scsi/bfa/ms.c
@@ -118,7 +118,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -141,7 +141,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -190,7 +190,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -216,7 +216,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -230,10 +230,6 @@
 	switch (event) {
 	case MSSM_EVENT_PORT_OFFLINE:
 		bfa_sm_set_state(ms, bfa_fcs_port_ms_sm_offline);
-		/*
-		 * now invoke MS related sub-modules
-		 */
-		bfa_fcs_port_fdmi_offline(ms);
 		break;
 
 	case MSSM_EVENT_PORT_FABRIC_RSCN:
@@ -243,7 +239,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -266,7 +262,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -304,7 +300,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -330,7 +326,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -466,7 +462,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -502,7 +498,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -528,7 +524,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ms->port->fcs, event);
 	}
 }
 
@@ -637,7 +633,7 @@
 			     bfa_os_hton3b(FC_MGMT_SERVER),
 			     bfa_fcs_port_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
-			     bfa_pport_get_maxfrsize(port->fcs->bfa));
+			     bfa_fcport_get_maxfrsize(port->fcs->bfa));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ms_plogi_response,
@@ -735,6 +731,7 @@
 
 	ms->port = port;
 	bfa_sm_send_event(ms, MSSM_EVENT_PORT_OFFLINE);
+	bfa_fcs_port_fdmi_offline(ms);
 }
 
 void
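
A recurring change in this file (and in ns.c, rport.c, scn.c and vport.c
below) replaces the bare bfa_assert(0) in each state machine's default arm
with bfa_sm_fault(fcs, event), which records which event arrived in which
state instead of failing with no context. The driver's real definition is in
its FCS headers; a minimal standalone sketch of the idea (all names here are
placeholders):

    #include <stdio.h>

    struct demo_fcs { int fault_count; };

    /* Sketch only: log the unexpected event with source location,
     * then count it.  The real bfa_sm_fault() is a driver macro. */
    #define demo_sm_fault(fcs, event) do {                          \
            fprintf(stderr, "SM fault: event %d at %s:%d\n",        \
                    (event), __FILE__, __LINE__);                   \
            (fcs)->fault_count++;                                   \
    } while (0)

    int main(void)
    {
        struct demo_fcs fcs = { 0 };

        demo_sm_fault(&fcs, 42);    /* an event no case handled */
        return fcs.fault_count ? 0 : 1;
    }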
diff --git a/drivers/scsi/bfa/ns.c b/drivers/scsi/bfa/ns.c
index 2f8b880..d20dd7e 100644
--- a/drivers/scsi/bfa/ns.c
+++ b/drivers/scsi/bfa/ns.c
@@ -164,7 +164,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -187,7 +187,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -221,7 +221,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -247,7 +247,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -270,7 +270,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -304,7 +304,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -330,7 +330,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -353,7 +353,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -390,7 +390,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -413,7 +413,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -436,7 +436,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -494,7 +494,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -517,7 +517,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 static void
@@ -539,7 +539,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -575,7 +575,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -598,7 +598,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -626,7 +626,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(ns->port->fcs, event);
 	}
 }
 
@@ -660,7 +660,7 @@
 			     bfa_os_hton3b(FC_NAME_SERVER),
 			     bfa_fcs_port_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
-			     bfa_pport_get_maxfrsize(port->fcs->bfa));
+			     bfa_fcport_get_maxfrsize(port->fcs->bfa));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 		      FC_CLASS_3, len, &fchs, bfa_fcs_port_ns_plogi_response,
diff --git a/drivers/scsi/bfa/rport.c b/drivers/scsi/bfa/rport.c
index 9cf58bb..8e73dd9 100644
--- a/drivers/scsi/bfa/rport.c
+++ b/drivers/scsi/bfa/rport.c
@@ -224,7 +224,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -276,7 +276,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -332,7 +332,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -406,7 +406,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -481,7 +481,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -534,7 +534,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -589,7 +589,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -646,7 +646,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -704,7 +704,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -754,7 +754,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -816,7 +816,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -846,7 +846,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -869,7 +869,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -905,7 +905,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -925,10 +925,17 @@
 	case RPSM_EVENT_HCB_OFFLINE:
 	case RPSM_EVENT_ADDRESS_CHANGE:
 		if (bfa_fcs_port_is_online(rport->port)) {
-			bfa_sm_set_state(rport,
-					 bfa_fcs_rport_sm_nsdisc_sending);
-			rport->ns_retries = 0;
-			bfa_fcs_rport_send_gidpn(rport, NULL);
+			if (bfa_fcs_fabric_is_switched(rport->port->fabric)) {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_nsdisc_sending);
+				rport->ns_retries = 0;
+				bfa_fcs_rport_send_gidpn(rport, NULL);
+			} else {
+				bfa_sm_set_state(rport,
+					bfa_fcs_rport_sm_plogi_sending);
+				rport->plogi_retries = 0;
+				bfa_fcs_rport_send_plogi(rport, NULL);
+			}
 		} else {
 			rport->pid = 0;
 			bfa_sm_set_state(rport, bfa_fcs_rport_sm_offline);
@@ -951,7 +958,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1011,7 +1018,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1038,7 +1045,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1073,7 +1080,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1132,7 +1139,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1188,7 +1195,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1249,7 +1256,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1334,7 +1341,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -1366,7 +1373,7 @@
 	len = fc_plogi_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
 			     bfa_fcs_port_get_fcid(port), 0,
 			     port->port_cfg.pwwn, port->port_cfg.nwwn,
-			     bfa_pport_get_maxfrsize(port->fcs->bfa));
+			     bfa_fcport_get_maxfrsize(port->fcs->bfa));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 		      FC_CLASS_3, len, &fchs, bfa_fcs_rport_plogi_response,
@@ -1478,7 +1485,7 @@
 	len = fc_plogi_acc_build(&fchs, bfa_fcxp_get_reqbuf(fcxp), rport->pid,
 				 bfa_fcs_port_get_fcid(port), rport->reply_oxid,
 				 port->port_cfg.pwwn, port->port_cfg.nwwn,
-				 bfa_pport_get_maxfrsize(port->fcs->bfa));
+				 bfa_fcport_get_maxfrsize(port->fcs->bfa));
 
 	bfa_fcxp_send(fcxp, NULL, port->fabric->vf_id, port->lp_tag, BFA_FALSE,
 		      FC_CLASS_3, len, &fchs, NULL, NULL, FC_MAX_PDUSZ, 0);
@@ -1813,7 +1820,7 @@
 	/*
 	 * get current speed from pport attributes from BFA
 	 */
-	bfa_pport_get_attr(port->fcs->bfa, &pport_attr);
+	bfa_fcport_get_attr(port->fcs->bfa, &pport_attr);
 
 	speeds.port_op_speed = fc_bfa_speed_to_rpsc_operspeed(pport_attr.speed);
 
@@ -2032,13 +2039,10 @@
 
 	switch (event) {
 	case BFA_RPORT_AEN_ONLINE:
-		bfa_log(logmod, BFA_AEN_RPORT_ONLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
 	case BFA_RPORT_AEN_OFFLINE:
-		bfa_log(logmod, BFA_AEN_RPORT_OFFLINE, rpwwn_ptr, lpwwn_ptr);
-		break;
 	case BFA_RPORT_AEN_DISCONNECT:
-		bfa_log(logmod, BFA_AEN_RPORT_DISCONNECT, rpwwn_ptr, lpwwn_ptr);
+		bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_RPORT, event),
+			rpwwn_ptr, lpwwn_ptr);
 		break;
 	case BFA_RPORT_AEN_QOS_PRIO:
 		aen_data.rport.priv.qos = data->priv.qos;
@@ -2164,7 +2168,7 @@
 		bfa_trc(port->fcs, port->fabric->bb_credit);
 
 		port->fabric->bb_credit = bfa_os_ntohs(plogi->csp.bbcred);
-		bfa_pport_set_tx_bbcredit(port->fcs->bfa,
+		bfa_fcport_set_tx_bbcredit(port->fcs->bfa,
 					  port->fabric->bb_credit);
 	}
 
@@ -2575,23 +2579,6 @@
 }
 
 /**
- *   Module initialization
- */
-void
-bfa_fcs_rport_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- *   Module cleanup
- */
-void
-bfa_fcs_rport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
-/**
  * Return state of rport.
  */
 int
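
The AEN hunks above collapse a per-event switch into one bfa_log() call
because BFA_LOG_CREATE_ID() can derive the log message ID from the AEN
category plus the event code. A sketch of what such a macro amounts to (the
16-bit split and the enum values are assumptions for illustration; the real
layout is in the bfa_log headers):

    #include <stdint.h>
    #include <stdio.h>

    /* Assumed layout: category in the high half, event in the low half. */
    #define DEMO_LOG_CREATE_ID(cat, ev) \
            (((uint32_t)(cat) << 16) | (uint32_t)(ev))

    enum { DEMO_CAT_RPORT = 7 };    /* hypothetical category value */
    enum { DEMO_RPORT_ONLINE = 1, DEMO_RPORT_DISCONNECT = 3 };

    int main(void)
    {
        int ev;

        /* One call now covers every event in the category. */
        for (ev = DEMO_RPORT_ONLINE; ev <= DEMO_RPORT_DISCONNECT; ev++)
            printf("log id 0x%08x\n",
                   (unsigned)DEMO_LOG_CREATE_ID(DEMO_CAT_RPORT, ev));
        return 0;
    }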
diff --git a/drivers/scsi/bfa/rport_api.c b/drivers/scsi/bfa/rport_api.c
index 3dae177..a441f41 100644
--- a/drivers/scsi/bfa/rport_api.c
+++ b/drivers/scsi/bfa/rport_api.c
@@ -102,7 +102,7 @@
 	rport_attr->qos_attr = qos_attr;
 
 	rport_attr->trl_enforced = BFA_FALSE;
-	if (bfa_pport_is_ratelim(port->fcs->bfa)) {
+	if (bfa_fcport_is_ratelim(port->fcs->bfa)) {
 		if ((rport->rpf.rpsc_speed == BFA_PPORT_SPEED_UNKNOWN) ||
 			(rport->rpf.rpsc_speed <
 			bfa_fcs_port_get_rport_max_speed(port)))
diff --git a/drivers/scsi/bfa/rport_ftrs.c b/drivers/scsi/bfa/rport_ftrs.c
index e1932c8..ae7bba6 100644
--- a/drivers/scsi/bfa/rport_ftrs.c
+++ b/drivers/scsi/bfa/rport_ftrs.c
@@ -91,7 +91,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -114,7 +114,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -160,7 +160,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -186,7 +186,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -206,7 +206,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 
@@ -229,7 +229,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(rport->fcs, event);
 	}
 }
 /**
diff --git a/drivers/scsi/bfa/scn.c b/drivers/scsi/bfa/scn.c
index bd4771f..8fe09ba 100644
--- a/drivers/scsi/bfa/scn.c
+++ b/drivers/scsi/bfa/scn.c
@@ -90,7 +90,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(scn->port->fcs, event);
 	}
 }
 
@@ -109,7 +109,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(scn->port->fcs, event);
 	}
 }
 
@@ -137,7 +137,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(scn->port->fcs, event);
 	}
 }
 
@@ -157,7 +157,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(scn->port->fcs, event);
 	}
 }
 
@@ -171,7 +171,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(scn->port->fcs, event);
 	}
 }
 
diff --git a/drivers/scsi/bfa/vport.c b/drivers/scsi/bfa/vport.c
index e90f1e3..27cd619 100644
--- a/drivers/scsi/bfa/vport.c
+++ b/drivers/scsi/bfa/vport.c
@@ -122,7 +122,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -165,7 +165,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -202,7 +202,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -249,7 +249,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -283,7 +283,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -310,7 +310,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -339,7 +339,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -387,7 +387,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -419,7 +419,7 @@
 		break;
 
 	default:
-		bfa_assert(0);
+		bfa_sm_fault(__vport_fcs(vport), event);
 	}
 }
 
@@ -447,22 +447,8 @@
 
 	bfa_assert(role <= BFA_PORT_ROLE_FCP_MAX);
 
-	switch (event) {
-	case BFA_LPORT_AEN_NPIV_DUP_WWN:
-		bfa_log(logmod, BFA_AEN_LPORT_NPIV_DUP_WWN, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_NPIV_FABRIC_MAX:
-		bfa_log(logmod, BFA_AEN_LPORT_NPIV_FABRIC_MAX, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	case BFA_LPORT_AEN_NPIV_UNKNOWN:
-		bfa_log(logmod, BFA_AEN_LPORT_NPIV_UNKNOWN, lpwwn_ptr,
-			role_str[role / 2]);
-		break;
-	default:
-		break;
-	}
+	bfa_log(logmod, BFA_LOG_CREATE_ID(BFA_AEN_CAT_LPORT, event), lpwwn_ptr,
+			role_str[role / 2]);
 
 	aen_data.lport.vf_id = port->fabric->vf_id;
 	aen_data.lport.roles = role;
@@ -478,7 +464,7 @@
 bfa_fcs_vport_do_fdisc(struct bfa_fcs_vport_s *vport)
 {
 	bfa_lps_fdisc(vport->lps, vport,
-		      bfa_pport_get_maxfrsize(__vport_bfa(vport)),
+		      bfa_fcport_get_maxfrsize(__vport_bfa(vport)),
 		      __vport_pwwn(vport), __vport_nwwn(vport));
 	vport->vport_stats.fdisc_sent++;
 }
@@ -617,38 +603,6 @@
 }
 
 /**
- *   Module initialization
- */
-void
-bfa_fcs_vport_modinit(struct bfa_fcs_s *fcs)
-{
-}
-
-/**
- *   Module cleanup
- */
-void
-bfa_fcs_vport_modexit(struct bfa_fcs_s *fcs)
-{
-	bfa_fcs_modexit_comp(fcs);
-}
-
-u32
-bfa_fcs_vport_get_max(struct bfa_fcs_s *fcs)
-{
-	struct bfa_ioc_attr_s ioc_attr;
-
-	bfa_get_attr(fcs->bfa, &ioc_attr);
-
-	if (ioc_attr.pci_attr.device_id == BFA_PCI_DEVICE_ID_CT)
-		return BFA_FCS_MAX_VPORTS_SUPP_CT;
-	else
-		return BFA_FCS_MAX_VPORTS_SUPP_CB;
-}
-
-
-
-/**
  *  fcs_vport_api Virtual port API
  */
 
@@ -684,7 +638,7 @@
 		return BFA_STATUS_VPORT_EXISTS;
 
 	if (bfa_fcs_fabric_vport_count(&fcs->fabric) ==
-	    bfa_fcs_vport_get_max(fcs))
+		bfa_lps_get_max_vport(fcs->bfa))
 		return BFA_STATUS_VPORT_MAX;
 
 	vport->lps = bfa_lps_alloc(fcs->bfa);
@@ -694,7 +648,8 @@
 	vport->vport_drv = vport_drv;
 	bfa_sm_set_state(vport, bfa_fcs_vport_sm_uninit);
 
-	bfa_fcs_lport_init(&vport->lport, fcs, vf_id, vport_cfg, vport);
+	bfa_fcs_lport_attach(&vport->lport, fcs, vf_id, vport);
+	bfa_fcs_lport_init(&vport->lport, vport_cfg);
 
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_CREATE);
 
@@ -888,4 +843,15 @@
 	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_RSP_OK);
 }
 
+/**
+ * Received clear virtual link
+ */
+void
+bfa_cb_lps_cvl_event(void *bfad, void *uarg)
+{
+	struct bfa_fcs_vport_s *vport = uarg;
 
+	/* Send an OFFLINE followed by an ONLINE */
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_OFFLINE);
+	bfa_sm_send_event(vport, BFA_FCS_VPORT_SM_ONLINE);
+}
diff --git a/drivers/scsi/bnx2i/bnx2i_iscsi.c b/drivers/scsi/bnx2i/bnx2i_iscsi.c
index 1c4d121..cb71dc9 100644
--- a/drivers/scsi/bnx2i/bnx2i_iscsi.c
+++ b/drivers/scsi/bnx2i/bnx2i_iscsi.c
@@ -1989,7 +1989,7 @@
 	.queuecommand		= iscsi_queuecommand,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
-	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.change_queue_depth	= iscsi_change_queue_depth,
 	.can_queue		= 1024,
 	.max_sectors		= 127,
diff --git a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
index 412853c..b7c3058 100644
--- a/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
+++ b/drivers/scsi/cxgb3i/cxgb3i_iscsi.c
@@ -915,7 +915,7 @@
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
 	.eh_abort_handler	= iscsi_eh_abort,
 	.eh_device_reset_handler = iscsi_eh_device_reset,
-	.eh_target_reset_handler = iscsi_eh_target_reset,
+	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.target_alloc		= iscsi_target_alloc,
 	.use_clustering		= DISABLE_CLUSTERING,
 	.this_id		= -1,
diff --git a/drivers/scsi/device_handler/scsi_dh_emc.c b/drivers/scsi/device_handler/scsi_dh_emc.c
index 61966750..63032ec3 100644
--- a/drivers/scsi/device_handler/scsi_dh_emc.c
+++ b/drivers/scsi/device_handler/scsi_dh_emc.c
@@ -272,7 +272,7 @@
 	int len = 0;
 
 	rq = blk_get_request(sdev->request_queue,
-			(cmd == MODE_SELECT) ? WRITE : READ, GFP_NOIO);
+			(cmd != INQUIRY) ? WRITE : READ, GFP_NOIO);
 	if (!rq) {
 		sdev_printk(KERN_INFO, sdev, "get_req: blk_get_request failed");
 		return NULL;
@@ -286,14 +286,17 @@
 		len = sizeof(short_trespass);
 		rq->cmd_flags |= REQ_RW;
 		rq->cmd[1] = 0x10;
+		rq->cmd[4] = len;
 		break;
 	case MODE_SELECT_10:
 		len = sizeof(long_trespass);
 		rq->cmd_flags |= REQ_RW;
 		rq->cmd[1] = 0x10;
+		rq->cmd[8] = len;
 		break;
 	case INQUIRY:
 		len = CLARIION_BUFFER_SIZE;
+		rq->cmd[4] = len;
 		memset(buffer, 0, len);
 		break;
 	default:
@@ -301,7 +304,6 @@
 		break;
 	}
 
-	rq->cmd[4] = len;
 	rq->cmd_type = REQ_TYPE_BLOCK_PC;
 	rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT |
 			 REQ_FAILFAST_DRIVER;
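
The scsi_dh_emc hunk moves the length byte out of the shared tail because the
three CDBs place their length fields differently: MODE SELECT(6) and INQUIRY
carry it in byte 4, while MODE SELECT(10) carries a two-byte field in bytes
7-8 (the patch sets byte 8, the low byte, since the trespass page fits in one
byte). A standalone sketch of the placement (opcode values are the standard
SPC ones; the helper itself is illustrative):

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define INQUIRY        0x12    /* standard SPC opcodes */
    #define MODE_SELECT    0x15
    #define MODE_SELECT_10 0x55

    /* Put 'len' where each CDB format expects it. */
    static void set_cdb_len(uint8_t *cdb, uint8_t opcode, uint16_t len)
    {
        cdb[0] = opcode;
        switch (opcode) {
        case MODE_SELECT:
        case INQUIRY:
            cdb[4] = (uint8_t)len;          /* 6-byte CDB: byte 4 */
            break;
        case MODE_SELECT_10:
            cdb[7] = (uint8_t)(len >> 8);   /* 10-byte CDB: bytes 7-8 */
            cdb[8] = (uint8_t)len;
            break;
        }
    }

    int main(void)
    {
        uint8_t cdb[10];

        memset(cdb, 0, sizeof(cdb));
        set_cdb_len(cdb, MODE_SELECT_10, 0x18);
        printf("MODE SELECT(10): cmd[7]=%02x cmd[8]=%02x\n", cdb[7], cdb[8]);
        return 0;
    }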
diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
index 03697ba..183d3a4 100644
--- a/drivers/scsi/hpsa.c
+++ b/drivers/scsi/hpsa.c
@@ -43,6 +43,7 @@
 #include <scsi/scsi_cmnd.h>
 #include <scsi/scsi_device.h>
 #include <scsi/scsi_host.h>
+#include <scsi/scsi_tcq.h>
 #include <linux/cciss_ioctl.h>
 #include <linux/string.h>
 #include <linux/bitmap.h>
@@ -52,7 +53,7 @@
 #include "hpsa.h"
 
 /* HPSA_DRIVER_VERSION must be 3 byte values (0-255) separated by '.' */
-#define HPSA_DRIVER_VERSION "2.0.1-3"
+#define HPSA_DRIVER_VERSION "2.0.2-1"
 #define DRIVER_NAME "HP HPSA Driver (v " HPSA_DRIVER_VERSION ")"
 
 /* How long to wait (in milliseconds) for board to go into simple mode */
@@ -134,6 +135,8 @@
 static void hpsa_scan_start(struct Scsi_Host *);
 static int hpsa_scan_finished(struct Scsi_Host *sh,
 	unsigned long elapsed_time);
+static int hpsa_change_queue_depth(struct scsi_device *sdev,
+	int qdepth, int reason);
 
 static int hpsa_eh_device_reset_handler(struct scsi_cmnd *scsicmd);
 static int hpsa_slave_alloc(struct scsi_device *sdev);
@@ -182,8 +185,8 @@
 	.queuecommand		= hpsa_scsi_queue_command,
 	.scan_start		= hpsa_scan_start,
 	.scan_finished		= hpsa_scan_finished,
+	.change_queue_depth	= hpsa_change_queue_depth,
 	.this_id		= -1,
-	.sg_tablesize		= MAXSGENTRIES,
 	.use_clustering		= ENABLE_CLUSTERING,
 	.eh_device_reset_handler = hpsa_eh_device_reset_handler,
 	.ioctl			= hpsa_ioctl,
@@ -208,133 +211,6 @@
 	return (struct ctlr_info *) *priv;
 }
 
-static struct task_struct *hpsa_scan_thread;
-static DEFINE_MUTEX(hpsa_scan_mutex);
-static LIST_HEAD(hpsa_scan_q);
-static int hpsa_scan_func(void *data);
-
-/**
- * add_to_scan_list() - add controller to rescan queue
- * @h:		      Pointer to the controller.
- *
- * Adds the controller to the rescan queue if not already on the queue.
- *
- * returns 1 if added to the queue, 0 if skipped (could be on the
- * queue already, or the controller could be initializing or shutting
- * down).
- **/
-static int add_to_scan_list(struct ctlr_info *h)
-{
-	struct ctlr_info *test_h;
-	int found = 0;
-	int ret = 0;
-
-	if (h->busy_initializing)
-		return 0;
-
-	/*
-	 * If we don't get the lock, it means the driver is unloading
-	 * and there's no point in scheduling a new scan.
-	 */
-	if (!mutex_trylock(&h->busy_shutting_down))
-		return 0;
-
-	mutex_lock(&hpsa_scan_mutex);
-	list_for_each_entry(test_h, &hpsa_scan_q, scan_list) {
-		if (test_h == h) {
-			found = 1;
-			break;
-		}
-	}
-	if (!found && !h->busy_scanning) {
-		INIT_COMPLETION(h->scan_wait);
-		list_add_tail(&h->scan_list, &hpsa_scan_q);
-		ret = 1;
-	}
-	mutex_unlock(&hpsa_scan_mutex);
-	mutex_unlock(&h->busy_shutting_down);
-
-	return ret;
-}
-
-/**
- * remove_from_scan_list() - remove controller from rescan queue
- * @h:			   Pointer to the controller.
- *
- * Removes the controller from the rescan queue if present. Blocks if
- * the controller is currently conducting a rescan.  The controller
- * can be in one of three states:
- * 1. Doesn't need a scan
- * 2. On the scan list, but not scanning yet (we remove it)
- * 3. Busy scanning (and not on the list). In this case we want to wait for
- *    the scan to complete to make sure the scanning thread for this
- *    controller is completely idle.
- **/
-static void remove_from_scan_list(struct ctlr_info *h)
-{
-	struct ctlr_info *test_h, *tmp_h;
-
-	mutex_lock(&hpsa_scan_mutex);
-	list_for_each_entry_safe(test_h, tmp_h, &hpsa_scan_q, scan_list) {
-		if (test_h == h) { /* state 2. */
-			list_del(&h->scan_list);
-			complete_all(&h->scan_wait);
-			mutex_unlock(&hpsa_scan_mutex);
-			return;
-		}
-	}
-	if (h->busy_scanning) { /* state 3. */
-		mutex_unlock(&hpsa_scan_mutex);
-		wait_for_completion(&h->scan_wait);
-	} else { /* state 1, nothing to do. */
-		mutex_unlock(&hpsa_scan_mutex);
-	}
-}
-
-/* hpsa_scan_func() - kernel thread used to rescan controllers
- * @data:	 Ignored.
- *
- * A kernel thread used scan for drive topology changes on
- * controllers. The thread processes only one controller at a time
- * using a queue.  Controllers are added to the queue using
- * add_to_scan_list() and removed from the queue either after done
- * processing or using remove_from_scan_list().
- *
- * returns 0.
- **/
-static int hpsa_scan_func(__attribute__((unused)) void *data)
-{
-	struct ctlr_info *h;
-	int host_no;
-
-	while (1) {
-		set_current_state(TASK_INTERRUPTIBLE);
-		schedule();
-		if (kthread_should_stop())
-			break;
-
-		while (1) {
-			mutex_lock(&hpsa_scan_mutex);
-			if (list_empty(&hpsa_scan_q)) {
-				mutex_unlock(&hpsa_scan_mutex);
-				break;
-			}
-			h = list_entry(hpsa_scan_q.next, struct ctlr_info,
-					scan_list);
-			list_del(&h->scan_list);
-			h->busy_scanning = 1;
-			mutex_unlock(&hpsa_scan_mutex);
-			host_no = h->scsi_host ?  h->scsi_host->host_no : -1;
-			hpsa_scan_start(h->scsi_host);
-			complete_all(&h->scan_wait);
-			mutex_lock(&hpsa_scan_mutex);
-			h->busy_scanning = 0;
-			mutex_unlock(&hpsa_scan_mutex);
-		}
-	}
-	return 0;
-}
-
 static int check_for_unit_attention(struct ctlr_info *h,
 	struct CommandList *c)
 {
@@ -352,21 +228,8 @@
 		break;
 	case REPORT_LUNS_CHANGED:
 		dev_warn(&h->pdev->dev, "hpsa%d: report LUN data "
-			"changed\n", h->ctlr);
+			"changed, action required\n", h->ctlr);
 	/*
-	 * Here, we could call add_to_scan_list and wake up the scan thread,
-	 * except that it's quite likely that we will get more than one
-	 * REPORT_LUNS_CHANGED condition in quick succession, which means
-	 * that those which occur after the first one will likely happen
-	 * *during* the hpsa_scan_thread's rescan.  And the rescan code is not
-	 * robust enough to restart in the middle, undoing what it has already
-	 * done, and it's not clear that it's even possible to do this, since
-	 * part of what it does is notify the SCSI mid layer, which starts
-	 * doing it's own i/o to read partition tables and so on, and the
-	 * driver doesn't have visibility to know what might need undoing.
-	 * In any event, if possible, it is horribly complicated to get right
-	 * so we just don't do it for now.
-	 *
 	 * Note: this REPORT_LUNS_CHANGED condition only occurs on the MSA2012.
 	 */
 		break;
@@ -393,10 +256,7 @@
 	struct ctlr_info *h;
 	struct Scsi_Host *shost = class_to_shost(dev);
 	h = shost_to_hba(shost);
-	if (add_to_scan_list(h)) {
-		wake_up_process(hpsa_scan_thread);
-		wait_for_completion_interruptible(&h->scan_wait);
-	}
+	hpsa_scan_start(h->scsi_host);
 	return count;
 }
 
@@ -983,6 +843,76 @@
 	spin_lock_init(&h->devlock);
 }
 
+static void hpsa_free_sg_chain_blocks(struct ctlr_info *h)
+{
+	int i;
+
+	if (!h->cmd_sg_list)
+		return;
+	for (i = 0; i < h->nr_cmds; i++) {
+		kfree(h->cmd_sg_list[i]);
+		h->cmd_sg_list[i] = NULL;
+	}
+	kfree(h->cmd_sg_list);
+	h->cmd_sg_list = NULL;
+}
+
+static int hpsa_allocate_sg_chain_blocks(struct ctlr_info *h)
+{
+	int i;
+
+	if (h->chainsize <= 0)
+		return 0;
+
+	h->cmd_sg_list = kzalloc(sizeof(*h->cmd_sg_list) * h->nr_cmds,
+				GFP_KERNEL);
+	if (!h->cmd_sg_list)
+		return -ENOMEM;
+	for (i = 0; i < h->nr_cmds; i++) {
+		h->cmd_sg_list[i] = kmalloc(sizeof(*h->cmd_sg_list[i]) *
+						h->chainsize, GFP_KERNEL);
+		if (!h->cmd_sg_list[i])
+			goto clean;
+	}
+	return 0;
+
+clean:
+	hpsa_free_sg_chain_blocks(h);
+	return -ENOMEM;
+}
+
+static void hpsa_map_sg_chain_block(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct SGDescriptor *chain_sg, *chain_block;
+	u64 temp64;
+
+	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+	chain_block = h->cmd_sg_list[c->cmdindex];
+	chain_sg->Ext = HPSA_SG_CHAIN;
+	chain_sg->Len = sizeof(*chain_sg) *
+		(c->Header.SGTotal - h->max_cmd_sg_entries);
+	temp64 = pci_map_single(h->pdev, chain_block, chain_sg->Len,
+				PCI_DMA_TODEVICE);
+	chain_sg->Addr.lower = (u32) (temp64 & 0x0FFFFFFFFULL);
+	chain_sg->Addr.upper = (u32) ((temp64 >> 32) & 0x0FFFFFFFFULL);
+}
+
+static void hpsa_unmap_sg_chain_block(struct ctlr_info *h,
+	struct CommandList *c)
+{
+	struct SGDescriptor *chain_sg;
+	union u64bit temp64;
+
+	if (c->Header.SGTotal <= h->max_cmd_sg_entries)
+		return;
+
+	chain_sg = &c->SG[h->max_cmd_sg_entries - 1];
+	temp64.val32.lower = chain_sg->Addr.lower;
+	temp64.val32.upper = chain_sg->Addr.upper;
+	pci_unmap_single(h->pdev, temp64.val, chain_sg->Len, PCI_DMA_TODEVICE);
+}
+
 static void complete_scsi_command(struct CommandList *cp,
 	int timeout, u32 tag)
 {
@@ -999,10 +929,12 @@
 	h = cp->h;
 
 	scsi_dma_unmap(cmd); /* undo the DMA mappings */
+	if (cp->Header.SGTotal > h->max_cmd_sg_entries)
+		hpsa_unmap_sg_chain_block(h, cp);
 
 	cmd->result = (DID_OK << 16); 		/* host byte */
 	cmd->result |= (COMMAND_COMPLETE << 8);	/* msg byte */
-	cmd->result |= (ei->ScsiStatus << 1);
+	cmd->result |= ei->ScsiStatus;
 
 	/* copy the sense data whether we need to or not. */
 	memcpy(cmd->sense_buffer, ei->SenseInfo,
@@ -1203,6 +1135,7 @@
 	sh->max_id = HPSA_MAX_LUN;
 	sh->can_queue = h->nr_cmds;
 	sh->cmd_per_lun = h->nr_cmds;
+	sh->sg_tablesize = h->maxsgentries;
 	h->scsi_host = sh;
 	sh->hostdata[0] = (unsigned long) h;
 	sh->irq = h->intr[PERF_MODE_INT];
@@ -1382,7 +1315,7 @@
 
 	if (c == NULL) {			/* trouble... */
 		dev_warn(&h->pdev->dev, "cmd_special_alloc returned NULL!\n");
-		return -1;
+		return -ENOMEM;
 	}
 
 	fill_cmd(c, HPSA_DEVICE_RESET_MSG, h, NULL, 0, 0, scsi3addr, TYPE_MSG);
@@ -1904,16 +1837,17 @@
  * dma mapping  and fills in the scatter gather entries of the
  * hpsa command, cp.
  */
-static int hpsa_scatter_gather(struct pci_dev *pdev,
+static int hpsa_scatter_gather(struct ctlr_info *h,
 		struct CommandList *cp,
 		struct scsi_cmnd *cmd)
 {
 	unsigned int len;
 	struct scatterlist *sg;
 	u64 addr64;
-	int use_sg, i;
+	int use_sg, i, sg_index, chained;
+	struct SGDescriptor *curr_sg;
 
-	BUG_ON(scsi_sg_count(cmd) > MAXSGENTRIES);
+	BUG_ON(scsi_sg_count(cmd) > h->maxsgentries);
 
 	use_sg = scsi_dma_map(cmd);
 	if (use_sg < 0)
@@ -1922,15 +1856,33 @@
 	if (!use_sg)
 		goto sglist_finished;
 
+	curr_sg = cp->SG;
+	chained = 0;
+	sg_index = 0;
 	scsi_for_each_sg(cmd, sg, use_sg, i) {
+		if (i == h->max_cmd_sg_entries - 1 &&
+			use_sg > h->max_cmd_sg_entries) {
+			chained = 1;
+			curr_sg = h->cmd_sg_list[cp->cmdindex];
+			sg_index = 0;
+		}
 		addr64 = (u64) sg_dma_address(sg);
 		len  = sg_dma_len(sg);
-		cp->SG[i].Addr.lower =
-			(u32) (addr64 & (u64) 0x00000000FFFFFFFF);
-		cp->SG[i].Addr.upper =
-			(u32) ((addr64 >> 32) & (u64) 0x00000000FFFFFFFF);
-		cp->SG[i].Len = len;
-		cp->SG[i].Ext = 0;  /* we are not chaining */
+		curr_sg->Addr.lower = (u32) (addr64 & 0x0FFFFFFFFULL);
+		curr_sg->Addr.upper = (u32) ((addr64 >> 32) & 0x0FFFFFFFFULL);
+		curr_sg->Len = len;
+		curr_sg->Ext = 0;  /* we are not chaining */
+		curr_sg++;
+	}
+
+	if (use_sg + chained > h->maxSG)
+		h->maxSG = use_sg + chained;
+
+	if (chained) {
+		cp->Header.SGList = h->max_cmd_sg_entries;
+		cp->Header.SGTotal = (u16) (use_sg + 1);
+		hpsa_map_sg_chain_block(h, cp);
+		return 0;
 	}
 
 sglist_finished:
@@ -2026,7 +1978,7 @@
 		break;
 	}
 
-	if (hpsa_scatter_gather(h->pdev, c, cmd) < 0) { /* Fill SG list */
+	if (hpsa_scatter_gather(h, c, cmd) < 0) { /* Fill SG list */
 		cmd_free(h, c);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
@@ -2077,6 +2029,23 @@
 	return finished;
 }
 
+static int hpsa_change_queue_depth(struct scsi_device *sdev,
+	int qdepth, int reason)
+{
+	struct ctlr_info *h = sdev_to_hba(sdev);
+
+	if (reason != SCSI_QDEPTH_DEFAULT)
+		return -ENOTSUPP;
+
+	if (qdepth < 1)
+		qdepth = 1;
+	else
+		if (qdepth > h->nr_cmds)
+			qdepth = h->nr_cmds;
+	scsi_adjust_queue_depth(sdev, scsi_get_tag_type(sdev), qdepth);
+	return sdev->queue_depth;
+}
+
 static void hpsa_unregister_scsi(struct ctlr_info *h)
 {
 	/* we are being forcibly unloaded, and may not refuse. */
@@ -2961,7 +2930,7 @@
 	return IRQ_HANDLED;
 }
 
-/* Send a message CDB to the firmwart. */
+/* Send a message CDB to the firmware. */
 static __devinit int hpsa_message(struct pci_dev *pdev, unsigned char opcode,
 						unsigned char type)
 {
@@ -3296,7 +3265,7 @@
 	h->intr[PERF_MODE_INT] = pdev->irq;
 }
 
-static int hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
+static int __devinit hpsa_pci_init(struct ctlr_info *h, struct pci_dev *pdev)
 {
 	ushort subsystem_vendor_id, subsystem_device_id, command;
 	u32 board_id, scratchpad = 0;
@@ -3405,6 +3374,23 @@
 
 	h->board_id = board_id;
 	h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+	h->maxsgentries = readl(&(h->cfgtable->MaxScatterGatherElements));
+
+	/*
+	 * Limit in-command s/g elements to 32 to save dma'able memory.
+	 * However, the spec says if 0, use 31.
+	 */
+
+	h->max_cmd_sg_entries = 31;
+	if (h->maxsgentries > 512) {
+		h->max_cmd_sg_entries = 32;
+		h->chainsize = h->maxsgentries - h->max_cmd_sg_entries + 1;
+		h->maxsgentries--; /* save one for chain pointer */
+	} else {
+		h->maxsgentries = 31; /* default to traditional values */
+		h->chainsize = 0;
+	}
+
 	h->product_name = products[prod_index].product_name;
 	h->access = *(products[prod_index].access);
 	/* Allow room for some ioctls */
@@ -3532,8 +3518,6 @@
 	h->busy_initializing = 1;
 	INIT_HLIST_HEAD(&h->cmpQ);
 	INIT_HLIST_HEAD(&h->reqQ);
-	mutex_init(&h->busy_shutting_down);
-	init_completion(&h->scan_wait);
 	rc = hpsa_pci_init(h, pdev);
 	if (rc != 0)
 		goto clean1;
@@ -3587,6 +3571,8 @@
 		rc = -ENOMEM;
 		goto clean4;
 	}
+	if (hpsa_allocate_sg_chain_blocks(h))
+		goto clean4;
 	spin_lock_init(&h->lock);
 	spin_lock_init(&h->scan_lock);
 	init_waitqueue_head(&h->scan_wait_queue);
@@ -3609,6 +3595,7 @@
 	return 1;
 
 clean4:
+	hpsa_free_sg_chain_blocks(h);
 	kfree(h->cmd_pool_bits);
 	if (h->cmd_pool)
 		pci_free_consistent(h->pdev,
@@ -3681,11 +3668,10 @@
 		return;
 	}
 	h = pci_get_drvdata(pdev);
-	mutex_lock(&h->busy_shutting_down);
-	remove_from_scan_list(h);
 	hpsa_unregister_scsi(h);	/* unhook from SCSI subsystem */
 	hpsa_shutdown(pdev);
 	iounmap(h->vaddr);
+	hpsa_free_sg_chain_blocks(h);
 	pci_free_consistent(h->pdev,
 		h->nr_cmds * sizeof(struct CommandList),
 		h->cmd_pool, h->cmd_pool_dhandle);
@@ -3703,7 +3689,6 @@
 	 */
 	pci_release_regions(pdev);
 	pci_set_drvdata(pdev, NULL);
-	mutex_unlock(&h->busy_shutting_down);
 	kfree(h);
 }
 
@@ -3857,23 +3842,12 @@
  */
 static int __init hpsa_init(void)
 {
-	int err;
-	/* Start the scan thread */
-	hpsa_scan_thread = kthread_run(hpsa_scan_func, NULL, "hpsa_scan");
-	if (IS_ERR(hpsa_scan_thread)) {
-		err = PTR_ERR(hpsa_scan_thread);
-		return -ENODEV;
-	}
-	err = pci_register_driver(&hpsa_pci_driver);
-	if (err)
-		kthread_stop(hpsa_scan_thread);
-	return err;
+	return pci_register_driver(&hpsa_pci_driver);
 }
 
 static void __exit hpsa_cleanup(void)
 {
 	pci_unregister_driver(&hpsa_pci_driver);
-	kthread_stop(hpsa_scan_thread);
 }
 
 module_init(hpsa_init);
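
To make the hpsa scatter-gather chaining above concrete: the command block
holds max_cmd_sg_entries descriptors, the last of which is repurposed as the
chain pointer when the scatterlist is longer, and the remaining descriptors
go into the per-command chain block. A standalone sketch of the split (the 32
mirrors the limit set in hpsa_pci_init; everything else is a stand-in):

    #include <stdio.h>

    #define MAX_CMD_SG 32    /* in-command descriptors, per the patch */

    int main(void)
    {
        int use_sg = 40;     /* example scatterlist length */

        if (use_sg > MAX_CMD_SG) {
            /* Last in-command slot becomes the chain descriptor. */
            int in_cmd  = MAX_CMD_SG - 1;    /* 31 data entries */
            int chained = use_sg - in_cmd;   /* 9 in the chain block */

            printf("%d entries in command, %d chained\n", in_cmd, chained);
            /* SGTotal counts the chain descriptor too */
            printf("SGTotal = %d\n", use_sg + 1);
        }
        return 0;
    }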
diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
index a0502b3..1bb5233 100644
--- a/drivers/scsi/hpsa.h
+++ b/drivers/scsi/hpsa.h
@@ -83,6 +83,10 @@
 	unsigned int maxQsinceinit;
 	unsigned int maxSG;
 	spinlock_t lock;
+	int maxsgentries;
+	u8 max_cmd_sg_entries;
+	int chainsize;
+	struct SGDescriptor **cmd_sg_list;
 
 	/* pointers to command and error info pool */
 	struct CommandList 	*cmd_pool;
@@ -97,9 +101,6 @@
 	int			scan_finished;
 	spinlock_t		scan_lock;
 	wait_queue_head_t	scan_wait_queue;
-	struct mutex		busy_shutting_down;
-	struct list_head	scan_list;
-	struct completion	scan_wait;
 
 	struct Scsi_Host *scsi_host;
 	spinlock_t devlock; /* to protect hba[ctlr]->dev[];  */
diff --git a/drivers/scsi/hpsa_cmd.h b/drivers/scsi/hpsa_cmd.h
index 3e0abdf..56fb982 100644
--- a/drivers/scsi/hpsa_cmd.h
+++ b/drivers/scsi/hpsa_cmd.h
@@ -23,7 +23,8 @@
 
 /* general boundary definitions */
 #define SENSEINFOBYTES          32 /* may vary between hbas */
-#define MAXSGENTRIES            31
+#define MAXSGENTRIES            32
+#define HPSA_SG_CHAIN		0x80000000
 #define MAXREPLYQS              256
 
 /* Command Status value */
@@ -305,20 +306,23 @@
 	int			   cmd_type;
 	long			   cmdindex;
 	struct hlist_node list;
-	struct CommandList *prev;
-	struct CommandList *next;
 	struct request *rq;
 	struct completion *waiting;
-	int	 retry_count;
 	void   *scsi_cmd;
 
 /* on 64 bit architectures, to get this to be 32-byte-aligned
- * it so happens we need no padding, on 32 bit systems,
- * we need 8 bytes of padding.   This does that.
+ * it so happens we need PAD_64 bytes of padding, on 32 bit systems,
+ * we need PAD_32 bytes of padding (see below).   This does that.
+ * If it happens that 64 bit and 32 bit systems need different
+ * padding, PAD_32 and PAD_64 can be set independently, and
+ * the code below will do the right thing.
  */
-#define COMMANDLIST_PAD ((8 - sizeof(long))/4 * 8)
+#define IS_32_BIT ((8 - sizeof(long))/4)
+#define IS_64_BIT (!IS_32_BIT)
+#define PAD_32 (4)
+#define PAD_64 (4)
+#define COMMANDLIST_PAD (IS_32_BIT * PAD_32 + IS_64_BIT * PAD_64)
 	u8 pad[COMMANDLIST_PAD];
-
 };
 
 /* Configuration Table Structure */
diff --git a/drivers/scsi/ibmvscsi/ibmvfc.c b/drivers/scsi/ibmvscsi/ibmvfc.c
index 732f6d3..4e577e2 100644
--- a/drivers/scsi/ibmvscsi/ibmvfc.c
+++ b/drivers/scsi/ibmvscsi/ibmvfc.c
@@ -29,6 +29,7 @@
 #include <linux/interrupt.h>
 #include <linux/kthread.h>
 #include <linux/of.h>
+#include <linux/pm.h>
 #include <linux/stringify.h>
 #include <asm/firmware.h>
 #include <asm/irq.h>
@@ -4736,6 +4737,27 @@
 }
 
 /**
+ * ibmvfc_resume - Resume from suspend
+ * @dev:	device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ *
+ */
+static int ibmvfc_resume(struct device *dev)
+{
+	unsigned long flags;
+	struct ibmvfc_host *vhost = dev_get_drvdata(dev);
+	struct vio_dev *vdev = to_vio_dev(dev);
+
+	spin_lock_irqsave(vhost->host->host_lock, flags);
+	vio_disable_interrupts(vdev);
+	tasklet_schedule(&vhost->tasklet);
+	spin_unlock_irqrestore(vhost->host->host_lock, flags);
+	return 0;
+}
+
+/**
  * ibmvfc_get_desired_dma - Calculate DMA resources needed by the driver
  * @vdev:	vio device struct
  *
@@ -4755,6 +4777,10 @@
 };
 MODULE_DEVICE_TABLE(vio, ibmvfc_device_table);
 
+static struct dev_pm_ops ibmvfc_pm_ops = {
+	.resume = ibmvfc_resume
+};
+
 static struct vio_driver ibmvfc_driver = {
 	.id_table = ibmvfc_device_table,
 	.probe = ibmvfc_probe,
@@ -4763,6 +4789,7 @@
 	.driver = {
 		.name = IBMVFC_NAME,
 		.owner = THIS_MODULE,
+		.pm = &ibmvfc_pm_ops,
 	}
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index e3a18e0..dc1bcbe 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -71,6 +71,7 @@
 #include <linux/dma-mapping.h>
 #include <linux/delay.h>
 #include <linux/of.h>
+#include <linux/pm.h>
 #include <asm/firmware.h>
 #include <asm/vio.h>
 #include <scsi/scsi.h>
@@ -1991,6 +1992,19 @@
 }
 
 /**
+ * ibmvscsi_resume: Resume from suspend
+ * @dev:	device struct
+ *
+ * We may have lost an interrupt across suspend/resume, so kick the
+ * interrupt handler
+ */
+static int ibmvscsi_resume(struct device *dev)
+{
+	struct ibmvscsi_host_data *hostdata = dev_get_drvdata(dev);
+	return ibmvscsi_ops->resume(hostdata);
+}
+
+/**
  * ibmvscsi_device_table: Used by vio.c to match devices in the device tree we 
  * support.
  */
@@ -2000,6 +2014,10 @@
 };
 MODULE_DEVICE_TABLE(vio, ibmvscsi_device_table);
 
+static struct dev_pm_ops ibmvscsi_pm_ops = {
+	.resume = ibmvscsi_resume
+};
+
 static struct vio_driver ibmvscsi_driver = {
 	.id_table = ibmvscsi_device_table,
 	.probe = ibmvscsi_probe,
@@ -2008,6 +2026,7 @@
 	.driver = {
 		.name = "ibmvscsi",
 		.owner = THIS_MODULE,
+		.pm = &ibmvscsi_pm_ops,
 	}
 };
 
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.h b/drivers/scsi/ibmvscsi/ibmvscsi.h
index 7642530..9cb7c6a 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.h
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.h
@@ -120,6 +120,7 @@
 				  struct ibmvscsi_host_data *hostdata);
 	int (*send_crq)(struct ibmvscsi_host_data *hostdata,
 		       u64 word1, u64 word2);
+	int (*resume) (struct ibmvscsi_host_data *hostdata);
 };
 
 extern struct ibmvscsi_ops iseriesvscsi_ops;
diff --git a/drivers/scsi/ibmvscsi/iseries_vscsi.c b/drivers/scsi/ibmvscsi/iseries_vscsi.c
index 0775fde..f477645 100644
--- a/drivers/scsi/ibmvscsi/iseries_vscsi.c
+++ b/drivers/scsi/ibmvscsi/iseries_vscsi.c
@@ -158,10 +158,16 @@
 					     0);
 }
 
+static int iseriesvscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+	return 0;
+}
+
 struct ibmvscsi_ops iseriesvscsi_ops = {
 	.init_crq_queue = iseriesvscsi_init_crq_queue,
 	.release_crq_queue = iseriesvscsi_release_crq_queue,
 	.reset_crq_queue = iseriesvscsi_reset_crq_queue,
 	.reenable_crq_queue = iseriesvscsi_reenable_crq_queue,
 	.send_crq = iseriesvscsi_send_crq,
+	.resume = iseriesvscsi_resume,
 };
diff --git a/drivers/scsi/ibmvscsi/rpa_vscsi.c b/drivers/scsi/ibmvscsi/rpa_vscsi.c
index 462a857..63a30cb 100644
--- a/drivers/scsi/ibmvscsi/rpa_vscsi.c
+++ b/drivers/scsi/ibmvscsi/rpa_vscsi.c
@@ -334,10 +334,23 @@
 	return rc;
 }
 
+/**
+ * rpavscsi_resume: - resume after suspend
+ * @hostdata:	ibmvscsi_host_data of host
+ *
+ */
+static int rpavscsi_resume(struct ibmvscsi_host_data *hostdata)
+{
+	vio_disable_interrupts(to_vio_dev(hostdata->dev));
+	tasklet_schedule(&hostdata->srp_task);
+	return 0;
+}
+
 struct ibmvscsi_ops rpavscsi_ops = {
 	.init_crq_queue = rpavscsi_init_crq_queue,
 	.release_crq_queue = rpavscsi_release_crq_queue,
 	.reset_crq_queue = rpavscsi_reset_crq_queue,
 	.reenable_crq_queue = rpavscsi_reenable_crq_queue,
 	.send_crq = rpavscsi_send_crq,
+	.resume = rpavscsi_resume,
 };
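
The two resume implementations show why the hook belongs in the ibmvscsi_ops
table: iSeries firmware does not lose the interrupt, so its resume is a
no-op, while the RPA path must disable VIO interrupts and kick the srp
tasklet. A minimal sketch of the dispatch pattern (types and names are
stand-ins for the driver's):

    #include <stdio.h>

    struct demo_host { const char *name; };

    struct demo_ops {
        int (*resume)(struct demo_host *h);    /* per-platform hook */
    };

    static int demo_iseries_resume(struct demo_host *h)
    {
        (void)h;    /* nothing to recover on this platform */
        return 0;
    }

    static int demo_rpa_resume(struct demo_host *h)
    {
        printf("%s: disable vio irqs, schedule srp tasklet\n", h->name);
        return 0;
    }

    static const struct demo_ops iseries_ops = { .resume = demo_iseries_resume };
    static const struct demo_ops rpa_ops     = { .resume = demo_rpa_resume };

    int main(void)
    {
        struct demo_host host = { .name = "host0" };
        const struct demo_ops *ops = &rpa_ops;    /* chosen at probe time */

        return ops->resume(&host);    /* generic code stays platform-blind */
    }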
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 032f0d0..c79cd98 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -72,6 +72,8 @@
 #include <linux/moduleparam.h>
 #include <linux/libata.h>
 #include <linux/hdreg.h>
+#include <linux/reboot.h>
+#include <linux/stringify.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/processor.h>
@@ -91,8 +93,8 @@
 static int ipr_testmode = 0;
 static unsigned int ipr_fastfail = 0;
 static unsigned int ipr_transop_timeout = 0;
-static unsigned int ipr_enable_cache = 1;
 static unsigned int ipr_debug = 0;
+static unsigned int ipr_max_devs = IPR_DEFAULT_SIS64_DEVS;
 static unsigned int ipr_dual_ioa_raid = 1;
 static DEFINE_SPINLOCK(ipr_driver_lock);
 
@@ -104,13 +106,20 @@
 		{
 			.set_interrupt_mask_reg = 0x0022C,
 			.clr_interrupt_mask_reg = 0x00230,
+			.clr_interrupt_mask_reg32 = 0x00230,
 			.sense_interrupt_mask_reg = 0x0022C,
+			.sense_interrupt_mask_reg32 = 0x0022C,
 			.clr_interrupt_reg = 0x00228,
+			.clr_interrupt_reg32 = 0x00228,
 			.sense_interrupt_reg = 0x00224,
+			.sense_interrupt_reg32 = 0x00224,
 			.ioarrin_reg = 0x00404,
 			.sense_uproc_interrupt_reg = 0x00214,
+			.sense_uproc_interrupt_reg32 = 0x00214,
 			.set_uproc_interrupt_reg = 0x00214,
-			.clr_uproc_interrupt_reg = 0x00218
+			.set_uproc_interrupt_reg32 = 0x00214,
+			.clr_uproc_interrupt_reg = 0x00218,
+			.clr_uproc_interrupt_reg32 = 0x00218
 		}
 	},
 	{ /* Snipe and Scamp */
@@ -119,25 +128,59 @@
 		{
 			.set_interrupt_mask_reg = 0x00288,
 			.clr_interrupt_mask_reg = 0x0028C,
+			.clr_interrupt_mask_reg32 = 0x0028C,
 			.sense_interrupt_mask_reg = 0x00288,
+			.sense_interrupt_mask_reg32 = 0x00288,
 			.clr_interrupt_reg = 0x00284,
+			.clr_interrupt_reg32 = 0x00284,
 			.sense_interrupt_reg = 0x00280,
+			.sense_interrupt_reg32 = 0x00280,
 			.ioarrin_reg = 0x00504,
 			.sense_uproc_interrupt_reg = 0x00290,
+			.sense_uproc_interrupt_reg32 = 0x00290,
 			.set_uproc_interrupt_reg = 0x00290,
-			.clr_uproc_interrupt_reg = 0x00294
+			.set_uproc_interrupt_reg32 = 0x00290,
+			.clr_uproc_interrupt_reg = 0x00294,
+			.clr_uproc_interrupt_reg32 = 0x00294
+		}
+	},
+	{ /* CRoC */
+		.mailbox = 0x00040,
+		.cache_line_size = 0x20,
+		{
+			.set_interrupt_mask_reg = 0x00010,
+			.clr_interrupt_mask_reg = 0x00018,
+			.clr_interrupt_mask_reg32 = 0x0001C,
+			.sense_interrupt_mask_reg = 0x00010,
+			.sense_interrupt_mask_reg32 = 0x00014,
+			.clr_interrupt_reg = 0x00008,
+			.clr_interrupt_reg32 = 0x0000C,
+			.sense_interrupt_reg = 0x00000,
+			.sense_interrupt_reg32 = 0x00004,
+			.ioarrin_reg = 0x00070,
+			.sense_uproc_interrupt_reg = 0x00020,
+			.sense_uproc_interrupt_reg32 = 0x00024,
+			.set_uproc_interrupt_reg = 0x00020,
+			.set_uproc_interrupt_reg32 = 0x00024,
+			.clr_uproc_interrupt_reg = 0x00028,
+			.clr_uproc_interrupt_reg32 = 0x0002C,
+			.init_feedback_reg = 0x0005C,
+			.dump_addr_reg = 0x00064,
+			.dump_data_reg = 0x00068
 		}
 	},
 };
 
 static const struct ipr_chip_t ipr_chip[] = {
-	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, &ipr_chip_cfg[0] },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, &ipr_chip_cfg[1] },
-	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, &ipr_chip_cfg[1] }
+	{ PCI_VENDOR_ID_MYLEX, PCI_DEVICE_ID_IBM_GEMSTONE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CITRINE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E, IPR_USE_MSI, IPR_SIS32, &ipr_chip_cfg[0] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SNIPE, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP, IPR_USE_LSI, IPR_SIS32, &ipr_chip_cfg[1] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2, IPR_USE_MSI, IPR_SIS64, &ipr_chip_cfg[2] }
 };
 
 static int ipr_max_bus_speeds [] = {
@@ -156,12 +199,13 @@
 MODULE_PARM_DESC(fastfail, "Reduce timeouts and retries");
 module_param_named(transop_timeout, ipr_transop_timeout, int, 0);
 MODULE_PARM_DESC(transop_timeout, "Time in seconds to wait for adapter to come operational (default: 300)");
-module_param_named(enable_cache, ipr_enable_cache, int, 0);
-MODULE_PARM_DESC(enable_cache, "Enable adapter's non-volatile write cache (default: 1)");
 module_param_named(debug, ipr_debug, int, S_IRUGO | S_IWUSR);
 MODULE_PARM_DESC(debug, "Enable device driver debugging logging. Set to 1 to enable. (default: 0)");
 module_param_named(dual_ioa_raid, ipr_dual_ioa_raid, int, 0);
 MODULE_PARM_DESC(dual_ioa_raid, "Enable dual adapter RAID support. Set to 1 to enable. (default: 1)");
+module_param_named(max_devs, ipr_max_devs, int, 0);
+MODULE_PARM_DESC(max_devs, "Specify the maximum number of physical devices. "
+		 "[Default=" __stringify(IPR_DEFAULT_SIS64_DEVS) "]");
 MODULE_LICENSE("GPL");
 MODULE_VERSION(IPR_DRIVER_VERSION);
 
@@ -180,6 +224,20 @@
 	"FFFE: Soft device bus error recovered by the IOA"},
 	{0x01088100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4101: Soft device bus fabric error"},
+	{0x01100100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFC: Logical block guard error recovered by the device"},
+	{0x01100300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFC: Logical block reference tag error recovered by the device"},
+	{0x01108300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4171: Recovered scatter list tag / sequence number error"},
+	{0x01109000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FF3D: Recovered logical block CRC error on IOA to Host transfer"},
+	{0x01109200, 0, IPR_DEFAULT_LOG_LEVEL,
+	"4171: Recovered logical block sequence number error on IOA to Host transfer"},
+	{0x0110A000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFD: Recovered logical block reference tag error detected by the IOA"},
+	{0x0110A100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"FFFD: Logical block guard error recovered by the IOA"},
 	{0x01170600, 0, IPR_DEFAULT_LOG_LEVEL,
 	"FFF9: Device sector reassign successful"},
 	{0x01170900, 0, IPR_DEFAULT_LOG_LEVEL,
@@ -236,12 +294,28 @@
 	"3120: SCSI bus is not operational"},
 	{0x04088100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"4100: Hard device bus fabric error"},
+	{0x04100100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310C: Logical block guard error detected by the device"},
+	{0x04100300, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310C: Logical block reference tag error detected by the device"},
+	{0x04108300, 1, IPR_DEFAULT_LOG_LEVEL,
+	"4170: Scatter list tag / sequence number error"},
+	{0x04109000, 1, IPR_DEFAULT_LOG_LEVEL,
+	"8150: Logical block CRC error on IOA to Host transfer"},
+	{0x04109200, 1, IPR_DEFAULT_LOG_LEVEL,
+	"4170: Logical block sequence number error on IOA to Host transfer"},
+	{0x0410A000, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310D: Logical block reference tag error detected by the IOA"},
+	{0x0410A100, 0, IPR_DEFAULT_LOG_LEVEL,
+	"310D: Logical block guard error detected by the IOA"},
 	{0x04118000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9000: IOA reserved area data check"},
 	{0x04118100, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9001: IOA reserved area invalid data pattern"},
 	{0x04118200, 0, IPR_DEFAULT_LOG_LEVEL,
 	"9002: IOA reserved area LRC error"},
+	{0x04118300, 1, IPR_DEFAULT_LOG_LEVEL,
+	"Hardware Error, IOA metadata access error"},
 	{0x04320000, 0, IPR_DEFAULT_LOG_LEVEL,
 	"102E: Out of alternate sectors for disk storage"},
 	{0x04330000, 1, IPR_DEFAULT_LOG_LEVEL,
@@ -306,6 +380,8 @@
 	"Illegal request, commands not allowed to this device"},
 	{0x05258100, 0, 0,
 	"Illegal request, command not allowed to a secondary adapter"},
+	{0x05258200, 0, 0,
+	"Illegal request, command not allowed to a non-optimized resource"},
 	{0x05260000, 0, 0,
 	"Illegal request, invalid field in parameter list"},
 	{0x05260100, 0, 0,
@@ -468,7 +544,10 @@
 	trace_entry->time = jiffies;
 	trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
 	trace_entry->type = type;
-	trace_entry->ata_op_code = ipr_cmd->ioarcb.add_data.u.regs.command;
+	if (ipr_cmd->ioa_cfg->sis64)
+		trace_entry->ata_op_code = ipr_cmd->i.ata_ioadl.regs.command;
+	else
+		trace_entry->ata_op_code = ipr_cmd->ioarcb.u.add_data.u.regs.command;
 	trace_entry->cmd_index = ipr_cmd->cmd_index & 0xff;
 	trace_entry->res_handle = ipr_cmd->ioarcb.res_handle;
 	trace_entry->u.add_data = add_data;
@@ -488,16 +567,23 @@
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
-	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
+	dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
-	ioarcb->write_data_transfer_length = 0;
+	ioarcb->data_transfer_length = 0;
 	ioarcb->read_data_transfer_length = 0;
-	ioarcb->write_ioadl_len = 0;
+	ioarcb->ioadl_len = 0;
 	ioarcb->read_ioadl_len = 0;
-	ioarcb->write_ioadl_addr =
-		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
-	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+
+	if (ipr_cmd->ioa_cfg->sis64)
+		ioarcb->u.sis64_addr_data.data_ioadl_addr =
+			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+	else {
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
+
 	ioasa->ioasc = 0;
 	ioasa->residual_data_len = 0;
 	ioasa->u.gata.status = 0;
@@ -562,10 +648,15 @@
 	ioa_cfg->allow_interrupts = 0;
 
 	/* Set interrupt mask to stop all new interrupts */
-	writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+	if (ioa_cfg->sis64)
+		writeq(~0, ioa_cfg->regs.set_interrupt_mask_reg);
+	else
+		writel(~0, ioa_cfg->regs.set_interrupt_mask_reg);
 
 	/* Clear any pending interrupts */
-	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg);
+	if (ioa_cfg->sis64)
+		writel(~0, ioa_cfg->regs.clr_interrupt_reg);
+	writel(clr_ints, ioa_cfg->regs.clr_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 }
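
On the new SIS64 chips the interrupt mask is 64 bits wide, hence the writeq
for the full mask and the extra *_reg32 offsets for code that only needs the
low word. A standalone sketch of that decision (the MMIO accessors are
mocked; in the kernel they are writel/writeq against the mapped registers):

    #include <stdint.h>
    #include <stdio.h>

    /* Mocked stand-ins for the kernel's writel()/writeq(). */
    static void mock_writel(uint32_t v, const char *reg)
    {
        printf("writel 0x%08x -> %s\n", v, reg);
    }

    static void mock_writeq(uint64_t v, const char *reg)
    {
        printf("writeq 0x%016llx -> %s\n", (unsigned long long)v, reg);
    }

    static void mask_all_interrupts(int sis64)
    {
        if (sis64)
            mock_writeq(~0ULL, "set_interrupt_mask_reg");  /* 64-bit bank */
        else
            mock_writel(~0U, "set_interrupt_mask_reg");    /* legacy 32-bit */
    }

    int main(void)
    {
        mask_all_interrupts(1);
        mask_all_interrupts(0);
        return 0;
    }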
 
@@ -693,6 +784,35 @@
 }
 
 /**
+ * ipr_send_command -  Send driver initiated requests.
+ * @ipr_cmd:		ipr command struct
+ *
+ * This function sends a command to the adapter using the correct write call.
+ * In the case of sis64, calculate the ioarcb size required. Then or in the
+ * appropriate bits.
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_send_command(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	dma_addr_t send_dma_addr = ipr_cmd->dma_addr;
+
+	if (ioa_cfg->sis64) {
+		/* The default size is 256 bytes */
+		send_dma_addr |= 0x1;
+
+		/* If the number of ioadls * size of ioadl > 128 bytes,
+		   then use a 512 byte ioarcb */
+		if (ipr_cmd->dma_use_sg * sizeof(struct ipr_ioadl64_desc) > 128)
+			send_dma_addr |= 0x4;
+		writeq(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+	} else
+		writel(send_dma_addr, ioa_cfg->regs.ioarrin_reg);
+}
+
+/**
  * ipr_do_req -  Send driver initiated requests.
  * @ipr_cmd:		ipr command struct
  * @done:			done function
@@ -724,8 +844,8 @@
 	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, 0);
 
 	mb();
-	writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
-	       ioa_cfg->regs.ioarrin_reg);
+
+	ipr_send_command(ipr_cmd);
 }
 
 /**
@@ -747,6 +867,51 @@
 }
 
 /**
+ * ipr_init_ioadl - initialize the ioadl for the correct SIS type
+ * @ipr_cmd:	ipr command struct
+ * @dma_addr:	dma address
+ * @len:	transfer length
+ * @flags:	ioadl flag value
+ *
+ * This function initializes an ioadl in the case where there is only a single
+ * descriptor.
+ *
+ * Return value:
+ * 	nothing
+ **/
+static void ipr_init_ioadl(struct ipr_cmnd *ipr_cmd, dma_addr_t dma_addr,
+			   u32 len, int flags)
+{
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+
+	ipr_cmd->dma_use_sg = 1;
+
+	if (ipr_cmd->ioa_cfg->sis64) {
+		ioadl64->flags = cpu_to_be32(flags);
+		ioadl64->data_len = cpu_to_be32(len);
+		ioadl64->address = cpu_to_be64(dma_addr);
+
+		ipr_cmd->ioarcb.ioadl_len =
+		       	cpu_to_be32(sizeof(struct ipr_ioadl64_desc));
+		ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+	} else {
+		ioadl->flags_and_data_len = cpu_to_be32(flags | len);
+		ioadl->address = cpu_to_be32(dma_addr);
+
+		if (flags == IPR_IOADL_FLAGS_READ_LAST) {
+			ipr_cmd->ioarcb.read_ioadl_len =
+				cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+			ipr_cmd->ioarcb.read_data_transfer_length = cpu_to_be32(len);
+		} else {
+			ipr_cmd->ioarcb.ioadl_len =
+			       	cpu_to_be32(sizeof(struct ipr_ioadl_desc));
+			ipr_cmd->ioarcb.data_transfer_length = cpu_to_be32(len);
+		}
+	}
+}
+
+/**
  * ipr_send_blocking_cmd - Send command and sleep on its completion.
  * @ipr_cmd:	ipr command struct
  * @timeout_func:	function to invoke if command times out
@@ -803,11 +968,8 @@
 		ioarcb->cmd_pkt.cdb[7] = (sizeof(hostrcb->hcam) >> 8) & 0xff;
 		ioarcb->cmd_pkt.cdb[8] = sizeof(hostrcb->hcam) & 0xff;
 
-		ioarcb->read_data_transfer_length = cpu_to_be32(sizeof(hostrcb->hcam));
-		ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		ipr_cmd->ioadl[0].flags_and_data_len =
-			cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(hostrcb->hcam));
-		ipr_cmd->ioadl[0].address = cpu_to_be32(hostrcb->hostrcb_dma);
+		ipr_init_ioadl(ipr_cmd, hostrcb->hostrcb_dma,
+			       sizeof(hostrcb->hcam), IPR_IOADL_FLAGS_READ_LAST);
 
 		if (type == IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE)
 			ipr_cmd->done = ipr_process_ccn;
@@ -817,22 +979,54 @@
 		ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_IOA_RES_ADDR);
 
 		mb();
-		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
-		       ioa_cfg->regs.ioarrin_reg);
+
+		ipr_send_command(ipr_cmd);
 	} else {
 		list_add_tail(&hostrcb->queue, &ioa_cfg->hostrcb_free_q);
 	}
 }
 
 /**
- * ipr_init_res_entry - Initialize a resource entry struct.
+ * ipr_update_ata_class - Update the ata class in the resource entry
  * @res:	resource entry struct
+ * @proto:	cfgte device bus protocol value
  *
  * Return value:
  * 	none
  **/
-static void ipr_init_res_entry(struct ipr_resource_entry *res)
+static void ipr_update_ata_class(struct ipr_resource_entry *res, unsigned int proto)
 {
+	switch (proto) {
+	case IPR_PROTO_SATA:
+	case IPR_PROTO_SAS_STP:
+		res->ata_class = ATA_DEV_ATA;
+		break;
+	case IPR_PROTO_SATA_ATAPI:
+	case IPR_PROTO_SAS_STP_ATAPI:
+		res->ata_class = ATA_DEV_ATAPI;
+		break;
+	default:
+		res->ata_class = ATA_DEV_UNKNOWN;
+		break;
+	}
+}
+
+/**
+ * ipr_init_res_entry - Initialize a resource entry struct.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_init_res_entry(struct ipr_resource_entry *res,
+			       struct ipr_config_table_entry_wrapper *cfgtew)
+{
+	int found = 0;
+	unsigned int proto;
+	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+	struct ipr_resource_entry *gscsi_res = NULL;
+
 	res->needs_sync_complete = 0;
 	res->in_erp = 0;
 	res->add_to_ml = 0;
@@ -840,6 +1034,205 @@
 	res->resetting_device = 0;
 	res->sdev = NULL;
 	res->sata_port = NULL;
+
+	if (ioa_cfg->sis64) {
+		proto = cfgtew->u.cfgte64->proto;
+		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->qmodel = IPR_QUEUEING_MODEL64(res);
+		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+		memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+			sizeof(res->res_path));
+
+		res->bus = 0;
+		res->lun = scsilun_to_int(&res->dev_lun);
+
+		if (res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+			list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue) {
+				if (gscsi_res->dev_id == cfgtew->u.cfgte64->dev_id) {
+					found = 1;
+					res->target = gscsi_res->target;
+					break;
+				}
+			}
+			if (!found) {
+				res->target = find_first_zero_bit(ioa_cfg->target_ids,
+								  ioa_cfg->max_devs_supported);
+				set_bit(res->target, ioa_cfg->target_ids);
+			}
+
+			memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+				sizeof(res->dev_lun.scsi_lun));
+		} else if (res->type == IPR_RES_TYPE_IOAFP) {
+			res->bus = IPR_IOAFP_VIRTUAL_BUS;
+			res->target = 0;
+		} else if (res->type == IPR_RES_TYPE_ARRAY) {
+			res->bus = IPR_ARRAY_VIRTUAL_BUS;
+			res->target = find_first_zero_bit(ioa_cfg->array_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->array_ids);
+		} else if (res->type == IPR_RES_TYPE_VOLUME_SET) {
+			res->bus = IPR_VSET_VIRTUAL_BUS;
+			res->target = find_first_zero_bit(ioa_cfg->vset_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->vset_ids);
+		} else {
+			res->target = find_first_zero_bit(ioa_cfg->target_ids,
+							  ioa_cfg->max_devs_supported);
+			set_bit(res->target, ioa_cfg->target_ids);
+		}
+	} else {
+		proto = cfgtew->u.cfgte->proto;
+		res->qmodel = IPR_QUEUEING_MODEL(res);
+		res->flags = cfgtew->u.cfgte->flags;
+		if (res->flags & IPR_IS_IOA_RESOURCE)
+			res->type = IPR_RES_TYPE_IOAFP;
+		else
+			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+		res->bus = cfgtew->u.cfgte->res_addr.bus;
+		res->target = cfgtew->u.cfgte->res_addr.target;
+		res->lun = cfgtew->u.cfgte->res_addr.lun;
+	}
+
+	ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_is_same_device - Determine if two devices are the same.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ * 	1 if the devices are the same / 0 otherwise
+ **/
+static int ipr_is_same_device(struct ipr_resource_entry *res,
+			      struct ipr_config_table_entry_wrapper *cfgtew)
+{
+	if (res->ioa_cfg->sis64) {
+		if (!memcmp(&res->dev_id, &cfgtew->u.cfgte64->dev_id,
+					sizeof(cfgtew->u.cfgte64->dev_id)) &&
+			!memcmp(&res->lun, &cfgtew->u.cfgte64->lun,
+					sizeof(cfgtew->u.cfgte64->lun))) {
+			return 1;
+		}
+	} else {
+		if (res->bus == cfgtew->u.cfgte->res_addr.bus &&
+		    res->target == cfgtew->u.cfgte->res_addr.target &&
+		    res->lun == cfgtew->u.cfgte->res_addr.lun)
+			return 1;
+	}
+
+	return 0;
+}
+
+/**
+ * ipr_format_resource_path - Format the resource path for printing.
+ * @res_path:	resource path
+ * @buffer:	buffer to hold the formatted path string
+ *
+ * Return value:
+ * 	pointer to buffer
+ **/
+static char *ipr_format_resource_path(u8 *res_path, char *buffer)
+{
+	int i;
+	char *p = buffer;
+
+	p += sprintf(p, "%02X", res_path[0]);
+	for (i = 1; res_path[i] != 0xff; i++)
+		p += sprintf(p, "-%02X", res_path[i]);
+
+	return buffer;
+}
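For illustration, a self-contained user-space rendition of the formatter; the sample path bytes are invented, and the loop stops at the 0xff sentinel exactly as above:

#include <stdio.h>

int main(void)
{
	unsigned char res_path[] = { 0x00, 0x02, 0x04, 0xff };
	char buffer[64], *p = buffer;
	int i;

	p += sprintf(p, "%02X", res_path[0]);
	for (i = 1; res_path[i] != 0xff; i++)
		p += sprintf(p, "-%02X", res_path[i]);

	printf("%s\n", buffer);	/* prints 00-02-04 */
	return 0;
}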
+
+/**
+ * ipr_update_res_entry - Update the resource entry.
+ * @res:	resource entry struct
+ * @cfgtew:	config table entry wrapper struct
+ *
+ * Return value:
+ *      none
+ **/
+static void ipr_update_res_entry(struct ipr_resource_entry *res,
+				 struct ipr_config_table_entry_wrapper *cfgtew)
+{
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+	unsigned int proto;
+	int new_path = 0;
+
+	if (res->ioa_cfg->sis64) {
+		res->flags = cfgtew->u.cfgte64->flags;
+		res->res_flags = cfgtew->u.cfgte64->res_flags;
+		res->type = cfgtew->u.cfgte64->res_type & 0x0f;
+
+		memcpy(&res->std_inq_data, &cfgtew->u.cfgte64->std_inq_data,
+			sizeof(struct ipr_std_inq_data));
+
+		res->qmodel = IPR_QUEUEING_MODEL64(res);
+		proto = cfgtew->u.cfgte64->proto;
+		res->res_handle = cfgtew->u.cfgte64->res_handle;
+		res->dev_id = cfgtew->u.cfgte64->dev_id;
+
+		memcpy(&res->dev_lun.scsi_lun, &cfgtew->u.cfgte64->lun,
+			sizeof(res->dev_lun.scsi_lun));
+
+		if (memcmp(res->res_path, &cfgtew->u.cfgte64->res_path,
+					sizeof(res->res_path))) {
+			memcpy(res->res_path, &cfgtew->u.cfgte64->res_path,
+				sizeof(res->res_path));
+			new_path = 1;
+		}
+
+		if (res->sdev && new_path)
+			sdev_printk(KERN_INFO, res->sdev, "Resource path: %s\n",
+				    ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+	} else {
+		res->flags = cfgtew->u.cfgte->flags;
+		if (res->flags & IPR_IS_IOA_RESOURCE)
+			res->type = IPR_RES_TYPE_IOAFP;
+		else
+			res->type = cfgtew->u.cfgte->rsvd_subtype & 0x0f;
+
+		memcpy(&res->std_inq_data, &cfgtew->u.cfgte->std_inq_data,
+			sizeof(struct ipr_std_inq_data));
+
+		res->qmodel = IPR_QUEUEING_MODEL(res);
+		proto = cfgtew->u.cfgte->proto;
+		res->res_handle = cfgtew->u.cfgte->res_handle;
+	}
+
+	ipr_update_ata_class(res, proto);
+}
+
+/**
+ * ipr_clear_res_target - Clear the bit in the bit map representing the target
+ * 			  for the resource.
+ * @res:	resource entry struct
+ *
+ * Return value:
+ *      none
+ **/
+static void ipr_clear_res_target(struct ipr_resource_entry *res)
+{
+	struct ipr_resource_entry *gscsi_res = NULL;
+	struct ipr_ioa_cfg *ioa_cfg = res->ioa_cfg;
+
+	if (!ioa_cfg->sis64)
+		return;
+
+	if (res->bus == IPR_ARRAY_VIRTUAL_BUS)
+		clear_bit(res->target, ioa_cfg->array_ids);
+	else if (res->bus == IPR_VSET_VIRTUAL_BUS)
+		clear_bit(res->target, ioa_cfg->vset_ids);
+	else if (res->bus == 0 && res->type == IPR_RES_TYPE_GENERIC_SCSI) {
+		list_for_each_entry(gscsi_res, &ioa_cfg->used_res_q, queue)
+			if (gscsi_res->dev_id == res->dev_id && gscsi_res != res)
+				return;
+		clear_bit(res->target, ioa_cfg->target_ids);
+	} else if (res->bus == 0)
+		clear_bit(res->target, ioa_cfg->target_ids);
 }
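ipr_init_res_entry() claims a target id with find_first_zero_bit()/set_bit() and ipr_clear_res_target() releases it again; the gscsi case only releases once no other resource shares the dev_id. A minimal sketch of the allocate/release pairing, with hypothetical names:

/* Hedged sketch of the id-bitmap pattern used above. */
static unsigned int example_claim_id(unsigned long *ids, unsigned int max)
{
	unsigned int id = find_first_zero_bit(ids, max);

	if (id < max)
		set_bit(id, ids);	/* claim at discovery */
	return id;			/* id == max: map is full */
}

static void example_release_id(unsigned long *ids, unsigned int id)
{
	clear_bit(id, ids);		/* release at removal */
}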
 
 /**
@@ -851,17 +1244,24 @@
  * 	none
  **/
 static void ipr_handle_config_change(struct ipr_ioa_cfg *ioa_cfg,
-			      struct ipr_hostrcb *hostrcb)
+				     struct ipr_hostrcb *hostrcb)
 {
 	struct ipr_resource_entry *res = NULL;
-	struct ipr_config_table_entry *cfgte;
+	struct ipr_config_table_entry_wrapper cfgtew;
+	__be32 cc_res_handle;
+
 	u32 is_ndn = 1;
 
-	cfgte = &hostrcb->hcam.u.ccn.cfgte;
+	if (ioa_cfg->sis64) {
+		cfgtew.u.cfgte64 = &hostrcb->hcam.u.ccn.u.cfgte64;
+		cc_res_handle = cfgtew.u.cfgte64->res_handle;
+	} else {
+		cfgtew.u.cfgte = &hostrcb->hcam.u.ccn.u.cfgte;
+		cc_res_handle = cfgtew.u.cfgte->res_handle;
+	}
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if (!memcmp(&res->cfgte.res_addr, &cfgte->res_addr,
-			    sizeof(cfgte->res_addr))) {
+		if (res->res_handle == cc_res_handle) {
 			is_ndn = 0;
 			break;
 		}
@@ -879,20 +1279,22 @@
 				 struct ipr_resource_entry, queue);
 
 		list_del(&res->queue);
-		ipr_init_res_entry(res);
+		ipr_init_res_entry(res, &cfgtew);
 		list_add_tail(&res->queue, &ioa_cfg->used_res_q);
 	}
 
-	memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+	ipr_update_res_entry(res, &cfgtew);
 
 	if (hostrcb->hcam.notify_type == IPR_HOST_RCB_NOTIF_TYPE_REM_ENTRY) {
 		if (res->sdev) {
 			res->del_from_ml = 1;
-			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
+			res->res_handle = IPR_INVALID_RES_HANDLE;
 			if (ioa_cfg->allow_ml_add_del)
 				schedule_work(&ioa_cfg->work_q);
-		} else
+		} else {
+			ipr_clear_res_target(res);
 			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+		}
 	} else if (!res->sdev) {
 		res->add_to_ml = 1;
 		if (ioa_cfg->allow_ml_add_del)
@@ -1044,8 +1446,12 @@
 static void ipr_log_enhanced_cache_error(struct ipr_ioa_cfg *ioa_cfg,
 					 struct ipr_hostrcb *hostrcb)
 {
-	struct ipr_hostrcb_type_12_error *error =
-		&hostrcb->hcam.u.error.u.type_12_error;
+	struct ipr_hostrcb_type_12_error *error;
+
+	if (ioa_cfg->sis64)
+		error = &hostrcb->hcam.u.error64.u.type_12_error;
+	else
+		error = &hostrcb->hcam.u.error.u.type_12_error;
 
 	ipr_err("-----Current Configuration-----\n");
 	ipr_err("Cache Directory Card Information:\n");
@@ -1138,6 +1544,48 @@
 }
 
 /**
+ * ipr_log_sis64_config_error - Log a sis64 configuration error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_config_error(struct ipr_ioa_cfg *ioa_cfg,
+				       struct ipr_hostrcb *hostrcb)
+{
+	int errors_logged, i;
+	struct ipr_hostrcb64_device_data_entry_enhanced *dev_entry;
+	struct ipr_hostrcb_type_23_error *error;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	error = &hostrcb->hcam.u.error64.u.type_23_error;
+	errors_logged = be32_to_cpu(error->errors_logged);
+
+	ipr_err("Device Errors Detected/Logged: %d/%d\n",
+		be32_to_cpu(error->errors_detected), errors_logged);
+
+	dev_entry = error->dev;
+
+	for (i = 0; i < errors_logged; i++, dev_entry++) {
+		ipr_err_separator;
+
+		ipr_err("Device %d : %s", i + 1,
+			 ipr_format_resource_path(&dev_entry->res_path[0], &buffer[0]));
+		ipr_log_ext_vpd(&dev_entry->vpd);
+
+		ipr_err("-----New Device Information-----\n");
+		ipr_log_ext_vpd(&dev_entry->new_vpd);
+
+		ipr_err("Cache Directory Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->ioa_last_with_dev_vpd);
+
+		ipr_err("Adapter Card Information:\n");
+		ipr_log_ext_vpd(&dev_entry->cfc_last_with_dev_vpd);
+	}
+}
+
+/**
  * ipr_log_config_error - Log a configuration error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1331,7 +1779,11 @@
 {
 	struct ipr_hostrcb_type_17_error *error;
 
-	error = &hostrcb->hcam.u.error.u.type_17_error;
+	if (ioa_cfg->sis64)
+		error = &hostrcb->hcam.u.error64.u.type_17_error;
+	else
+		error = &hostrcb->hcam.u.error.u.type_17_error;
+
 	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
 	strim(error->failure_reason);
 
@@ -1438,6 +1890,42 @@
 		fabric->ioa_port, fabric->cascaded_expander, fabric->phy);
 }
 
+/**
+ * ipr_log64_fabric_path - Log a fabric path error
+ * @hostrcb:	hostrcb struct
+ * @fabric:		fabric descriptor
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log64_fabric_path(struct ipr_hostrcb *hostrcb,
+				  struct ipr_hostrcb64_fabric_desc *fabric)
+{
+	int i, j;
+	u8 path_state = fabric->path_state;
+	u8 active = path_state & IPR_PATH_ACTIVE_MASK;
+	u8 state = path_state & IPR_PATH_STATE_MASK;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	for (i = 0; i < ARRAY_SIZE(path_active_desc); i++) {
+		if (path_active_desc[i].active != active)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_state_desc); j++) {
+			if (path_state_desc[j].state != state)
+				continue;
+
+			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s\n",
+				     path_active_desc[i].desc, path_state_desc[j].desc,
+				     ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+			return;
+		}
+	}
+
+	ipr_err("Path state=%02X Resource Path=%s\n", path_state,
+		ipr_format_resource_path(&fabric->res_path[0], &buffer[0]));
+}
+
 static const struct {
 	u8 type;
 	char *desc;
@@ -1547,6 +2035,49 @@
 }
 
 /**
+ * ipr_log64_path_elem - Log a fabric path element.
+ * @hostrcb:	hostrcb struct
+ * @cfg:		fabric path element struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log64_path_elem(struct ipr_hostrcb *hostrcb,
+				struct ipr_hostrcb64_config_element *cfg)
+{
+	int i, j;
+	u8 desc_id = cfg->descriptor_id & IPR_DESCRIPTOR_MASK;
+	u8 type = cfg->type_status & IPR_PATH_CFG_TYPE_MASK;
+	u8 status = cfg->type_status & IPR_PATH_CFG_STATUS_MASK;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	if (type == IPR_PATH_CFG_NOT_EXIST || desc_id != IPR_DESCRIPTOR_SIS64)
+		return;
+
+	for (i = 0; i < ARRAY_SIZE(path_type_desc); i++) {
+		if (path_type_desc[i].type != type)
+			continue;
+
+		for (j = 0; j < ARRAY_SIZE(path_status_desc); j++) {
+			if (path_status_desc[j].status != status)
+				continue;
+
+			ipr_hcam_err(hostrcb, "%s %s: Resource Path=%s, Link rate=%s, WWN=%08X%08X\n",
+				     path_status_desc[j].desc, path_type_desc[i].desc,
+				     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+				     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+				     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+			return;
+		}
+	}
+	ipr_hcam_err(hostrcb, "Path element=%02X: Resource Path=%s, Link rate=%s "
+		     "WWN=%08X%08X\n", cfg->type_status,
+		     ipr_format_resource_path(&cfg->res_path[0], &buffer[0]),
+		     link_rate[cfg->link_rate & IPR_PHY_LINK_RATE_MASK],
+		     be32_to_cpu(cfg->wwid[0]), be32_to_cpu(cfg->wwid[1]));
+}
+
+/**
  * ipr_log_fabric_error - Log a fabric error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1584,6 +2115,96 @@
 }
 
 /**
+ * ipr_log_sis64_array_error - Log a sis64 array error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_array_error(struct ipr_ioa_cfg *ioa_cfg,
+				      struct ipr_hostrcb *hostrcb)
+{
+	int i, num_entries;
+	struct ipr_hostrcb_type_24_error *error;
+	struct ipr_hostrcb64_array_data_entry *array_entry;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+	const u8 zero_sn[IPR_SERIAL_NUM_LEN] = { [0 ... IPR_SERIAL_NUM_LEN-1] = '0' };
+
+	error = &hostrcb->hcam.u.error64.u.type_24_error;
+
+	ipr_err_separator;
+
+	ipr_err("RAID %s Array Configuration: %s\n",
+		error->protection_level,
+		ipr_format_resource_path(&error->last_res_path[0], &buffer[0]));
+
+	ipr_err_separator;
+
+	array_entry = error->array_member;
+	num_entries = min_t(u32, be32_to_cpu(error->num_entries),
+			    ARRAY_SIZE(error->array_member));
+
+	for (i = 0; i < num_entries; i++, array_entry++) {
+
+		if (!memcmp(array_entry->vpd.vpd.sn, zero_sn, IPR_SERIAL_NUM_LEN))
+			continue;
+
+		if (error->exposed_mode_adn == i)
+			ipr_err("Exposed Array Member %d:\n", i);
+		else
+			ipr_err("Array Member %d:\n", i);
+
+		ipr_err("Array Member %d:\n", i);
+		ipr_log_ext_vpd(&array_entry->vpd);
+		ipr_err("Current Location: %s",
+			 ipr_format_resource_path(&array_entry->res_path[0], &buffer[0]));
+		ipr_err("Expected Location: %s",
+			 ipr_format_resource_path(&array_entry->expected_res_path[0], &buffer[0]));
+
+		ipr_err_separator;
+	}
+}
+
+/**
+ * ipr_log_sis64_fabric_error - Log a sis64 fabric error.
+ * @ioa_cfg:	ioa config struct
+ * @hostrcb:	hostrcb struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_log_sis64_fabric_error(struct ipr_ioa_cfg *ioa_cfg,
+				       struct ipr_hostrcb *hostrcb)
+{
+	struct ipr_hostrcb_type_30_error *error;
+	struct ipr_hostrcb64_fabric_desc *fabric;
+	struct ipr_hostrcb64_config_element *cfg;
+	int i, add_len;
+
+	error = &hostrcb->hcam.u.error64.u.type_30_error;
+
+	error->failure_reason[sizeof(error->failure_reason) - 1] = '\0';
+	ipr_hcam_err(hostrcb, "%s\n", error->failure_reason);
+
+	add_len = be32_to_cpu(hostrcb->hcam.length) -
+		(offsetof(struct ipr_hostrcb64_error, u) +
+		 offsetof(struct ipr_hostrcb_type_30_error, desc));
+
+	for (i = 0, fabric = error->desc; i < error->num_entries; i++) {
+		ipr_log64_fabric_path(hostrcb, fabric);
+		for_each_fabric_cfg(fabric, cfg)
+			ipr_log64_path_elem(hostrcb, cfg);
+
+		add_len -= be16_to_cpu(fabric->length);
+		fabric = (struct ipr_hostrcb64_fabric_desc *)
+			((unsigned long)fabric + be16_to_cpu(fabric->length));
+	}
+
+	ipr_log_hex_data(ioa_cfg, (u32 *)fabric, add_len);
+}
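The loop above is a standard variable-length record walk: each fabric descriptor states its own length, the cursor advances by that many bytes, and whatever is left of add_len is dumped raw. A generic sketch of the shape, assuming a length-prefixed record type:

/* Hedged sketch; example_rec and example_walk are illustrative only. */
struct example_rec {
	__be16 length;			/* total bytes in this record */
	/* type-specific payload follows */
};

static void example_walk(struct example_rec *rec, int remaining)
{
	while (remaining >= (int)sizeof(*rec)) {
		int len = be16_to_cpu(rec->length);

		if (len < (int)sizeof(*rec))
			break;		/* malformed record: stop */
		/* ... process one record ... */
		remaining -= len;
		rec = (struct example_rec *)((unsigned long)rec + len);
	}
	/* 'remaining' trailing bytes at 'rec' feed the hex dump */
}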
+
+/**
  * ipr_log_generic_error - Log an adapter error.
  * @ioa_cfg:	ioa config struct
  * @hostrcb:	hostrcb struct
@@ -1642,13 +2263,16 @@
 	if (hostrcb->hcam.notifications_lost == IPR_HOST_RCB_NOTIFICATIONS_LOST)
 		dev_err(&ioa_cfg->pdev->dev, "Error notifications lost\n");
 
-	ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+	if (ioa_cfg->sis64)
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+	else
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
-	if (ioasc == IPR_IOASC_BUS_WAS_RESET ||
-	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER) {
+	if (!ioa_cfg->sis64 && (ioasc == IPR_IOASC_BUS_WAS_RESET ||
+	    ioasc == IPR_IOASC_BUS_WAS_RESET_BY_OTHER)) {
 		/* Tell the midlayer we had a bus reset so it will handle the UA properly */
 		scsi_report_bus_reset(ioa_cfg->host,
-				      hostrcb->hcam.u.error.failing_dev_res_addr.bus);
+				      hostrcb->hcam.u.error.fd_res_addr.bus);
 	}
 
 	error_index = ipr_get_error(ioasc);
@@ -1696,6 +2320,16 @@
 	case IPR_HOST_RCB_OVERLAY_ID_20:
 		ipr_log_fabric_error(ioa_cfg, hostrcb);
 		break;
+	case IPR_HOST_RCB_OVERLAY_ID_23:
+		ipr_log_sis64_config_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_24:
+	case IPR_HOST_RCB_OVERLAY_ID_26:
+		ipr_log_sis64_array_error(ioa_cfg, hostrcb);
+		break;
+	case IPR_HOST_RCB_OVERLAY_ID_30:
+		ipr_log_sis64_fabric_error(ioa_cfg, hostrcb);
+		break;
 	case IPR_HOST_RCB_OVERLAY_ID_1:
 	case IPR_HOST_RCB_OVERLAY_ID_DEFAULT:
 	default:
@@ -1720,7 +2354,12 @@
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_hostrcb *hostrcb = ipr_cmd->u.hostrcb;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
-	u32 fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+	u32 fd_ioasc;
+
+	if (ioa_cfg->sis64)
+		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error64.fd_ioasc);
+	else
+		fd_ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 
 	list_del(&hostrcb->queue);
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
@@ -1845,12 +2484,14 @@
 ipr_find_ses_entry(struct ipr_resource_entry *res)
 {
 	int i, j, matches;
+	struct ipr_std_inq_vpids *vpids;
 	const struct ipr_ses_table_entry *ste = ipr_ses_table;
 
 	for (i = 0; i < ARRAY_SIZE(ipr_ses_table); i++, ste++) {
 		for (j = 0, matches = 0; j < IPR_PROD_ID_LEN; j++) {
 			if (ste->compare_product_id_byte[j] == 'X') {
-				if (res->cfgte.std_inq_data.vpids.product_id[j] == ste->product_id[j])
+				vpids = &res->std_inq_data.vpids;
+				if (vpids->product_id[j] == ste->product_id[j])
 					matches++;
 				else
 					break;
@@ -1885,10 +2526,10 @@
 
 	/* Loop through each config table entry in the config table buffer */
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if (!(IPR_IS_SES_DEVICE(res->cfgte.std_inq_data)))
+		if (!(IPR_IS_SES_DEVICE(res->std_inq_data)))
 			continue;
 
-		if (bus != res->cfgte.res_addr.bus)
+		if (bus != res->bus)
 			continue;
 
 		if (!(ste = ipr_find_ses_entry(res)))
@@ -1934,6 +2575,31 @@
 }
 
 /**
+ * ipr_get_sis64_dump_data_section - Dump IOA memory
+ * @ioa_cfg:			ioa config struct
+ * @start_addr:			adapter address to dump
+ * @dest:			destination kernel buffer
+ * @length_in_words:		length to dump in 4 byte words
+ *
+ * Return value:
+ * 	0 on success
+ **/
+static int ipr_get_sis64_dump_data_section(struct ipr_ioa_cfg *ioa_cfg,
+					   u32 start_addr,
+					   __be32 *dest, u32 length_in_words)
+{
+	int i;
+
+	for (i = 0; i < length_in_words; i++) {
+		writel(start_addr + (i * 4), ioa_cfg->regs.dump_addr_reg);
+		*dest = cpu_to_be32(readl(ioa_cfg->regs.dump_data_reg));
+		dest++;
+	}
+
+	return 0;
+}
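SIS64 exposes IOA memory through an indirect window rather than the LDUMP handshake: software writes the address to dump_addr_reg and reads the word back through dump_data_reg. A sketch of that idiom in isolation (register parameters hypothetical):

/* Hedged sketch of an indirect MMIO window read. */
static u32 example_indirect_read(void __iomem *addr_reg,
				 void __iomem *data_reg, u32 addr)
{
	writel(addr, addr_reg);		/* select the word to fetch */
	return readl(data_reg);		/* fetch it */
}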
+
+/**
  * ipr_get_ldump_data_section - Dump IOA memory
  * @ioa_cfg:			ioa config struct
  * @start_addr:			adapter address to dump
@@ -1950,9 +2616,13 @@
 	volatile u32 temp_pcii_reg;
 	int i, delay = 0;
 
+	if (ioa_cfg->sis64)
+		return ipr_get_sis64_dump_data_section(ioa_cfg, start_addr,
+						       dest, length_in_words);
+
 	/* Write IOA interrupt reg starting LDUMP state  */
 	writel((IPR_UPROCI_RESET_ALERT | IPR_UPROCI_IO_DEBUG_ALERT),
-	       ioa_cfg->regs.set_uproc_interrupt_reg);
+	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 
 	/* Wait for IO debug acknowledge */
 	if (ipr_wait_iodbg_ack(ioa_cfg,
@@ -1971,7 +2641,7 @@
 
 	/* Signal address valid - clear IOA Reset alert */
 	writel(IPR_UPROCI_RESET_ALERT,
-	       ioa_cfg->regs.clr_uproc_interrupt_reg);
+	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 
 	for (i = 0; i < length_in_words; i++) {
 		/* Wait for IO debug acknowledge */
@@ -1996,10 +2666,10 @@
 
 	/* Signal end of block transfer. Set reset alert then clear IO debug ack */
 	writel(IPR_UPROCI_RESET_ALERT,
-	       ioa_cfg->regs.set_uproc_interrupt_reg);
+	       ioa_cfg->regs.set_uproc_interrupt_reg32);
 
 	writel(IPR_UPROCI_IO_DEBUG_ALERT,
-	       ioa_cfg->regs.clr_uproc_interrupt_reg);
+	       ioa_cfg->regs.clr_uproc_interrupt_reg32);
 
 	/* Signal dump data received - Clear IO debug Ack */
 	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE,
@@ -2008,7 +2678,7 @@
 	/* Wait for IOA to signal LDUMP exit - IOA reset alert will be cleared */
 	while (delay < IPR_LDUMP_MAX_SHORT_ACK_DELAY_IN_USEC) {
 		temp_pcii_reg =
-		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+		    readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
 
 		if (!(temp_pcii_reg & IPR_UPROCI_RESET_ALERT))
 			return 0;
@@ -2207,6 +2877,7 @@
 	u32 num_entries, start_off, end_off;
 	u32 bytes_to_copy, bytes_copied, rc;
 	struct ipr_sdt *sdt;
+	int valid = 1;
 	int i;
 
 	ENTER;
@@ -2220,7 +2891,7 @@
 
 	start_addr = readl(ioa_cfg->ioa_mailbox);
 
-	if (!ipr_sdt_is_fmt2(start_addr)) {
+	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(start_addr)) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Invalid dump table format: %lx\n", start_addr);
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -2249,7 +2920,6 @@
 
 	/* IOA Dump entry */
 	ipr_init_dump_entry_hdr(&ioa_dump->hdr);
-	ioa_dump->format = IPR_SDT_FMT2;
 	ioa_dump->hdr.len = 0;
 	ioa_dump->hdr.data_type = IPR_DUMP_DATA_TYPE_BINARY;
 	ioa_dump->hdr.id = IPR_DUMP_IOA_DUMP_ID;
@@ -2264,7 +2934,8 @@
 					sizeof(struct ipr_sdt) / sizeof(__be32));
 
 	/* Smart Dump table is ready to use and the first entry is valid */
-	if (rc || (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE)) {
+	if (rc || ((be32_to_cpu(sdt->hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+	    (be32_to_cpu(sdt->hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
 		dev_err(&ioa_cfg->pdev->dev,
 			"Dump of IOA failed. Dump table not valid: %d, %X.\n",
 			rc, be32_to_cpu(sdt->hdr.state));
@@ -2288,12 +2959,19 @@
 		}
 
 		if (sdt->entry[i].flags & IPR_SDT_VALID_ENTRY) {
-			sdt_word = be32_to_cpu(sdt->entry[i].bar_str_offset);
-			start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
-			end_off = be32_to_cpu(sdt->entry[i].end_offset);
+			sdt_word = be32_to_cpu(sdt->entry[i].start_token);
+			if (ioa_cfg->sis64)
+				bytes_to_copy = be32_to_cpu(sdt->entry[i].end_token);
+			else {
+				start_off = sdt_word & IPR_FMT2_MBX_ADDR_MASK;
+				end_off = be32_to_cpu(sdt->entry[i].end_token);
 
-			if (ipr_sdt_is_fmt2(sdt_word) && sdt_word) {
-				bytes_to_copy = end_off - start_off;
+				if (ipr_sdt_is_fmt2(sdt_word) && sdt_word)
+					bytes_to_copy = end_off - start_off;
+				else
+					valid = 0;
+			}
+			if (valid) {
 				if (bytes_to_copy > IPR_MAX_IOA_DUMP_SIZE) {
 					sdt->entry[i].flags &= ~IPR_SDT_VALID_ENTRY;
 					continue;
@@ -2422,9 +3100,9 @@
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
 		if (res->add_to_ml) {
-			bus = res->cfgte.res_addr.bus;
-			target = res->cfgte.res_addr.target;
-			lun = res->cfgte.res_addr.lun;
+			bus = res->bus;
+			target = res->target;
+			lun = res->lun;
 			res->add_to_ml = 0;
 			spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 			scsi_add_device(ioa_cfg->host, bus, target, lun);
@@ -2478,105 +3156,6 @@
 };
 #endif
 
-static const struct {
-	enum ipr_cache_state state;
-	char *name;
-} cache_state [] = {
-	{ CACHE_NONE, "none" },
-	{ CACHE_DISABLED, "disabled" },
-	{ CACHE_ENABLED, "enabled" }
-};
-
-/**
- * ipr_show_write_caching - Show the write caching attribute
- * @dev:	device struct
- * @buf:	buffer
- *
- * Return value:
- *	number of bytes printed to buffer
- **/
-static ssize_t ipr_show_write_caching(struct device *dev,
-				      struct device_attribute *attr, char *buf)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	int i, len = 0;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (cache_state[i].state == ioa_cfg->cache_state) {
-			len = snprintf(buf, PAGE_SIZE, "%s\n", cache_state[i].name);
-			break;
-		}
-	}
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	return len;
-}
-
-
-/**
- * ipr_store_write_caching - Enable/disable adapter write cache
- * @dev:	device struct
- * @buf:	buffer
- * @count:	buffer size
- *
- * This function will enable/disable adapter write cache.
- *
- * Return value:
- * 	count on success / other on failure
- **/
-static ssize_t ipr_store_write_caching(struct device *dev,
-				       struct device_attribute *attr,
-				       const char *buf, size_t count)
-{
-	struct Scsi_Host *shost = class_to_shost(dev);
-	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)shost->hostdata;
-	unsigned long lock_flags = 0;
-	enum ipr_cache_state new_state = CACHE_INVALID;
-	int i;
-
-	if (!capable(CAP_SYS_ADMIN))
-		return -EACCES;
-	if (ioa_cfg->cache_state == CACHE_NONE)
-		return -EINVAL;
-
-	for (i = 0; i < ARRAY_SIZE(cache_state); i++) {
-		if (!strncmp(cache_state[i].name, buf, strlen(cache_state[i].name))) {
-			new_state = cache_state[i].state;
-			break;
-		}
-	}
-
-	if (new_state != CACHE_DISABLED && new_state != CACHE_ENABLED)
-		return -EINVAL;
-
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
-	if (ioa_cfg->cache_state == new_state) {
-		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-		return count;
-	}
-
-	ioa_cfg->cache_state = new_state;
-	dev_info(&ioa_cfg->pdev->dev, "%s adapter write cache.\n",
-		 new_state == CACHE_ENABLED ? "Enabling" : "Disabling");
-	if (!ioa_cfg->in_reset_reload)
-		ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NORMAL);
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
-	wait_event(ioa_cfg->reset_wait_q, !ioa_cfg->in_reset_reload);
-
-	return count;
-}
-
-static struct device_attribute ipr_ioa_cache_attr = {
-	.attr = {
-		.name =		"write_cache",
-		.mode =		S_IRUGO | S_IWUSR,
-	},
-	.show = ipr_show_write_caching,
-	.store = ipr_store_write_caching
-};
-
 /**
  * ipr_show_fw_version - Show the firmware version
  * @dev:	class device struct
@@ -2976,6 +3555,37 @@
 }
 
 /**
+ * ipr_build_ucode_ioadl64 - Build a microcode download IOADL
+ * @ipr_cmd:		ipr command struct
+ * @sglist:		scatter/gather list
+ *
+ * Builds a microcode download IOA data list (IOADL).
+ *
+ **/
+static void ipr_build_ucode_ioadl64(struct ipr_cmnd *ipr_cmd,
+				    struct ipr_sglist *sglist)
+{
+	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+	struct scatterlist *scatterlist = sglist->scatterlist;
+	int i;
+
+	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
+	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+	ioarcb->ioadl_len =
+		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
+		ioadl64[i].flags = cpu_to_be32(IPR_IOADL_FLAGS_WRITE);
+		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(&scatterlist[i]));
+		ioadl64[i].address = cpu_to_be64(sg_dma_address(&scatterlist[i]));
+	}
+
+	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
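The 64-bit builder differs from its 32-bit counterpart mainly in element layout. A sketch of the two shapes, with field names taken from the conversions used in this patch (exact widths inferred, so treat as illustrative):

/* Hedged sketch: the legacy element packs flags and length into one
 * big-endian word over a 32-bit address; the SIS64 element carries
 * separate flags and length plus a full 64-bit DMA address. */
struct example_ioadl_desc {		/* 32-bit SIS, 8 bytes */
	__be32 flags_and_data_len;
	__be32 address;
};

struct example_ioadl64_desc {		/* SIS64, 16 bytes */
	__be32 flags;
	__be32 data_len;
	__be64 address;
};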
+
+/**
  * ipr_build_ucode_ioadl - Build a microcode download IOADL
  * @ipr_cmd:	ipr command struct
  * @sglist:		scatter/gather list
@@ -2987,14 +3597,15 @@
 				  struct ipr_sglist *sglist)
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 	struct scatterlist *scatterlist = sglist->scatterlist;
 	int i;
 
 	ipr_cmd->dma_use_sg = sglist->num_dma_sg;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-	ioarcb->write_data_transfer_length = cpu_to_be32(sglist->buffer_len);
-	ioarcb->write_ioadl_len =
+	ioarcb->data_transfer_length = cpu_to_be32(sglist->buffer_len);
+
+	ioarcb->ioadl_len =
 		cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 
 	for (i = 0; i < ipr_cmd->dma_use_sg; i++) {
@@ -3146,7 +3757,6 @@
 	&ipr_ioa_state_attr,
 	&ipr_ioa_reset_attr,
 	&ipr_update_fw_attr,
-	&ipr_ioa_cache_attr,
 	NULL,
 };
 
@@ -3450,7 +4060,7 @@
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = (struct ipr_resource_entry *)sdev->hostdata;
 	if (res)
-		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->cfgte.res_handle);
+		len = snprintf(buf, PAGE_SIZE, "%08X\n", res->res_handle);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 	return len;
 }
@@ -3463,8 +4073,43 @@
 	.show = ipr_show_adapter_handle
 };
 
+/**
+ * ipr_show_resource_path - Show the resource path for this device.
+ * @dev:	device struct
+ * @buf:	buffer
+ *
+ * Return value:
+ * 	number of bytes printed to buffer
+ **/
+static ssize_t ipr_show_resource_path(struct device *dev,
+				      struct device_attribute *attr, char *buf)
+{
+	struct scsi_device *sdev = to_scsi_device(dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)sdev->host->hostdata;
+	struct ipr_resource_entry *res;
+	unsigned long lock_flags = 0;
+	ssize_t len = -ENXIO;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
+
+	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	res = (struct ipr_resource_entry *)sdev->hostdata;
+	if (res)
+		len = snprintf(buf, PAGE_SIZE, "%s\n",
+			       ipr_format_resource_path(&res->res_path[0], &buffer[0]));
+	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	return len;
+}
+
+static struct device_attribute ipr_resource_path_attr = {
+	.attr = {
+		.name = 	"resource_path",
+		.mode =		S_IRUSR,
+	},
+	.show = ipr_show_resource_path
+};
+
 static struct device_attribute *ipr_dev_attrs[] = {
 	&ipr_adapter_handle_attr,
+	&ipr_resource_path_attr,
 	NULL,
 };
 
@@ -3517,9 +4162,9 @@
 	struct ipr_resource_entry *res;
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if ((res->cfgte.res_addr.bus == starget->channel) &&
-		    (res->cfgte.res_addr.target == starget->id) &&
-		    (res->cfgte.res_addr.lun == 0)) {
+		if ((res->bus == starget->channel) &&
+		    (res->target == starget->id) &&
+		    (res->lun == 0)) {
 			return res;
 		}
 	}
@@ -3589,6 +4234,17 @@
 static void ipr_target_destroy(struct scsi_target *starget)
 {
 	struct ipr_sata_port *sata_port = starget->hostdata;
+	struct Scsi_Host *shost = dev_to_shost(&starget->dev);
+	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *) shost->hostdata;
+
+	if (ioa_cfg->sis64) {
+		if (starget->channel == IPR_ARRAY_VIRTUAL_BUS)
+			clear_bit(starget->id, ioa_cfg->array_ids);
+		else if (starget->channel == IPR_VSET_VIRTUAL_BUS)
+			clear_bit(starget->id, ioa_cfg->vset_ids);
+		else if (starget->channel == 0)
+			clear_bit(starget->id, ioa_cfg->target_ids);
+	}
 
 	if (sata_port) {
 		starget->hostdata = NULL;
@@ -3610,9 +4266,9 @@
 	struct ipr_resource_entry *res;
 
 	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if ((res->cfgte.res_addr.bus == sdev->channel) &&
-		    (res->cfgte.res_addr.target == sdev->id) &&
-		    (res->cfgte.res_addr.lun == sdev->lun))
+		if ((res->bus == sdev->channel) &&
+		    (res->target == sdev->id) &&
+		    (res->lun == sdev->lun))
 			return res;
 	}
 
@@ -3661,6 +4317,7 @@
 	struct ipr_resource_entry *res;
 	struct ata_port *ap = NULL;
 	unsigned long lock_flags = 0;
+	char buffer[IPR_MAX_RES_PATH_LENGTH];
 
 	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
 	res = sdev->hostdata;
@@ -3687,6 +4344,9 @@
 			ata_sas_slave_configure(sdev, ap);
 		} else
 			scsi_adjust_queue_depth(sdev, 0, sdev->host->cmd_per_lun);
+		if (ioa_cfg->sis64)
+			sdev_printk(KERN_INFO, sdev, "Resource path: %s\n",
+			            ipr_format_resource_path(&res->res_path[0], &buffer[0]));
 		return 0;
 	}
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3828,14 +4488,19 @@
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioarcb = &ipr_cmd->ioarcb;
 	cmd_pkt = &ioarcb->cmd_pkt;
-	regs = &ioarcb->add_data.u.regs;
 
-	ioarcb->res_handle = res->cfgte.res_handle;
+	if (ipr_cmd->ioa_cfg->sis64) {
+		regs = &ipr_cmd->i.ata_ioadl.regs;
+		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+	} else
+		regs = &ioarcb->u.add_data.u.regs;
+
+	ioarcb->res_handle = res->res_handle;
 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 	cmd_pkt->cdb[0] = IPR_RESET_DEVICE;
 	if (ipr_is_gata(res)) {
 		cmd_pkt->cdb[2] = IPR_ATA_PHY_RESET;
-		ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(regs->flags));
+		ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(regs->flags));
 		regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
 	}
 
@@ -3880,19 +4545,7 @@
 	res = sata_port->res;
 	if (res) {
 		rc = ipr_device_reset(ioa_cfg, res);
-		switch(res->cfgte.proto) {
-		case IPR_PROTO_SATA:
-		case IPR_PROTO_SAS_STP:
-			*classes = ATA_DEV_ATA;
-			break;
-		case IPR_PROTO_SATA_ATAPI:
-		case IPR_PROTO_SAS_STP_ATAPI:
-			*classes = ATA_DEV_ATAPI;
-			break;
-		default:
-			*classes = ATA_DEV_UNKNOWN;
-			break;
-		};
+		*classes = res->ata_class;
 	}
 
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
@@ -3937,7 +4590,7 @@
 		return FAILED;
 
 	list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-		if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+		if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
 			if (ipr_cmd->scsi_cmd)
 				ipr_cmd->done = ipr_scsi_eh_done;
 			if (ipr_cmd->qc)
@@ -3959,7 +4612,7 @@
 		spin_lock_irq(scsi_cmd->device->host->host_lock);
 
 		list_for_each_entry(ipr_cmd, &ioa_cfg->pending_q, queue) {
-			if (ipr_cmd->ioarcb.res_handle == res->cfgte.res_handle) {
+			if (ipr_cmd->ioarcb.res_handle == res->res_handle) {
 				rc = -EIO;
 				break;
 			}
@@ -3998,13 +4651,13 @@
 	struct ipr_resource_entry *res;
 
 	ENTER;
-	list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
-		if (!memcmp(&res->cfgte.res_handle, &ipr_cmd->ioarcb.res_handle,
-			    sizeof(res->cfgte.res_handle))) {
-			scsi_report_bus_reset(ioa_cfg->host, res->cfgte.res_addr.bus);
-			break;
+	if (!ioa_cfg->sis64)
+		list_for_each_entry(res, &ioa_cfg->used_res_q, queue) {
+			if (res->res_handle == ipr_cmd->ioarcb.res_handle) {
+				scsi_report_bus_reset(ioa_cfg->host, res->bus);
+				break;
+			}
 		}
-	}
 
 	/*
 	 * If abort has not completed, indicate the reset has, else call the
@@ -4102,7 +4755,7 @@
 		return SUCCESS;
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
-	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+	ipr_cmd->ioarcb.res_handle = res->res_handle;
 	cmd_pkt = &ipr_cmd->ioarcb.cmd_pkt;
 	cmd_pkt->request_type = IPR_RQTYPE_IOACMD;
 	cmd_pkt->cdb[0] = IPR_CANCEL_ALL_REQUESTS;
@@ -4239,11 +4892,29 @@
 		return IRQ_NONE;
 	}
 
-	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
-	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+	int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
 
-	/* If an interrupt on the adapter did not occur, ignore it */
+	/* If an interrupt on the adapter did not occur, ignore it.
+	 * Or in the case of SIS 64, check for a stage change interrupt.
+	 */
 	if (unlikely((int_reg & IPR_PCII_OPER_INTERRUPTS) == 0)) {
+		if (ioa_cfg->sis64) {
+			int_mask_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+			int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+			if (int_reg & IPR_PCII_IPL_STAGE_CHANGE) {
+
+				/* clear stage change */
+				writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_reg);
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+				list_del(&ioa_cfg->reset_cmd->queue);
+				del_timer(&ioa_cfg->reset_cmd->timer);
+				ipr_reset_ioa_job(ioa_cfg->reset_cmd);
+				spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+				return IRQ_HANDLED;
+			}
+		}
+
 		spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 		return IRQ_NONE;
 	}
@@ -4286,8 +4957,8 @@
 		if (ipr_cmd != NULL) {
 			/* Clear the PCI interrupt */
 			do {
-				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
-				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+				writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg32);
+				int_reg = readl(ioa_cfg->regs.sense_interrupt_reg32) & ~int_mask_reg;
 			} while (int_reg & IPR_PCII_HRRQ_UPDATED &&
 					num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
 
@@ -4309,15 +4980,15 @@
 }
 
 /**
- * ipr_build_ioadl - Build a scatter/gather list and map the buffer
+ * ipr_build_ioadl64 - Build a scatter/gather list and map the buffer
  * @ioa_cfg:	ioa config struct
  * @ipr_cmd:	ipr command struct
  *
  * Return value:
  * 	0 on success / -1 on failure
  **/
-static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
-			   struct ipr_cmnd *ipr_cmd)
+static int ipr_build_ioadl64(struct ipr_ioa_cfg *ioa_cfg,
+			     struct ipr_cmnd *ipr_cmd)
 {
 	int i, nseg;
 	struct scatterlist *sg;
@@ -4325,7 +4996,7 @@
 	u32 ioadl_flags = 0;
 	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
 
 	length = scsi_bufflen(scsi_cmd);
 	if (!length)
@@ -4342,8 +5013,55 @@
 	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-		ioarcb->write_data_transfer_length = cpu_to_be32(length);
-		ioarcb->write_ioadl_len =
+	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE)
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+	scsi_for_each_sg(scsi_cmd, sg, ipr_cmd->dma_use_sg, i) {
+		ioadl64[i].flags = cpu_to_be32(ioadl_flags);
+		ioadl64[i].data_len = cpu_to_be32(sg_dma_len(sg));
+		ioadl64[i].address = cpu_to_be64(sg_dma_address(sg));
+	}
+
+	ioadl64[i-1].flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+	return 0;
+}
+
+/**
+ * ipr_build_ioadl - Build a scatter/gather list and map the buffer
+ * @ioa_cfg:	ioa config struct
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ * 	0 on success / -1 on failure
+ **/
+static int ipr_build_ioadl(struct ipr_ioa_cfg *ioa_cfg,
+			   struct ipr_cmnd *ipr_cmd)
+{
+	int i, nseg;
+	struct scatterlist *sg;
+	u32 length;
+	u32 ioadl_flags = 0;
+	struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
+	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
+
+	length = scsi_bufflen(scsi_cmd);
+	if (!length)
+		return 0;
+
+	nseg = scsi_dma_map(scsi_cmd);
+	if (nseg < 0) {
+		dev_err(&ioa_cfg->pdev->dev, "pci_map_sg failed!\n");
+		return -1;
+	}
+
+	ipr_cmd->dma_use_sg = nseg;
+
+	if (scsi_cmd->sc_data_direction == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+		ioarcb->data_transfer_length = cpu_to_be32(length);
+		ioarcb->ioadl_len =
 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 	} else if (scsi_cmd->sc_data_direction == DMA_FROM_DEVICE) {
 		ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -4352,11 +5070,10 @@
 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 	}
 
-	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->add_data.u.ioadl)) {
-		ioadl = ioarcb->add_data.u.ioadl;
-		ioarcb->write_ioadl_addr =
-			cpu_to_be32(be32_to_cpu(ioarcb->ioarcb_host_pci_addr) +
-				    offsetof(struct ipr_ioarcb, add_data));
+	if (ipr_cmd->dma_use_sg <= ARRAY_SIZE(ioarcb->u.add_data.u.ioadl)) {
+		ioadl = ioarcb->u.add_data.u.ioadl;
+		ioarcb->write_ioadl_addr = cpu_to_be32((ipr_cmd->dma_addr) +
+				    offsetof(struct ipr_ioarcb, u.add_data));
 		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
 	}
 
@@ -4446,18 +5163,24 @@
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_ioasa *ioasa = &ipr_cmd->ioasa;
-	dma_addr_t dma_addr = be32_to_cpu(ioarcb->ioarcb_host_pci_addr);
+	dma_addr_t dma_addr = ipr_cmd->dma_addr;
 
 	memset(&ioarcb->cmd_pkt, 0, sizeof(struct ipr_cmd_pkt));
-	ioarcb->write_data_transfer_length = 0;
+	ioarcb->data_transfer_length = 0;
 	ioarcb->read_data_transfer_length = 0;
-	ioarcb->write_ioadl_len = 0;
+	ioarcb->ioadl_len = 0;
 	ioarcb->read_ioadl_len = 0;
 	ioasa->ioasc = 0;
 	ioasa->residual_data_len = 0;
-	ioarcb->write_ioadl_addr =
-		cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
-	ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+
+	if (ipr_cmd->ioa_cfg->sis64)
+		ioarcb->u.sis64_addr_data.data_ioadl_addr =
+			cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+	else {
+		ioarcb->write_ioadl_addr =
+			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+	}
 }
 
 /**
@@ -4489,15 +5212,8 @@
 	cmd_pkt->flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 	cmd_pkt->timeout = cpu_to_be16(IPR_REQUEST_SENSE_TIMEOUT / HZ);
 
-	ipr_cmd->ioadl[0].flags_and_data_len =
-		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | SCSI_SENSE_BUFFERSIZE);
-	ipr_cmd->ioadl[0].address =
-		cpu_to_be32(ipr_cmd->sense_buffer_dma);
-
-	ipr_cmd->ioarcb.read_ioadl_len =
-		cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-	ipr_cmd->ioarcb.read_data_transfer_length =
-		cpu_to_be32(SCSI_SENSE_BUFFERSIZE);
+	ipr_init_ioadl(ipr_cmd, ipr_cmd->sense_buffer_dma,
+		       SCSI_SENSE_BUFFERSIZE, IPR_IOADL_FLAGS_READ_LAST);
 
 	ipr_do_req(ipr_cmd, ipr_erp_done, ipr_timeout,
 		   IPR_REQUEST_SENSE_TIMEOUT * 2);
@@ -4893,9 +5609,9 @@
 
 	memcpy(ioarcb->cmd_pkt.cdb, scsi_cmd->cmnd, scsi_cmd->cmd_len);
 	ipr_cmd->scsi_cmd = scsi_cmd;
-	ioarcb->res_handle = res->cfgte.res_handle;
+	ioarcb->res_handle = res->res_handle;
 	ipr_cmd->done = ipr_scsi_done;
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 
 	if (ipr_is_gscsi(res) || ipr_is_vset_device(res)) {
 		if (scsi_cmd->underflow == 0)
@@ -4916,13 +5632,16 @@
 	    (!ipr_is_gscsi(res) || scsi_cmd->cmnd[0] == IPR_QUERY_RSRC_STATE))
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
-	if (likely(rc == 0))
-		rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+	if (likely(rc == 0)) {
+		if (ioa_cfg->sis64)
+			rc = ipr_build_ioadl64(ioa_cfg, ipr_cmd);
+		else
+			rc = ipr_build_ioadl(ioa_cfg, ipr_cmd);
+	}
 
 	if (likely(rc == 0)) {
 		mb();
-		writel(be32_to_cpu(ipr_cmd->ioarcb.ioarcb_host_pci_addr),
-		       ioa_cfg->regs.ioarrin_reg);
+		ipr_send_command(ipr_cmd);
 	} else {
 		 list_move_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
 		 return SCSI_MLQUEUE_HOST_BUSY;
@@ -5035,20 +5754,9 @@
 		goto out_unlock;
 	}
 
-	switch(res->cfgte.proto) {
-	case IPR_PROTO_SATA:
-	case IPR_PROTO_SAS_STP:
-		ap->link.device[0].class = ATA_DEV_ATA;
-		break;
-	case IPR_PROTO_SATA_ATAPI:
-	case IPR_PROTO_SAS_STP_ATAPI:
-		ap->link.device[0].class = ATA_DEV_ATAPI;
-		break;
-	default:
-		ap->link.device[0].class = ATA_DEV_UNKNOWN;
+	ap->link.device[0].class = res->ata_class;
+	if (ap->link.device[0].class == ATA_DEV_UNKNOWN)
 		ata_port_disable(ap);
-		break;
-	};
 
 out_unlock:
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
@@ -5134,8 +5842,7 @@
 	ipr_dump_ioasa(ioa_cfg, ipr_cmd, res);
 
 	if (be32_to_cpu(ipr_cmd->ioasa.ioasc_specific) & IPR_ATA_DEVICE_WAS_RESET)
-		scsi_report_device_reset(ioa_cfg->host, res->cfgte.res_addr.bus,
-					 res->cfgte.res_addr.target);
+		scsi_report_device_reset(ioa_cfg->host, res->bus, res->target);
 
 	if (IPR_IOASC_SENSE_KEY(ioasc) > RECOVERED_ERROR)
 		qc->err_mask |= __ac_err_mask(ipr_cmd->ioasa.u.gata.status);
@@ -5146,6 +5853,52 @@
 }
 
 /**
+ * ipr_build_ata_ioadl64 - Build an ATA scatter/gather list
+ * @ipr_cmd:	ipr command struct
+ * @qc:		ATA queued command
+ *
+ **/
+static void ipr_build_ata_ioadl64(struct ipr_cmnd *ipr_cmd,
+				  struct ata_queued_cmd *qc)
+{
+	u32 ioadl_flags = 0;
+	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
+	struct ipr_ioadl64_desc *ioadl64 = ipr_cmd->i.ioadl64;
+	struct ipr_ioadl64_desc *last_ioadl64 = NULL;
+	int len = qc->nbytes;
+	struct scatterlist *sg;
+	unsigned int si;
+	dma_addr_t dma_addr = ipr_cmd->dma_addr;
+
+	if (len == 0)
+		return;
+
+	if (qc->dma_dir == DMA_TO_DEVICE) {
+		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
+		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
+	} else if (qc->dma_dir == DMA_FROM_DEVICE)
+		ioadl_flags = IPR_IOADL_FLAGS_READ;
+
+	ioarcb->data_transfer_length = cpu_to_be32(len);
+	ioarcb->ioadl_len =
+		cpu_to_be32(sizeof(struct ipr_ioadl64_desc) * ipr_cmd->dma_use_sg);
+	ioarcb->u.sis64_addr_data.data_ioadl_addr =
+		cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ata_ioadl));
+
+	for_each_sg(qc->sg, sg, qc->n_elem, si) {
+		ioadl64->flags = cpu_to_be32(ioadl_flags);
+		ioadl64->data_len = cpu_to_be32(sg_dma_len(sg));
+		ioadl64->address = cpu_to_be64(sg_dma_address(sg));
+
+		last_ioadl64 = ioadl64;
+		ioadl64++;
+	}
+
+	if (likely(last_ioadl64))
+		last_ioadl64->flags |= cpu_to_be32(IPR_IOADL_FLAGS_LAST);
+}
+
+/**
  * ipr_build_ata_ioadl - Build an ATA scatter/gather list
  * @ipr_cmd:	ipr command struct
  * @qc:		ATA queued command
@@ -5156,7 +5909,7 @@
 {
 	u32 ioadl_flags = 0;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
+	struct ipr_ioadl_desc *ioadl = ipr_cmd->i.ioadl;
 	struct ipr_ioadl_desc *last_ioadl = NULL;
 	int len = qc->nbytes;
 	struct scatterlist *sg;
@@ -5168,8 +5921,8 @@
 	if (qc->dma_dir == DMA_TO_DEVICE) {
 		ioadl_flags = IPR_IOADL_FLAGS_WRITE;
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
-		ioarcb->write_data_transfer_length = cpu_to_be32(len);
-		ioarcb->write_ioadl_len =
+		ioarcb->data_transfer_length = cpu_to_be32(len);
+		ioarcb->ioadl_len =
 			cpu_to_be32(sizeof(struct ipr_ioadl_desc) * ipr_cmd->dma_use_sg);
 	} else if (qc->dma_dir == DMA_FROM_DEVICE) {
 		ioadl_flags = IPR_IOADL_FLAGS_READ;
@@ -5212,25 +5965,34 @@
 
 	ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
 	ioarcb = &ipr_cmd->ioarcb;
-	regs = &ioarcb->add_data.u.regs;
 
-	memset(&ioarcb->add_data, 0, sizeof(ioarcb->add_data));
-	ioarcb->add_cmd_parms_len = cpu_to_be32(sizeof(ioarcb->add_data.u.regs));
+	if (ioa_cfg->sis64) {
+		regs = &ipr_cmd->i.ata_ioadl.regs;
+		ioarcb->add_cmd_parms_offset = cpu_to_be16(sizeof(*ioarcb));
+	} else
+		regs = &ioarcb->u.add_data.u.regs;
+
+	memset(regs, 0, sizeof(*regs));
+	ioarcb->add_cmd_parms_len = cpu_to_be16(sizeof(*regs));
 
 	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
 	ipr_cmd->qc = qc;
 	ipr_cmd->done = ipr_sata_done;
-	ipr_cmd->ioarcb.res_handle = res->cfgte.res_handle;
+	ipr_cmd->ioarcb.res_handle = res->res_handle;
 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_ATA_PASSTHRU;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_LINK_DESC;
 	ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_NO_ULEN_CHK;
 	ipr_cmd->dma_use_sg = qc->n_elem;
 
-	ipr_build_ata_ioadl(ipr_cmd, qc);
+	if (ioa_cfg->sis64)
+		ipr_build_ata_ioadl64(ipr_cmd, qc);
+	else
+		ipr_build_ata_ioadl(ipr_cmd, qc);
+
 	regs->flags |= IPR_ATA_FLAG_STATUS_ON_GOOD_COMPLETION;
 	ipr_copy_sata_tf(regs, &qc->tf);
 	memcpy(ioarcb->cmd_pkt.cdb, qc->cdb, IPR_MAX_CDB_LEN);
-	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_PHYS_LOC(res->cfgte.res_addr));
+	ipr_trc_hook(ipr_cmd, IPR_TRACE_START, IPR_GET_RES_PHYS_LOC(res));
 
 	switch (qc->tf.protocol) {
 	case ATA_PROT_NODATA:
@@ -5257,8 +6019,9 @@
 	}
 
 	mb();
-	writel(be32_to_cpu(ioarcb->ioarcb_host_pci_addr),
-	       ioa_cfg->regs.ioarrin_reg);
+
+	ipr_send_command(ipr_cmd);
+
 	return 0;
 }
 
@@ -5459,7 +6222,7 @@
  * ipr_set_supported_devs - Send Set Supported Devices for a device
  * @ipr_cmd:	ipr command struct
  *
- * This function send a Set Supported Devices to the adapter
+ * This function sends a Set Supported Devices to the adapter
  *
  * Return value:
  * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
@@ -5468,7 +6231,6 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_supported_device *supp_dev = &ioa_cfg->vpd_cbs->supp_dev;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 	struct ipr_resource_entry *res = ipr_cmd->u.res;
 
@@ -5479,28 +6241,28 @@
 			continue;
 
 		ipr_cmd->u.res = res;
-		ipr_set_sup_dev_dflt(supp_dev, &res->cfgte.std_inq_data.vpids);
+		ipr_set_sup_dev_dflt(supp_dev, &res->std_inq_data.vpids);
 
 		ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 		ioarcb->cmd_pkt.flags_hi |= IPR_FLAGS_HI_WRITE_NOT_READ;
 		ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
 
 		ioarcb->cmd_pkt.cdb[0] = IPR_SET_SUPPORTED_DEVICES;
+		ioarcb->cmd_pkt.cdb[1] = IPR_SET_ALL_SUPPORTED_DEVICES;
 		ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_supported_device) >> 8) & 0xff;
 		ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_supported_device) & 0xff;
 
-		ioadl->flags_and_data_len = cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST |
-							sizeof(struct ipr_supported_device));
-		ioadl->address = cpu_to_be32(ioa_cfg->vpd_cbs_dma +
-					     offsetof(struct ipr_misc_cbs, supp_dev));
-		ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-		ioarcb->write_data_transfer_length =
-			cpu_to_be32(sizeof(struct ipr_supported_device));
+		ipr_init_ioadl(ipr_cmd,
+			       ioa_cfg->vpd_cbs_dma +
+				 offsetof(struct ipr_misc_cbs, supp_dev),
+			       sizeof(struct ipr_supported_device),
+			       IPR_IOADL_FLAGS_WRITE_LAST);
 
 		ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
 			   IPR_SET_SUP_DEVICE_TIMEOUT);
 
-		ipr_cmd->job_step = ipr_set_supported_devs;
+		if (!ioa_cfg->sis64)
+			ipr_cmd->job_step = ipr_set_supported_devs;
 		return IPR_RC_JOB_RETURN;
 	}
 
@@ -5508,36 +6270,6 @@
 }
 
 /**
- * ipr_setup_write_cache - Disable write cache if needed
- * @ipr_cmd:	ipr command struct
- *
- * This function sets up adapters write cache to desired setting
- *
- * Return value:
- * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
- **/
-static int ipr_setup_write_cache(struct ipr_cmnd *ipr_cmd)
-{
-	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-
-	ipr_cmd->job_step = ipr_set_supported_devs;
-	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
-				    struct ipr_resource_entry, queue);
-
-	if (ioa_cfg->cache_state != CACHE_DISABLED)
-		return IPR_RC_JOB_CONTINUE;
-
-	ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
-	ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
-	ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
-	ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
-
-	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
-
-	return IPR_RC_JOB_RETURN;
-}
-
-/**
  * ipr_get_mode_page - Locate specified mode page
  * @mode_pages:	mode page buffer
  * @page_code:	page code to find
@@ -5695,10 +6427,9 @@
  * 	none
  **/
 static void ipr_build_mode_select(struct ipr_cmnd *ipr_cmd,
-				  __be32 res_handle, u8 parm, u32 dma_addr,
-				  u8 xfer_len)
+				  __be32 res_handle, u8 parm,
+				  dma_addr_t dma_addr, u8 xfer_len)
 {
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 
 	ioarcb->res_handle = res_handle;
@@ -5708,11 +6439,7 @@
 	ioarcb->cmd_pkt.cdb[1] = parm;
 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
 
-	ioadl->flags_and_data_len =
-		cpu_to_be32(IPR_IOADL_FLAGS_WRITE_LAST | xfer_len);
-	ioadl->address = cpu_to_be32(dma_addr);
-	ioarcb->write_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-	ioarcb->write_data_transfer_length = cpu_to_be32(xfer_len);
+	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_WRITE_LAST);
 }
 
 /**
@@ -5742,7 +6469,9 @@
 			      ioa_cfg->vpd_cbs_dma + offsetof(struct ipr_misc_cbs, mode_pages),
 			      length);
 
-	ipr_cmd->job_step = ipr_setup_write_cache;
+	ipr_cmd->job_step = ipr_set_supported_devs;
+	ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+				    struct ipr_resource_entry, queue);
 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 
 	LEAVE;
@@ -5762,9 +6491,8 @@
  **/
 static void ipr_build_mode_sense(struct ipr_cmnd *ipr_cmd,
 				 __be32 res_handle,
-				 u8 parm, u32 dma_addr, u8 xfer_len)
+				 u8 parm, dma_addr_t dma_addr, u8 xfer_len)
 {
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
 
 	ioarcb->res_handle = res_handle;
@@ -5773,11 +6501,7 @@
 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
 
-	ioadl->flags_and_data_len =
-		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
-	ioadl->address = cpu_to_be32(dma_addr);
-	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
+	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
 }
 
 /**
@@ -5815,10 +6539,13 @@
  **/
 static int ipr_reset_mode_sense_failed(struct ipr_cmnd *ipr_cmd)
 {
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	u32 ioasc = be32_to_cpu(ipr_cmd->ioasa.ioasc);
 
 	if (ioasc == IPR_IOASC_IR_INVALID_REQ_TYPE_OR_PKT) {
-		ipr_cmd->job_step = ipr_setup_write_cache;
+		ipr_cmd->job_step = ipr_set_supported_devs;
+		ipr_cmd->u.res = list_entry(ioa_cfg->used_res_q.next,
+					    struct ipr_resource_entry, queue);
 		return IPR_RC_JOB_CONTINUE;
 	}
 
@@ -5958,24 +6685,36 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_resource_entry *res, *temp;
-	struct ipr_config_table_entry *cfgte;
-	int found, i;
+	struct ipr_config_table_entry_wrapper cfgtew;
+	int entries, found, flag, i;
 	LIST_HEAD(old_res);
 
 	ENTER;
-	if (ioa_cfg->cfg_table->hdr.flags & IPR_UCODE_DOWNLOAD_REQ)
+	if (ioa_cfg->sis64)
+		flag = ioa_cfg->u.cfg_table64->hdr64.flags;
+	else
+		flag = ioa_cfg->u.cfg_table->hdr.flags;
+
+	if (flag & IPR_UCODE_DOWNLOAD_REQ)
 		dev_err(&ioa_cfg->pdev->dev, "Microcode download required\n");
 
 	list_for_each_entry_safe(res, temp, &ioa_cfg->used_res_q, queue)
 		list_move_tail(&res->queue, &old_res);
 
-	for (i = 0; i < ioa_cfg->cfg_table->hdr.num_entries; i++) {
-		cfgte = &ioa_cfg->cfg_table->dev[i];
+	if (ioa_cfg->sis64)
+		entries = ioa_cfg->u.cfg_table64->hdr64.num_entries;
+	else
+		entries = ioa_cfg->u.cfg_table->hdr.num_entries;
+
+	for (i = 0; i < entries; i++) {
+		if (ioa_cfg->sis64)
+			cfgtew.u.cfgte64 = &ioa_cfg->u.cfg_table64->dev[i];
+		else
+			cfgtew.u.cfgte = &ioa_cfg->u.cfg_table->dev[i];
 		found = 0;
 
 		list_for_each_entry_safe(res, temp, &old_res, queue) {
-			if (!memcmp(&res->cfgte.res_addr,
-				    &cfgte->res_addr, sizeof(cfgte->res_addr))) {
+			if (ipr_is_same_device(res, &cfgtew)) {
 				list_move_tail(&res->queue, &ioa_cfg->used_res_q);
 				found = 1;
 				break;
@@ -5992,24 +6731,27 @@
 			res = list_entry(ioa_cfg->free_res_q.next,
 					 struct ipr_resource_entry, queue);
 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
-			ipr_init_res_entry(res);
+			ipr_init_res_entry(res, &cfgtew);
 			res->add_to_ml = 1;
 		}
 
 		if (found)
-			memcpy(&res->cfgte, cfgte, sizeof(struct ipr_config_table_entry));
+			ipr_update_res_entry(res, &cfgtew);
 	}
 
 	list_for_each_entry_safe(res, temp, &old_res, queue) {
 		if (res->sdev) {
 			res->del_from_ml = 1;
-			res->cfgte.res_handle = IPR_INVALID_RES_HANDLE;
+			res->res_handle = IPR_INVALID_RES_HANDLE;
 			list_move_tail(&res->queue, &ioa_cfg->used_res_q);
-		} else {
-			list_move_tail(&res->queue, &ioa_cfg->free_res_q);
 		}
 	}
 
+	list_for_each_entry_safe(res, temp, &old_res, queue) {
+		ipr_clear_res_target(res);
+		list_move_tail(&res->queue, &ioa_cfg->free_res_q);
+	}
+
 	if (ioa_cfg->dual_raid && ipr_dual_ioa_raid)
 		ipr_cmd->job_step = ipr_ioafp_mode_sense_page24;
 	else
@@ -6033,7 +6775,6 @@
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 	struct ipr_inquiry_page3 *ucode_vpd = &ioa_cfg->vpd_cbs->page3_data;
 	struct ipr_inquiry_cap *cap = &ioa_cfg->vpd_cbs->cap;
 
@@ -6047,16 +6788,11 @@
 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 
 	ioarcb->cmd_pkt.cdb[0] = IPR_QUERY_IOA_CONFIG;
-	ioarcb->cmd_pkt.cdb[7] = (sizeof(struct ipr_config_table) >> 8) & 0xff;
-	ioarcb->cmd_pkt.cdb[8] = sizeof(struct ipr_config_table) & 0xff;
+	ioarcb->cmd_pkt.cdb[7] = (ioa_cfg->cfg_table_size >> 8) & 0xff;
+	ioarcb->cmd_pkt.cdb[8] = ioa_cfg->cfg_table_size & 0xff;
 
-	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-	ioarcb->read_data_transfer_length =
-		cpu_to_be32(sizeof(struct ipr_config_table));
-
-	ioadl->address = cpu_to_be32(ioa_cfg->cfg_table_dma);
-	ioadl->flags_and_data_len =
-		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | sizeof(struct ipr_config_table));
+	ipr_init_ioadl(ipr_cmd, ioa_cfg->cfg_table_dma, ioa_cfg->cfg_table_size,
+		       IPR_IOADL_FLAGS_READ_LAST);
 
 	ipr_cmd->job_step = ipr_init_res_table;
 
@@ -6076,10 +6812,9 @@
  * 	none
  **/
 static void ipr_ioafp_inquiry(struct ipr_cmnd *ipr_cmd, u8 flags, u8 page,
-			      u32 dma_addr, u8 xfer_len)
+			      dma_addr_t dma_addr, u8 xfer_len)
 {
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
-	struct ipr_ioadl_desc *ioadl = ipr_cmd->ioadl;
 
 	ENTER;
 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_SCSICDB;
@@ -6090,12 +6825,7 @@
 	ioarcb->cmd_pkt.cdb[2] = page;
 	ioarcb->cmd_pkt.cdb[4] = xfer_len;
 
-	ioarcb->read_ioadl_len = cpu_to_be32(sizeof(struct ipr_ioadl_desc));
-	ioarcb->read_data_transfer_length = cpu_to_be32(xfer_len);
-
-	ioadl->address = cpu_to_be32(dma_addr);
-	ioadl->flags_and_data_len =
-		cpu_to_be32(IPR_IOADL_FLAGS_READ_LAST | xfer_len);
+	ipr_init_ioadl(ipr_cmd, dma_addr, xfer_len, IPR_IOADL_FLAGS_READ_LAST);
 
 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
 	LEAVE;
@@ -6166,13 +6896,9 @@
 static int ipr_ioafp_page3_inquiry(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
-	struct ipr_inquiry_page0 *page0 = &ioa_cfg->vpd_cbs->page0_data;
 
 	ENTER;
 
-	if (!ipr_inquiry_page_supported(page0, 1))
-		ioa_cfg->cache_state = CACHE_NONE;
-
 	ipr_cmd->job_step = ipr_ioafp_cap_inquiry;
 
 	ipr_ioafp_inquiry(ipr_cmd, 1, 3,
@@ -6240,7 +6966,7 @@
 }
 
 /**
- * ipr_ioafp_indentify_hrrq - Send Identify Host RRQ.
+ * ipr_ioafp_identify_hrrq - Send Identify Host RRQ.
  * @ipr_cmd:	ipr command struct
  *
 * This function sends an Identify Host Request Response Queue
@@ -6249,7 +6975,7 @@
  * Return value:
  * 	IPR_RC_JOB_RETURN
  **/
-static int ipr_ioafp_indentify_hrrq(struct ipr_cmnd *ipr_cmd)
+static int ipr_ioafp_identify_hrrq(struct ipr_cmnd *ipr_cmd)
 {
 	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
 	struct ipr_ioarcb *ioarcb = &ipr_cmd->ioarcb;
@@ -6261,19 +6987,32 @@
 	ioarcb->res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
 
 	ioarcb->cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+	if (ioa_cfg->sis64)
+		ioarcb->cmd_pkt.cdb[1] = 0x1;
 	ioarcb->cmd_pkt.cdb[2] =
-		((u32) ioa_cfg->host_rrq_dma >> 24) & 0xff;
+		((u64) ioa_cfg->host_rrq_dma >> 24) & 0xff;
 	ioarcb->cmd_pkt.cdb[3] =
-		((u32) ioa_cfg->host_rrq_dma >> 16) & 0xff;
+		((u64) ioa_cfg->host_rrq_dma >> 16) & 0xff;
 	ioarcb->cmd_pkt.cdb[4] =
-		((u32) ioa_cfg->host_rrq_dma >> 8) & 0xff;
+		((u64) ioa_cfg->host_rrq_dma >> 8) & 0xff;
 	ioarcb->cmd_pkt.cdb[5] =
-		((u32) ioa_cfg->host_rrq_dma) & 0xff;
+		((u64) ioa_cfg->host_rrq_dma) & 0xff;
 	ioarcb->cmd_pkt.cdb[7] =
 		((sizeof(u32) * IPR_NUM_CMD_BLKS) >> 8) & 0xff;
 	ioarcb->cmd_pkt.cdb[8] =
 		(sizeof(u32) * IPR_NUM_CMD_BLKS) & 0xff;
 
+	if (ioa_cfg->sis64) {
+		ioarcb->cmd_pkt.cdb[10] =
+			((u64) ioa_cfg->host_rrq_dma >> 56) & 0xff;
+		ioarcb->cmd_pkt.cdb[11] =
+			((u64) ioa_cfg->host_rrq_dma >> 48) & 0xff;
+		ioarcb->cmd_pkt.cdb[12] =
+			((u64) ioa_cfg->host_rrq_dma >> 40) & 0xff;
+		ioarcb->cmd_pkt.cdb[13] =
+			((u64) ioa_cfg->host_rrq_dma >> 32) & 0xff;
+	}
+
 	ipr_cmd->job_step = ipr_ioafp_std_inquiry;
 
 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout, IPR_INTERNAL_TIMEOUT);
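/*
 * Illustrative sketch, not part of this patch: the Identify Host RRQ CDB
 * carries the RRQ DMA address big-endian, low 32 bits in cdb[2..5] and,
 * on SIS-64 adapters, the high 32 bits in cdb[10..13].  A hypothetical
 * helper doing the same byte-splitting as the code above:
 */
static void ipr_cdb_put_rrq_addr(u8 *cdb, dma_addr_t addr, int sis64)
{
	cdb[2] = ((u64) addr >> 24) & 0xff;	/* bits 31:24 */
	cdb[3] = ((u64) addr >> 16) & 0xff;
	cdb[4] = ((u64) addr >> 8) & 0xff;
	cdb[5] = ((u64) addr) & 0xff;		/* bits 7:0 */
	if (sis64) {
		cdb[10] = ((u64) addr >> 56) & 0xff;	/* bits 63:56 */
		cdb[11] = ((u64) addr >> 48) & 0xff;
		cdb[12] = ((u64) addr >> 40) & 0xff;
		cdb[13] = ((u64) addr >> 32) & 0xff;	/* bits 39:32 */
	}
}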
@@ -6354,7 +7093,58 @@
 	ioa_cfg->toggle_bit = 1;
 
 	/* Zero out config table */
-	memset(ioa_cfg->cfg_table, 0, sizeof(struct ipr_config_table));
+	memset(ioa_cfg->u.cfg_table, 0, ioa_cfg->cfg_table_size);
+}
+
+/**
+ * ipr_reset_next_stage - Process IPL stage change based on feedback register.
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ * 	IPR_RC_JOB_CONTINUE / IPR_RC_JOB_RETURN
+ **/
+static int ipr_reset_next_stage(struct ipr_cmnd *ipr_cmd)
+{
+	unsigned long stage, stage_time;
+	u32 feedback;
+	volatile u32 int_reg;
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+	u64 maskval = 0;
+
+	feedback = readl(ioa_cfg->regs.init_feedback_reg);
+	stage = feedback & IPR_IPL_INIT_STAGE_MASK;
+	stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;
+
+	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %ld\n", stage, stage_time);
+
+	/* sanity check the stage_time value */
+	if (stage_time < IPR_IPL_INIT_MIN_STAGE_TIME)
+		stage_time = IPR_IPL_INIT_MIN_STAGE_TIME;
+	else if (stage_time > IPR_LONG_OPERATIONAL_TIMEOUT)
+		stage_time = IPR_LONG_OPERATIONAL_TIMEOUT;
+
+	if (stage == IPR_IPL_INIT_STAGE_UNKNOWN) {
+		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.set_interrupt_mask_reg);
+		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+		stage_time = ioa_cfg->transop_timeout;
+		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+	} else if (stage == IPR_IPL_INIT_STAGE_TRANSOP) {
+		ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
+		maskval = IPR_PCII_IPL_STAGE_CHANGE;
+		maskval = (maskval << 32) | IPR_PCII_IOA_TRANS_TO_OPER;
+		writeq(maskval, ioa_cfg->regs.set_interrupt_mask_reg);
+		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
+		return IPR_RC_JOB_CONTINUE;
+	}
+
+	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
+	ipr_cmd->timer.expires = jiffies + stage_time * HZ;
+	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
+	ipr_cmd->done = ipr_reset_ioa_job;
+	add_timer(&ipr_cmd->timer);
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->pending_q);
+
+	return IPR_RC_JOB_RETURN;
 }
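/*
 * Illustrative sketch, not part of this patch: the init feedback register
 * packs the IPL stage id into the top byte and the stage time (seconds)
 * into the low 16 bits, matching the IPR_IPL_INIT_* masks added to ipr.h
 * below.  A hypothetical decode, with an example register value:
 */
static void ipr_show_ipl_stage(u32 feedback)
{
	unsigned long stage = feedback & IPR_IPL_INIT_STAGE_MASK;
	unsigned long stage_time = feedback & IPR_IPL_INIT_STAGE_TIME_MASK;

	/* e.g. feedback == 0xB0000078: stage 0xB0000000 (TRANSOP), time 0x78 = 120s */
	ipr_dbg("IPL stage = 0x%lx, IPL stage time = %lu\n", stage, stage_time);
}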
 
 /**
@@ -6373,7 +7163,7 @@
 	volatile u32 int_reg;
 
 	ENTER;
-	ipr_cmd->job_step = ipr_ioafp_indentify_hrrq;
+	ipr_cmd->job_step = ipr_ioafp_identify_hrrq;
 	ipr_init_ioa_mem(ioa_cfg);
 
 	ioa_cfg->allow_interrupts = 1;
@@ -6381,19 +7171,27 @@
 
 	if (int_reg & IPR_PCII_IOA_TRANS_TO_OPER) {
 		writel((IPR_PCII_ERROR_INTERRUPTS | IPR_PCII_HRRQ_UPDATED),
-		       ioa_cfg->regs.clr_interrupt_mask_reg);
+		       ioa_cfg->regs.clr_interrupt_mask_reg32);
 		int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 		return IPR_RC_JOB_CONTINUE;
 	}
 
 	/* Enable destructive diagnostics on IOA */
-	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg);
+	writel(ioa_cfg->doorbell, ioa_cfg->regs.set_uproc_interrupt_reg32);
 
-	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg);
+	writel(IPR_PCII_OPER_INTERRUPTS, ioa_cfg->regs.clr_interrupt_mask_reg32);
+	if (ioa_cfg->sis64)
+		writel(IPR_PCII_IPL_STAGE_CHANGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 
 	dev_info(&ioa_cfg->pdev->dev, "Initializing IOA.\n");
 
+	if (ioa_cfg->sis64) {
+		ipr_cmd->job_step = ipr_reset_next_stage;
+		return IPR_RC_JOB_CONTINUE;
+	}
+
 	ipr_cmd->timer.data = (unsigned long) ipr_cmd;
 	ipr_cmd->timer.expires = jiffies + (ioa_cfg->transop_timeout * HZ);
 	ipr_cmd->timer.function = (void (*)(unsigned long))ipr_oper_timeout;
@@ -6463,7 +7261,7 @@
 
 	mailbox = readl(ioa_cfg->ioa_mailbox);
 
-	if (!ipr_sdt_is_fmt2(mailbox)) {
+	if (!ioa_cfg->sis64 && !ipr_sdt_is_fmt2(mailbox)) {
 		ipr_unit_check_no_data(ioa_cfg);
 		return;
 	}
@@ -6472,15 +7270,20 @@
 	rc = ipr_get_ldump_data_section(ioa_cfg, mailbox, (__be32 *) &sdt,
 					(sizeof(struct ipr_uc_sdt)) / sizeof(__be32));
 
-	if (rc || (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE) ||
-	    !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY)) {
+	if (rc || !(sdt.entry[0].flags & IPR_SDT_VALID_ENTRY) ||
+	    ((be32_to_cpu(sdt.hdr.state) != IPR_FMT3_SDT_READY_TO_USE) &&
+	    (be32_to_cpu(sdt.hdr.state) != IPR_FMT2_SDT_READY_TO_USE))) {
 		ipr_unit_check_no_data(ioa_cfg);
 		return;
 	}
 
 	/* Find length of the first sdt entry (UC buffer) */
-	length = (be32_to_cpu(sdt.entry[0].end_offset) -
-		  be32_to_cpu(sdt.entry[0].bar_str_offset)) & IPR_FMT2_MBX_ADDR_MASK;
+	if (be32_to_cpu(sdt.hdr.state) == IPR_FMT3_SDT_READY_TO_USE)
+		length = be32_to_cpu(sdt.entry[0].end_token);
+	else
+		length = (be32_to_cpu(sdt.entry[0].end_token) -
+			  be32_to_cpu(sdt.entry[0].start_token)) &
+			  IPR_FMT2_MBX_ADDR_MASK;
 
 	hostrcb = list_entry(ioa_cfg->hostrcb_free_q.next,
 			     struct ipr_hostrcb, queue);
@@ -6488,13 +7291,13 @@
 	memset(&hostrcb->hcam, 0, sizeof(hostrcb->hcam));
 
 	rc = ipr_get_ldump_data_section(ioa_cfg,
-					be32_to_cpu(sdt.entry[0].bar_str_offset),
+					be32_to_cpu(sdt.entry[0].start_token),
 					(__be32 *)&hostrcb->hcam,
 					min(length, (int)sizeof(hostrcb->hcam)) / sizeof(__be32));
 
 	if (!rc) {
 		ipr_handle_log_data(ioa_cfg, hostrcb);
-		ioasc = be32_to_cpu(hostrcb->hcam.u.error.failing_dev_ioasc);
+		ioasc = be32_to_cpu(hostrcb->hcam.u.error.fd_ioasc);
 		if (ioasc == IPR_IOASC_NR_IOA_RESET_REQUIRED &&
 		    ioa_cfg->sdt_state == GET_DUMP)
 			ioa_cfg->sdt_state = WAIT_FOR_DUMP;
@@ -6722,7 +7525,7 @@
 
 	if ((rc == PCIBIOS_SUCCESSFUL) && (cmd_reg & PCI_COMMAND_MEMORY)) {
 		ipr_mask_and_clear_interrupts(ioa_cfg, ~0);
-		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg);
+		writel(IPR_UPROCI_RESET_ALERT, ioa_cfg->regs.set_uproc_interrupt_reg32);
 		ipr_cmd->job_step = ipr_reset_wait_to_start_bist;
 	} else {
 		ipr_cmd->job_step = ioa_cfg->reset;
@@ -6785,7 +7588,10 @@
 	ipr_cmd->ioarcb.cmd_pkt.cdb[7] = (sglist->buffer_len & 0x00ff00) >> 8;
 	ipr_cmd->ioarcb.cmd_pkt.cdb[8] = sglist->buffer_len & 0x0000ff;
 
-	ipr_build_ucode_ioadl(ipr_cmd, sglist);
+	if (ioa_cfg->sis64)
+		ipr_build_ucode_ioadl64(ipr_cmd, sglist);
+	else
+		ipr_build_ucode_ioadl(ipr_cmd, sglist);
 	ipr_cmd->job_step = ipr_reset_ucode_download_done;
 
 	ipr_do_req(ipr_cmd, ipr_reset_ioa_job, ipr_timeout,
@@ -7154,8 +7960,8 @@
 	ipr_free_cmd_blks(ioa_cfg);
 	pci_free_consistent(ioa_cfg->pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
-	pci_free_consistent(ioa_cfg->pdev, sizeof(struct ipr_config_table),
-			    ioa_cfg->cfg_table,
+	pci_free_consistent(ioa_cfg->pdev, ioa_cfg->cfg_table_size,
+			    ioa_cfg->u.cfg_table,
 			    ioa_cfg->cfg_table_dma);
 
 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7209,7 +8015,7 @@
 	int i;
 
 	ioa_cfg->ipr_cmd_pool = pci_pool_create (IPR_NAME, ioa_cfg->pdev,
-						 sizeof(struct ipr_cmnd), 8, 0);
+						 sizeof(struct ipr_cmnd), 16, 0);
 
 	if (!ioa_cfg->ipr_cmd_pool)
 		return -ENOMEM;
@@ -7227,13 +8033,25 @@
 		ioa_cfg->ipr_cmnd_list_dma[i] = dma_addr;
 
 		ioarcb = &ipr_cmd->ioarcb;
-		ioarcb->ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+		ipr_cmd->dma_addr = dma_addr;
+		if (ioa_cfg->sis64)
+			ioarcb->a.ioarcb_host_pci_addr64 = cpu_to_be64(dma_addr);
+		else
+			ioarcb->a.ioarcb_host_pci_addr = cpu_to_be32(dma_addr);
+
 		ioarcb->host_response_handle = cpu_to_be32(i << 2);
-		ioarcb->write_ioadl_addr =
-			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioadl));
-		ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
-		ioarcb->ioasa_host_pci_addr =
-			cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+		if (ioa_cfg->sis64) {
+			ioarcb->u.sis64_addr_data.data_ioadl_addr =
+				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, i.ioadl64));
+			ioarcb->u.sis64_addr_data.ioasa_host_pci_addr =
+				cpu_to_be64(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+		} else {
+			ioarcb->write_ioadl_addr =
+				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, i.ioadl));
+			ioarcb->read_ioadl_addr = ioarcb->write_ioadl_addr;
+			ioarcb->ioasa_host_pci_addr =
+				cpu_to_be32(dma_addr + offsetof(struct ipr_cmnd, ioasa));
+		}
 		ioarcb->ioasa_len = cpu_to_be16(sizeof(struct ipr_ioasa));
 		ipr_cmd->cmd_index = i;
 		ipr_cmd->ioa_cfg = ioa_cfg;
@@ -7260,13 +8078,24 @@
 
 	ENTER;
 	ioa_cfg->res_entries = kzalloc(sizeof(struct ipr_resource_entry) *
-				       IPR_MAX_PHYSICAL_DEVS, GFP_KERNEL);
+				       ioa_cfg->max_devs_supported, GFP_KERNEL);
 
 	if (!ioa_cfg->res_entries)
 		goto out;
 
-	for (i = 0; i < IPR_MAX_PHYSICAL_DEVS; i++)
+	if (ioa_cfg->sis64) {
+		ioa_cfg->target_ids = kzalloc(sizeof(unsigned long) *
+					      BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+		ioa_cfg->array_ids = kzalloc(sizeof(unsigned long) *
+					     BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+		ioa_cfg->vset_ids = kzalloc(sizeof(unsigned long) *
+					    BITS_TO_LONGS(ioa_cfg->max_devs_supported), GFP_KERNEL);
+	}
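/*
 * Illustrative sketch, not part of this patch: the three bitmaps track
 * which SIS-64 generated target/array/vset ids are in use.  Allocating
 * an id presumably follows the usual kernel bitmap idiom, e.g.:
 */
static unsigned long ipr_alloc_target_id(struct ipr_ioa_cfg *ioa_cfg)	/* hypothetical */
{
	unsigned long id = find_first_zero_bit(ioa_cfg->target_ids,
					       ioa_cfg->max_devs_supported);

	if (id < ioa_cfg->max_devs_supported)
		set_bit(id, ioa_cfg->target_ids);	/* claim the id */
	return id;	/* clear_bit() would release it when the device goes away */
}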
+
+	for (i = 0; i < ioa_cfg->max_devs_supported; i++) {
 		list_add_tail(&ioa_cfg->res_entries[i].queue, &ioa_cfg->free_res_q);
+		ioa_cfg->res_entries[i].ioa_cfg = ioa_cfg;
+	}
 
 	ioa_cfg->vpd_cbs = pci_alloc_consistent(ioa_cfg->pdev,
 						sizeof(struct ipr_misc_cbs),
@@ -7285,11 +8114,11 @@
 	if (!ioa_cfg->host_rrq)
 		goto out_ipr_free_cmd_blocks;
 
-	ioa_cfg->cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
-						  sizeof(struct ipr_config_table),
-						  &ioa_cfg->cfg_table_dma);
+	ioa_cfg->u.cfg_table = pci_alloc_consistent(ioa_cfg->pdev,
+						    ioa_cfg->cfg_table_size,
+						    &ioa_cfg->cfg_table_dma);
 
-	if (!ioa_cfg->cfg_table)
+	if (!ioa_cfg->u.cfg_table)
 		goto out_free_host_rrq;
 
 	for (i = 0; i < IPR_NUM_HCAMS; i++) {
@@ -7323,8 +8152,9 @@
 				    ioa_cfg->hostrcb[i],
 				    ioa_cfg->hostrcb_dma[i]);
 	}
-	pci_free_consistent(pdev, sizeof(struct ipr_config_table),
-			    ioa_cfg->cfg_table, ioa_cfg->cfg_table_dma);
+	pci_free_consistent(pdev, ioa_cfg->cfg_table_size,
+			    ioa_cfg->u.cfg_table,
+			    ioa_cfg->cfg_table_dma);
 out_free_host_rrq:
 	pci_free_consistent(pdev, sizeof(u32) * IPR_NUM_CMD_BLKS,
 			    ioa_cfg->host_rrq, ioa_cfg->host_rrq_dma);
@@ -7399,15 +8229,21 @@
 	init_waitqueue_head(&ioa_cfg->reset_wait_q);
 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
 	ioa_cfg->sdt_state = INACTIVE;
-	if (ipr_enable_cache)
-		ioa_cfg->cache_state = CACHE_ENABLED;
-	else
-		ioa_cfg->cache_state = CACHE_DISABLED;
 
 	ipr_initialize_bus_attr(ioa_cfg);
+	ioa_cfg->max_devs_supported = ipr_max_devs;
 
-	host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
-	host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+	if (ioa_cfg->sis64) {
+		host->max_id = IPR_MAX_SIS64_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_SIS64_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_SIS64_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_SIS64_DEVS;
+	} else {
+		host->max_id = IPR_MAX_NUM_TARGETS_PER_BUS;
+		host->max_lun = IPR_MAX_NUM_LUNS_PER_TARGET;
+		if (ipr_max_devs > IPR_MAX_PHYSICAL_DEVS)
+			ioa_cfg->max_devs_supported = IPR_MAX_PHYSICAL_DEVS;
+	}
 	host->max_channel = IPR_MAX_BUS_TO_SCAN;
 	host->unique_id = host->host_no;
 	host->max_cmd_len = IPR_MAX_CDB_LEN;
@@ -7419,13 +8255,26 @@
 
 	t->set_interrupt_mask_reg = base + p->set_interrupt_mask_reg;
 	t->clr_interrupt_mask_reg = base + p->clr_interrupt_mask_reg;
+	t->clr_interrupt_mask_reg32 = base + p->clr_interrupt_mask_reg32;
 	t->sense_interrupt_mask_reg = base + p->sense_interrupt_mask_reg;
+	t->sense_interrupt_mask_reg32 = base + p->sense_interrupt_mask_reg32;
 	t->clr_interrupt_reg = base + p->clr_interrupt_reg;
+	t->clr_interrupt_reg32 = base + p->clr_interrupt_reg32;
 	t->sense_interrupt_reg = base + p->sense_interrupt_reg;
+	t->sense_interrupt_reg32 = base + p->sense_interrupt_reg32;
 	t->ioarrin_reg = base + p->ioarrin_reg;
 	t->sense_uproc_interrupt_reg = base + p->sense_uproc_interrupt_reg;
+	t->sense_uproc_interrupt_reg32 = base + p->sense_uproc_interrupt_reg32;
 	t->set_uproc_interrupt_reg = base + p->set_uproc_interrupt_reg;
+	t->set_uproc_interrupt_reg32 = base + p->set_uproc_interrupt_reg32;
 	t->clr_uproc_interrupt_reg = base + p->clr_uproc_interrupt_reg;
+	t->clr_uproc_interrupt_reg32 = base + p->clr_uproc_interrupt_reg32;
+
+	if (ioa_cfg->sis64) {
+		t->init_feedback_reg = base + p->init_feedback_reg;
+		t->dump_addr_reg = base + p->dump_addr_reg;
+		t->dump_data_reg = base + p->dump_data_reg;
+	}
 }
 
 /**
@@ -7497,7 +8346,7 @@
 	init_waitqueue_head(&ioa_cfg->msi_wait_q);
 	ioa_cfg->msi_received = 0;
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
-	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg);
+	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.clr_interrupt_mask_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
 	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
 
@@ -7508,7 +8357,7 @@
 	} else if (ipr_debug)
 		dev_info(&pdev->dev, "IRQ assigned: %d\n", pdev->irq);
 
-	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg);
+	writel(IPR_PCII_IO_DEBUG_ACKNOWLEDGE, ioa_cfg->regs.sense_interrupt_reg32);
 	int_reg = readl(ioa_cfg->regs.sense_interrupt_reg);
 	wait_event_timeout(ioa_cfg->msi_wait_q, ioa_cfg->msi_received, HZ);
 	ipr_mask_and_clear_interrupts(ioa_cfg, ~IPR_PCII_IOA_TRANS_TO_OPER);
@@ -7578,6 +8427,8 @@
 		goto out_scsi_host_put;
 	}
 
+	/* set SIS 32 or SIS 64 */
+	ioa_cfg->sis64 = ioa_cfg->ipr_chip->sis_type == IPR_SIS64 ? 1 : 0;
 	ioa_cfg->chip_cfg = ioa_cfg->ipr_chip->cfg;
 
 	if (ipr_transop_timeout)
@@ -7615,7 +8466,16 @@
 
 	pci_set_master(pdev);
 
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+	if (ioa_cfg->sis64) {
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
+		if (rc < 0) {
+			dev_dbg(&pdev->dev, "Failed to set 64 bit PCI DMA mask\n");
+			rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+		}
+
+	} else
+		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
+
 	if (rc < 0) {
 		dev_err(&pdev->dev, "Failed to set PCI DMA mask\n");
 		goto cleanup_nomem;
@@ -7657,6 +8517,15 @@
 	if ((rc = ipr_set_pcix_cmd_reg(ioa_cfg)))
 		goto cleanup_nomem;
 
+	if (ioa_cfg->sis64)
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr64)
+				+ ((sizeof(struct ipr_config_table_entry64)
+				* ioa_cfg->max_devs_supported)));
+	else
+		ioa_cfg->cfg_table_size = (sizeof(struct ipr_config_table_hdr)
+				+ ((sizeof(struct ipr_config_table_entry)
+				* ioa_cfg->max_devs_supported)));
+
 	rc = ipr_alloc_mem(ioa_cfg);
 	if (rc < 0) {
 		dev_err(&pdev->dev,
@@ -7668,9 +8537,9 @@
 	 * If HRRQ updated interrupt is not masked, or reset alert is set,
 	 * the card is in an unknown state and needs a hard reset
 	 */
-	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg);
-	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg);
-	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg);
+	mask = readl(ioa_cfg->regs.sense_interrupt_mask_reg32);
+	interrupts = readl(ioa_cfg->regs.sense_interrupt_reg32);
+	uproc = readl(ioa_cfg->regs.sense_uproc_interrupt_reg32);
 	if ((mask & IPR_PCII_HRRQ_UPDATED) == 0 || (uproc & IPR_UPROCI_RESET_ALERT))
 		ioa_cfg->needs_hard_reset = 1;
 	if (interrupts & IPR_PCII_ERROR_INTERRUPTS)
@@ -7958,9 +8827,6 @@
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574E, 0, 0,
 	      IPR_USE_LONG_TRANSOP_TIMEOUT },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
-	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0,
-	      IPR_USE_LONG_TRANSOP_TIMEOUT },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B3, 0, 0, 0 },
 	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_OBSIDIAN_E,
 	      PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B7, 0, 0,
@@ -7975,9 +8841,22 @@
 	{ PCI_VENDOR_ID_ADAPTEC2, PCI_DEVICE_ID_ADAPTEC2_SCAMP,
 		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_572F, 0, 0,
 		IPR_USE_LONG_TRANSOP_TIMEOUT },
-	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_SCAMP_E,
-		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0,
-		IPR_USE_LONG_TRANSOP_TIMEOUT },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B5, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_574D, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_FPGA_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B2, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B4, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57B1, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57C6, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_575D, 0, 0, 0 },
+	{ PCI_VENDOR_ID_IBM, PCI_DEVICE_ID_IBM_CROC_ASIC_E2,
+		PCI_VENDOR_ID_IBM, IPR_SUBS_DEV_ID_57CE, 0, 0, 0 },
 	{ }
 };
 MODULE_DEVICE_TABLE(pci, ipr_pci_table);
@@ -7997,6 +8876,61 @@
 };
 
 /**
+ * ipr_halt_done - Shutdown prepare completion
+ * @ipr_cmd:	ipr command struct
+ *
+ * Return value:
+ * 	none
+ **/
+static void ipr_halt_done(struct ipr_cmnd *ipr_cmd)
+{
+	struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
+
+	list_add_tail(&ipr_cmd->queue, &ioa_cfg->free_q);
+}
+
+/**
+ * ipr_halt - Issue shutdown prepare to all adapters
+ * @nb:		notifier block
+ * @event:	system event (SYS_RESTART, SYS_HALT, or SYS_POWER_OFF)
+ * @buf:	unused callback argument
+ *
+ * Return value:
+ * 	NOTIFY_OK on success / NOTIFY_DONE on failure
+ **/
+static int ipr_halt(struct notifier_block *nb, ulong event, void *buf)
+{
+	struct ipr_cmnd *ipr_cmd;
+	struct ipr_ioa_cfg *ioa_cfg;
+	unsigned long flags = 0;
+
+	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
+		return NOTIFY_DONE;
+
+	spin_lock(&ipr_driver_lock);
+
+	list_for_each_entry(ioa_cfg, &ipr_ioa_head, queue) {
+		spin_lock_irqsave(ioa_cfg->host->host_lock, flags);
+		if (!ioa_cfg->allow_cmds) {
+			spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+			continue;
+		}
+
+		ipr_cmd = ipr_get_free_ipr_cmnd(ioa_cfg);
+		ipr_cmd->ioarcb.res_handle = cpu_to_be32(IPR_IOA_RES_HANDLE);
+		ipr_cmd->ioarcb.cmd_pkt.request_type = IPR_RQTYPE_IOACMD;
+		ipr_cmd->ioarcb.cmd_pkt.cdb[0] = IPR_IOA_SHUTDOWN;
+		ipr_cmd->ioarcb.cmd_pkt.cdb[1] = IPR_SHUTDOWN_PREPARE_FOR_NORMAL;
+
+		ipr_do_req(ipr_cmd, ipr_halt_done, ipr_timeout, IPR_DEVICE_RESET_TIMEOUT);
+		spin_unlock_irqrestore(ioa_cfg->host->host_lock, flags);
+	}
+	spin_unlock(&ipr_driver_lock);
+
+	return NOTIFY_OK;
+}
+
+static struct notifier_block ipr_notifier = {
+	.notifier_call = ipr_halt,
+};
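/*
 * Editor's note, not part of this patch: reboot notifiers run on the
 * kernel_restart()/kernel_halt()/kernel_power_off() paths, and returning
 * NOTIFY_DONE just means "not interested in this event".  The shape of
 * the hook, sketched with a hypothetical my_halt() callback:
 */
static int my_halt(struct notifier_block *nb, unsigned long event, void *buf)
{
	if (event != SYS_RESTART && event != SYS_HALT && event != SYS_POWER_OFF)
		return NOTIFY_DONE;	/* not an event we care about */
	/* quiesce the hardware here */
	return NOTIFY_OK;
}
static struct notifier_block my_nb = { .notifier_call = my_halt };
/* register_reboot_notifier(&my_nb) at module init,
 * unregister_reboot_notifier(&my_nb) at module exit. */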
+
+/**
  * ipr_init - Module entry point
  *
  * Return value:
@@ -8007,6 +8941,7 @@
 	ipr_info("IBM Power RAID SCSI Device Driver version: %s %s\n",
 		 IPR_DRIVER_VERSION, IPR_DRIVER_DATE);
 
+	register_reboot_notifier(&ipr_notifier);
 	return pci_register_driver(&ipr_driver);
 }
 
@@ -8020,6 +8955,7 @@
  **/
 static void __exit ipr_exit(void)
 {
+	unregister_reboot_notifier(&ipr_notifier);
 	pci_unregister_driver(&ipr_driver);
 }
 
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 19bbcf3..4c267b5 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -37,8 +37,8 @@
 /*
  * Literals
  */
-#define IPR_DRIVER_VERSION "2.4.3"
-#define IPR_DRIVER_DATE "(June 10, 2009)"
+#define IPR_DRIVER_VERSION "2.5.0"
+#define IPR_DRIVER_DATE "(February 11, 2010)"
 
 /*
  * IPR_MAX_CMD_PER_LUN: This defines the maximum number of outstanding
@@ -55,7 +55,9 @@
 #define IPR_NUM_BASE_CMD_BLKS				100
 
 #define PCI_DEVICE_ID_IBM_OBSIDIAN_E	0x0339
-#define PCI_DEVICE_ID_IBM_SCAMP_E		0x034A
+
+#define PCI_DEVICE_ID_IBM_CROC_FPGA_E2          0x033D
+#define PCI_DEVICE_ID_IBM_CROC_ASIC_E2          0x034A
 
 #define IPR_SUBS_DEV_ID_2780	0x0264
 #define IPR_SUBS_DEV_ID_5702	0x0266
@@ -70,15 +72,24 @@
 #define IPR_SUBS_DEV_ID_572A	0x02C1
 #define IPR_SUBS_DEV_ID_572B	0x02C2
 #define IPR_SUBS_DEV_ID_572F	0x02C3
-#define IPR_SUBS_DEV_ID_574D	0x030B
 #define IPR_SUBS_DEV_ID_574E	0x030A
 #define IPR_SUBS_DEV_ID_575B	0x030D
 #define IPR_SUBS_DEV_ID_575C	0x0338
-#define IPR_SUBS_DEV_ID_575D	0x033E
 #define IPR_SUBS_DEV_ID_57B3	0x033A
 #define IPR_SUBS_DEV_ID_57B7	0x0360
 #define IPR_SUBS_DEV_ID_57B8	0x02C2
 
+#define IPR_SUBS_DEV_ID_57B4    0x033B
+#define IPR_SUBS_DEV_ID_57B2    0x035F
+#define IPR_SUBS_DEV_ID_57C6    0x0357
+
+#define IPR_SUBS_DEV_ID_57B5    0x033C
+#define IPR_SUBS_DEV_ID_57CE    0x035E
+#define IPR_SUBS_DEV_ID_57B1    0x0355
+
+#define IPR_SUBS_DEV_ID_574D    0x0356
+#define IPR_SUBS_DEV_ID_575D    0x035D
+
 #define IPR_NAME				"ipr"
 
 /*
@@ -118,6 +129,10 @@
 #define IPR_NUM_LOG_HCAMS				2
 #define IPR_NUM_CFG_CHG_HCAMS				2
 #define IPR_NUM_HCAMS	(IPR_NUM_LOG_HCAMS + IPR_NUM_CFG_CHG_HCAMS)
+
+#define IPR_MAX_SIS64_TARGETS_PER_BUS			1024
+#define IPR_MAX_SIS64_LUNS_PER_TARGET			0xffffffff
+
 #define IPR_MAX_NUM_TARGETS_PER_BUS			256
 #define IPR_MAX_NUM_LUNS_PER_TARGET			256
 #define IPR_MAX_NUM_VSET_LUNS_PER_TARGET	8
@@ -132,13 +147,15 @@
 
 /* We need resources for HCAMS, IOA reset, IOA bringdown, and ERP */
 #define IPR_NUM_INTERNAL_CMD_BLKS	(IPR_NUM_HCAMS + \
-                                     ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 3)
+                                     ((IPR_NUM_RESET_RELOAD_RETRIES + 1) * 2) + 4)
 
 #define IPR_MAX_COMMANDS		IPR_NUM_BASE_CMD_BLKS
 #define IPR_NUM_CMD_BLKS		(IPR_NUM_BASE_CMD_BLKS + \
 						IPR_NUM_INTERNAL_CMD_BLKS)
 
 #define IPR_MAX_PHYSICAL_DEVS				192
+#define IPR_DEFAULT_SIS64_DEVS				1024
+#define IPR_MAX_SIS64_DEVS				4096
 
 #define IPR_MAX_SGLIST					64
 #define IPR_IOA_MAX_SECTORS				32767
@@ -173,6 +190,7 @@
 #define	IPR_HCAM_CDB_OP_CODE_CONFIG_CHANGE	0x01
 #define	IPR_HCAM_CDB_OP_CODE_LOG_DATA		0x02
 #define IPR_SET_SUPPORTED_DEVICES			0xFB
+#define IPR_SET_ALL_SUPPORTED_DEVICES			0x80
 #define IPR_IOA_SHUTDOWN				0xF7
 #define	IPR_WR_BUF_DOWNLOAD_AND_SAVE			0x05
 
@@ -221,9 +239,17 @@
 #define IPR_SDT_FMT2_BAR5_SEL				0x5
 #define IPR_SDT_FMT2_EXP_ROM_SEL			0x8
 #define IPR_FMT2_SDT_READY_TO_USE			0xC4D4E3F2
+#define IPR_FMT3_SDT_READY_TO_USE			0xC4D4E3F3
 #define IPR_DOORBELL					0x82800000
 #define IPR_RUNTIME_RESET				0x40000000
 
+#define IPR_IPL_INIT_MIN_STAGE_TIME			5
+#define IPR_IPL_INIT_STAGE_UNKNOWN			0x0
+#define IPR_IPL_INIT_STAGE_TRANSOP			0xB0000000
+#define IPR_IPL_INIT_STAGE_MASK				0xff000000
+#define IPR_IPL_INIT_STAGE_TIME_MASK			0x0000ffff
+#define IPR_PCII_IPL_STAGE_CHANGE			(0x80000000 >> 0)
+
 #define IPR_PCII_IOA_TRANS_TO_OPER			(0x80000000 >> 0)
 #define IPR_PCII_IOARCB_XFER_FAILED			(0x80000000 >> 3)
 #define IPR_PCII_IOA_UNIT_CHECKED			(0x80000000 >> 4)
@@ -318,27 +344,27 @@
 	u8 serial_num[IPR_SERIAL_NUM_LEN];
 }__attribute__ ((packed));
 
+#define IPR_RES_TYPE_AF_DASD		0x00
+#define IPR_RES_TYPE_GENERIC_SCSI	0x01
+#define IPR_RES_TYPE_VOLUME_SET		0x02
+#define IPR_RES_TYPE_REMOTE_AF_DASD	0x03
+#define IPR_RES_TYPE_GENERIC_ATA	0x04
+#define IPR_RES_TYPE_ARRAY		0x05
+#define IPR_RES_TYPE_IOAFP		0xff
+
 struct ipr_config_table_entry {
 	u8 proto;
 #define IPR_PROTO_SATA			0x02
 #define IPR_PROTO_SATA_ATAPI		0x03
 #define IPR_PROTO_SAS_STP		0x06
-#define IPR_PROTO_SAS_STP_ATAPI	0x07
+#define IPR_PROTO_SAS_STP_ATAPI		0x07
 	u8 array_id;
 	u8 flags;
-#define IPR_IS_IOA_RESOURCE	0x80
-#define IPR_IS_ARRAY_MEMBER 0x20
-#define IPR_IS_HOT_SPARE	0x10
-
+#define IPR_IS_IOA_RESOURCE		0x80
 	u8 rsvd_subtype;
-#define IPR_RES_SUBTYPE(res) (((res)->cfgte.rsvd_subtype) & 0x0f)
-#define IPR_SUBTYPE_AF_DASD			0
-#define IPR_SUBTYPE_GENERIC_SCSI	1
-#define IPR_SUBTYPE_VOLUME_SET		2
-#define IPR_SUBTYPE_GENERIC_ATA	4
 
-#define IPR_QUEUEING_MODEL(res)	((((res)->cfgte.flags) & 0x70) >> 4)
-#define IPR_QUEUE_FROZEN_MODEL	0
+#define IPR_QUEUEING_MODEL(res)	((((res)->flags) & 0x70) >> 4)
+#define IPR_QUEUE_FROZEN_MODEL		0
 #define IPR_QUEUE_NACA_MODEL		1
 
 	struct ipr_res_addr res_addr;
@@ -347,6 +373,28 @@
 	struct ipr_std_inq_data std_inq_data;
 }__attribute__ ((packed, aligned (4)));
 
+struct ipr_config_table_entry64 {
+	u8 res_type;
+	u8 proto;
+	u8 vset_num;
+	u8 array_id;
+	__be16 flags;
+	__be16 res_flags;
+#define IPR_QUEUEING_MODEL64(res) ((((res)->res_flags) & 0x7000) >> 12)
+	__be32 res_handle;
+	u8 dev_id_type;
+	u8 reserved[3];
+	__be64 dev_id;
+	__be64 lun;
+	__be64 lun_wwn[2];
+#define IPR_MAX_RES_PATH_LENGTH		24
+	__be64 res_path;
+	struct ipr_std_inq_data std_inq_data;
+	u8 reserved2[4];
+	__be64 reserved3[2]; /* description text */
+	u8 reserved4[8];
+}__attribute__ ((packed, aligned (8)));
+
 struct ipr_config_table_hdr {
 	u8 num_entries;
 	u8 flags;
@@ -354,13 +402,35 @@
 	__be16 reserved;
 }__attribute__((packed, aligned (4)));
 
-struct ipr_config_table {
-	struct ipr_config_table_hdr hdr;
-	struct ipr_config_table_entry dev[IPR_MAX_PHYSICAL_DEVS];
+struct ipr_config_table_hdr64 {
+	__be16 num_entries;
+	__be16 reserved;
+	u8 flags;
+	u8 reserved2[11];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_config_table {
+	struct ipr_config_table_hdr hdr;
+	struct ipr_config_table_entry dev[0];
+}__attribute__((packed, aligned (4)));
+
+struct ipr_config_table64 {
+	struct ipr_config_table_hdr64 hdr64;
+	struct ipr_config_table_entry64 dev[0];
+}__attribute__((packed, aligned (8)));
+
+struct ipr_config_table_entry_wrapper {
+	union {
+		struct ipr_config_table_entry *cfgte;
+		struct ipr_config_table_entry64 *cfgte64;
+	} u;
+};
+
 struct ipr_hostrcb_cfg_ch_not {
-	struct ipr_config_table_entry cfgte;
+	union {
+		struct ipr_config_table_entry cfgte;
+		struct ipr_config_table_entry64 cfgte64;
+	} u;
 	u8 reserved[936];
 }__attribute__((packed, aligned (4)));
 
@@ -381,7 +451,7 @@
 #define IPR_RQTYPE_HCAM			0x02
 #define IPR_RQTYPE_ATA_PASSTHRU	0x04
 
-	u8 luntar_luntrn;
+	u8 reserved2;
 
 	u8 flags_hi;
 #define IPR_FLAGS_HI_WRITE_NOT_READ		0x80
@@ -403,7 +473,7 @@
 	__be16 timeout;
 }__attribute__ ((packed, aligned(4)));
 
-struct ipr_ioarcb_ata_regs {
+struct ipr_ioarcb_ata_regs {	/* 22 bytes */
 	u8 flags;
 #define IPR_ATA_FLAG_PACKET_CMD			0x80
 #define IPR_ATA_FLAG_XFER_TYPE_DMA			0x40
@@ -442,28 +512,49 @@
 	__be32 address;
 }__attribute__((packed, aligned (8)));
 
+struct ipr_ioadl64_desc {
+	__be32 flags;
+	__be32 data_len;
+	__be64 address;
+}__attribute__((packed, aligned (16)));
+
+struct ipr_ata64_ioadl {
+	struct ipr_ioarcb_ata_regs regs;
+	u16 reserved[5];
+	struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+}__attribute__((packed, aligned (16)));
+
 struct ipr_ioarcb_add_data {
 	union {
 		struct ipr_ioarcb_ata_regs regs;
 		struct ipr_ioadl_desc ioadl[5];
 		__be32 add_cmd_parms[10];
-	}u;
-}__attribute__ ((packed, aligned(4)));
+	} u;
+}__attribute__ ((packed, aligned (4)));
+
+struct ipr_ioarcb_sis64_add_addr_ecb {
+	__be64 ioasa_host_pci_addr;
+	__be64 data_ioadl_addr;
+	__be64 reserved;
+	__be32 ext_control_buf[4];
+}__attribute__((packed, aligned (8)));
 
 /* IOA Request Control Block    128 bytes  */
 struct ipr_ioarcb {
-	__be32 ioarcb_host_pci_addr;
-	__be32 reserved;
+	union {
+		__be32 ioarcb_host_pci_addr;
+		__be64 ioarcb_host_pci_addr64;
+	} a;
 	__be32 res_handle;
 	__be32 host_response_handle;
 	__be32 reserved1;
 	__be32 reserved2;
 	__be32 reserved3;
 
-	__be32 write_data_transfer_length;
+	__be32 data_transfer_length;
 	__be32 read_data_transfer_length;
 	__be32 write_ioadl_addr;
-	__be32 write_ioadl_len;
+	__be32 ioadl_len;
 	__be32 read_ioadl_addr;
 	__be32 read_ioadl_len;
 
@@ -473,8 +564,14 @@
 
 	struct ipr_cmd_pkt cmd_pkt;
 
-	__be32 add_cmd_parms_len;
-	struct ipr_ioarcb_add_data add_data;
+	__be16 add_cmd_parms_offset;
+	__be16 add_cmd_parms_len;
+
+	union {
+		struct ipr_ioarcb_add_data add_data;
+		struct ipr_ioarcb_sis64_add_addr_ecb sis64_addr_data;
+	} u;
+
 }__attribute__((packed, aligned (4)));
 
 struct ipr_ioasa_vset {
@@ -676,12 +773,29 @@
 	struct ipr_ext_vpd cfc_last_with_dev_vpd;
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb64_device_data_entry_enhanced {
+	struct ipr_ext_vpd vpd;
+	u8 ccin[4];
+	u8 res_path[8];
+	struct ipr_ext_vpd new_vpd;
+	u8 new_ccin[4];
+	struct ipr_ext_vpd ioa_last_with_dev_vpd;
+	struct ipr_ext_vpd cfc_last_with_dev_vpd;
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_array_data_entry {
 	struct ipr_vpd vpd;
 	struct ipr_res_addr expected_dev_res_addr;
 	struct ipr_res_addr dev_res_addr;
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb64_array_data_entry {
+	struct ipr_ext_vpd vpd;
+	u8 ccin[4];
+	u8 expected_res_path[8];
+	u8 res_path[8];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_array_data_entry_enhanced {
 	struct ipr_ext_vpd vpd;
 	u8 ccin[4];
@@ -733,6 +847,14 @@
 	struct ipr_hostrcb_device_data_entry_enhanced dev[3];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_type_23_error {
+	struct ipr_ext_vpd ioa_vpd;
+	struct ipr_ext_vpd cfc_vpd;
+	__be32 errors_detected;
+	__be32 errors_logged;
+	struct ipr_hostrcb64_device_data_entry_enhanced dev[3];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_type_04_error {
 	struct ipr_vpd ioa_vpd;
 	struct ipr_vpd cfc_vpd;
@@ -760,6 +882,22 @@
 	struct ipr_hostrcb_array_data_entry_enhanced array_member[18];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_type_24_error {
+	struct ipr_ext_vpd ioa_vpd;
+	struct ipr_ext_vpd cfc_vpd;
+	u8 reserved[2];
+	u8 exposed_mode_adn;
+#define IPR_INVALID_ARRAY_DEV_NUM		0xff
+	u8 array_id;
+	u8 last_res_path[8];
+	u8 protection_level[8];
+	struct ipr_ext_vpd array_vpd;
+	u8 description[16];
+	u8 reserved2[3];
+	u8 num_entries;
+	struct ipr_hostrcb64_array_data_entry array_member[32];
+}__attribute__((packed, aligned (4)));
+
 struct ipr_hostrcb_type_07_error {
 	u8 failure_reason[64];
 	struct ipr_vpd vpd;
@@ -797,6 +935,22 @@
 	__be32 wwid[2];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb64_config_element {
+	__be16 length;
+	u8 descriptor_id;
+#define IPR_DESCRIPTOR_MASK		0xC0
+#define IPR_DESCRIPTOR_SIS64		0x00
+
+	u8 reserved;
+	u8 type_status;
+
+	u8 reserved2[2];
+	u8 link_rate;
+
+	u8 res_path[8];
+	__be32 wwid[2];
+}__attribute__((packed, aligned (8)));
+
 struct ipr_hostrcb_fabric_desc {
 	__be16 length;
 	u8 ioa_port;
@@ -818,6 +972,20 @@
 	struct ipr_hostrcb_config_element elem[1];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb64_fabric_desc {
+	__be16 length;
+	u8 descriptor_id;
+
+	u8 reserved;
+	u8 path_state;
+
+	u8 reserved2[2];
+	u8 res_path[8];
+	u8 reserved3[6];
+	__be16 num_entries;
+	struct ipr_hostrcb64_config_element elem[1];
+}__attribute__((packed, aligned (8)));
+
 #define for_each_fabric_cfg(fabric, cfg) \
 		for (cfg = (fabric)->elem; \
 			cfg < ((fabric)->elem + be16_to_cpu((fabric)->num_entries)); \
@@ -830,10 +998,17 @@
 	struct ipr_hostrcb_fabric_desc desc[1];
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb_type_30_error {
+	u8 failure_reason[64];
+	u8 reserved[3];
+	u8 num_entries;
+	struct ipr_hostrcb64_fabric_desc desc[1];
+}__attribute__((packed, aligned (4)));
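/*
 * Illustrative sketch, not part of this patch: for_each_fabric_cfg()
 * above only requires the descriptor to expose elem[] and num_entries
 * members, which both the 32- and 64-bit layouts provide, so it can
 * walk the new 64-bit elements too.  Hypothetical use on a type 30
 * error, assuming a log_path_element() helper:
 */
static void log_fabric(struct ipr_hostrcb *hostrcb,
		       struct ipr_hostrcb64_fabric_desc *fabric)
{
	struct ipr_hostrcb64_config_element *cfg;

	for_each_fabric_cfg(fabric, cfg)
		log_path_element(hostrcb, cfg);	/* assumed logging helper */
}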
+
 struct ipr_hostrcb_error {
-	__be32 failing_dev_ioasc;
-	struct ipr_res_addr failing_dev_res_addr;
-	__be32 failing_dev_res_handle;
+	__be32 fd_ioasc;
+	struct ipr_res_addr fd_res_addr;
+	__be32 fd_res_handle;
 	__be32 prc;
 	union {
 		struct ipr_hostrcb_type_ff_error type_ff_error;
@@ -850,6 +1025,26 @@
 	} u;
 }__attribute__((packed, aligned (4)));
 
+struct ipr_hostrcb64_error {
+	__be32 fd_ioasc;
+	__be32 ioa_fw_level;
+	__be32 fd_res_handle;
+	__be32 prc;
+	__be64 fd_dev_id;
+	__be64 fd_lun;
+	u8 fd_res_path[8];
+	__be64 time_stamp;
+	u8 reserved[2];
+	union {
+		struct ipr_hostrcb_type_ff_error type_ff_error;
+		struct ipr_hostrcb_type_12_error type_12_error;
+		struct ipr_hostrcb_type_17_error type_17_error;
+		struct ipr_hostrcb_type_23_error type_23_error;
+		struct ipr_hostrcb_type_24_error type_24_error;
+		struct ipr_hostrcb_type_30_error type_30_error;
+	} u;
+}__attribute__((packed, aligned (8)));
+
 struct ipr_hostrcb_raw {
 	__be32 data[sizeof(struct ipr_hostrcb_error)/sizeof(__be32)];
 }__attribute__((packed, aligned (4)));
@@ -887,7 +1082,11 @@
 #define IPR_HOST_RCB_OVERLAY_ID_16				0x16
 #define IPR_HOST_RCB_OVERLAY_ID_17				0x17
 #define IPR_HOST_RCB_OVERLAY_ID_20				0x20
-#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT			0xFF
+#define IPR_HOST_RCB_OVERLAY_ID_23				0x23
+#define IPR_HOST_RCB_OVERLAY_ID_24				0x24
+#define IPR_HOST_RCB_OVERLAY_ID_26				0x26
+#define IPR_HOST_RCB_OVERLAY_ID_30				0x30
+#define IPR_HOST_RCB_OVERLAY_ID_DEFAULT				0xFF
 
 	u8 reserved1[3];
 	__be32 ilid;
@@ -897,6 +1096,7 @@
 
 	union {
 		struct ipr_hostrcb_error error;
+		struct ipr_hostrcb64_error error64;
 		struct ipr_hostrcb_cfg_ch_not ccn;
 		struct ipr_hostrcb_raw raw;
 	} u;
@@ -907,14 +1107,14 @@
 	dma_addr_t hostrcb_dma;
 	struct list_head queue;
 	struct ipr_ioa_cfg *ioa_cfg;
+	char rp_buffer[IPR_MAX_RES_PATH_LENGTH];
 };
 
 /* IPR smart dump table structures */
 struct ipr_sdt_entry {
-	__be32 bar_str_offset;
-	__be32 end_offset;
-	u8 entry_byte;
-	u8 reserved[3];
+	__be32 start_token;
+	__be32 end_token;
+	u8 reserved[4];
 
 	u8 flags;
 #define IPR_SDT_ENDIAN		0x80
@@ -960,28 +1160,48 @@
 };
 
 struct ipr_resource_entry {
-	struct ipr_config_table_entry cfgte;
 	u8 needs_sync_complete:1;
 	u8 in_erp:1;
 	u8 add_to_ml:1;
 	u8 del_from_ml:1;
 	u8 resetting_device:1;
 
+	u32 bus;		/* AKA channel */
+	u32 target;		/* AKA id */
+	u32 lun;
+#define IPR_ARRAY_VIRTUAL_BUS			0x1
+#define IPR_VSET_VIRTUAL_BUS			0x2
+#define IPR_IOAFP_VIRTUAL_BUS			0x3
+
+#define IPR_GET_RES_PHYS_LOC(res) \
+	(((res)->bus << 24) | ((res)->target << 8) | (res)->lun)
+
+	u8 ata_class;
+
+	u8 flags;
+	__be16 res_flags;
+
+	__be32 type;
+
+	u8 qmodel;
+	struct ipr_std_inq_data std_inq_data;
+
+	__be32 res_handle;
+	__be64 dev_id;
+	struct scsi_lun dev_lun;
+	u8 res_path[8];
+
+	struct ipr_ioa_cfg *ioa_cfg;
 	struct scsi_device *sdev;
 	struct ipr_sata_port *sata_port;
 	struct list_head queue;
-};
+}; /* struct ipr_resource_entry */
 
 struct ipr_resource_hdr {
 	u16 num_entries;
 	u16 reserved;
 };
 
-struct ipr_resource_table {
-	struct ipr_resource_hdr hdr;
-	struct ipr_resource_entry dev[IPR_MAX_PHYSICAL_DEVS];
-};
-
 struct ipr_misc_cbs {
 	struct ipr_ioa_vpd ioa_vpd;
 	struct ipr_inquiry_page0 page0_data;
@@ -994,27 +1214,51 @@
 struct ipr_interrupt_offsets {
 	unsigned long set_interrupt_mask_reg;
 	unsigned long clr_interrupt_mask_reg;
+	unsigned long clr_interrupt_mask_reg32;
 	unsigned long sense_interrupt_mask_reg;
+	unsigned long sense_interrupt_mask_reg32;
 	unsigned long clr_interrupt_reg;
+	unsigned long clr_interrupt_reg32;
 
 	unsigned long sense_interrupt_reg;
+	unsigned long sense_interrupt_reg32;
 	unsigned long ioarrin_reg;
 	unsigned long sense_uproc_interrupt_reg;
+	unsigned long sense_uproc_interrupt_reg32;
 	unsigned long set_uproc_interrupt_reg;
+	unsigned long set_uproc_interrupt_reg32;
 	unsigned long clr_uproc_interrupt_reg;
+	unsigned long clr_uproc_interrupt_reg32;
+
+	unsigned long init_feedback_reg;
+
+	unsigned long dump_addr_reg;
+	unsigned long dump_data_reg;
 };
 
 struct ipr_interrupts {
 	void __iomem *set_interrupt_mask_reg;
 	void __iomem *clr_interrupt_mask_reg;
+	void __iomem *clr_interrupt_mask_reg32;
 	void __iomem *sense_interrupt_mask_reg;
+	void __iomem *sense_interrupt_mask_reg32;
 	void __iomem *clr_interrupt_reg;
+	void __iomem *clr_interrupt_reg32;
 
 	void __iomem *sense_interrupt_reg;
+	void __iomem *sense_interrupt_reg32;
 	void __iomem *ioarrin_reg;
 	void __iomem *sense_uproc_interrupt_reg;
+	void __iomem *sense_uproc_interrupt_reg32;
 	void __iomem *set_uproc_interrupt_reg;
+	void __iomem *set_uproc_interrupt_reg32;
 	void __iomem *clr_uproc_interrupt_reg;
+	void __iomem *clr_uproc_interrupt_reg32;
+
+	void __iomem *init_feedback_reg;
+
+	void __iomem *dump_addr_reg;
+	void __iomem *dump_data_reg;
 };
 
 struct ipr_chip_cfg_t {
@@ -1029,6 +1273,9 @@
 	u16 intr_type;
 #define IPR_USE_LSI			0x00
 #define IPR_USE_MSI			0x01
+	u16 sis_type;
+#define IPR_SIS32			0x00
+#define IPR_SIS64			0x01
 	const struct ipr_chip_cfg_t *cfg;
 };
 
@@ -1073,13 +1320,6 @@
 	DUMP_OBTAINED
 };
 
-enum ipr_cache_state {
-	CACHE_NONE,
-	CACHE_DISABLED,
-	CACHE_ENABLED,
-	CACHE_INVALID
-};
-
 /* Per-controller data */
 struct ipr_ioa_cfg {
 	char eye_catcher[8];
@@ -1099,10 +1339,17 @@
 	u8 dual_raid:1;
 	u8 needs_warm_reset:1;
 	u8 msi_received:1;
+	u8 sis64:1;
 
 	u8 revid;
 
-	enum ipr_cache_state cache_state;
+	/*
+	 * Bitmaps for SIS64 generated target values
+	 */
+	unsigned long *target_ids;
+	unsigned long *array_ids;
+	unsigned long *vset_ids;
+
 	u16 type; /* CCIN of the card */
 
 	u8 log_level;
@@ -1133,8 +1380,13 @@
 
 	char cfg_table_start[8];
 #define IPR_CFG_TBL_START		"cfg"
-	struct ipr_config_table *cfg_table;
+	union {
+		struct ipr_config_table *cfg_table;
+		struct ipr_config_table64 *cfg_table64;
+	} u;
 	dma_addr_t cfg_table_dma;
+	u32 cfg_table_size;
+	u32 max_devs_supported;
 
 	char resource_table_label[8];
 #define IPR_RES_TABLE_LABEL		"res_tbl"
@@ -1202,13 +1454,17 @@
 	char ipr_cmd_label[8];
 #define IPR_CMD_LABEL		"ipr_cmd"
 	struct ipr_cmnd *ipr_cmnd_list[IPR_NUM_CMD_BLKS];
-	u32 ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
-};
+	dma_addr_t ipr_cmnd_list_dma[IPR_NUM_CMD_BLKS];
+}; /* struct ipr_ioa_cfg */
 
 struct ipr_cmnd {
 	struct ipr_ioarcb ioarcb;
+	union {
+		struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
+		struct ipr_ioadl64_desc ioadl64[IPR_NUM_IOADL_ENTRIES];
+		struct ipr_ata64_ioadl ata_ioadl;
+	} i;
 	struct ipr_ioasa ioasa;
-	struct ipr_ioadl_desc ioadl[IPR_NUM_IOADL_ENTRIES];
 	struct list_head queue;
 	struct scsi_cmnd *scsi_cmd;
 	struct ata_queued_cmd *qc;
@@ -1221,7 +1477,7 @@
 	u8 sense_buffer[SCSI_SENSE_BUFFERSIZE];
 	dma_addr_t sense_buffer_dma;
 	unsigned short dma_use_sg;
-	dma_addr_t dma_handle;
+	dma_addr_t dma_addr;
 	struct ipr_cmnd *sibling;
 	union {
 		enum ipr_shutdown_type shutdown_type;
@@ -1314,8 +1570,6 @@
 	u32 next_page_index;
 	u32 page_offset;
 	u32 format;
-#define IPR_SDT_FMT2		2
-#define IPR_SDT_UNKNOWN		3
 }__attribute__((packed, aligned (4)));
 
 struct ipr_dump {
@@ -1377,6 +1631,13 @@
 #define ipr_info(...) printk(KERN_INFO IPR_NAME ": "__VA_ARGS__)
 #define ipr_dbg(...) IPR_DBG_CMD(printk(KERN_INFO IPR_NAME ": "__VA_ARGS__))
 
+#define ipr_res_printk(level, ioa_cfg, bus, target, lun, fmt, ...) \
+	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
+		bus, target, lun, ##__VA_ARGS__)
+
+#define ipr_res_err(ioa_cfg, res, fmt, ...) \
+	ipr_res_printk(KERN_ERR, ioa_cfg, (res)->bus, (res)->target, (res)->lun, fmt, ##__VA_ARGS__)
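/*
 * Editor's note, not part of this patch: with the location now kept
 * directly in the resource entry, a call such as
 *
 *	ipr_res_err(ioa_cfg, res, "device reset failed\n");
 *
 * prints "ipr: <host>:<bus>:<target>:<lun>: device reset failed".
 */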
+
 #define ipr_ra_printk(level, ioa_cfg, ra, fmt, ...) \
 	printk(level IPR_NAME ": %d:%d:%d:%d: " fmt, (ioa_cfg)->host->host_no, \
 		(ra).bus, (ra).target, (ra).lun, ##__VA_ARGS__)
@@ -1384,9 +1645,6 @@
 #define ipr_ra_err(ioa_cfg, ra, fmt, ...) \
 	ipr_ra_printk(KERN_ERR, ioa_cfg, ra, fmt, ##__VA_ARGS__)
 
-#define ipr_res_err(ioa_cfg, res, fmt, ...) \
-	ipr_ra_err(ioa_cfg, (res)->cfgte.res_addr, fmt, ##__VA_ARGS__)
-
 #define ipr_phys_res_err(ioa_cfg, res, fmt, ...)			\
 {									\
 	if ((res).bus >= IPR_MAX_NUM_BUSES) {				\
@@ -1399,14 +1657,21 @@
 }
 
 #define ipr_hcam_err(hostrcb, fmt, ...)					\
-{													\
-	if (ipr_is_device(&(hostrcb)->hcam.u.error.failing_dev_res_addr)) {		\
-		ipr_ra_err((hostrcb)->ioa_cfg,							\
-				(hostrcb)->hcam.u.error.failing_dev_res_addr,			\
-				fmt, ##__VA_ARGS__);							\
-	} else {											\
-		dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__);		\
-	}												\
+{									\
+	if (ipr_is_device(hostrcb)) {					\
+		if ((hostrcb)->ioa_cfg->sis64) {			\
+			printk(KERN_ERR IPR_NAME ": %s: " fmt,		\
+				ipr_format_resource_path(&(hostrcb)->hcam.u.error64.fd_res_path[0], \
+					&(hostrcb)->rp_buffer[0]),	\
+				##__VA_ARGS__);				\
+		} else {						\
+			ipr_ra_err((hostrcb)->ioa_cfg,			\
+				(hostrcb)->hcam.u.error.fd_res_addr,	\
+				fmt, ##__VA_ARGS__);			\
+		}							\
+	} else {							\
+		dev_err(&(hostrcb)->ioa_cfg->pdev->dev, fmt, ##__VA_ARGS__); \
+	}								\
 }
 
 #define ipr_trace ipr_dbg("%s: %s: Line: %d\n",\
@@ -1432,7 +1697,7 @@
  **/
 static inline int ipr_is_ioa_resource(struct ipr_resource_entry *res)
 {
-	return (res->cfgte.flags & IPR_IS_IOA_RESOURCE) ? 1 : 0;
+	return res->type == IPR_RES_TYPE_IOAFP;
 }
 
 /**
@@ -1444,12 +1709,8 @@
  **/
 static inline int ipr_is_af_dasd_device(struct ipr_resource_entry *res)
 {
-	if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
-	    !ipr_is_ioa_resource(res) &&
-	    IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_AF_DASD)
-		return 1;
-	else
-		return 0;
+	return res->type == IPR_RES_TYPE_AF_DASD ||
+		res->type == IPR_RES_TYPE_REMOTE_AF_DASD;
 }
 
 /**
@@ -1461,12 +1722,7 @@
  **/
 static inline int ipr_is_vset_device(struct ipr_resource_entry *res)
 {
-	if (IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data) &&
-	    !ipr_is_ioa_resource(res) &&
-	    IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_VOLUME_SET)
-		return 1;
-	else
-		return 0;
+	return res->type == IPR_RES_TYPE_VOLUME_SET;
 }
 
 /**
@@ -1478,11 +1734,7 @@
  **/
 static inline int ipr_is_gscsi(struct ipr_resource_entry *res)
 {
-	if (!ipr_is_ioa_resource(res) &&
-	    IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_SCSI)
-		return 1;
-	else
-		return 0;
+	return res->type == IPR_RES_TYPE_GENERIC_SCSI;
 }
 
 /**
@@ -1495,7 +1747,7 @@
 static inline int ipr_is_scsi_disk(struct ipr_resource_entry *res)
 {
 	if (ipr_is_af_dasd_device(res) ||
-	    (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->cfgte.std_inq_data)))
+	    (ipr_is_gscsi(res) && IPR_IS_DASD_DEVICE(res->std_inq_data)))
 		return 1;
 	else
 		return 0;
@@ -1510,11 +1762,7 @@
  **/
 static inline int ipr_is_gata(struct ipr_resource_entry *res)
 {
-	if (!ipr_is_ioa_resource(res) &&
-	    IPR_RES_SUBTYPE(res) == IPR_SUBTYPE_GENERIC_ATA)
-		return 1;
-	else
-		return 0;
+	return res->type == IPR_RES_TYPE_GENERIC_ATA;
 }
 
 /**
@@ -1526,24 +1774,35 @@
  **/
 static inline int ipr_is_naca_model(struct ipr_resource_entry *res)
 {
-	if (ipr_is_gscsi(res) && IPR_QUEUEING_MODEL(res) == IPR_QUEUE_NACA_MODEL)
+	if (ipr_is_gscsi(res) && res->qmodel == IPR_QUEUE_NACA_MODEL)
 		return 1;
 	return 0;
 }
 
 /**
- * ipr_is_device - Determine if resource address is that of a device
- * @res_addr:	resource address struct
+ * ipr_is_device - Determine if the hostrcb structure is related to a device
+ * @hostrcb:	host resource control block struct
  *
  * Return value:
  * 	1 if AF / 0 if not AF
  **/
-static inline int ipr_is_device(struct ipr_res_addr *res_addr)
+static inline int ipr_is_device(struct ipr_hostrcb *hostrcb)
 {
-	if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
-	    (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
-		return 1;
+	struct ipr_res_addr *res_addr;
+	u8 *res_path;
 
+	if (hostrcb->ioa_cfg->sis64) {
+		res_path = &hostrcb->hcam.u.error64.fd_res_path[0];
+		if ((res_path[0] == 0x00 || res_path[0] == 0x80 ||
+		    res_path[0] == 0x81) && res_path[2] != 0xFF)
+			return 1;
+	} else {
+		res_addr = &hostrcb->hcam.u.error.fd_res_addr;
+
+		if ((res_addr->bus < IPR_MAX_NUM_BUSES) &&
+		    (res_addr->target < (IPR_MAX_NUM_TARGETS_PER_BUS - 1)))
+			return 1;
+	}
 	return 0;
 }
 
diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c
index 8a89ba9..249053a 100644
--- a/drivers/scsi/iscsi_tcp.c
+++ b/drivers/scsi/iscsi_tcp.c
@@ -874,7 +874,7 @@
 	.cmd_per_lun		= ISCSI_DEF_CMD_PER_LUN,
 	.eh_abort_handler       = iscsi_eh_abort,
 	.eh_device_reset_handler= iscsi_eh_device_reset,
-	.eh_target_reset_handler= iscsi_eh_target_reset,
+	.eh_target_reset_handler = iscsi_eh_recover_target,
 	.use_clustering         = DISABLE_CLUSTERING,
 	.slave_alloc            = iscsi_sw_tcp_slave_alloc,
 	.slave_configure        = iscsi_sw_tcp_slave_configure,
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 703eb6a..685eaec 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2338,7 +2338,7 @@
  * This function will wait for a relogin, session termination from
  * userspace, or a recovery/replacement timeout.
  */
-static int iscsi_eh_session_reset(struct scsi_cmnd *sc)
+int iscsi_eh_session_reset(struct scsi_cmnd *sc)
 {
 	struct iscsi_cls_session *cls_session;
 	struct iscsi_session *session;
@@ -2389,6 +2389,7 @@
 	mutex_unlock(&session->eh_mutex);
 	return SUCCESS;
 }
+EXPORT_SYMBOL_GPL(iscsi_eh_session_reset);
 
 static void iscsi_prep_tgt_reset_pdu(struct scsi_cmnd *sc, struct iscsi_tm *hdr)
 {
@@ -2403,8 +2404,7 @@
  * iscsi_eh_target_reset - reset target
  * @sc: scsi command
  *
- * This will attempt to send a warm target reset. If that fails
- * then we will drop the session and attempt ERL0 recovery.
+ * This will attempt to send a warm target reset.
  */
 int iscsi_eh_target_reset(struct scsi_cmnd *sc)
 {
@@ -2476,12 +2476,27 @@
 	ISCSI_DBG_EH(session, "tgt %s reset result = %s\n", session->targetname,
 		     rc == SUCCESS ? "SUCCESS" : "FAILED");
 	mutex_unlock(&session->eh_mutex);
+	return rc;
+}
+EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
 
+/**
+ * iscsi_eh_recover_target - reset target and possibly the session
+ * @sc: scsi command
+ *
+ * This will attempt to send a warm target reset. If that fails,
+ * we will escalate to ERL0 session recovery.
+ */
+int iscsi_eh_recover_target(struct scsi_cmnd *sc)
+{
+	int rc;
+
+	rc = iscsi_eh_target_reset(sc);
 	if (rc == FAILED)
 		rc = iscsi_eh_session_reset(sc);
 	return rc;
 }
-EXPORT_SYMBOL_GPL(iscsi_eh_target_reset);
+EXPORT_SYMBOL_GPL(iscsi_eh_recover_target);
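/*
 * Editor's note, not part of this patch: with the reset path split, a
 * driver picks its escalation policy in its scsi_host_template.
 * iscsi_tcp (above) opts into the recovering variant:
 *
 *	.eh_target_reset_handler = iscsi_eh_recover_target,
 *
 * while a transport that wants a plain warm target reset with no
 * session drop would point at iscsi_eh_target_reset instead.
 */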
 
 /*
  * Pre-allocate a pool of @max items of @item_size. By default, the pool
diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
index 84b6964..565e16d 100644
--- a/drivers/scsi/lpfc/lpfc.h
+++ b/drivers/scsi/lpfc/lpfc.h
@@ -37,6 +37,9 @@
 					   the NameServer  before giving up. */
 #define LPFC_CMD_PER_LUN	3	/* max outstanding cmds per lun */
 #define LPFC_DEFAULT_SG_SEG_CNT 64	/* sg element count per scsi cmnd */
+#define LPFC_DEFAULT_MENLO_SG_SEG_CNT 128	/* sg element count per scsi
+		cmnd for menlo; firmware downloads using bsg need
+		nearly twice the default */
 #define LPFC_DEFAULT_PROT_SG_SEG_CNT 4096 /* sg protection elements count */
 #define LPFC_MAX_SG_SEG_CNT	4096	/* sg element count per scsi cmnd */
 #define LPFC_MAX_PROT_SG_SEG_CNT 4096	/* prot sg element count per scsi cmd*/
@@ -509,7 +512,6 @@
 	int (*lpfc_hba_down_link)
 		(struct lpfc_hba *);
 
-
 	/* SLI4 specific HBA data structure */
 	struct lpfc_sli4_hba sli4_hba;
 
@@ -623,6 +625,9 @@
 	uint32_t cfg_log_verbose;
 	uint32_t cfg_aer_support;
 	uint32_t cfg_suppress_link_up;
+#define LPFC_INITIALIZE_LINK              0	/* do normal init_link mbox */
+#define LPFC_DELAY_INIT_LINK              1	/* layered driver hold off */
+#define LPFC_DELAY_INIT_LINK_INDEFINITELY 2	/* wait, manual intervention */
 
 	lpfc_vpd_t vpd;		/* vital product data */
 
@@ -804,6 +809,9 @@
 	struct list_head ct_ev_waiters;
 	struct unsol_rcv_ct_ctx ct_ctx[64];
 	uint32_t ctx_idx;
+
+	uint8_t menlo_flag;	/* menlo generic flags */
+#define HBA_MENLO_SUPPORT	0x1 /* HBA supports menlo commands */
 };
 
 static inline struct Scsi_Host *
diff --git a/drivers/scsi/lpfc/lpfc_attr.c b/drivers/scsi/lpfc/lpfc_attr.c
index c992e83..64cd17e 100644
--- a/drivers/scsi/lpfc/lpfc_attr.c
+++ b/drivers/scsi/lpfc/lpfc_attr.c
@@ -1939,7 +1939,9 @@
 #            0x2  = never bring up link
 # Default value is 0.
 */
-LPFC_ATTR_R(suppress_link_up, 0, 0, 2, "Suppress Link Up at initialization");
+LPFC_ATTR_R(suppress_link_up, LPFC_INITIALIZE_LINK, LPFC_INITIALIZE_LINK,
+		LPFC_DELAY_INIT_LINK_INDEFINITELY,
+		"Suppress Link Up at initialization");
 
 /*
 # lpfc_nodev_tmo: If set, it will hold all I/O errors on devices that disappear
@@ -1966,8 +1968,7 @@
 {
 	struct Scsi_Host  *shost = class_to_shost(dev);
 	struct lpfc_vport *vport = (struct lpfc_vport *) shost->hostdata;
-	int val = 0;
-	val = vport->cfg_devloss_tmo;
+
 	return snprintf(buf, PAGE_SIZE, "%d\n",	vport->cfg_devloss_tmo);
 }
 
diff --git a/drivers/scsi/lpfc/lpfc_bsg.c b/drivers/scsi/lpfc/lpfc_bsg.c
index f3f1bf1..692c29f 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.c
+++ b/drivers/scsi/lpfc/lpfc_bsg.c
@@ -83,15 +83,28 @@
 	struct fc_bsg_job *set_job;
 };
 
+#define MENLO_DID 0x0000FC0E
+
+struct lpfc_bsg_menlo {
+	struct lpfc_iocbq *cmdiocbq;
+	struct lpfc_iocbq *rspiocbq;
+	struct lpfc_dmabuf *bmp;
+
+	/* job waiting for this iocb to finish */
+	struct fc_bsg_job *set_job;
+};
+
 #define TYPE_EVT 	1
 #define TYPE_IOCB	2
 #define TYPE_MBOX	3
+#define TYPE_MENLO	4
 struct bsg_job_data {
 	uint32_t type;
 	union {
 		struct lpfc_bsg_event *evt;
 		struct lpfc_bsg_iocb iocb;
 		struct lpfc_bsg_mbox mbox;
+		struct lpfc_bsg_menlo menlo;
 	} context_un;
 };
 
@@ -2456,6 +2469,18 @@
 	case MBX_PORT_IOV_CONTROL:
 		break;
 	case MBX_SET_VARIABLE:
+		lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+			"1226 mbox: set_variable 0x%x, 0x%x\n",
+			mb->un.varWords[0],
+			mb->un.varWords[1]);
+		if ((mb->un.varWords[0] == SETVAR_MLOMNT)
+			&& (mb->un.varWords[1] == 1)) {
+			phba->wait_4_mlo_maint_flg = 1;
+		} else if (mb->un.varWords[0] == SETVAR_MLORST) {
+			phba->link_flag &= ~LS_LOOPBACK_MODE;
+			phba->fc_topology = TOPOLOGY_PT_PT;
+		}
+		break;
 	case MBX_RUN_BIU_DIAG64:
 	case MBX_READ_EVENT_LOG:
 	case MBX_READ_SPARM64:
@@ -2638,6 +2663,297 @@
 }
 
 /**
+ * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
+ * @phba: Pointer to HBA context object.
+ * @cmdiocbq: Pointer to command iocb.
+ * @rspiocbq: Pointer to response iocb.
+ *
+ * This function is the completion handler for iocbs issued using
+ * the lpfc_menlo_cmd function. This function is called by the
+ * ring event handler function without any lock held. This function
+ * can be called from both worker thread context and interrupt
+ * context. This function also can be called from another thread which
+ * cleans up the SLI layer objects.
+ * This function copies the contents of the response iocb to the
+ * response iocb memory object provided by the caller of
+ * lpfc_sli_issue_iocb_wait and then wakes up the thread which
+ * sleeps for the iocb completion.
+ **/
+static void
+lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
+			struct lpfc_iocbq *cmdiocbq,
+			struct lpfc_iocbq *rspiocbq)
+{
+	struct bsg_job_data *dd_data;
+	struct fc_bsg_job *job;
+	IOCB_t *rsp;
+	struct lpfc_dmabuf *bmp;
+	struct lpfc_bsg_menlo *menlo;
+	unsigned long flags;
+	struct menlo_response *menlo_resp;
+	int rc = 0;
+
+	spin_lock_irqsave(&phba->ct_ev_lock, flags);
+	dd_data = cmdiocbq->context1;
+	if (!dd_data) {
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		return;
+	}
+
+	menlo = &dd_data->context_un.menlo;
+	job = menlo->set_job;
+	job->dd_data = NULL; /* so timeout handler does not reply */
+
+	spin_lock_irqsave(&phba->hbalock, flags);
+	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
+	if (cmdiocbq->context2 && rspiocbq)
+		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
+		       &rspiocbq->iocb, sizeof(IOCB_t));
+	spin_unlock_irqrestore(&phba->hbalock, flags);
+
+	bmp = menlo->bmp;
+	rspiocbq = menlo->rspiocbq;
+	rsp = &rspiocbq->iocb;
+
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	/* always return the xri; it is used in the case
+	 * of a menlo download to allow the data to be sent as a continuation
+	 * of the exchange.
+	 */
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+	menlo_resp->xri = rsp->ulpContext;
+	if (rsp->ulpStatus) {
+		if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
+			switch (rsp->un.ulpWord[4] & 0xff) {
+			case IOERR_SEQUENCE_TIMEOUT:
+				rc = -ETIMEDOUT;
+				break;
+			case IOERR_INVALID_RPI:
+				rc = -EFAULT;
+				break;
+			default:
+				rc = -EACCES;
+				break;
+			}
+		} else
+			rc = -EACCES;
+	} else
+		job->reply->reply_payload_rcv_len =
+			rsp->un.genreq64.bdl.bdeSize;
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+	kfree(bmp);
+	kfree(dd_data);
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	/* complete the job back to userspace */
+	job->job_done(job);
+	spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+	return;
+}
+
+/**
+ * lpfc_menlo_cmd - send an ioctl for menlo hardware
+ * @job: fc_bsg_job to handle
+ *
+ * This function issues a gen request 64 CR ioctl for all menlo cmd requests;
+ * all the command completions return the xri for the command.
+ * For menlo data requests a gen request 64 CX is used to continue the exchange
+ * supplied in the menlo request header xri field.
+ **/
+static int
+lpfc_menlo_cmd(struct fc_bsg_job *job)
+{
+	struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
+	struct lpfc_hba *phba = vport->phba;
+	struct lpfc_iocbq *cmdiocbq, *rspiocbq;
+	IOCB_t *cmd, *rsp;
+	int rc = 0;
+	struct menlo_command *menlo_cmd;
+	struct menlo_response *menlo_resp;
+	struct lpfc_dmabuf *bmp = NULL;
+	int request_nseg;
+	int reply_nseg;
+	struct scatterlist *sgel = NULL;
+	int numbde;
+	dma_addr_t busaddr;
+	struct bsg_job_data *dd_data;
+	struct ulp_bde64 *bpl = NULL;
+
+	/* in case no data is returned return just the return code */
+	job->reply->reply_payload_rcv_len = 0;
+
+	if (job->request_len <
+	    sizeof(struct fc_bsg_request) +
+		sizeof(struct menlo_command)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2784 Received MENLO_CMD request below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (job->reply_len <
+	    sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2785 Received MENLO_CMD reply below "
+				"minimum size\n");
+		rc = -ERANGE;
+		goto no_dd_data;
+	}
+
+	if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2786 Adapter does not support menlo "
+				"commands\n");
+		rc = -EPERM;
+		goto no_dd_data;
+	}
+
+	menlo_cmd = (struct menlo_command *)
+		job->request->rqst_data.h_vendor.vendor_cmd;
+
+	menlo_resp = (struct menlo_response *)
+		job->reply->reply_data.vendor_reply.vendor_rsp;
+
+	/* allocate our bsg tracking structure */
+	dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
+	if (!dd_data) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
+				"2787 Failed allocation of dd_data\n");
+		rc = -ENOMEM;
+		goto no_dd_data;
+	}
+
+	bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
+	if (!bmp) {
+		rc = -ENOMEM;
+		goto free_dd;
+	}
+
+	cmdiocbq = lpfc_sli_get_iocbq(phba);
+	if (!cmdiocbq) {
+		rc = -ENOMEM;
+		goto free_bmp;
+	}
+
+	rspiocbq = lpfc_sli_get_iocbq(phba);
+	if (!rspiocbq) {
+		rc = -ENOMEM;
+		goto free_cmdiocbq;
+	}
+
+	rsp = &rspiocbq->iocb;
+
+	bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
+	if (!bmp->virt) {
+		rc = -ENOMEM;
+		goto free_rspiocbq;
+	}
+
+	INIT_LIST_HEAD(&bmp->list);
+	bpl = (struct ulp_bde64 *) bmp->virt;
+	request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
+				  job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
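+	/* Reply buffers are mapped with the 64-bit input BDE type */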
+	reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
+				job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+	for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
+		busaddr = sg_dma_address(sgel);
+		bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
+		bpl->tus.f.bdeSize = sg_dma_len(sgel);
+		bpl->tus.w = cpu_to_le32(bpl->tus.w);
+		bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
+		bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
+		bpl++;
+	}
+
+	cmd = &cmdiocbq->iocb;
+	cmd->un.genreq64.bdl.ulpIoTag32 = 0;
+	cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
+	cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
+	cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
+	cmd->un.genreq64.bdl.bdeSize =
+	    (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
+	cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
+	cmd->un.genreq64.w5.hcsw.Dfctl = 0;
+	cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
+	cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
+	cmd->ulpBdeCount = 1;
+	cmd->ulpClass = CLASS3;
+	cmd->ulpOwner = OWN_CHIP;
+	cmd->ulpLe = 1; /* LE: last element of the BDE list */
+	cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
+	cmdiocbq->vport = phba->pport;
+	/* We want the firmware to timeout before we do */
+	cmd->ulpTimeout = MENLO_TIMEOUT - 5;
+	cmdiocbq->context3 = bmp;
+	cmdiocbq->context2 = rspiocbq;
+	cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
+	cmdiocbq->context1 = dd_data;
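+	/* MENLO_CMD starts a new exchange with gen request 64 CR; MENLO_DATA
+	 * continues the exchange given by the xri in the menlo request
+	 * header using gen request 64 CX.
+	 */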
+	if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
+		cmd->ulpPU = MENLO_PU; /* 3 */
+		cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
+		cmd->ulpContext = MENLO_CONTEXT; /* 0 */
+	} else {
+		cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
+		cmd->ulpPU = 1;
+		cmd->un.ulpWord[4] = 0;
+		cmd->ulpContext = menlo_cmd->xri;
+	}
+
+	dd_data->type = TYPE_MENLO;
+	dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
+	dd_data->context_un.menlo.rspiocbq = rspiocbq;
+	dd_data->context_un.menlo.set_job = job;
+	dd_data->context_un.menlo.bmp = bmp;
+
+	rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
+		MENLO_TIMEOUT - 5);
+	if (rc == IOCB_SUCCESS)
+		return 0; /* done for now */
+
+	/* iocb failed so cleanup */
+	pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
+		     job->request_payload.sg_cnt, DMA_TO_DEVICE);
+	pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
+		     job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
+
+	lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
+
+free_rspiocbq:
+	lpfc_sli_release_iocbq(phba, rspiocbq);
+free_cmdiocbq:
+	lpfc_sli_release_iocbq(phba, cmdiocbq);
+free_bmp:
+	kfree(bmp);
+free_dd:
+	kfree(dd_data);
+no_dd_data:
+	/* make error code available to userspace */
+	job->reply->result = rc;
+	job->dd_data = NULL;
+	return rc;
+}
+/**
  * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
  * @job: fc_bsg_job to handle
  **/
@@ -2669,6 +2985,10 @@
 	case LPFC_BSG_VENDOR_MBOX:
 		rc = lpfc_bsg_mbox_cmd(job);
 		break;
+	case LPFC_BSG_VENDOR_MENLO_CMD:
+	case LPFC_BSG_VENDOR_MENLO_DATA:
+		rc = lpfc_menlo_cmd(job);
+		break;
 	default:
 		rc = -EINVAL;
 		job->reply->reply_payload_rcv_len = 0;
@@ -2728,6 +3048,7 @@
 	struct lpfc_bsg_event *evt;
 	struct lpfc_bsg_iocb *iocb;
 	struct lpfc_bsg_mbox *mbox;
+	struct lpfc_bsg_menlo *menlo;
 	struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
 	struct bsg_job_data *dd_data;
 	unsigned long flags;
@@ -2775,6 +3096,17 @@
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		job->job_done(job);
 		break;
+	case TYPE_MENLO:
+		menlo = &dd_data->context_un.menlo;
+		cmdiocb = menlo->cmdiocbq;
+		/* hint to completion handler that the job timed out */
+		job->reply->result = -EAGAIN;
+		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
+		/* this will call our completion handler */
+		spin_lock_irq(&phba->hbalock);
+		lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
+		spin_unlock_irq(&phba->hbalock);
+		break;
 	default:
 		spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
 		break;
diff --git a/drivers/scsi/lpfc/lpfc_bsg.h b/drivers/scsi/lpfc/lpfc_bsg.h
index 6c8f87e..5bc6308 100644
--- a/drivers/scsi/lpfc/lpfc_bsg.h
+++ b/drivers/scsi/lpfc/lpfc_bsg.h
@@ -31,6 +31,8 @@
 #define LPFC_BSG_VENDOR_DIAG_TEST	5
 #define LPFC_BSG_VENDOR_GET_MGMT_REV	6
 #define LPFC_BSG_VENDOR_MBOX		7
+#define LPFC_BSG_VENDOR_MENLO_CMD	8
+#define LPFC_BSG_VENDOR_MENLO_DATA	9
 
 struct set_ct_event {
 	uint32_t command;
@@ -96,3 +98,13 @@
 	uint8_t mbOffset;
 };
 
+/* Used for menlo command or menlo data. The xri is only used for menlo data */
+struct menlo_command {
+	uint32_t cmd;
+	uint32_t xri;
+};
+
+struct menlo_response {
+	uint32_t xri; /* return the xri of the iocb exchange */
+};
+
diff --git a/drivers/scsi/lpfc/lpfc_crtn.h b/drivers/scsi/lpfc/lpfc_crtn.h
index 6f0fb51..5087c42 100644
--- a/drivers/scsi/lpfc/lpfc_crtn.h
+++ b/drivers/scsi/lpfc/lpfc_crtn.h
@@ -63,6 +63,7 @@
 void lpfc_port_link_failure(struct lpfc_vport *);
 void lpfc_mbx_cmpl_read_la(struct lpfc_hba *, LPFC_MBOXQ_t *);
 void lpfc_init_vpi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *);
 void lpfc_retry_pport_discovery(struct lpfc_hba *);
 
 void lpfc_mbx_cmpl_reg_login(struct lpfc_hba *, LPFC_MBOXQ_t *);
@@ -221,6 +222,10 @@
 void lpfc_unregister_unused_fcf(struct lpfc_hba *);
 int lpfc_sli4_redisc_fcf_table(struct lpfc_hba *);
 void lpfc_fcf_redisc_wait_start_timer(struct lpfc_hba *);
+void lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *);
+uint16_t lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *);
+int lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *, uint16_t);
+void lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *, uint16_t);
 
 int lpfc_mem_alloc(struct lpfc_hba *, int align);
 void lpfc_mem_free(struct lpfc_hba *);
@@ -385,7 +390,7 @@
 int lpfc_parse_vpd(struct lpfc_hba *, uint8_t *, int);
 void lpfc_start_fdiscs(struct lpfc_hba *phba);
 struct lpfc_vport *lpfc_find_vport_by_vpid(struct lpfc_hba *, uint16_t);
-
+struct lpfc_sglq *__lpfc_get_active_sglq(struct lpfc_hba *, uint16_t);
 #define ScsiResult(host_code, scsi_code) (((host_code) << 16) | scsi_code)
 #define HBA_EVENT_RSCN                   5
 #define HBA_EVENT_LINK_UP                2
diff --git a/drivers/scsi/lpfc/lpfc_els.c b/drivers/scsi/lpfc/lpfc_els.c
index 2a40a6e..ee980bd 100644
--- a/drivers/scsi/lpfc/lpfc_els.c
+++ b/drivers/scsi/lpfc/lpfc_els.c
@@ -771,6 +771,7 @@
 	struct lpfc_nodelist *ndlp = cmdiocb->context1;
 	struct lpfc_dmabuf *pcmd = cmdiocb->context2, *prsp;
 	struct serv_parm *sp;
+	uint16_t fcf_index;
 	int rc;
 
 	/* Check to see if link went down during discovery */
@@ -788,6 +789,54 @@
 		vport->port_state);
 
 	if (irsp->ulpStatus) {
+		/*
+		 * In case of FIP mode, perform round robin FCF failover
+		 * due to new FCF discovery
+		 */
+		if ((phba->hba_flag & HBA_FIP_SUPPORT) &&
+		    (phba->fcf.fcf_flag & FCF_DISCOVERY)) {
+			lpfc_printf_log(phba, KERN_WARNING, LOG_FIP | LOG_ELS,
+					"2611 FLOGI failed on registered "
+					"FCF record fcf_index:%d, trying "
+					"to perform round robin failover\n",
+					phba->fcf.current_rec.fcf_indx);
+			fcf_index = lpfc_sli4_fcf_rr_next_index_get(phba);
+			if (fcf_index == LPFC_FCOE_FCF_NEXT_NONE) {
+				/*
+				 * Exhausted the eligible FCF record list,
+				 * fail through to retry FLOGI on current
+				 * FCF record.
+				 */
+				lpfc_printf_log(phba, KERN_WARNING,
+						LOG_FIP | LOG_ELS,
+						"2760 FLOGI exhausted FCF "
+						"round robin failover list, "
+						"retry FLOGI on the current "
+						"registered FCF index:%d\n",
+						phba->fcf.current_rec.fcf_indx);
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+				spin_unlock_irq(&phba->hbalock);
+			} else {
+				rc = lpfc_sli4_fcf_rr_read_fcf_rec(phba,
+								   fcf_index);
+				if (rc) {
+					lpfc_printf_log(phba, KERN_WARNING,
+							LOG_FIP | LOG_ELS,
+							"2761 FLOGI round "
+							"robin FCF failover "
+							"read FCF failed "
+							"rc:x%x, fcf_index:"
+							"%d\n", rc,
+						phba->fcf.current_rec.fcf_indx);
+					spin_lock_irq(&phba->hbalock);
+					phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+					spin_unlock_irq(&phba->hbalock);
+				} else
+					goto out;
+			}
+		}
+
 		/* Check for retry */
 		if (lpfc_els_retry(phba, cmdiocb, rspiocb))
 			goto out;
@@ -806,9 +855,8 @@
 		}
 
 		/* FLOGI failure */
-		lpfc_printf_vlog(vport, KERN_INFO, LOG_ELS,
-				 "0100 FLOGI failure Data: x%x x%x "
-				 "x%x\n",
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "0100 FLOGI failure Status:x%x/x%x TMO:x%x\n",
 				 irsp->ulpStatus, irsp->un.ulpWord[4],
 				 irsp->ulpTimeout);
 		goto flogifail;
@@ -842,8 +890,18 @@
 		else
 			rc = lpfc_cmpl_els_flogi_nport(vport, ndlp, sp);
 
-		if (!rc)
+		if (!rc) {
+			/* Mark the FCF discovery process done */
+			lpfc_printf_vlog(vport, KERN_INFO, LOG_FIP | LOG_ELS,
+					 "2769 FLOGI successful on FCF record: "
+					 "current_fcf_index:x%x, terminate FCF "
+					 "round robin failover process\n",
+					 phba->fcf.current_rec.fcf_indx);
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
+		}
 	}
 
 flogifail:
@@ -1409,6 +1467,10 @@
 			goto out;
 		}
 		/* PLOGI failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2753 PLOGI failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (lpfc_error_lost_link(irsp))
 			rc = NLP_STE_FREED_NODE;
@@ -1577,6 +1639,10 @@
 			goto out;
 		}
 		/* PRLI failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2754 PRLI failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (lpfc_error_lost_link(irsp))
 			goto out;
@@ -1860,6 +1926,10 @@
 			goto out;
 		}
 		/* ADISC failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2755 ADISC failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (!lpfc_error_lost_link(irsp))
 			lpfc_disc_state_machine(vport, ndlp, cmdiocb,
@@ -2009,6 +2079,10 @@
 			/* ELS command is being retried */
 			goto out;
 		/* LOGO failed */
+		lpfc_printf_vlog(vport, KERN_ERR, LOG_ELS,
+				 "2756 LOGO failure DID:%06X Status:x%x/x%x\n",
+				 ndlp->nlp_DID, irsp->ulpStatus,
+				 irsp->un.ulpWord[4]);
 		/* Do not call DSM for lpfc_els_abort'ed ELS cmds */
 		if (lpfc_error_lost_link(irsp))
 			goto out;
@@ -5989,7 +6063,12 @@
 			if (phba->sli_rev < LPFC_SLI_REV4)
 				lpfc_issue_fabric_reglogin(vport);
 			else {
-				lpfc_start_fdiscs(phba);
+				/*
+				 * If the physical port is instantiated using
+				 * FDISC, do not start vport discovery.
+				 */
+				if (vport->port_state != LPFC_FDISC)
+					lpfc_start_fdiscs(phba);
 				lpfc_do_scr_ns_plogi(phba, vport);
 			}
 		} else
@@ -6055,21 +6134,18 @@
 }
 
 /**
- * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * lpfc_cancel_all_vport_retry_delay_timer - Cancel all vport retry delay timer
  * @phba: pointer to lpfc hba data structure.
  *
- * This routine abort all pending discovery commands and
- * start a timer to retry FLOGI for the physical port
- * discovery.
+ * This routine cancels the retry delay timers for all the vports.
  **/
 void
-lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+lpfc_cancel_all_vport_retry_delay_timer(struct lpfc_hba *phba)
 {
 	struct lpfc_vport **vports;
 	struct lpfc_nodelist *ndlp;
-	struct Scsi_Host  *shost;
-	int i;
 	uint32_t link_state;
+	int i;
 
 	/* Treat this failure as linkdown for all vports */
 	link_state = phba->link_state;
@@ -6087,13 +6163,30 @@
 		}
 		lpfc_destroy_vport_work_array(phba, vports);
 	}
+}
+
+/**
+ * lpfc_retry_pport_discovery - Start timer to retry FLOGI.
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine aborts all pending discovery commands and
+ * starts a timer to retry FLOGI for the physical port
+ * discovery.
+ **/
+void
+lpfc_retry_pport_discovery(struct lpfc_hba *phba)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host  *shost;
+
+	/* Cancel the retry delay timers for all vports */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
 
 	/* If fabric require FLOGI, then re-instantiate physical login */
 	ndlp = lpfc_findnode_did(phba->pport, Fabric_DID);
 	if (!ndlp)
 		return;
 
-
 	shost = lpfc_shost_from_vport(phba->pport);
 	mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
 	spin_lock_irq(shost->host_lock);
@@ -6219,7 +6312,8 @@
 		lpfc_mbx_unreg_vpi(vport);
 		spin_lock_irq(shost->host_lock);
 		vport->fc_flag |= FC_VPORT_NEEDS_REG_VPI;
-		vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
+		if (phba->sli_rev == LPFC_SLI_REV4)
+			vport->fc_flag |= FC_VPORT_NEEDS_INIT_VPI;
 		spin_unlock_irq(shost->host_lock);
 	}
 
@@ -6797,21 +6891,27 @@
 	struct lpfc_sglq *sglq_entry = NULL, *sglq_next = NULL;
 	unsigned long iflag = 0;
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
 	list_for_each_entry_safe(sglq_entry, sglq_next,
 			&phba->sli4_hba.lpfc_abts_els_sgl_list, list) {
 		if (sglq_entry->sli4_xritag == xri) {
 			list_del(&sglq_entry->list);
-			spin_unlock_irqrestore(
-					&phba->sli4_hba.abts_sgl_list_lock,
-					 iflag);
-			spin_lock_irqsave(&phba->hbalock, iflag);
-
 			list_add_tail(&sglq_entry->list,
 				&phba->sli4_hba.lpfc_sgl_list);
+			sglq_entry->state = SGL_FREED;
+			spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_sgl_list_lock, iflag);
+	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
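+	/* xri was not on the aborted sgl list: flag the still-active
+	 * sglq as SGL_XRI_ABORTED instead.
+	 */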
+	sglq_entry = __lpfc_get_active_sglq(phba, xri);
+	if (!sglq_entry || (sglq_entry->sli4_xritag != xri)) {
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	sglq_entry->state = SGL_XRI_ABORTED;
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
+	return;
 }
diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c
index 2359d0b..c555e3b 100644
--- a/drivers/scsi/lpfc/lpfc_hbadisc.c
+++ b/drivers/scsi/lpfc/lpfc_hbadisc.c
@@ -1481,8 +1481,6 @@
 int
 lpfc_check_pending_fcoe_event(struct lpfc_hba *phba, uint8_t unreg_fcf)
 {
-	LPFC_MBOXQ_t *mbox;
-	int rc;
 	/*
 	 * If the Link is up and no FCoE events while in the
 	 * FCF discovery, no need to restart FCF discovery.
@@ -1491,86 +1489,70 @@
 		(phba->fcoe_eventtag == phba->fcoe_eventtag_at_fcf_scan))
 		return 0;
 
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2768 Pending link or FCF event during current "
+			"handling of the previous event: link_state:x%x, "
+			"evt_tag_at_scan:x%x, evt_tag_current:x%x\n",
+			phba->link_state, phba->fcoe_eventtag_at_fcf_scan,
+			phba->fcoe_eventtag);
+
 	spin_lock_irq(&phba->hbalock);
 	phba->fcf.fcf_flag &= ~FCF_AVAILABLE;
 	spin_unlock_irq(&phba->hbalock);
 
-	if (phba->link_state >= LPFC_LINK_UP)
-		lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
-	else {
+	if (phba->link_state >= LPFC_LINK_UP) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2780 Restart FCF table scan due to "
+				"pending FCF event:evt_tag_at_scan:x%x, "
+				"evt_tag_current:x%x\n",
+				phba->fcoe_eventtag_at_fcf_scan,
+				phba->fcoe_eventtag);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
+	} else {
 		/*
 		 * Do not continue FCF discovery and clear FCF_DISC_INPROGRESS
 		 * flag
 		 */
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-		phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
+		phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV | FCF_DISCOVERY);
 		spin_unlock_irq(&phba->hbalock);
 	}
 
+	/* Unregister the currently registered FCF if required */
 	if (unreg_fcf) {
 		spin_lock_irq(&phba->hbalock);
 		phba->fcf.fcf_flag &= ~FCF_REGISTERED;
 		spin_unlock_irq(&phba->hbalock);
-		mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-		if (!mbox) {
-			lpfc_printf_log(phba, KERN_ERR,
-				LOG_DISCOVERY|LOG_MBOX,
-				"2610 UNREG_FCFI mbox allocation failed\n");
-			return 1;
-		}
-		lpfc_unreg_fcfi(mbox, phba->fcf.fcfi);
-		mbox->vport = phba->pport;
-		mbox->mbox_cmpl = lpfc_unregister_fcfi_cmpl;
-		rc = lpfc_sli_issue_mbox(phba, mbox, MBX_NOWAIT);
-		if (rc == MBX_NOT_FINISHED) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
-				"2611 UNREG_FCFI issue mbox failed\n");
-			mempool_free(mbox, phba->mbox_mem_pool);
-		}
+		lpfc_sli4_unregister_fcf(phba);
 	}
-
 	return 1;
 }
 
 /**
- * lpfc_mbx_cmpl_read_fcf_record - Completion handler for read_fcf mbox.
+ * lpfc_sli4_fcf_rec_mbox_parse - parse non-embedded fcf record mailbox command
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox object.
+ * @next_fcf_index: pointer to holder of next fcf index.
  *
- * This function iterate through all the fcf records available in
- * HBA and choose the optimal FCF record for discovery. After finding
- * the FCF for discovery it register the FCF record and kick start
- * discovery.
- * If FCF_IN_USE flag is set in currently used FCF, the routine try to
- * use a FCF record which match fabric name and mac address of the
- * currently used FCF record.
- * If the driver support only one FCF, it will try to use the FCF record
- * used by BOOT_BIOS.
+ * This routine parses the non-embedded fcf mailbox command by performing the
+ * necessary error checking, non-embedded read FCF record mailbox command
+ * SGE parsing, and endianness swapping.
+ *
+ * Returns the pointer to the new FCF record in the non-embedded mailbox
+ * command DMA memory if successful, otherwise NULL.
  */
-void
-lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+static struct fcf_record *
+lpfc_sli4_fcf_rec_mbox_parse(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq,
+			     uint16_t *next_fcf_index)
 {
 	void *virt_addr;
 	dma_addr_t phys_addr;
-	uint8_t *bytep;
 	struct lpfc_mbx_sge sge;
 	struct lpfc_mbx_read_fcf_tbl *read_fcf;
 	uint32_t shdr_status, shdr_add_status;
 	union lpfc_sli4_cfg_shdr *shdr;
 	struct fcf_record *new_fcf_record;
-	uint32_t boot_flag, addr_mode;
-	uint32_t next_fcf_index;
-	struct lpfc_fcf_rec *fcf_rec = NULL;
-	unsigned long iflags;
-	uint16_t vlan_id;
-	int rc;
-
-	/* If there is pending FCoE event restart FCF table scan */
-	if (lpfc_check_pending_fcoe_event(phba, 0)) {
-		lpfc_sli4_mbox_cmd_free(phba, mboxq);
-		return;
-	}
 
 	/* Get the first SGE entry from the non-embedded DMA memory. This
 	 * routine only uses a single SGE.
@@ -1581,59 +1563,183 @@
 		lpfc_printf_log(phba, KERN_ERR, LOG_MBOX,
 				"2524 Failed to get the non-embedded SGE "
 				"virtual address\n");
-		goto out;
+		return NULL;
 	}
 	virt_addr = mboxq->sge_array->addr[0];
 
 	shdr = (union lpfc_sli4_cfg_shdr *)virt_addr;
 	shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
-	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
-				 &shdr->response);
-	/*
-	 * The FCF Record was read and there is no reason for the driver
-	 * to maintain the FCF record data or memory. Instead, just need
-	 * to book keeping the FCFIs can be used.
-	 */
+	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
 	if (shdr_status || shdr_add_status) {
-		if (shdr_status == STATUS_FCF_TABLE_EMPTY) {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		if (shdr_status == STATUS_FCF_TABLE_EMPTY)
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2726 READ_FCF_RECORD Indicates empty "
 					"FCF table.\n");
-		} else {
-			lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+		else
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 					"2521 READ_FCF_RECORD mailbox failed "
-					"with status x%x add_status x%x, mbx\n",
-					shdr_status, shdr_add_status);
-		}
-		goto out;
+					"with status x%x add_status x%x, "
+					"mbx\n", shdr_status, shdr_add_status);
+		return NULL;
 	}
-	/* Interpreting the returned information of FCF records */
+
+	/* Interpreting the returned information of the FCF record */
 	read_fcf = (struct lpfc_mbx_read_fcf_tbl *)virt_addr;
 	lpfc_sli_pcimem_bcopy(read_fcf, read_fcf,
 			      sizeof(struct lpfc_mbx_read_fcf_tbl));
-	next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
-
+	*next_fcf_index = bf_get(lpfc_mbx_read_fcf_tbl_nxt_vindx, read_fcf);
 	new_fcf_record = (struct fcf_record *)(virt_addr +
 			  sizeof(struct lpfc_mbx_read_fcf_tbl));
 	lpfc_sli_pcimem_bcopy(new_fcf_record, new_fcf_record,
 			      sizeof(struct fcf_record));
-	bytep = virt_addr + sizeof(union lpfc_sli4_cfg_shdr);
 
+	return new_fcf_record;
+}
+
+/**
+ * lpfc_sli4_log_fcf_record_info - Log the information of a fcf record
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_record: pointer to the fcf record.
+ * @vlan_id: the lowest vlan identifier associated to this fcf record.
+ * @next_fcf_index: the index to the next fcf record in hba's fcf table.
+ *
+ * This routine logs the detailed FCF record if the LOG_FIP logging is
+ * enabled.
+ **/
+static void
+lpfc_sli4_log_fcf_record_info(struct lpfc_hba *phba,
+			      struct fcf_record *fcf_record,
+			      uint16_t vlan_id,
+			      uint16_t next_fcf_index)
+{
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2764 READ_FCF_RECORD:\n"
+			"\tFCF_Index     : x%x\n"
+			"\tFCF_Avail     : x%x\n"
+			"\tFCF_Valid     : x%x\n"
+			"\tFIP_Priority  : x%x\n"
+			"\tMAC_Provider  : x%x\n"
+			"\tLowest VLANID : x%x\n"
+			"\tFCF_MAC Addr  : x%x:%x:%x:%x:%x:%x\n"
+			"\tFabric_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tSwitch_Name   : x%x:%x:%x:%x:%x:%x:%x:%x\n"
+			"\tNext_FCF_Index: x%x\n",
+			bf_get(lpfc_fcf_record_fcf_index, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_avail, fcf_record),
+			bf_get(lpfc_fcf_record_fcf_valid, fcf_record),
+			fcf_record->fip_priority,
+			bf_get(lpfc_fcf_record_mac_addr_prov, fcf_record),
+			vlan_id,
+			bf_get(lpfc_fcf_record_mac_0, fcf_record),
+			bf_get(lpfc_fcf_record_mac_1, fcf_record),
+			bf_get(lpfc_fcf_record_mac_2, fcf_record),
+			bf_get(lpfc_fcf_record_mac_3, fcf_record),
+			bf_get(lpfc_fcf_record_mac_4, fcf_record),
+			bf_get(lpfc_fcf_record_mac_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_fab_name_7, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_0, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_1, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_2, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_3, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_4, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_5, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_6, fcf_record),
+			bf_get(lpfc_fcf_record_switch_name_7, fcf_record),
+			next_fcf_index);
+}
+
+/**
+ * lpfc_mbx_cmpl_fcf_scan_read_fcf_rec - fcf scan read_fcf mbox cmpl handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This function iterates through all the fcf records available in
+ * the HBA and chooses the optimal FCF record for discovery. After
+ * finding the FCF for discovery it registers the FCF record and
+ * kick-starts discovery.
+ * If FCF_IN_USE flag is set in currently used FCF, the routine tries to
+ * use an FCF record which matches fabric name and mac address of the
+ * currently used FCF record.
+ * If the driver supports only one FCF, it will try to use the FCF record
+ * used by BOOT_BIOS.
+ */
+void
+lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	struct lpfc_fcf_rec *fcf_rec = NULL;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If there is pending FCoE event restart FCF table scan */
+	if (lpfc_check_pending_fcoe_event(phba, 0)) {
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2765 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		/* Let next new FCF event trigger fast failover */
+		spin_lock_irq(&phba->hbalock);
+		phba->hba_flag &= ~FCF_DISC_INPROGRESS;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Check the FCF record against the connection list */
 	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
 				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
 	/*
 	 * If the fcf record does not match with connect list entries
-	 * read the next entry.
+	 * read the next entry; otherwise, this is an eligible FCF
+	 * record for round robin FCF failover.
 	 */
-	if (!rc)
+	if (!rc) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2781 FCF record fcf_index:x%x failed FCF "
+				"connection list check, fcf_avail:x%x, "
+				"fcf_valid:x%x\n",
+				bf_get(lpfc_fcf_record_fcf_index,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_avail,
+				       new_fcf_record),
+				bf_get(lpfc_fcf_record_fcf_valid,
+				       new_fcf_record));
 		goto read_next_fcf;
+	} else {
+		fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+		rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+		if (rc)
+			goto read_next_fcf;
+	}
+
 	/*
 	 * If this is not the first FCF discovery of the HBA, use last
 	 * FCF record for the discovery. The condition that a rescan
 	 * matches the in-use FCF record: fabric name, switch name, mac
 	 * address, and vlan_id.
 	 */
-	spin_lock_irqsave(&phba->hbalock, iflags);
+	spin_lock_irq(&phba->hbalock);
 	if (phba->fcf.fcf_flag & FCF_IN_USE) {
 		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
 					new_fcf_record) &&
@@ -1649,8 +1755,9 @@
 				__lpfc_sli4_stop_fcf_redisc_wait_timer(phba);
 			else if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
 				/* If in fast failover, mark it's completed */
-				phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+				phba->fcf.fcf_flag &= ~(FCF_REDISC_FOV |
+							FCF_DISCOVERY);
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
 		}
 		/*
@@ -1661,7 +1768,7 @@
 		 * next candidate.
 		 */
 		if (!(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 	}
@@ -1669,14 +1776,9 @@
 	 * Update on failover FCF record only if it's in FCF fast-failover
 	 * period; otherwise, update on current FCF record.
 	 */
-	if (phba->fcf.fcf_flag & FCF_REDISC_FOV) {
-		/* Fast FCF failover only to the same fabric name */
-		if (lpfc_fab_name_match(phba->fcf.current_rec.fabric_name,
-					new_fcf_record))
-			fcf_rec = &phba->fcf.failover_rec;
-		else
-			goto read_next_fcf;
-	} else
+	if (phba->fcf.fcf_flag & FCF_REDISC_FOV)
+		fcf_rec = &phba->fcf.failover_rec;
+	else
 		fcf_rec = &phba->fcf.current_rec;
 
 	if (phba->fcf.fcf_flag & FCF_AVAILABLE) {
@@ -1689,7 +1791,7 @@
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, BOOT_ENABLE);
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
@@ -1698,20 +1800,19 @@
 		 * the next FCF record.
 		 */
 		if (!boot_flag && (fcf_rec->flag & BOOT_ENABLE)) {
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
 			goto read_next_fcf;
 		}
 		/*
 		 * If the new hba FCF record has lower priority value
 		 * than the driver FCF record, use the new record.
 		 */
-		if (lpfc_fab_name_match(fcf_rec->fabric_name, new_fcf_record) &&
-		    (new_fcf_record->fip_priority < fcf_rec->priority)) {
+		if (new_fcf_record->fip_priority < fcf_rec->priority) {
 			/* Choose this FCF record */
 			__lpfc_update_fcf_record(phba, fcf_rec, new_fcf_record,
 					addr_mode, vlan_id, 0);
 		}
-		spin_unlock_irqrestore(&phba->hbalock, iflags);
+		spin_unlock_irq(&phba->hbalock);
 		goto read_next_fcf;
 	}
 	/*
@@ -1724,7 +1825,7 @@
 					 BOOT_ENABLE : 0));
 		phba->fcf.fcf_flag |= FCF_AVAILABLE;
 	}
-	spin_unlock_irqrestore(&phba->hbalock, iflags);
+	spin_unlock_irq(&phba->hbalock);
 	goto read_next_fcf;
 
 read_next_fcf:
@@ -1740,9 +1841,22 @@
 			 * FCF scan inprogress, and do nothing
 			 */
 			if (!(phba->fcf.failover_rec.flag & RECORD_VALID)) {
-				spin_lock_irqsave(&phba->hbalock, iflags);
+				lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+					       "2782 No suitable FCF record "
+					       "found during this round of "
+					       "post FCF rediscovery scan: "
+					       "fcf_evt_tag:x%x, fcf_index: "
+					       "x%x\n",
+					       phba->fcoe_eventtag_at_fcf_scan,
+					       bf_get(lpfc_fcf_record_fcf_index,
+						      new_fcf_record));
+				/*
+				 * Let next new FCF event trigger fast
+				 * failover
+				 */
+				spin_lock_irq(&phba->hbalock);
 				phba->hba_flag &= ~FCF_DISC_INPROGRESS;
-				spin_unlock_irqrestore(&phba->hbalock, iflags);
+				spin_unlock_irq(&phba->hbalock);
 				return;
 			}
 			/*
@@ -1754,16 +1868,23 @@
 			 * record.
 			 */
 
-			/* unregister the current in-use FCF record */
+			/* Unregister the current in-use FCF record */
 			lpfc_unregister_fcf(phba);
-			/* replace in-use record with the new record */
+
+			/* Replace in-use record with the new record */
 			memcpy(&phba->fcf.current_rec,
 			       &phba->fcf.failover_rec,
 			       sizeof(struct lpfc_fcf_rec));
 			/* mark the FCF fast failover completed */
-			spin_lock_irqsave(&phba->hbalock, iflags);
+			spin_lock_irq(&phba->hbalock);
 			phba->fcf.fcf_flag &= ~FCF_REDISC_FOV;
-			spin_unlock_irqrestore(&phba->hbalock, iflags);
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Set up the initial registered FCF index for FLOGI
+			 * round robin FCF failover.
+			 */
+			phba->fcf.fcf_rr_init_indx =
+					phba->fcf.failover_rec.fcf_indx;
 			/* Register to the new FCF record */
 			lpfc_register_fcf(phba);
 		} else {
@@ -1776,13 +1897,25 @@
 				return;
 			/*
 			 * Otherwise, initial scan or post linkdown rescan,
-			 * register with the best fit FCF record found so
-			 * far through the scanning process.
+			 * register with the best FCF record found so far
+			 * through the FCF scanning process.
 			 */
+
+			/* mark the initial FCF discovery completed */
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Set up the initial registered FCF index for FLOGI
+			 * round robin FCF failover
+			 */
+			phba->fcf.fcf_rr_init_indx =
+					phba->fcf.current_rec.fcf_indx;
+			/* Register to the new FCF record */
 			lpfc_register_fcf(phba);
 		}
 	} else
-		lpfc_sli4_read_fcf_record(phba, next_fcf_index);
+		lpfc_sli4_fcf_scan_read_fcf_rec(phba, next_fcf_index);
 	return;
 
 out:
@@ -1793,6 +1926,141 @@
 }
 
 /**
+ * lpfc_mbx_cmpl_fcf_rr_read_fcf_rec - fcf round robin read_fcf mbox cmpl hdler
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function for the read FCF record mailbox command
+ * issued during FLOGI failure round robin FCF failover, reading an FCF
+ * record selected from the eligible FCF record bmask to perform the
+ * failover. If the FCF read back is not valid/available, it
+ * fails through to retrying FLOGI to the currently registered FCF again.
+ * Otherwise, if the FCF read back is valid and available, it will set the
+ * newly read FCF record to the failover FCF record, unregister currently
+ * registered FCF record, copy the failover FCF record to the current
+ * FCF record, and then register the current FCF record before proceeding
+ * to try FLOGI on the new failover FCF.
+ */
+void
+lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t next_fcf_index;
+	uint16_t current_fcf_index;
+	uint16_t vlan_id;
+
+	/* If link state is not up, stop the round robin failover process */
+	if (phba->link_state < LPFC_LINK_UP) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_DISCOVERY;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+		return;
+	}
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_WARNING, LOG_FIP,
+				"2766 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		goto out;
+	}
+
+	/* Get the needed parameters from FCF record */
+	lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				 &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	/* Upload new FCF record to the failover FCF record */
+	spin_lock_irq(&phba->hbalock);
+	__lpfc_update_fcf_record(phba, &phba->fcf.failover_rec,
+				 new_fcf_record, addr_mode, vlan_id,
+				 (boot_flag ? BOOT_ENABLE : 0));
+	spin_unlock_irq(&phba->hbalock);
+
+	current_fcf_index = phba->fcf.current_rec.fcf_indx;
+
+	/* Unregister the current in-use FCF record */
+	lpfc_unregister_fcf(phba);
+
+	/* Replace in-use record with the new record */
+	memcpy(&phba->fcf.current_rec, &phba->fcf.failover_rec,
+	       sizeof(struct lpfc_fcf_rec));
+
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2783 FLOGI round robin FCF failover from FCF "
+			"(index:x%x) to FCF (index:x%x).\n",
+			current_fcf_index,
+			bf_get(lpfc_fcf_record_fcf_index, new_fcf_record));
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	lpfc_register_fcf(phba);
+}
+
+/**
+ * lpfc_mbx_cmpl_read_fcf_rec - read fcf completion handler.
+ * @phba: pointer to lpfc hba data structure.
+ * @mboxq: pointer to mailbox object.
+ *
+ * This is the callback function of read FCF record mailbox command for
+ * updating the eligible FCF bmask for FLOGI failure round robin FCF
+ * failover when a new FCF event happens. If the FCF read back is
+ * valid/available and it passes the connection list check, it updates
+ * the bmask for the eligible FCF record for round robin failover.
+ */
+void
+lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *phba, LPFC_MBOXQ_t *mboxq)
+{
+	struct fcf_record *new_fcf_record;
+	uint32_t boot_flag, addr_mode;
+	uint16_t fcf_index, next_fcf_index;
+	uint16_t vlan_id;
+	int rc;
+
+	/* If link state is not up, no need to proceed */
+	if (phba->link_state < LPFC_LINK_UP)
+		goto out;
+
+	/* If FCF discovery period is over, no need to proceed */
+	if (!(phba->fcf.fcf_flag & FCF_DISCOVERY))
+		goto out;
+
+	/* Parse the FCF record from the non-embedded mailbox command */
+	new_fcf_record = lpfc_sli4_fcf_rec_mbox_parse(phba, mboxq,
+						      &next_fcf_index);
+	if (!new_fcf_record) {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2767 Mailbox command READ_FCF_RECORD "
+				"failed to retrieve a FCF record.\n");
+		goto out;
+	}
+
+	/* Check the connection list for eligibility */
+	rc = lpfc_match_fcf_conn_list(phba, new_fcf_record, &boot_flag,
+				      &addr_mode, &vlan_id);
+
+	/* Log the FCF record information if turned on */
+	lpfc_sli4_log_fcf_record_info(phba, new_fcf_record, vlan_id,
+				      next_fcf_index);
+
+	if (!rc)
+		goto out;
+
+	/* Update the eligible FCF record index bmask */
+	fcf_index = bf_get(lpfc_fcf_record_fcf_index, new_fcf_record);
+	rc = lpfc_sli4_fcf_rr_index_set(phba, fcf_index);
+
+out:
+	lpfc_sli4_mbox_cmd_free(phba, mboxq);
+}
+
+/**
  * lpfc_init_vpi_cmpl - Completion handler for init_vpi mbox command.
  * @phba: pointer to lpfc hba data structure.
  * @mboxq: pointer to mailbox data structure.
@@ -2024,8 +2292,6 @@
 	int rc;
 	struct fcf_record *fcf_record;
 
-	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
-
 	spin_lock_irq(&phba->hbalock);
 	switch (la->UlnkSpeed) {
 	case LA_1GHZ_LINK:
@@ -2117,18 +2383,24 @@
 	spin_unlock_irq(&phba->hbalock);
 
 	lpfc_linkup(phba);
-	if (sparam_mbox) {
-		lpfc_read_sparam(phba, sparam_mbox, 0);
-		sparam_mbox->vport = vport;
-		sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
-		rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
-		if (rc == MBX_NOT_FINISHED) {
-			mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
-			lpfc_mbuf_free(phba, mp->virt, mp->phys);
-			kfree(mp);
-			mempool_free(sparam_mbox, phba->mbox_mem_pool);
-			goto out;
-		}
+	sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!sparam_mbox)
+		goto out;
+
+	rc = lpfc_read_sparam(phba, sparam_mbox, 0);
+	if (rc) {
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
+	}
+	sparam_mbox->vport = vport;
+	sparam_mbox->mbox_cmpl = lpfc_mbx_cmpl_read_sparam;
+	rc = lpfc_sli_issue_mbox(phba, sparam_mbox, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED) {
+		mp = (struct lpfc_dmabuf *) sparam_mbox->context1;
+		lpfc_mbuf_free(phba, mp->virt, mp->phys);
+		kfree(mp);
+		mempool_free(sparam_mbox, phba->mbox_mem_pool);
+		goto out;
 	}
 
 	if (!(phba->hba_flag & HBA_FCOE_SUPPORT)) {
@@ -2186,10 +2458,20 @@
 			spin_unlock_irq(&phba->hbalock);
 			return;
 		}
+		/* This is the initial FCF discovery scan */
+		phba->fcf.fcf_flag |= FCF_INIT_DISC;
 		spin_unlock_irq(&phba->hbalock);
-		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
-		if (rc)
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2778 Start FCF table scan at linkup\n");
+
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
+		if (rc) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+			spin_unlock_irq(&phba->hbalock);
 			goto out;
+		}
 	}
 
 	return;
@@ -3379,8 +3661,12 @@
 		shost = lpfc_shost_from_vport(vports[i]);
 		spin_lock_irq(shost->host_lock);
 		list_for_each_entry(ndlp, &vports[i]->fc_nodes, nlp_listp) {
-			if (ndlp->nlp_flag & NLP_RPI_VALID)
+			if (ndlp->nlp_flag & NLP_RPI_VALID) {
+				/* The mempool_alloc might sleep */
+				spin_unlock_irq(shost->host_lock);
 				lpfc_unreg_rpi(vports[i], ndlp);
+				spin_lock_irq(shost->host_lock);
+			}
 		}
 		spin_unlock_irq(shost->host_lock);
 	}
@@ -4756,6 +5042,7 @@
 		return;
 	/* Reset HBA FCF states after successful unregister FCF */
 	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
 
 	/*
 	 * If driver is not unloading, check if there is any other
@@ -4765,13 +5052,21 @@
 	    (phba->link_state < LPFC_LINK_UP))
 		return;
 
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	/* This is considered as the initial FCF discovery scan */
+	spin_lock_irq(&phba->hbalock);
+	phba->fcf.fcf_flag |= FCF_INIT_DISC;
+	spin_unlock_irq(&phba->hbalock);
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 
-	if (rc)
+	if (rc) {
+		spin_lock_irq(&phba->hbalock);
+		phba->fcf.fcf_flag &= ~FCF_INIT_DISC;
+		spin_unlock_irq(&phba->hbalock);
 		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY|LOG_MBOX,
 				"2553 lpfc_unregister_unused_fcf failed "
 				"to read FCF record HBA state x%x\n",
 				phba->pport->port_state);
+	}
 }
 
 /**
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index d29ac7c..ea44239 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -350,7 +350,12 @@
 	mb = &pmb->u.mb;
 
 	/* Get login parameters for NID.  */
-	lpfc_read_sparam(phba, pmb, 0);
+	rc = lpfc_read_sparam(phba, pmb, 0);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
 	pmb->vport = vport;
 	if (lpfc_sli_issue_mbox(phba, pmb, MBX_POLL) != MBX_SUCCESS) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
@@ -359,7 +364,7 @@
 				mb->mbxCommand, mb->mbxStatus);
 		phba->link_state = LPFC_HBA_ERROR;
 		mp = (struct lpfc_dmabuf *) pmb->context1;
-		mempool_free( pmb, phba->mbox_mem_pool);
+		mempool_free(pmb, phba->mbox_mem_pool);
 		lpfc_mbuf_free(phba, mp->virt, mp->phys);
 		kfree(mp);
 		return -EIO;
@@ -544,7 +549,7 @@
 			mempool_free(pmb, phba->mbox_mem_pool);
 			return -EIO;
 		}
-	} else if (phba->cfg_suppress_link_up == 0) {
+	} else if (phba->cfg_suppress_link_up == LPFC_INITIALIZE_LINK) {
 		lpfc_init_link(phba, pmb, phba->cfg_topology,
 			phba->cfg_link_speed);
 		pmb->mbox_cmpl = lpfc_sli_def_mbox_cmpl;
@@ -571,6 +576,11 @@
 	}
 	/* MBOX buffer will be freed in mbox compl */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_config_async(phba, pmb, LPFC_ELS_RING);
 	pmb->mbox_cmpl = lpfc_config_async_cmpl;
 	pmb->vport = phba->pport;
@@ -588,6 +598,11 @@
 
 	/* Get Option rom version */
 	pmb = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!pmb) {
+		phba->link_state = LPFC_HBA_ERROR;
+		return -ENOMEM;
+	}
+
 	lpfc_dump_wakeup_param(phba, pmb);
 	pmb->mbox_cmpl = lpfc_dump_wakeup_param_cmpl;
 	pmb->vport = phba->pport;
@@ -652,7 +667,7 @@
 			mempool_free(pmb, phba->mbox_mem_pool);
 		return -EIO;
 	}
-	phba->cfg_suppress_link_up = 0;
+	phba->cfg_suppress_link_up = LPFC_INITIALIZE_LINK;
 
 	return 0;
 }
@@ -807,6 +822,8 @@
 	LIST_HEAD(aborts);
 	int ret;
 	unsigned long iflag = 0;
+	struct lpfc_sglq *sglq_entry = NULL;
+
 	ret = lpfc_hba_down_post_s3(phba);
 	if (ret)
 		return ret;
@@ -822,6 +839,10 @@
 	 * list.
 	 */
 	spin_lock(&phba->sli4_hba.abts_sgl_list_lock);
+	list_for_each_entry(sglq_entry,
+		&phba->sli4_hba.lpfc_abts_els_sgl_list, list)
+		sglq_entry->state = SGL_FREED;
+
 	list_splice_init(&phba->sli4_hba.lpfc_abts_els_sgl_list,
 			&phba->sli4_hba.lpfc_sgl_list);
 	spin_unlock(&phba->sli4_hba.abts_sgl_list_lock);
@@ -2178,8 +2199,10 @@
 void
 __lpfc_sli4_stop_fcf_redisc_wait_timer(struct lpfc_hba *phba)
 {
-	/* Clear pending FCF rediscovery wait timer */
-	phba->fcf.fcf_flag &= ~FCF_REDISC_PEND;
+	/* Clear pending FCF rediscovery wait and failover in progress flags */
+	phba->fcf.fcf_flag &= ~(FCF_REDISC_PEND |
+				FCF_DEAD_DISC |
+				FCF_ACVL_DISC);
 	/* Now, try to stop the timer */
 	del_timer(&phba->fcf.redisc_wait);
 }
@@ -2576,6 +2599,14 @@
 	init_timer(&vport->els_tmofunc);
 	vport->els_tmofunc.function = lpfc_els_timeout;
 	vport->els_tmofunc.data = (unsigned long)vport;
+	if (phba->pcidev->device == PCI_DEVICE_ID_HORNET) {
+		phba->menlo_flag |= HBA_MENLO_SUPPORT;
+		/* check for menlo minimum sg count */
+		if (phba->cfg_sg_seg_cnt < LPFC_DEFAULT_MENLO_SG_SEG_CNT) {
+			phba->cfg_sg_seg_cnt = LPFC_DEFAULT_MENLO_SG_SEG_CNT;
+			shost->sg_tablesize = phba->cfg_sg_seg_cnt;
+		}
+	}
 
 	error = scsi_add_host_with_dma(shost, dev, &phba->pcidev->dev);
 	if (error)
@@ -2912,6 +2943,9 @@
 	/* FCF rediscovery event to worker thread */
 	phba->fcf.fcf_flag |= FCF_REDISC_EVT;
 	spin_unlock_irq(&phba->hbalock);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+			"2776 FCF rediscover wait timer expired, post "
+			"a worker thread event for FCF table scan\n");
 	/* wake up worker thread */
 	lpfc_worker_wake_up(phba);
 }
@@ -3183,6 +3217,68 @@
 }
 
 /**
+ * lpfc_sli4_perform_vport_cvl - Perform clear virtual link on a vport
+ * @vport: pointer to vport data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on a vport in
+ * response to a CVL event.
+ *
+ * Return the pointer to the vport's fabric ndlp if successful, otherwise
+ * return NULL.
+ **/
+static struct lpfc_nodelist *
+lpfc_sli4_perform_vport_cvl(struct lpfc_vport *vport)
+{
+	struct lpfc_nodelist *ndlp;
+	struct Scsi_Host *shost;
+	struct lpfc_hba *phba;
+
+	if (!vport)
+		return NULL;
+	ndlp = lpfc_findnode_did(vport, Fabric_DID);
+	if (!ndlp)
+		return NULL;
+	phba = vport->phba;
+	if (!phba)
+		return NULL;
+	if (phba->pport->port_state <= LPFC_FLOGI)
+		return NULL;
+	/* If virtual link is not yet instantiated ignore CVL */
+	if (vport->port_state <= LPFC_FDISC)
+		return NULL;
+	shost = lpfc_shost_from_vport(vport);
+	if (!shost)
+		return NULL;
+	lpfc_linkdown_port(vport);
+	lpfc_cleanup_pending_mbox(vport);
+	spin_lock_irq(shost->host_lock);
+	vport->fc_flag |= FC_VPORT_CVL_RCVD;
+	spin_unlock_irq(shost->host_lock);
+
+	return ndlp;
+}
+
+/**
+ * lpfc_sli4_perform_all_vport_cvl - Perform clear virtual link on all vports
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine is to perform Clear Virtual Link (CVL) on all vports in
+ * response to a FCF dead event.
+ **/
+static void
+lpfc_sli4_perform_all_vport_cvl(struct lpfc_hba *phba)
+{
+	struct lpfc_vport **vports;
+	int i;
+
+	vports = lpfc_create_vport_work_array(phba);
+	if (vports)
+		for (i = 0; i <= phba->max_vports && vports[i] != NULL; i++)
+			lpfc_sli4_perform_vport_cvl(vports[i]);
+	lpfc_destroy_vport_work_array(phba, vports);
+}
+
+/**
  * lpfc_sli4_async_fcoe_evt - Process the asynchronous fcoe event
  * @phba: pointer to lpfc hba data structure.
  * @acqe_link: pointer to the async fcoe completion queue entry.
@@ -3198,7 +3294,6 @@
 	struct lpfc_vport *vport;
 	struct lpfc_nodelist *ndlp;
 	struct Scsi_Host  *shost;
-	uint32_t link_state;
 	int active_vlink_present;
 	struct lpfc_vport **vports;
 	int i;
@@ -3208,10 +3303,11 @@
 	switch (event_type) {
 	case LPFC_FCOE_EVENT_TYPE_NEW_FCF:
 	case LPFC_FCOE_EVENT_TYPE_FCF_PARAM_MOD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-			"2546 New FCF found index 0x%x tag 0x%x\n",
-			acqe_fcoe->index,
-			acqe_fcoe->event_tag);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+			"2546 New FCF found/FCF parameter modified event: "
+			"evt_tag:x%x, fcf_index:x%x\n",
+			acqe_fcoe->event_tag, acqe_fcoe->index);
+
 		spin_lock_irq(&phba->hbalock);
 		if ((phba->fcf.fcf_flag & FCF_SCAN_DONE) ||
 		    (phba->hba_flag & FCF_DISC_INPROGRESS)) {
@@ -3222,6 +3318,7 @@
 			spin_unlock_irq(&phba->hbalock);
 			break;
 		}
+
 		if (phba->fcf.fcf_flag & FCF_REDISC_EVT) {
 			/*
 			 * If fast FCF failover rescan event is pending,
@@ -3232,12 +3329,33 @@
 		}
 		spin_unlock_irq(&phba->hbalock);
 
-		/* Read the FCF table and re-discover SAN. */
-		rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+		if ((phba->fcf.fcf_flag & FCF_DISCOVERY) &&
+		    !(phba->fcf.fcf_flag & FCF_REDISC_FOV)) {
+			/*
+			 * During period of FCF discovery, read the FCF
+			 * table record indexed by the event to update
+			 * FCF round robin failover eligible FCF bmask.
+			 */
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2779 Read new FCF record with "
+					"fcf_index:x%x for updating FCF "
+					"round robin failover bmask\n",
+					acqe_fcoe->index);
+			rc = lpfc_sli4_read_fcf_rec(phba, acqe_fcoe->index);
+		}
+
+		/* Otherwise, scan the entire FCF table and re-discover SAN */
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2770 Start FCF table scan due to new FCF "
+				"event: evt_tag:x%x, fcf_index:x%x\n",
+				acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba,
+						     LPFC_FCOE_FCF_GET_FIRST);
 		if (rc)
-			lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-					"2547 Read FCF record failed 0x%x\n",
-					rc);
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+					"2547 Issue FCF scan read FCF mailbox "
+					"command failed 0x%x\n", rc);
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_TABLE_FULL:
@@ -3248,47 +3366,63 @@
 		break;
 
 	case LPFC_FCOE_EVENT_TYPE_FCF_DEAD:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2549 FCF disconnected from network index 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index,
 			acqe_fcoe->event_tag);
 		/* If the event is not for currently used fcf do nothing */
 		if (phba->fcf.current_rec.fcf_indx != acqe_fcoe->index)
 			break;
-		/*
-		 * Currently, driver support only one FCF - so treat this as
-		 * a link down, but save the link state because we don't want
-		 * it to be changed to Link Down unless it is already down.
+		/* We request the port to rediscover the entire FCF table for
+		 * a fast recovery in case the current FCF record is no
+		 * longer valid, if we are not already in the middle of
+		 * the FCF failover process.
 		 */
-		link_state = phba->link_state;
-		lpfc_linkdown(phba);
-		phba->link_state = link_state;
-		/* Unregister FCF if no devices connected to it */
-		lpfc_unregister_unused_fcf(phba);
+		spin_lock_irq(&phba->hbalock);
+		if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+			spin_unlock_irq(&phba->hbalock);
+			/* Update FLOGI FCF failover eligible FCF bmask */
+			lpfc_sli4_fcf_rr_index_clear(phba, acqe_fcoe->index);
+			break;
+		}
+		/* Mark the fast failover process in progress */
+		phba->fcf.fcf_flag |= FCF_DEAD_DISC;
+		spin_unlock_irq(&phba->hbalock);
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+				"2771 Start FCF fast failover process due to "
+				"FCF DEAD event: evt_tag:x%x, fcf_index:x%x "
+				"\n", acqe_fcoe->event_tag, acqe_fcoe->index);
+		rc = lpfc_sli4_redisc_fcf_table(phba);
+		if (rc) {
+			lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+					LOG_DISCOVERY,
+					"2772 Issue FCF rediscover mabilbox "
+					"command failed, fail through to FCF "
+					"dead event\n");
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * Last resort will fail over by treating this
+			 * as a link down to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		} else
+			/* Handling fast FCF failover to a DEAD FCF event
+			 * is considered equalivant to receiving CVL to all
+			 * vports.
+			 */
+			lpfc_sli4_perform_all_vport_cvl(phba);
 		break;
 	case LPFC_FCOE_EVENT_TYPE_CVL:
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
 			"2718 Clear Virtual Link Received for VPI 0x%x"
 			" tag 0x%x\n", acqe_fcoe->index, acqe_fcoe->event_tag);
 		vport = lpfc_find_vport_by_vpid(phba,
 				acqe_fcoe->index - phba->vpi_base);
-		if (!vport)
-			break;
-		ndlp = lpfc_findnode_did(vport, Fabric_DID);
+		ndlp = lpfc_sli4_perform_vport_cvl(vport);
 		if (!ndlp)
 			break;
-		shost = lpfc_shost_from_vport(vport);
-		if (phba->pport->port_state <= LPFC_FLOGI)
-			break;
-		/* If virtual link is not yet instantiated ignore CVL */
-		if (vport->port_state <= LPFC_FDISC)
-			break;
-
-		lpfc_linkdown_port(vport);
-		lpfc_cleanup_pending_mbox(vport);
-		spin_lock_irq(shost->host_lock);
-		vport->fc_flag |= FC_VPORT_CVL_RCVD;
-		spin_unlock_irq(shost->host_lock);
 		active_vlink_present = 0;
 
 		vports = lpfc_create_vport_work_array(phba);
@@ -3311,6 +3445,7 @@
 			 * re-instantiate the Vlink using FDISC.
 			 */
 			mod_timer(&ndlp->nlp_delayfunc, jiffies + HZ);
+			shost = lpfc_shost_from_vport(vport);
 			spin_lock_irq(shost->host_lock);
 			ndlp->nlp_flag |= NLP_DELAY_TMO;
 			spin_unlock_irq(shost->host_lock);
@@ -3321,15 +3456,38 @@
 			 * Otherwise, we request port to rediscover
 			 * the entire FCF table for a fast recovery
 			 * from possible case that the current FCF
-			 * is no longer valid.
+			 * is no longer valid if we are not already
+			 * in the FCF failover process.
 			 */
+			spin_lock_irq(&phba->hbalock);
+			if (phba->fcf.fcf_flag & FCF_DISCOVERY) {
+				spin_unlock_irq(&phba->hbalock);
+				break;
+			}
+			/* Mark the fast failover process in progress */
+			phba->fcf.fcf_flag |= FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			lpfc_printf_log(phba, KERN_INFO, LOG_FIP |
+					LOG_DISCOVERY,
+					"2773 Start FCF fast failover due "
+					"to CVL event: evt_tag:x%x\n",
+					acqe_fcoe->event_tag);
 			rc = lpfc_sli4_redisc_fcf_table(phba);
-			if (rc)
+			if (rc) {
+				lpfc_printf_log(phba, KERN_ERR, LOG_FIP |
+						LOG_DISCOVERY,
+						"2774 Issue FCF rediscover "
+						"mabilbox command failed, "
+						"through to CVL event\n");
+				spin_lock_irq(&phba->hbalock);
+				phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+				spin_unlock_irq(&phba->hbalock);
 				/*
 				 * Last resort will be re-try on the
 				 * the current registered FCF entry.
 				 */
 				lpfc_retry_pport_discovery(phba);
+			}
 		}
 		break;
 	default:
@@ -3426,11 +3584,14 @@
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Scan FCF table from the first entry to re-discover SAN */
-	rc = lpfc_sli4_read_fcf_record(phba, LPFC_FCOE_FCF_GET_FIRST);
+	lpfc_printf_log(phba, KERN_INFO, LOG_FIP | LOG_DISCOVERY,
+			"2777 Start FCF table scan after FCF "
+			"rediscovery quiescent period over\n");
+	rc = lpfc_sli4_fcf_scan_read_fcf_rec(phba, LPFC_FCOE_FCF_GET_FIRST);
 	if (rc)
-		lpfc_printf_log(phba, KERN_ERR, LOG_DISCOVERY,
-				"2747 Post FCF rediscovery read FCF record "
-				"failed 0x%x\n", rc);
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_DISCOVERY,
+				"2747 Issue FCF scan read FCF mailbox "
+				"command failed 0x%x\n", rc);
 }
 
 /**
@@ -3722,6 +3883,7 @@
 	int rc, i, hbq_count, buf_size, dma_buf_size, max_buf_size;
 	uint8_t pn_page[LPFC_MAX_SUPPORTED_PAGES] = {0};
 	struct lpfc_mqe *mqe;
+	int longs;
 
 	/* Before proceed, wait for POST done and device ready */
 	rc = lpfc_sli4_post_status_check(phba);
@@ -3898,13 +4060,24 @@
 		goto out_free_active_sgl;
 	}
 
+	/* Allocate eligible FCF bmask memory for FCF round robin failover */
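+	/* one bit per FCF table index, rounded up to whole longs */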
+	longs = (LPFC_SLI4_FCF_TBL_INDX_MAX + BITS_PER_LONG - 1)/BITS_PER_LONG;
+	phba->fcf.fcf_rr_bmask = kzalloc(longs * sizeof(unsigned long),
+					 GFP_KERNEL);
+	if (!phba->fcf.fcf_rr_bmask) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2759 Failed allocate memory for FCF round "
+				"robin failover bmask\n");
+		goto out_remove_rpi_hdrs;
+	}
+
 	phba->sli4_hba.fcp_eq_hdl = kzalloc((sizeof(struct lpfc_fcp_eq_hdl) *
 				    phba->cfg_fcp_eq_count), GFP_KERNEL);
 	if (!phba->sli4_hba.fcp_eq_hdl) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 				"2572 Failed allocate memory for fast-path "
 				"per-EQ handle array\n");
-		goto out_remove_rpi_hdrs;
+		goto out_free_fcf_rr_bmask;
 	}
 
 	phba->sli4_hba.msix_entries = kzalloc((sizeof(struct msix_entry) *
@@ -3957,6 +4130,8 @@
 
 out_free_fcp_eq_hdl:
 	kfree(phba->sli4_hba.fcp_eq_hdl);
+out_free_fcf_rr_bmask:
+	kfree(phba->fcf.fcf_rr_bmask);
 out_remove_rpi_hdrs:
 	lpfc_sli4_remove_rpi_hdrs(phba);
 out_free_active_sgl:
@@ -4002,6 +4177,9 @@
 	lpfc_sli4_remove_rpi_hdrs(phba);
 	lpfc_sli4_remove_rpis(phba);
 
+	/* Free eligible FCF index bmask */
+	kfree(phba->fcf.fcf_rr_bmask);
+
 	/* Free the ELS sgl list */
 	lpfc_free_active_sgl(phba);
 	lpfc_free_sgl_list(phba);
@@ -4397,6 +4575,7 @@
 
 		/* The list order is used by later block SGL registraton */
 		spin_lock_irq(&phba->hbalock);
+		sglq_entry->state = SGL_FREED;
 		list_add_tail(&sglq_entry->list, &phba->sli4_hba.lpfc_sgl_list);
 		phba->sli4_hba.lpfc_els_sgl_array[i] = sglq_entry;
 		phba->sli4_hba.total_sglq_bufs++;
diff --git a/drivers/scsi/lpfc/lpfc_logmsg.h b/drivers/scsi/lpfc/lpfc_logmsg.h
index 954ba57..bb59e92 100644
--- a/drivers/scsi/lpfc/lpfc_logmsg.h
+++ b/drivers/scsi/lpfc/lpfc_logmsg.h
@@ -35,6 +35,7 @@
 #define LOG_VPORT	0x00004000	/* NPIV events */
 #define LOF_SECURITY	0x00008000	/* Security events */
 #define LOG_EVENT	0x00010000	/* CT,TEMP,DUMP, logging */
+#define LOG_FIP		0x00020000	/* FIP events */
 #define LOG_ALL_MSG	0xffffffff	/* LOG all messages */
 
 #define lpfc_printf_vlog(vport, level, mask, fmt, arg...) \
diff --git a/drivers/scsi/lpfc/lpfc_mbox.c b/drivers/scsi/lpfc/lpfc_mbox.c
index 6c4dce1..1e61ae3 100644
--- a/drivers/scsi/lpfc/lpfc_mbox.c
+++ b/drivers/scsi/lpfc/lpfc_mbox.c
@@ -1748,7 +1748,7 @@
 }
 
 /**
- * lpfc_sli4_mbx_read_fcf_record - Allocate and construct read fcf mbox cmd
+ * lpfc_sli4_mbx_read_fcf_rec - Allocate and construct read fcf mbox cmd
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: index to fcf table.
  *
@@ -1759,9 +1759,9 @@
  * NULL.
  **/
 int
-lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *phba,
-			      struct lpfcMboxq *mboxq,
-			      uint16_t fcf_index)
+lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *phba,
+			   struct lpfcMboxq *mboxq,
+			   uint16_t fcf_index)
 {
 	void *virt_addr;
 	dma_addr_t phys_addr;
diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
index 483fb74..b16bb2c 100644
--- a/drivers/scsi/lpfc/lpfc_scsi.c
+++ b/drivers/scsi/lpfc/lpfc_scsi.c
@@ -620,23 +620,40 @@
 	uint16_t xri = bf_get(lpfc_wcqe_xa_xri, axri);
 	struct lpfc_scsi_buf *psb, *next_psb;
 	unsigned long iflag = 0;
+	struct lpfc_iocbq *iocbq;
+	int i;
 
-	spin_lock_irqsave(&phba->sli4_hba.abts_scsi_buf_list_lock, iflag);
+	spin_lock_irqsave(&phba->hbalock, iflag);
+	spin_lock(&phba->sli4_hba.abts_scsi_buf_list_lock);
 	list_for_each_entry_safe(psb, next_psb,
 		&phba->sli4_hba.lpfc_abts_scsi_buf_list, list) {
 		if (psb->cur_iocbq.sli4_xritag == xri) {
 			list_del(&psb->list);
 			psb->exch_busy = 0;
 			psb->status = IOSTAT_SUCCESS;
-			spin_unlock_irqrestore(
-				&phba->sli4_hba.abts_scsi_buf_list_lock,
-				iflag);
+			spin_unlock(
+				&phba->sli4_hba.abts_scsi_buf_list_lock);
+			spin_unlock_irqrestore(&phba->hbalock, iflag);
 			lpfc_release_scsi_buf_s4(phba, psb);
 			return;
 		}
 	}
-	spin_unlock_irqrestore(&phba->sli4_hba.abts_scsi_buf_list_lock,
-				iflag);
+	spin_unlock(&phba->sli4_hba.abts_scsi_buf_list_lock);
+	for (i = 1; i <= phba->sli.last_iotag; i++) {
+		iocbq = phba->sli.iocbq_lookup[i];
+
+		if (!(iocbq->iocb_flag & LPFC_IO_FCP) ||
+			(iocbq->iocb_flag & LPFC_IO_LIBDFC))
+			continue;
+		if (iocbq->sli4_xritag != xri)
+			continue;
+		psb = container_of(iocbq, struct lpfc_scsi_buf, cur_iocbq);
+		psb->exch_busy = 0;
+		spin_unlock_irqrestore(&phba->hbalock, iflag);
+		return;
+	}
+	spin_unlock_irqrestore(&phba->hbalock, iflag);
 }
 
 /**
@@ -1006,6 +1023,7 @@
 	struct scatterlist *sgel = NULL;
 	struct fcp_cmnd *fcp_cmnd = lpfc_cmd->fcp_cmnd;
 	struct ulp_bde64 *bpl = lpfc_cmd->fcp_bpl;
+	struct lpfc_iocbq *iocbq = &lpfc_cmd->cur_iocbq;
 	IOCB_t *iocb_cmd = &lpfc_cmd->cur_iocbq.iocb;
 	struct ulp_bde64 *data_bde = iocb_cmd->unsli3.fcp_ext.dbde;
 	dma_addr_t physaddr;
@@ -1056,6 +1074,7 @@
 			physaddr = sg_dma_address(sgel);
 			if (phba->sli_rev == 3 &&
 			    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+			    !(iocbq->iocb_flag & DSS_SECURITY_OP) &&
 			    nseg <= LPFC_EXT_DATA_BDE_COUNT) {
 				data_bde->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
 				data_bde->tus.f.bdeSize = sg_dma_len(sgel);
@@ -1082,7 +1101,8 @@
 	 * explicitly reinitialized since all iocb memory resources are reused.
 	 */
 	if (phba->sli_rev == 3 &&
-	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED)) {
+	    !(phba->sli3_options & LPFC_SLI3_BG_ENABLED) &&
+	    !(iocbq->iocb_flag & DSS_SECURITY_OP)) {
 		if (num_bde > LPFC_EXT_DATA_BDE_COUNT) {
 			/*
 			 * The extended IOCB format can only fit 3 BDE or a BPL.
@@ -1107,6 +1127,7 @@
 	} else {
 		iocb_cmd->un.fcpi64.bdl.bdeSize =
 			((num_bde + 2) * sizeof(struct ulp_bde64));
+		iocb_cmd->unsli3.fcp_ext.ebde_count = (num_bde + 1);
 	}
 	fcp_cmnd->fcpDl = cpu_to_be32(scsi_bufflen(scsi_cmnd));
 
@@ -2079,8 +2100,7 @@
 
 	if (resp_info & RSP_LEN_VALID) {
 		rsplen = be32_to_cpu(fcprsp->rspRspLen);
-		if ((rsplen != 0 && rsplen != 4 && rsplen != 8) ||
-		    (fcprsp->rspInfo3 != RSP_NO_FAILURE)) {
+		if (rsplen != 0 && rsplen != 4 && rsplen != 8) {
 			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
 				 "2719 Invalid response length: "
 				 "tgt x%x lun x%x cmnd x%x rsplen x%x\n",
@@ -2090,6 +2110,17 @@
 			host_status = DID_ERROR;
 			goto out;
 		}
+		if (fcprsp->rspInfo3 != RSP_NO_FAILURE) {
+			lpfc_printf_vlog(vport, KERN_ERR, LOG_FCP,
+				 "2757 Protocol failure detected during "
+				 "processing of FCP I/O op: "
+				 "tgt x%x lun x%x cmnd x%x rspInfo3 x%x\n",
+				 cmnd->device->id,
+				 cmnd->device->lun, cmnd->cmnd[0],
+				 fcprsp->rspInfo3);
+			host_status = DID_ERROR;
+			goto out;
+		}
 	}
 
 	if ((resp_info & SNS_LEN_VALID) && fcprsp->rspSnsLen) {
diff --git a/drivers/scsi/lpfc/lpfc_sli.c b/drivers/scsi/lpfc/lpfc_sli.c
index 35e3b96..fe6660c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.c
+++ b/drivers/scsi/lpfc/lpfc_sli.c
@@ -494,7 +494,7 @@
  *
  * Returns sglq pointer = success, NULL = Failure.
  **/
-static struct lpfc_sglq *
+struct lpfc_sglq *
 __lpfc_get_active_sglq(struct lpfc_hba *phba, uint16_t xritag)
 {
 	uint16_t adj_xri;
@@ -526,6 +526,7 @@
 		return NULL;
 	adj_xri = sglq->sli4_xritag - phba->sli4_hba.max_cfg_param.xri_base;
 	phba->sli4_hba.lpfc_sglq_active_list[adj_xri] = sglq;
+	sglq->state = SGL_ALLOCATED;
 	return sglq;
 }
 
@@ -580,15 +581,18 @@
 	else
 		sglq = __lpfc_clear_active_sglq(phba, iocbq->sli4_xritag);
 	if (sglq)  {
-		if (iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) {
+		if ((iocbq->iocb_flag & LPFC_EXCHANGE_BUSY) &&
+			(sglq->state != SGL_XRI_ABORTED)) {
 			spin_lock_irqsave(&phba->sli4_hba.abts_sgl_list_lock,
 					iflag);
 			list_add(&sglq->list,
 				&phba->sli4_hba.lpfc_abts_els_sgl_list);
 			spin_unlock_irqrestore(
 				&phba->sli4_hba.abts_sgl_list_lock, iflag);
-		} else
+		} else {
+			sglq->state = SGL_FREED;
 			list_add(&sglq->list, &phba->sli4_hba.lpfc_sgl_list);
+		}
 	}
 
 
@@ -2258,41 +2262,56 @@
 					spin_unlock_irqrestore(&phba->hbalock,
 							       iflag);
 				}
-				if ((phba->sli_rev == LPFC_SLI_REV4) &&
-				    (saveq->iocb_flag & LPFC_EXCHANGE_BUSY)) {
-					/* Set cmdiocb flag for the exchange
-					 * busy so sgl (xri) will not be
-					 * released until the abort xri is
-					 * received from hba, clear the
-					 * LPFC_DRIVER_ABORTED bit in case
-					 * it was driver initiated abort.
-					 */
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-					cmdiocbp->iocb_flag &=
-						~LPFC_DRIVER_ABORTED;
-					cmdiocbp->iocb_flag |=
-						LPFC_EXCHANGE_BUSY;
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
-					cmdiocbp->iocb.ulpStatus =
-						IOSTAT_LOCAL_REJECT;
-					cmdiocbp->iocb.un.ulpWord[4] =
-						IOERR_ABORT_REQUESTED;
-					/*
-					 * For SLI4, irsiocb contains NO_XRI
-					 * in sli_xritag, it shall not affect
-					 * releasing sgl (xri) process.
-					 */
-					saveq->iocb.ulpStatus =
-						IOSTAT_LOCAL_REJECT;
-					saveq->iocb.un.ulpWord[4] =
-						IOERR_SLI_ABORTED;
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-					saveq->iocb_flag |= LPFC_DELAY_MEM_FREE;
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
+				if (phba->sli_rev == LPFC_SLI_REV4) {
+					if (saveq->iocb_flag &
+					    LPFC_EXCHANGE_BUSY) {
+						/* Set cmdiocb flag for the
+						 * exchange busy so sgl (xri)
+						 * will not be released until
+						 * the abort xri is received
+						 * from hba.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag |=
+							LPFC_EXCHANGE_BUSY;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
+					if (cmdiocbp->iocb_flag &
+					    LPFC_DRIVER_ABORTED) {
+						/*
+						 * Clear LPFC_DRIVER_ABORTED
+						 * bit in case it was driver
+						 * initiated abort.
+						 */
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb_flag &=
+							~LPFC_DRIVER_ABORTED;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+						cmdiocbp->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						cmdiocbp->iocb.un.ulpWord[4] =
+							IOERR_ABORT_REQUESTED;
+						/*
+						 * For SLI4, irsiocb contains
+						 * NO_XRI in sli_xritag, it
+						 * shall not affect releasing
+						 * sgl (xri) process.
+						 */
+						saveq->iocb.ulpStatus =
+							IOSTAT_LOCAL_REJECT;
+						saveq->iocb.un.ulpWord[4] =
+							IOERR_SLI_ABORTED;
+						spin_lock_irqsave(
+							&phba->hbalock, iflag);
+						saveq->iocb_flag |=
+							LPFC_DELAY_MEM_FREE;
+						spin_unlock_irqrestore(
+							&phba->hbalock, iflag);
+					}
 				}
 			}
 			(cmdiocbp->iocb_cmpl) (phba, cmdiocbp, saveq);
@@ -2515,14 +2534,16 @@
 
 			cmdiocbq = lpfc_sli_iocbq_lookup(phba, pring,
 							 &rspiocbq);
-			if ((cmdiocbq) && (cmdiocbq->iocb_cmpl)) {
-					spin_unlock_irqrestore(&phba->hbalock,
-							       iflag);
-					(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
-							      &rspiocbq);
-					spin_lock_irqsave(&phba->hbalock,
-							  iflag);
-				}
+			if (unlikely(!cmdiocbq))
+				break;
+			if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED)
+				cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+			if (cmdiocbq->iocb_cmpl) {
+				spin_unlock_irqrestore(&phba->hbalock, iflag);
+				(cmdiocbq->iocb_cmpl)(phba, cmdiocbq,
+						      &rspiocbq);
+				spin_lock_irqsave(&phba->hbalock, iflag);
+			}
 			break;
 		case LPFC_UNSOL_IOCB:
 			spin_unlock_irqrestore(&phba->hbalock, iflag);
@@ -3091,6 +3112,12 @@
 
 	/* Check to see if any errors occurred during init */
 	if ((status & HS_FFERM) || (i >= 20)) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
+				"2751 Adapter failed to restart, "
+				"status reg x%x, FW Data: A8 x%x AC x%x\n",
+				status,
+				readl(phba->MBslimaddr + 0xa8),
+				readl(phba->MBslimaddr + 0xac));
 		phba->link_state = LPFC_HBA_ERROR;
 		retval = 1;
 	}
@@ -3278,6 +3305,9 @@
 	if (retval != MBX_SUCCESS) {
 		if (retval != MBX_BUSY)
 			mempool_free(pmb, phba->mbox_mem_pool);
+		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+				"2752 KILL_BOARD command failed retval %d\n",
+				retval);
 		spin_lock_irq(&phba->hbalock);
 		phba->link_flag &= ~LS_IGNORE_ERATT;
 		spin_unlock_irq(&phba->hbalock);
@@ -4035,7 +4065,7 @@
 
 lpfc_sli_hba_setup_error:
 	phba->link_state = LPFC_HBA_ERROR;
-	lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
+	lpfc_printf_log(phba, KERN_ERR, LOG_INIT,
 			"0445 Firmware initialization failed\n");
 	return rc;
 }
@@ -4388,7 +4418,13 @@
 	spin_unlock_irq(&phba->hbalock);
 
 	/* Read the port's service parameters. */
-	lpfc_read_sparam(phba, mboxq, vport->vpi);
+	rc = lpfc_read_sparam(phba, mboxq, vport->vpi);
+	if (rc) {
+		phba->link_state = LPFC_HBA_ERROR;
+		rc = -ENOMEM;
+		goto out_free_vpd;
+	}
+
 	mboxq->vport = vport;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_POLL);
 	mp = (struct lpfc_dmabuf *) mboxq->context1;
@@ -4483,6 +4519,10 @@
 	/* Post receive buffers to the device */
 	lpfc_sli4_rb_setup(phba);
 
+	/* Reset HBA FCF states after HBA reset */
+	phba->fcf.fcf_flag = 0;
+	phba->fcf.current_rec.flag = 0;
+
 	/* Start the ELS watchdog timer */
 	mod_timer(&vport->els_tmofunc,
 		  jiffies + HZ * (phba->fc_ratov * 2));
@@ -7436,6 +7476,7 @@
 {
 	wait_queue_head_t *pdone_q;
 	unsigned long iflags;
+	struct lpfc_scsi_buf *lpfc_cmd;
 
 	spin_lock_irqsave(&phba->hbalock, iflags);
 	cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
@@ -7443,6 +7484,14 @@
 		memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
 		       &rspiocbq->iocb, sizeof(IOCB_t));
 
+	/* Set the exchange busy flag for task management commands */
+	if ((cmdiocbq->iocb_flag & LPFC_IO_FCP) &&
+		!(cmdiocbq->iocb_flag & LPFC_IO_LIBDFC)) {
+		lpfc_cmd = container_of(cmdiocbq, struct lpfc_scsi_buf,
+			cur_iocbq);
+		lpfc_cmd->exch_busy = rspiocbq->iocb_flag & LPFC_EXCHANGE_BUSY;
+	}
+
 	pdone_q = cmdiocbq->context_un.wait_queue;
 	if (pdone_q)
 		wake_up(pdone_q);
@@ -9061,6 +9110,12 @@
 	/* Fake the irspiocb and copy necessary response information */
 	lpfc_sli4_iocb_param_transfer(phba, &irspiocbq, cmdiocbq, wcqe);
 
+	if (cmdiocbq->iocb_flag & LPFC_DRIVER_ABORTED) {
+		spin_lock_irqsave(&phba->hbalock, iflags);
+		cmdiocbq->iocb_flag &= ~LPFC_DRIVER_ABORTED;
+		spin_unlock_irqrestore(&phba->hbalock, iflags);
+	}
+
 	/* Pass the cmd_iocb and the rsp state to the upper layer */
 	(cmdiocbq->iocb_cmpl)(phba, cmdiocbq, &irspiocbq);
 }
@@ -11941,15 +11996,19 @@
 }
 
 /**
- * lpfc_sli4_read_fcf_record - Read the driver's default FCF Record.
+ * lpfc_sli4_fcf_scan_read_fcf_rec - Read hba fcf record for fcf scan.
  * @phba: pointer to lpfc hba data structure.
  * @fcf_index: FCF table entry offset.
  *
- * This routine is invoked to read up to @fcf_num of FCF record from the
- * device starting with the given @fcf_index.
+ * This routine is invoked to scan the entire FCF table by reading and
+ * processing FCF records one at a time, starting from @fcf_index, for
+ * initial FCF discovery or fast FCF failover rediscovery.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
  **/
 int
-lpfc_sli4_read_fcf_record(struct lpfc_hba *phba, uint16_t fcf_index)
+lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
 {
 	int rc = 0, error;
 	LPFC_MBOXQ_t *mboxq;
@@ -11961,17 +12020,17 @@
 				"2000 Failed to allocate mbox for "
 				"READ_FCF cmd\n");
 		error = -ENOMEM;
-		goto fail_fcfscan;
+		goto fail_fcf_scan;
 	}
 	/* Construct the read FCF record mailbox command */
-	rc = lpfc_sli4_mbx_read_fcf_record(phba, mboxq, fcf_index);
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
 	if (rc) {
 		error = -EINVAL;
-		goto fail_fcfscan;
+		goto fail_fcf_scan;
 	}
 	/* Issue the mailbox command asynchronously */
 	mboxq->vport = phba->pport;
-	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_record;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_scan_read_fcf_rec;
 	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
 	if (rc == MBX_NOT_FINISHED)
 		error = -EIO;
@@ -11979,9 +12038,13 @@
 		spin_lock_irq(&phba->hbalock);
 		phba->hba_flag |= FCF_DISC_INPROGRESS;
 		spin_unlock_irq(&phba->hbalock);
+		/* Reset FCF round robin index bmask for new scan */
+		if (fcf_index == LPFC_FCOE_FCF_GET_FIRST)
+			memset(phba->fcf.fcf_rr_bmask, 0,
+			       sizeof(*phba->fcf.fcf_rr_bmask));
 		error = 0;
 	}
-fail_fcfscan:
+fail_fcf_scan:
 	if (error) {
 		if (mboxq)
 			lpfc_sli4_mbox_cmd_free(phba, mboxq);
@@ -11994,6 +12057,181 @@
 }
 
 /**
+ * lpfc_sli4_fcf_rr_read_fcf_rec - Read hba fcf record for round robin fcf.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read the FCF record indicated by @fcf_index
+ * and to use it for FLOGI round robin FCF failover.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
+ **/
+int
+lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2763 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_fcf_rr_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_read_fcf_rec - Read hba fcf record for updating eligible fcf bmask.
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine is invoked to read the FCF record indicated by @fcf_index to
+ * determine whether it is eligible for the FLOGI round robin failover list.
+ *
+ * Return 0 if the mailbox command is submitted successfully, nonzero
+ * otherwise.
+ **/
+int
+lpfc_sli4_read_fcf_rec(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	int rc = 0, error;
+	LPFC_MBOXQ_t *mboxq;
+
+	mboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
+	if (!mboxq) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP | LOG_INIT,
+				"2758 Failed to allocate mbox for "
+				"READ_FCF cmd\n");
+		error = -ENOMEM;
+		goto fail_fcf_read;
+	}
+	/* Construct the read FCF record mailbox command */
+	rc = lpfc_sli4_mbx_read_fcf_rec(phba, mboxq, fcf_index);
+	if (rc) {
+		error = -EINVAL;
+		goto fail_fcf_read;
+	}
+	/* Issue the mailbox command asynchronously */
+	mboxq->vport = phba->pport;
+	mboxq->mbox_cmpl = lpfc_mbx_cmpl_read_fcf_rec;
+	rc = lpfc_sli_issue_mbox(phba, mboxq, MBX_NOWAIT);
+	if (rc == MBX_NOT_FINISHED)
+		error = -EIO;
+	else
+		error = 0;
+
+fail_fcf_read:
+	if (error && mboxq)
+		lpfc_sli4_mbox_cmd_free(phba, mboxq);
+	return error;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_next_index_get - Get next eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This routine gets the next eligible FCF record index in a round robin
+ * fashion. If the next eligible FCF record index equals the initial round
+ * robin FCF record index, LPFC_FCOE_FCF_NEXT_NONE (0xFFFF) is returned;
+ * otherwise, the next eligible FCF record's index is returned.
+ **/
+uint16_t
+lpfc_sli4_fcf_rr_next_index_get(struct lpfc_hba *phba)
+{
+	uint16_t next_fcf_index;
+
+	/* Search from the currently registered FCF index */
+	next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+				       LPFC_SLI4_FCF_TBL_INDX_MAX,
+				       phba->fcf.current_rec.fcf_indx);
+	/* Wrap around condition on phba->fcf.fcf_rr_bmask */
+	if (next_fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX)
+		next_fcf_index = find_next_bit(phba->fcf.fcf_rr_bmask,
+					       LPFC_SLI4_FCF_TBL_INDX_MAX, 0);
+	/* Round robin failover stop condition */
+	if (next_fcf_index == phba->fcf.fcf_rr_init_indx)
+		return LPFC_FCOE_FCF_NEXT_NONE;
+
+	return next_fcf_index;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_set - Set bmask with eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine sets the FCF record index into the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before setting the bit.
+ *
+ * Returns 0 if the index bit is successfully set; otherwise it returns
+ * -EINVAL.
+ **/
+int
+lpfc_sli4_fcf_rr_index_set(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2610 HBA FCF index reached driver's "
+				"book keeping dimension: fcf_index:%d, "
+				"bookkeeping dimension: fcf_index:%d, "
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return -EINVAL;
+	}
+	/* Set the eligible FCF record index bmask */
+	set_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+
+	return 0;
+}
+
+/**
+ * lpfc_sli4_fcf_rr_index_clear - Clear bmask from eligible fcf record index
+ * @phba: pointer to lpfc hba data structure.
+ * @fcf_index: FCF table entry offset.
+ *
+ * This routine clears the FCF record index from the eligible bmask for
+ * round robin failover search. It checks to make sure that the index
+ * does not go beyond the range of the driver allocated bmask dimension
+ * before clearing the bit.
+ **/
+void
+lpfc_sli4_fcf_rr_index_clear(struct lpfc_hba *phba, uint16_t fcf_index)
+{
+	if (fcf_index >= LPFC_SLI4_FCF_TBL_INDX_MAX) {
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
+				"2762 HBA FCF index goes beyond driver's "
+				"bookkeeping dimension: fcf_index:%d, "
+				"driver_bmask_max:%d\n",
+				fcf_index, LPFC_SLI4_FCF_TBL_INDX_MAX);
+		return;
+	}
+	/* Clear the eligible FCF record index bmask */
+	clear_bit(fcf_index, phba->fcf.fcf_rr_bmask);
+}
+
+/**
  * lpfc_mbx_cmpl_redisc_fcf_table - completion routine for rediscover FCF table
  * @phba: pointer to lpfc hba data structure.
  *
@@ -12014,21 +12252,40 @@
 	shdr_add_status = bf_get(lpfc_mbox_hdr_add_status,
 			     &redisc_fcf->header.cfg_shdr.response);
 	if (shdr_status || shdr_add_status) {
-		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
+		lpfc_printf_log(phba, KERN_ERR, LOG_FIP,
 				"2746 Requesting for FCF rediscovery failed "
 				"status x%x add_status x%x\n",
 				shdr_status, shdr_add_status);
-		/*
-		 * Request failed, last resort to re-try current
-		 * registered FCF entry
-		 */
-		lpfc_retry_pport_discovery(phba);
-	} else
+		if (phba->fcf.fcf_flag & FCF_ACVL_DISC) {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_ACVL_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * CVL event triggered FCF rediscover request failed,
+			 * last resort to re-try current registered FCF entry.
+			 */
+			lpfc_retry_pport_discovery(phba);
+		} else {
+			spin_lock_irq(&phba->hbalock);
+			phba->fcf.fcf_flag &= ~FCF_DEAD_DISC;
+			spin_unlock_irq(&phba->hbalock);
+			/*
+			 * DEAD FCF event triggered FCF rediscover request
+			 * failed, last resort to fail over as a link down
+			 * to FCF registration.
+			 */
+			lpfc_sli4_fcf_dead_failthrough(phba);
+		}
+	} else {
+		lpfc_printf_log(phba, KERN_INFO, LOG_FIP,
+				"2775 Start FCF rediscovery quiescent period "
+				"wait timer before scanning FCF table\n");
 		/*
 		 * Start FCF rediscovery wait timer for pending FCF
 		 * before rescanning the FCF record table.
 		 */
 		lpfc_fcf_redisc_wait_start_timer(phba);
+	}
 
 	mempool_free(mbox, phba->mbox_mem_pool);
 }
@@ -12047,6 +12304,9 @@
 	struct lpfc_mbx_redisc_fcf_tbl *redisc_fcf;
 	int rc, length;
 
+	/* Cancel retry delay timers on all vports before FCF rediscovery */
+	lpfc_cancel_all_vport_retry_delay_timer(phba);
+
 	mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
 	if (!mbox) {
 		lpfc_printf_log(phba, KERN_ERR, LOG_SLI,
@@ -12078,6 +12338,31 @@
 }
 
 /**
+ * lpfc_sli4_fcf_dead_failthrough - Failthrough routine to fcf dead event
+ * @phba: pointer to lpfc hba data structure.
+ *
+ * This function is the failover routine as a last resort to the FCF DEAD
+ * event when the driver has failed to perform fast FCF failover.
+ **/
+void
+lpfc_sli4_fcf_dead_failthrough(struct lpfc_hba *phba)
+{
+	uint32_t link_state;
+
+	/*
+	 * As a last resort, the FCF DEAD event failover is treated as
+	 * a link down; save the link state first because we don't want
+	 * it changed to Link Down unless it is already down.
+	 */
+	link_state = phba->link_state;
+	lpfc_linkdown(phba);
+	phba->link_state = link_state;
+
+	/* Unregister FCF if no devices connected to it */
+	lpfc_unregister_unused_fcf(phba);
+}
+
+/**
  * lpfc_sli_read_link_ste - Read region 23 to decide if link is disabled.
  * @phba: pointer to lpfc hba data structure.
  *
diff --git a/drivers/scsi/lpfc/lpfc_sli.h b/drivers/scsi/lpfc/lpfc_sli.h
index dfcf543..b4a639c 100644
--- a/drivers/scsi/lpfc/lpfc_sli.h
+++ b/drivers/scsi/lpfc/lpfc_sli.h
@@ -62,6 +62,7 @@
 #define LPFC_DELAY_MEM_FREE	0x20    /* Defer free'ing of FC data */
 #define LPFC_EXCHANGE_BUSY	0x40    /* SLI4 hba reported XB in response */
 #define LPFC_USE_FCPWQIDX	0x80    /* Submit to specified FCPWQ index */
+#define DSS_SECURITY_OP		0x100	/* security IO */
 
 #define LPFC_FIP_ELS_ID_MASK	0xc000	/* ELS_ID range 0-3, non-shifted mask */
 #define LPFC_FIP_ELS_ID_SHIFT	14
diff --git a/drivers/scsi/lpfc/lpfc_sli4.h b/drivers/scsi/lpfc/lpfc_sli4.h
index 86308836..4a35e7b 100644
--- a/drivers/scsi/lpfc/lpfc_sli4.h
+++ b/drivers/scsi/lpfc/lpfc_sli4.h
@@ -153,15 +153,27 @@
 #define FCF_REGISTERED	0x02 /* FCF registered with FW */
 #define FCF_SCAN_DONE	0x04 /* FCF table scan done */
 #define FCF_IN_USE	0x08 /* At least one discovery completed */
-#define FCF_REDISC_PEND	0x10 /* FCF rediscovery pending */
-#define FCF_REDISC_EVT	0x20 /* FCF rediscovery event to worker thread */
-#define FCF_REDISC_FOV	0x40 /* Post FCF rediscovery fast failover */
+#define FCF_INIT_DISC	0x10 /* Initial FCF discovery */
+#define FCF_DEAD_DISC	0x20 /* FCF DEAD fast FCF failover discovery */
+#define FCF_ACVL_DISC	0x40 /* All CVL fast FCF failover discovery */
+#define FCF_DISCOVERY	(FCF_INIT_DISC | FCF_DEAD_DISC | FCF_ACVL_DISC)
+#define FCF_REDISC_PEND	0x80 /* FCF rediscovery pending */
+#define FCF_REDISC_EVT	0x100 /* FCF rediscovery event to worker thread */
+#define FCF_REDISC_FOV	0x200 /* Post FCF rediscovery fast failover */
 	uint32_t addr_mode;
+	uint16_t fcf_rr_init_indx;
 	struct lpfc_fcf_rec current_rec;
 	struct lpfc_fcf_rec failover_rec;
 	struct timer_list redisc_wait;
+	unsigned long *fcf_rr_bmask; /* Eligible FCF indexes for RR failover */
 };
 
+/*
+ * Maximum FCF table index; used for driver-internal bookkeeping. It
+ * just needs to be no smaller than the supported HBA's FCF table size.
+ */
+#define LPFC_SLI4_FCF_TBL_INDX_MAX	32
+
 #define LPFC_REGION23_SIGNATURE "RG23"
 #define LPFC_REGION23_VERSION	1
 #define LPFC_REGION23_LAST_REC  0xff
@@ -431,11 +443,18 @@
 	SCSI_BUFF_TYPE
 };
 
+enum lpfc_sgl_state {
+	SGL_FREED,
+	SGL_ALLOCATED,
+	SGL_XRI_ABORTED
+};
+
 struct lpfc_sglq {
 	/* lpfc_sglqs are used in double linked lists */
 	struct list_head list;
 	struct list_head clist;
 	enum lpfc_sge_type buff_type; /* is this a scsi sgl */
+	enum lpfc_sgl_state state;
 	uint16_t iotag;         /* pre-assigned IO tag */
 	uint16_t sli4_xritag;   /* pre-assigned XRI, (OXID) tag. */
 	struct sli4_sge *sgl;	/* pre-assigned SGL */
@@ -463,8 +482,8 @@
 void lpfc_sli4_mbx_sge_set(struct lpfcMboxq *, uint32_t, dma_addr_t, uint32_t);
 void lpfc_sli4_mbx_sge_get(struct lpfcMboxq *, uint32_t,
 			   struct lpfc_mbx_sge *);
-int lpfc_sli4_mbx_read_fcf_record(struct lpfc_hba *, struct lpfcMboxq *,
-				  uint16_t);
+int lpfc_sli4_mbx_read_fcf_rec(struct lpfc_hba *, struct lpfcMboxq *,
+			       uint16_t);
 
 void lpfc_sli4_hba_reset(struct lpfc_hba *);
 struct lpfc_queue *lpfc_sli4_queue_alloc(struct lpfc_hba *, uint32_t,
@@ -523,8 +542,13 @@
 uint32_t lpfc_sli4_cq_release(struct lpfc_queue *, bool);
 uint32_t lpfc_sli4_eq_release(struct lpfc_queue *, bool);
 void lpfc_sli4_fcfi_unreg(struct lpfc_hba *, uint16_t);
-int lpfc_sli4_read_fcf_record(struct lpfc_hba *, uint16_t);
-void lpfc_mbx_cmpl_read_fcf_record(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_fcf_scan_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_fcf_rr_read_fcf_rec(struct lpfc_hba *, uint16_t);
+int lpfc_sli4_read_fcf_rec(struct lpfc_hba *, uint16_t);
+void lpfc_mbx_cmpl_fcf_scan_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_fcf_rr_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+void lpfc_mbx_cmpl_read_fcf_rec(struct lpfc_hba *, LPFC_MBOXQ_t *);
+int lpfc_sli4_unregister_fcf(struct lpfc_hba *);
 int lpfc_sli4_post_status_check(struct lpfc_hba *);
 uint8_t lpfc_sli4_mbox_opcode_get(struct lpfc_hba *, struct lpfcMboxq *);
 
diff --git a/drivers/scsi/lpfc/lpfc_version.h b/drivers/scsi/lpfc/lpfc_version.h
index ac276aa..013deec 100644
--- a/drivers/scsi/lpfc/lpfc_version.h
+++ b/drivers/scsi/lpfc/lpfc_version.h
@@ -18,7 +18,7 @@
  * included with this package.                                     *
  *******************************************************************/
 
-#define LPFC_DRIVER_VERSION "8.3.9"
+#define LPFC_DRIVER_VERSION "8.3.10"
 #define LPFC_DRIVER_NAME		"lpfc"
 #define LPFC_SP_DRIVER_HANDLER_NAME	"lpfc:sp"
 #define LPFC_FP_DRIVER_HANDLER_NAME	"lpfc:fp"
diff --git a/drivers/scsi/lpfc/lpfc_vport.c b/drivers/scsi/lpfc/lpfc_vport.c
index dc86e87..869f76c 100644
--- a/drivers/scsi/lpfc/lpfc_vport.c
+++ b/drivers/scsi/lpfc/lpfc_vport.c
@@ -123,7 +123,12 @@
 	}
 	mb = &pmb->u.mb;
 
-	lpfc_read_sparam(phba, pmb, vport->vpi);
+	rc = lpfc_read_sparam(phba, pmb, vport->vpi);
+	if (rc) {
+		mempool_free(pmb, phba->mbox_mem_pool);
+		return -ENOMEM;
+	}
+
 	/*
 	 * Grab buffer pointer and clear context1 so we can use
 	 * lpfc_sli_issue_box_wait
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index 2422347..60de850 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -1433,6 +1433,10 @@
 	cdbh->command_specific_options |= or->attributes_mode;
 	if (or->attributes_mode == OSD_CDB_GET_ATTR_PAGE_SET_ONE) {
 		ret = _osd_req_finalize_attr_page(or);
+		if (ret) {
+			OSD_DEBUG("_osd_req_finalize_attr_page failed\n");
+			return ret;
+		}
 	} else {
 		/* TODO: I think that for the GET_ATTR command these 2 should
 		 * be reversed to keep them in execution order (for embedded
diff --git a/drivers/scsi/pcmcia/nsp_cs.c b/drivers/scsi/pcmcia/nsp_cs.c
index c2341af..0212464 100644
--- a/drivers/scsi/pcmcia/nsp_cs.c
+++ b/drivers/scsi/pcmcia/nsp_cs.c
@@ -1717,6 +1717,7 @@
 	cfg_mem->data = data;
 
 	ret = pcmcia_loop_config(link, nsp_cs_config_check, cfg_mem);
+	if (ret)
 		goto cs_failed;
 
 	if (link->conf.Attributes & CONF_ENABLE_IRQ) {
diff --git a/drivers/scsi/raid_class.c b/drivers/scsi/raid_class.c
index bd88349..2c146b44 100644
--- a/drivers/scsi/raid_class.c
+++ b/drivers/scsi/raid_class.c
@@ -63,6 +63,7 @@
 	 * emulated RAID devices, so start with SCSI */
 	struct raid_internal *i = ac_to_raid_internal(cont);
 
+#if defined(CONFIG_SCSI) || defined(CONFIG_SCSI_MODULE)
 	if (scsi_is_sdev_device(dev)) {
 		struct scsi_device *sdev = to_scsi_device(dev);
 
@@ -71,6 +72,7 @@
 
 		return i->f->is_raid(dev);
 	}
+#endif
 	/* FIXME: look at other subsystems too */
 	return 0;
 }
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 79660ee..1d5b721 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -1232,6 +1232,15 @@
 {
 	struct fc_vport *vport = transport_class_to_vport(dev);
 	struct Scsi_Host *shost = vport_to_shost(vport);
+	unsigned long flags;
+
+	spin_lock_irqsave(shost->host_lock, flags);
+	if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return -EBUSY;
+	}
+	vport->flags |= FC_VPORT_DELETING;
+	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	fc_queue_work(shost, &vport->vport_delete_work);
 	return count;
@@ -1821,6 +1830,9 @@
 	list_for_each_entry(vport, &fc_host->vports, peers) {
 		if ((vport->channel == 0) &&
 		    (vport->port_name == wwpn) && (vport->node_name == wwnn)) {
+			if (vport->flags & (FC_VPORT_DEL | FC_VPORT_CREATING))
+				break;
+			vport->flags |= FC_VPORT_DELETING;
 			match = 1;
 			break;
 		}
@@ -3370,18 +3382,6 @@
 	unsigned long flags;
 	int stat;
 
-	spin_lock_irqsave(shost->host_lock, flags);
-	if (vport->flags & FC_VPORT_CREATING) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		return -EBUSY;
-	}
-	if (vport->flags & (FC_VPORT_DEL)) {
-		spin_unlock_irqrestore(shost->host_lock, flags);
-		return -EALREADY;
-	}
-	vport->flags |= FC_VPORT_DELETING;
-	spin_unlock_irqrestore(shost->host_lock, flags);
-
 	if (i->f->vport_delete)
 		stat = i->f->vport_delete(vport);
 	else
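
The scsi_transport_fc change above moves the delete test-and-set under shost->host_lock at the entry points, so two concurrent delete requests can no longer both pass the flag check before either one marks the vport. The same claim-under-lock pattern in miniature (a pthread mutex stands in for the host lock; the names are hypothetical):

	#include <pthread.h>
	#include <stdio.h>

	#define VPORT_CREATING 0x1
	#define VPORT_DEL      0x2
	#define VPORT_DELETING 0x4

	struct vport {
		pthread_mutex_t lock;	/* stands in for shost->host_lock */
		unsigned int flags;
	};

	/* Returns 0 if this caller won the right to delete, -1 otherwise. */
	static int try_start_delete(struct vport *v)
	{
		pthread_mutex_lock(&v->lock);
		if (v->flags & (VPORT_DEL | VPORT_CREATING | VPORT_DELETING)) {
			pthread_mutex_unlock(&v->lock);
			return -1;	/* someone else got there first */
		}
		v->flags |= VPORT_DELETING;	/* claim while still holding the lock */
		pthread_mutex_unlock(&v->lock);
		return 0;
	}

	int main(void)
	{
		struct vport v = { PTHREAD_MUTEX_INITIALIZER, 0 };

		printf("%d\n", try_start_delete(&v));	/* 0: first caller wins */
		printf("%d\n", try_start_delete(&v));	/* -1: flag already set */
		return 0;
	}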
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 83881df..7b75c8a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1948,7 +1948,7 @@
 {
 	struct request_queue *q = sdkp->disk->queue;
 	unsigned int sector_sz = sdkp->device->sector_size;
-	const int vpd_len = 32;
+	const int vpd_len = 64;
 	unsigned char *buffer = kmalloc(vpd_len, GFP_KERNEL);
 
 	if (!buffer ||
@@ -1998,7 +1998,7 @@
 {
 	unsigned char *buffer;
 	u16 rot;
-	const int vpd_len = 32;
+	const int vpd_len = 64;
 
 	buffer = kmalloc(vpd_len, GFP_KERNEL);
 
diff --git a/drivers/serial/cpm_uart/cpm_uart_cpm2.c b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
index a9802e7..722eac1 100644
--- a/drivers/serial/cpm_uart/cpm_uart_cpm2.c
+++ b/drivers/serial/cpm_uart/cpm_uart_cpm2.c
@@ -61,7 +61,7 @@
 	void __iomem *pram;
 	unsigned long offset;
 	struct resource res;
-	unsigned long len;
+	resource_size_t len;
 
 	/* Don't remap parameter RAM if it has already been initialized
 	 * during console setup.
@@ -74,7 +74,7 @@
 	if (of_address_to_resource(np, 1, &res))
 		return NULL;
 
-	len = 1 + res.end - res.start;
+	len = resource_size(&res);
 	pram = ioremap(res.start, len);
 	if (!pram)
 		return NULL;
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index e91db4b..175d202 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -745,6 +745,7 @@
 	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
 	PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+	PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
 	PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
diff --git a/drivers/serial/sh-sci.c b/drivers/serial/sh-sci.c
index 980f394..f7b9aff 100644
--- a/drivers/serial/sh-sci.c
+++ b/drivers/serial/sh-sci.c
@@ -50,7 +50,6 @@
 #include <linux/list.h>
 #include <linux/dmaengine.h>
 #include <linux/scatterlist.h>
-#include <linux/timer.h>
 
 #ifdef CONFIG_SUPERH
 #include <asm/sh_bios.h>
diff --git a/drivers/serial/sunsab.c b/drivers/serial/sunsab.c
index d514e28..d2e0321 100644
--- a/drivers/serial/sunsab.c
+++ b/drivers/serial/sunsab.c
@@ -474,7 +474,7 @@
 {
 	struct uart_sunsab_port *up = (struct uart_sunsab_port *) port;
 
-	up->interrupt_mask0 |= SAB82532_ISR0_TCD;
+	up->interrupt_mask0 |= SAB82532_IMR0_TCD;
 	writeb(up->interrupt_mask1, &up->regs->w.imr0);
 }
 
diff --git a/drivers/serial/uartlite.c b/drivers/serial/uartlite.c
index ab2ab3c..f0a6c61 100644
--- a/drivers/serial/uartlite.c
+++ b/drivers/serial/uartlite.c
@@ -19,7 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <asm/io.h>
-#if defined(CONFIG_OF)
+#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_platform.h>
@@ -581,7 +581,7 @@
 /* ---------------------------------------------------------------------
  * OF bus bindings
  */
-#if defined(CONFIG_OF)
+#if defined(CONFIG_OF) && (defined(CONFIG_PPC32) || defined(CONFIG_MICROBLAZE))
 static int __devinit
 ulite_of_probe(struct of_device *op, const struct of_device_id *match)
 {
@@ -631,11 +631,11 @@
 {
 	of_unregister_platform_driver(&ulite_of_driver);
 }
-#else /* CONFIG_OF */
-/* CONFIG_OF not enabled; do nothing helpers */
+#else /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
+/* Appropriate config not enabled; do nothing helpers */
 static inline int __init ulite_of_register(void) { return 0; }
 static inline void __exit ulite_of_unregister(void) { }
-#endif /* CONFIG_OF */
+#endif /* CONFIG_OF && (CONFIG_PPC32 || CONFIG_MICROBLAZE) */
 
 /* ---------------------------------------------------------------------
  * Module setup/teardown
diff --git a/drivers/sh/intc.c b/drivers/sh/intc.c
index c275039..a3d8677 100644
--- a/drivers/sh/intc.c
+++ b/drivers/sh/intc.c
@@ -2,7 +2,7 @@
  * Shared interrupt handling code for IPR and INTC2 types of IRQs.
  *
  * Copyright (C) 2007, 2008 Magnus Damm
- * Copyright (C) 2009 Paul Mundt
+ * Copyright (C) 2009, 2010 Paul Mundt
  *
  * Based on intc2.c and ipr.c
  *
@@ -26,6 +26,7 @@
 #include <linux/list.h>
 #include <linux/topology.h>
 #include <linux/bitmap.h>
+#include <linux/cpumask.h>
 
 #define _INTC_MK(fn, mode, addr_e, addr_d, width, shift) \
 	((shift) | ((width) << 5) | ((fn) << 9) | ((mode) << 13) | \
@@ -234,6 +235,10 @@
 	unsigned int cpu;
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_E(handle)); cpu++) {
+#ifdef CONFIG_SMP
+		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+			continue;
+#endif
 		addr = INTC_REG(d, _INTC_ADDR_E(handle), cpu);
 		intc_enable_fns[_INTC_MODE(handle)](addr, handle, intc_reg_fns\
 						    [_INTC_FN(handle)], irq);
@@ -253,6 +258,10 @@
 	unsigned int cpu;
 
 	for (cpu = 0; cpu < SMP_NR(d, _INTC_ADDR_D(handle)); cpu++) {
+#ifdef CONFIG_SMP
+		if (!cpumask_test_cpu(cpu, irq_to_desc(irq)->affinity))
+			continue;
+#endif
 		addr = INTC_REG(d, _INTC_ADDR_D(handle), cpu);
 		intc_disable_fns[_INTC_MODE(handle)](addr, handle,intc_reg_fns\
 						     [_INTC_FN(handle)], irq);
@@ -301,6 +310,23 @@
 	return 0; /* allow wakeup, but setup hardware in intc_suspend() */
 }
 
+#ifdef CONFIG_SMP
+/*
+ * This is called with the irq desc lock held, so we don't require any
+ * additional locking here at the intc desc level. The affinity mask is
+ * later tested in the enable/disable paths.
+ */
+static int intc_set_affinity(unsigned int irq, const struct cpumask *cpumask)
+{
+	if (!cpumask_intersects(cpumask, cpu_online_mask))
+		return -1;
+
+	cpumask_copy(irq_to_desc(irq)->affinity, cpumask);
+
+	return 0;
+}
+#endif
+
 static void intc_mask_ack(unsigned int irq)
 {
 	struct intc_desc_int *d = get_intc_desc(irq);
@@ -847,6 +873,9 @@
 	d->chip.shutdown = intc_disable;
 	d->chip.set_type = intc_set_sense;
 	d->chip.set_wake = intc_set_wake;
+#ifdef CONFIG_SMP
+	d->chip.set_affinity = intc_set_affinity;
+#endif
 
 	if (hw->ack_regs) {
 		for (i = 0; i < hw->nr_ack_regs; i++)
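
The intc hunks above record the requested affinity mask in set_affinity and then have the per-CPU enable/disable loops skip any CPU that is not in it. Sketched with plain bitmasks (a single global mask models one IRQ's affinity; everything here is hypothetical, not the SH intc structures):

	#include <stdio.h>

	#define NR_CPUS 4

	static unsigned int irq_affinity;	/* bit n set => CPU n allowed */

	static int set_affinity(unsigned int mask, unsigned int online)
	{
		if (!(mask & online))
			return -1;	/* no online CPU in the requested mask */
		irq_affinity = mask;	/* tested later in the enable/disable paths */
		return 0;
	}

	static void enable_irq_percpu(void)
	{
		for (unsigned int cpu = 0; cpu < NR_CPUS; cpu++) {
			if (!(irq_affinity & (1u << cpu)))
				continue;	/* skip CPUs outside the mask */
			printf("enable on cpu %u\n", cpu);
		}
	}

	int main(void)
	{
		if (set_affinity(0x6 /* CPUs 1,2 */, 0xf /* all online */) == 0)
			enable_irq_percpu();
		return 0;
	}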
diff --git a/drivers/spi/omap2_mcspi.c b/drivers/spi/omap2_mcspi.c
index 715c518..4dd786b 100644
--- a/drivers/spi/omap2_mcspi.c
+++ b/drivers/spi/omap2_mcspi.c
@@ -578,6 +578,7 @@
 	struct spi_master *spi_cntrl;
 	u32 l = 0, div = 0;
 	u8 word_len = spi->bits_per_word;
+	u32 speed_hz = spi->max_speed_hz;
 
 	mcspi = spi_master_get_devdata(spi->master);
 	spi_cntrl = mcspi->master;
@@ -587,9 +588,12 @@
 
 	cs->word_len = word_len;
 
-	if (spi->max_speed_hz) {
+	if (t && t->speed_hz)
+		speed_hz = t->speed_hz;
+
+	if (speed_hz) {
 		while (div <= 15 && (OMAP2_MCSPI_MAX_FREQ / (1 << div))
-					> spi->max_speed_hz)
+					> speed_hz)
 			div++;
 	} else
 		div = 15;
@@ -751,11 +755,13 @@
 	mcspi = spi_master_get_devdata(spi->master);
 	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
 
-	/* Unlink controller state from context save list */
-	cs = spi->controller_state;
-	list_del(&cs->node);
+	if (spi->controller_state) {
+		/* Unlink controller state from context save list */
+		cs = spi->controller_state;
+		list_del(&cs->node);
 
-	kfree(spi->controller_state);
+		kfree(spi->controller_state);
+	}
 
 	if (mcspi_dma->dma_rx_channel != -1) {
 		omap_free_dma(mcspi_dma->dma_rx_channel);
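
The divider loop above picks the smallest power-of-two divisor that brings the controller clock down to at most the requested rate, now honouring a per-transfer t->speed_hz when one is given. The arithmetic as a standalone sketch (a 48 MHz reference clock is assumed here; the cap of 15 mirrors the loop's bound):

	#include <stdio.h>

	#define MCSPI_MAX_FREQ 48000000u	/* assumed reference clock */

	static unsigned int pick_div(unsigned int speed_hz)
	{
		unsigned int div = 0;

		if (speed_hz) {
			/* smallest div with MAX_FREQ / 2^div <= speed_hz */
			while (div <= 15 &&
			       (MCSPI_MAX_FREQ / (1u << div)) > speed_hz)
				div++;
		} else {
			div = 15;	/* no rate requested: slowest clock */
		}
		return div;
	}

	int main(void)
	{
		/* 48 MHz / 2^6 = 750 kHz, the first step at or below 1 MHz */
		printf("div=%u\n", pick_div(1000000));	/* 6 */
		printf("div=%u\n", pick_div(0));	/* 15 */
		return 0;
	}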
diff --git a/drivers/staging/samsung-laptop/samsung-laptop.c b/drivers/staging/samsung-laptop/samsung-laptop.c
index dd7ea4c0..eb44b60 100644
--- a/drivers/staging/samsung-laptop/samsung-laptop.c
+++ b/drivers/staging/samsung-laptop/samsung-laptop.c
@@ -394,6 +394,7 @@
 
 static int __init samsung_init(void)
 {
+	struct backlight_properties props;
 	struct sabi_retval sretval;
 	const char *testStr = "SECLINUX";
 	void __iomem *memcheck;
@@ -486,12 +487,14 @@
 		goto error_no_platform;
 
 	/* create a backlight device to talk to this one */
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = MAX_BRIGHT;
 	backlight_device = backlight_device_register("samsung", &sdev->dev,
-						     NULL, &backlight_ops);
+						     NULL, &backlight_ops,
+						     &props);
 	if (IS_ERR(backlight_device))
 		goto error_no_backlight;
 
-	backlight_device->props.max_brightness = MAX_BRIGHT;
 	backlight_device->props.brightness = read_brightness();
 	backlight_device->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(backlight_device);
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 975d556..be6331e 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1441,7 +1441,7 @@
 			wb = acm->delayed_wb;
 			acm->delayed_wb = NULL;
 			spin_unlock_irq(&acm->write_lock);
-			acm_start_wb(acm, acm->delayed_wb);
+			acm_start_wb(acm, wb);
 		} else {
 			spin_unlock_irq(&acm->write_lock);
 		}
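
The one-line cdc-acm fix above is the usual take-then-clear pattern: the shared pointer is copied into a local and NULLed while the lock is held, so the post-unlock call must use the local copy rather than the now-NULL field. In miniature (a pthread mutex stands in for the spinlock; the names are hypothetical):

	#include <pthread.h>
	#include <stddef.h>
	#include <stdio.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static int *delayed;	/* shared; protected by lock */

	static void start_wb(int *wb) { printf("start %d\n", *wb); }

	static void resume(void)
	{
		int *wb;

		pthread_mutex_lock(&lock);
		wb = delayed;		/* take... */
		delayed = NULL;		/* ...and clear, under the lock */
		pthread_mutex_unlock(&lock);

		if (wb)
			start_wb(wb);	/* use the local copy, not 'delayed' */
	}

	int main(void)
	{
		static int item = 42;

		delayed = &item;
		resume();
		return 0;
	}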
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 18aafcb..189141ca 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -52,7 +52,8 @@
 #define WDM_READ		4
 #define WDM_INT_STALL		5
 #define WDM_POLL_RUNNING	6
-
+#define WDM_RESPONDING		7
+#define WDM_SUSPENDING		8
 
 #define WDM_MAX			16
 
@@ -87,9 +88,7 @@
 	int			count;
 	dma_addr_t		shandle;
 	dma_addr_t		ihandle;
-	struct mutex		wlock;
-	struct mutex		rlock;
-	struct mutex		plock;
+	struct mutex		lock;
 	wait_queue_head_t	wait;
 	struct work_struct	rxwork;
 	int			werr;
@@ -117,21 +116,22 @@
 	int status = urb->status;
 
 	spin_lock(&desc->iuspin);
+	clear_bit(WDM_RESPONDING, &desc->flags);
 
 	if (status) {
 		switch (status) {
 		case -ENOENT:
 			dev_dbg(&desc->intf->dev,
 				"nonzero urb status received: -ENOENT");
-			break;
+			goto skip_error;
 		case -ECONNRESET:
 			dev_dbg(&desc->intf->dev,
 				"nonzero urb status received: -ECONNRESET");
-			break;
+			goto skip_error;
 		case -ESHUTDOWN:
 			dev_dbg(&desc->intf->dev,
 				"nonzero urb status received: -ESHUTDOWN");
-			break;
+			goto skip_error;
 		case -EPIPE:
 			dev_err(&desc->intf->dev,
 				"nonzero urb status received: -EPIPE\n");
@@ -147,6 +147,7 @@
 	desc->reslength = urb->actual_length;
 	memmove(desc->ubuf + desc->length, desc->inbuf, desc->reslength);
 	desc->length += desc->reslength;
+skip_error:
 	wake_up(&desc->wait);
 
 	set_bit(WDM_READ, &desc->flags);
@@ -229,13 +230,16 @@
 	desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
 	spin_lock(&desc->iuspin);
 	clear_bit(WDM_READ, &desc->flags);
-	if (!test_bit(WDM_DISCONNECTING, &desc->flags)) {
+	set_bit(WDM_RESPONDING, &desc->flags);
+	if (!test_bit(WDM_DISCONNECTING, &desc->flags)
+		&& !test_bit(WDM_SUSPENDING, &desc->flags)) {
 		rv = usb_submit_urb(desc->response, GFP_ATOMIC);
 		dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d",
 			__func__, rv);
 	}
 	spin_unlock(&desc->iuspin);
 	if (rv < 0) {
+		clear_bit(WDM_RESPONDING, &desc->flags);
 		if (rv == -EPERM)
 			return;
 		if (rv == -ENOMEM) {
@@ -305,14 +309,38 @@
 	if (we < 0)
 		return -EIO;
 
-	r = mutex_lock_interruptible(&desc->wlock); /* concurrent writes */
-	rv = -ERESTARTSYS;
-	if (r)
+	desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
+	if (!buf) {
+		rv = -ENOMEM;
 		goto outnl;
+	}
+
+	r = copy_from_user(buf, buffer, count);
+	if (r > 0) {
+		kfree(buf);
+		rv = -EFAULT;
+		goto outnl;
+	}
+
+	/* concurrent writes and disconnect */
+	r = mutex_lock_interruptible(&desc->lock);
+	rv = -ERESTARTSYS;
+	if (r) {
+		kfree(buf);
+		goto outnl;
+	}
+
+	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+		kfree(buf);
+		rv = -ENODEV;
+		goto outnp;
+	}
 
 	r = usb_autopm_get_interface(desc->intf);
-	if (r < 0)
+	if (r < 0) {
+		kfree(buf);
 		goto outnp;
+	}
 
 	if (!(file->f_flags & O_NONBLOCK))
 		r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
@@ -320,24 +348,8 @@
 	else
 		if (test_bit(WDM_IN_USE, &desc->flags))
 			r = -EAGAIN;
-	if (r < 0)
-		goto out;
-
-	if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
-		rv = -ENODEV;
-		goto out;
-	}
-
-	desc->outbuf = buf = kmalloc(count, GFP_KERNEL);
-	if (!buf) {
-		rv = -ENOMEM;
-		goto out;
-	}
-
-	r = copy_from_user(buf, buffer, count);
-	if (r > 0) {
+	if (r < 0) {
 		kfree(buf);
-		rv = -EFAULT;
 		goto out;
 	}
 
@@ -374,7 +386,7 @@
 out:
 	usb_autopm_put_interface(desc->intf);
 outnp:
-	mutex_unlock(&desc->wlock);
+	mutex_unlock(&desc->lock);
 outnl:
 	return rv < 0 ? rv : count;
 }
@@ -387,7 +399,7 @@
 	struct wdm_device *desc = file->private_data;
 
 
-	rv = mutex_lock_interruptible(&desc->rlock); /*concurrent reads */
+	rv = mutex_lock_interruptible(&desc->lock); /*concurrent reads */
 	if (rv < 0)
 		return -ERESTARTSYS;
 
@@ -424,11 +436,8 @@
 		spin_lock_irq(&desc->iuspin);
 
 		if (desc->rerr) { /* read completed, error happened */
-			int t = desc->rerr;
 			desc->rerr = 0;
 			spin_unlock_irq(&desc->iuspin);
-			dev_err(&desc->intf->dev,
-				"reading had resulted in %d\n", t);
 			rv = -EIO;
 			goto err;
 		}
@@ -465,9 +474,7 @@
 	rv = cntr;
 
 err:
-	mutex_unlock(&desc->rlock);
-	if (rv < 0 && rv != -EAGAIN)
-		dev_err(&desc->intf->dev, "wdm_read: exit error\n");
+	mutex_unlock(&desc->lock);
 	return rv;
 }
 
@@ -533,7 +540,7 @@
 	}
 	intf->needs_remote_wakeup = 1;
 
-	mutex_lock(&desc->plock);
+	mutex_lock(&desc->lock);
 	if (!desc->count++) {
 		rv = usb_submit_urb(desc->validity, GFP_KERNEL);
 		if (rv < 0) {
@@ -544,7 +551,7 @@
 	} else {
 		rv = 0;
 	}
-	mutex_unlock(&desc->plock);
+	mutex_unlock(&desc->lock);
 	usb_autopm_put_interface(desc->intf);
 out:
 	mutex_unlock(&wdm_mutex);
@@ -556,9 +563,9 @@
 	struct wdm_device *desc = file->private_data;
 
 	mutex_lock(&wdm_mutex);
-	mutex_lock(&desc->plock);
+	mutex_lock(&desc->lock);
 	desc->count--;
-	mutex_unlock(&desc->plock);
+	mutex_unlock(&desc->lock);
 
 	if (!desc->count) {
 		dev_dbg(&desc->intf->dev, "wdm_release: cleanup");
@@ -655,9 +662,7 @@
 	desc = kzalloc(sizeof(struct wdm_device), GFP_KERNEL);
 	if (!desc)
 		goto out;
-	mutex_init(&desc->wlock);
-	mutex_init(&desc->rlock);
-	mutex_init(&desc->plock);
+	mutex_init(&desc->lock);
 	spin_lock_init(&desc->iuspin);
 	init_waitqueue_head(&desc->wait);
 	desc->wMaxCommand = maxcom;
@@ -771,14 +776,17 @@
 	/* to terminate pending flushes */
 	clear_bit(WDM_IN_USE, &desc->flags);
 	spin_unlock_irqrestore(&desc->iuspin, flags);
-	cancel_work_sync(&desc->rxwork);
+	mutex_lock(&desc->lock);
 	kill_urbs(desc);
+	cancel_work_sync(&desc->rxwork);
+	mutex_unlock(&desc->lock);
 	wake_up_all(&desc->wait);
 	if (!desc->count)
 		cleanup(desc);
 	mutex_unlock(&wdm_mutex);
 }
 
+#ifdef CONFIG_PM
 static int wdm_suspend(struct usb_interface *intf, pm_message_t message)
 {
 	struct wdm_device *desc = usb_get_intfdata(intf);
@@ -786,22 +794,30 @@
 
 	dev_dbg(&desc->intf->dev, "wdm%d_suspend\n", intf->minor);
 
-	mutex_lock(&desc->plock);
-#ifdef CONFIG_PM
+	/* if this is an autosuspend the caller does the locking */
+	if (!(message.event & PM_EVENT_AUTO))
+		mutex_lock(&desc->lock);
+	spin_lock_irq(&desc->iuspin);
+
 	if ((message.event & PM_EVENT_AUTO) &&
-			test_bit(WDM_IN_USE, &desc->flags)) {
+			(test_bit(WDM_IN_USE, &desc->flags)
+			|| test_bit(WDM_RESPONDING, &desc->flags))) {
+		spin_unlock_irq(&desc->iuspin);
 		rv = -EBUSY;
 	} else {
-#endif
-		cancel_work_sync(&desc->rxwork);
+
+		set_bit(WDM_SUSPENDING, &desc->flags);
+		spin_unlock_irq(&desc->iuspin);
+		/* callback submits work - order is essential */
 		kill_urbs(desc);
-#ifdef CONFIG_PM
+		cancel_work_sync(&desc->rxwork);
 	}
-#endif
-	mutex_unlock(&desc->plock);
+	if (!(message.event & PM_EVENT_AUTO))
+		mutex_unlock(&desc->lock);
 
 	return rv;
 }
+#endif
 
 static int recover_from_urb_loss(struct wdm_device *desc)
 {
@@ -815,23 +831,27 @@
 	}
 	return rv;
 }
+
+#ifdef CONFIG_PM
 static int wdm_resume(struct usb_interface *intf)
 {
 	struct wdm_device *desc = usb_get_intfdata(intf);
 	int rv;
 
 	dev_dbg(&desc->intf->dev, "wdm%d_resume\n", intf->minor);
-	mutex_lock(&desc->plock);
+
+	clear_bit(WDM_SUSPENDING, &desc->flags);
 	rv = recover_from_urb_loss(desc);
-	mutex_unlock(&desc->plock);
+
 	return rv;
 }
+#endif
 
 static int wdm_pre_reset(struct usb_interface *intf)
 {
 	struct wdm_device *desc = usb_get_intfdata(intf);
 
-	mutex_lock(&desc->plock);
+	mutex_lock(&desc->lock);
 	return 0;
 }
 
@@ -841,7 +861,7 @@
 	int rv;
 
 	rv = recover_from_urb_loss(desc);
-	mutex_unlock(&desc->plock);
+	mutex_unlock(&desc->lock);
 	return 0;
 }
 
@@ -849,9 +869,11 @@
 	.name =		"cdc_wdm",
 	.probe =	wdm_probe,
 	.disconnect =	wdm_disconnect,
+#ifdef CONFIG_PM
 	.suspend =	wdm_suspend,
 	.resume =	wdm_resume,
 	.reset_resume =	wdm_resume,
+#endif
 	.pre_reset =	wdm_pre_reset,
 	.post_reset =	wdm_post_reset,
 	.id_table =	wdm_ids,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index e909ff7..3466fdc 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1207,6 +1207,13 @@
 			free_async(as);
 			return -ENOMEM;
 		}
+		/* Isochronous input data may end up being discontiguous
+		 * if some of the packets are short.  Clear the buffer so
+		 * that the gaps don't leak kernel data to userspace.
+		 */
+		if (is_in && uurb->type == USBDEVFS_URB_TYPE_ISO)
+			memset(as->urb->transfer_buffer, 0,
+					uurb->buffer_length);
 	}
 	as->urb->dev = ps->dev;
 	as->urb->pipe = (uurb->type << 30) |
@@ -1345,10 +1352,14 @@
 	void __user *addr = as->userurb;
 	unsigned int i;
 
-	if (as->userbuffer && urb->actual_length)
-		if (copy_to_user(as->userbuffer, urb->transfer_buffer,
-				 urb->actual_length))
+	if (as->userbuffer && urb->actual_length) {
+		if (urb->number_of_packets > 0)		/* Isochronous */
+			i = urb->transfer_buffer_length;
+		else					/* Non-Isoc */
+			i = urb->actual_length;
+		if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
 			goto err_out;
+	}
 	if (put_user(as->status, &userurb->status))
 		goto err_out;
 	if (put_user(urb->actual_length, &userurb->actual_length))
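
The devio.c change above pre-zeroes isochronous input buffers because short packets leave gaps in the transfer buffer, and for iso URBs the full transfer_buffer_length is now copied back to userspace; without the memset those gaps would expose stale kernel heap contents. The defensive pattern in miniature (the partial "device fill" is hypothetical):

	#include <stdio.h>
	#include <string.h>

	#define BUF_LEN 16

	/* Device fills only part of each packet slot, like short ISO packets. */
	static void fake_device_fill(unsigned char *buf)
	{
		memset(buf, 0xab, 4);		/* packet 0: 4 of 8 bytes arrive */
		memset(buf + 8, 0xcd, 2);	/* packet 1: 2 of 8 bytes arrive */
	}

	int main(void)
	{
		unsigned char buf[BUF_LEN];

		/* Zero first, so unfilled gaps copy back as 0, not stale data. */
		memset(buf, 0, sizeof(buf));
		fake_device_fill(buf);

		for (int i = 0; i < BUF_LEN; i++)
			printf("%02x ", buf[i]);
		printf("\n");
		return 0;
	}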
diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c
index 2708056..45a32da 100644
--- a/drivers/usb/core/urb.c
+++ b/drivers/usb/core/urb.c
@@ -453,6 +453,7 @@
 			if (urb->interval > (1 << 15))
 				return -EINVAL;
 			max = 1 << 15;
+			break;
 		case USB_SPEED_WIRELESS:
 			if (urb->interval > 16)
 				return -EINVAL;
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7460cd7..11a3e0f 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -747,7 +747,7 @@
 	  which may be used with composite framework.
 
 	  Say "y" to link the driver statically, or "m" to build
-	  a dynamically linked module called "g_file_storage".  If unsure,
+	  a dynamically linked module called "g_mass_storage".  If unsure,
 	  consider File-backed Storage Gadget.
 
 config USB_G_SERIAL
diff --git a/drivers/usb/gadget/epautoconf.c b/drivers/usb/gadget/epautoconf.c
index 65a5f94..3568de2 100644
--- a/drivers/usb/gadget/epautoconf.c
+++ b/drivers/usb/gadget/epautoconf.c
@@ -266,7 +266,7 @@
 		}
 
 #ifdef CONFIG_BLACKFIN
-	} else if (gadget_is_musbhsfc(gadget) || gadget_is_musbhdrc(gadget)) {
+	} else if (gadget_is_musbhdrc(gadget)) {
 		if ((USB_ENDPOINT_XFER_BULK == type) ||
 		    (USB_ENDPOINT_XFER_ISOC == type)) {
 			if (USB_DIR_IN & desc->bEndpointAddress)
diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c
index 5a3cdd0..f4911c0 100644
--- a/drivers/usb/gadget/f_mass_storage.c
+++ b/drivers/usb/gadget/f_mass_storage.c
@@ -2910,7 +2910,7 @@
 }
 
 
-static int fsg_bind(struct usb_configuration *c, struct usb_function *f)
+static int __init fsg_bind(struct usb_configuration *c, struct usb_function *f)
 {
 	struct fsg_dev		*fsg = fsg_from_func(f);
 	struct usb_gadget	*gadget = c->cdev->gadget;
@@ -2954,7 +2954,6 @@
 autoconf_fail:
 	ERROR(fsg, "unable to autoconfigure all endpoints\n");
 	rc = -ENOTSUPP;
-	fsg_unbind(c, f);
 	return rc;
 }
 
diff --git a/drivers/usb/gadget/gadget_chips.h b/drivers/usb/gadget/gadget_chips.h
index 1edbc12..e511fec 100644
--- a/drivers/usb/gadget/gadget_chips.h
+++ b/drivers/usb/gadget/gadget_chips.h
@@ -136,6 +136,12 @@
 #define	gadget_is_r8a66597(g)	0
 #endif
 
+#ifdef CONFIG_USB_S3C_HSOTG
+#define gadget_is_s3c_hsotg(g)    (!strcmp("s3c-hsotg", (g)->name))
+#else
+#define gadget_is_s3c_hsotg(g)    0
+#endif
+
 
 /**
  * usb_gadget_controller_number - support bcdDevice id convention
@@ -192,6 +198,8 @@
 		return 0x24;
 	else if (gadget_is_r8a66597(gadget))
 		return 0x25;
+	else if (gadget_is_s3c_hsotg(gadget))
+		return 0x26;
 	return -ENOENT;
 }
 
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index e8edc64..1088d08 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1768,7 +1768,7 @@
 	 * usb_gadget_driver_{register,unregister}() must change.
 	 */
 	if (the_controller) {
-		WARNING(dev, "ignoring %s\n", pci_name(pdev));
+		pr_warning("ignoring %s\n", pci_name(pdev));
 		return -EBUSY;
 	}
 	if (!pdev->irq) {
diff --git a/drivers/usb/gadget/multi.c b/drivers/usb/gadget/multi.c
index 76496f5..a930d7f 100644
--- a/drivers/usb/gadget/multi.c
+++ b/drivers/usb/gadget/multi.c
@@ -211,8 +211,6 @@
 	ret = fsg_add(c->cdev, c, fsg_common);
 	if (ret < 0)
 		return ret;
-	if (ret < 0)
-		return ret;
 
 	return 0;
 }
diff --git a/drivers/usb/gadget/r8a66597-udc.c b/drivers/usb/gadget/r8a66597-udc.c
index 8b45145..5e13d23 100644
--- a/drivers/usb/gadget/r8a66597-udc.c
+++ b/drivers/usb/gadget/r8a66597-udc.c
@@ -23,6 +23,7 @@
 #include <linux/module.h>
 #include <linux/interrupt.h>
 #include <linux/delay.h>
+#include <linux/err.h>
 #include <linux/io.h>
 #include <linux/platform_device.h>
 #include <linux/clk.h>
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index 4e0c67f..b6315aa 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -12,7 +12,7 @@
 ifeq ($(CONFIG_FHCI_DEBUG),y)
 fhci-objs += fhci-dbg.o
 endif
-xhci-objs := xhci-hcd.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
+xhci-hcd-objs := xhci.o xhci-mem.o xhci-pci.o xhci-ring.o xhci-hub.o xhci-dbg.o
 
 obj-$(CONFIG_USB_WHCI_HCD)	+= whci/
 
@@ -25,7 +25,7 @@
 obj-$(CONFIG_USB_OHCI_HCD)	+= ohci-hcd.o
 obj-$(CONFIG_USB_UHCI_HCD)	+= uhci-hcd.o
 obj-$(CONFIG_USB_FHCI_HCD)	+= fhci.o
-obj-$(CONFIG_USB_XHCI_HCD)	+= xhci.o
+obj-$(CONFIG_USB_XHCI_HCD)	+= xhci-hcd.o
 obj-$(CONFIG_USB_SL811_HCD)	+= sl811-hcd.o
 obj-$(CONFIG_USB_SL811_CS)	+= sl811_cs.o
 obj-$(CONFIG_USB_U132_HCD)	+= u132-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index d8d6d34..dc55a62 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -995,7 +995,7 @@
 	/* endpoints can be iso streams.  for now, we don't
 	 * accelerate iso completions ... so spin a while.
 	 */
-	if (qh->hw->hw_info1 == 0) {
+	if (qh->hw == NULL) {
 		ehci_vdbg (ehci, "iso delay\n");
 		goto idle_timeout;
 	}
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c
index 39340ae..a0aaaaf 100644
--- a/drivers/usb/host/ehci-sched.c
+++ b/drivers/usb/host/ehci-sched.c
@@ -1123,8 +1123,8 @@
 					urb->interval);
 		}
 
-	/* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
-	} else if (unlikely (stream->hw_info1 != 0)) {
+	/* if dev->ep [epnum] is a QH, hw is set */
+	} else if (unlikely (stream->hw != NULL)) {
 		ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
 			urb->dev->devpath, epnum,
 			usb_pipein(urb->pipe) ? "in" : "out");
@@ -1565,13 +1565,27 @@
 static inline void
 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
 {
-	/* always prepend ITD/SITD ... only QH tree is order-sensitive */
-	itd->itd_next = ehci->pshadow [frame];
-	itd->hw_next = ehci->periodic [frame];
-	ehci->pshadow [frame].itd = itd;
+	union ehci_shadow	*prev = &ehci->pshadow[frame];
+	__hc32			*hw_p = &ehci->periodic[frame];
+	union ehci_shadow	here = *prev;
+	__hc32			type = 0;
+
+	/* skip any iso nodes which might belong to previous microframes */
+	while (here.ptr) {
+		type = Q_NEXT_TYPE(ehci, *hw_p);
+		if (type == cpu_to_hc32(ehci, Q_TYPE_QH))
+			break;
+		prev = periodic_next_shadow(ehci, prev, type);
+		hw_p = shadow_next_periodic(ehci, &here, type);
+		here = *prev;
+	}
+
+	itd->itd_next = here;
+	itd->hw_next = *hw_p;
+	prev->itd = itd;
 	itd->frame = frame;
 	wmb ();
-	ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
+	*hw_p = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD);
 }
 
 /* fit urb's itds into the selected schedule slot; activate as needed */
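
itd_link above no longer prepends blindly: it walks the frame's shadow list past any ISO nodes already queued and splices the new ITD in just before the first QH, keeping same-frame ISO descriptors in order. A generic model of that splice (a plain singly linked list with a type tag; these structures are illustrative, not the EHCI ones):

	#include <stdio.h>
	#include <stdlib.h>

	enum ntype { ITD, QH };

	struct node {
		enum ntype type;
		int id;
		struct node *next;
	};

	/* Insert a new ITD after any leading ISO nodes, before the first QH. */
	static void itd_link(struct node **head, struct node *itd)
	{
		struct node **prev = head;

		while (*prev && (*prev)->type != QH)	/* skip existing ISO nodes */
			prev = &(*prev)->next;

		itd->next = *prev;	/* splice in before the first QH (or tail) */
		*prev = itd;
	}

	static struct node *mk(enum ntype t, int id)
	{
		struct node *n = calloc(1, sizeof(*n));

		n->type = t;
		n->id = id;
		return n;
	}

	int main(void)
	{
		struct node *head = mk(QH, 99);		/* frame starts with one QH */

		itd_link(&head, mk(ITD, 1));		/* lands before the QH */
		itd_link(&head, mk(ITD, 2));		/* lands after ITD 1 */

		for (struct node *n = head; n; n = n->next)
			printf("%s %d\n", n->type == QH ? "QH" : "ITD", n->id);
		return 0;
	}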
diff --git a/drivers/usb/host/ehci.h b/drivers/usb/host/ehci.h
index 2d85e21..b1dce96 100644
--- a/drivers/usb/host/ehci.h
+++ b/drivers/usb/host/ehci.h
@@ -394,9 +394,8 @@
  * acts like a qh would, if EHCI had them for ISO.
  */
 struct ehci_iso_stream {
-	/* first two fields match QH, but info1 == 0 */
-	__hc32			hw_next;
-	__hc32			hw_info1;
+	/* first field matches ehci_qh, but is NULL */
+	struct ehci_qh_hw	*hw;
 
 	u32			refcount;
 	u8			bEndpointAddress;
diff --git a/drivers/usb/host/r8a66597-hcd.c b/drivers/usb/host/r8a66597-hcd.c
index bee558ae..f71a73a 100644
--- a/drivers/usb/host/r8a66597-hcd.c
+++ b/drivers/usb/host/r8a66597-hcd.c
@@ -418,7 +418,7 @@
 
 /* this function must be called with interrupt disabled */
 static void free_usb_address(struct r8a66597 *r8a66597,
-			     struct r8a66597_device *dev)
+			     struct r8a66597_device *dev, int reset)
 {
 	int port;
 
@@ -430,7 +430,13 @@
 	dev->state = USB_STATE_DEFAULT;
 	r8a66597->address_map &= ~(1 << dev->address);
 	dev->address = 0;
-	dev_set_drvdata(&dev->udev->dev, NULL);
+	/*
+	 * drvdata only needs to be erased when resetting USB. When a USB
+	 * device behind a hub is disconnected, "dev->udev" has already
+	 * been freed by usb_disconnect(), so we cannot access the data.
+	 */
+	if (reset)
+		dev_set_drvdata(&dev->udev->dev, NULL);
 	list_del(&dev->device_list);
 	kfree(dev);
 
@@ -1069,7 +1075,7 @@
 	struct r8a66597_device *dev = r8a66597->root_hub[port].dev;
 
 	disable_r8a66597_pipe_all(r8a66597, dev);
-	free_usb_address(r8a66597, dev);
+	free_usb_address(r8a66597, dev, 0);
 
 	start_root_hub_sampling(r8a66597, port, 0);
 }
@@ -2085,7 +2091,7 @@
 				spin_lock_irqsave(&r8a66597->lock, flags);
 				dev = get_r8a66597_device(r8a66597, addr);
 				disable_r8a66597_pipe_all(r8a66597, dev);
-				free_usb_address(r8a66597, dev);
+				free_usb_address(r8a66597, dev, 0);
 				put_child_connect_map(r8a66597, addr);
 				spin_unlock_irqrestore(&r8a66597->lock, flags);
 			}
@@ -2228,7 +2234,7 @@
 			rh->port |= (1 << USB_PORT_FEAT_RESET);
 
 			disable_r8a66597_pipe_all(r8a66597, dev);
-			free_usb_address(r8a66597, dev);
+			free_usb_address(r8a66597, dev, 1);
 
 			r8a66597_mdfy(r8a66597, USBRST, USBRST | UACT,
 				      get_dvstctr_reg(port));
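
The new reset argument encodes a lifetime rule rather than new behaviour: dev->udev may only be dereferenced on the root-port reset path, since after a hub disconnect it has already been freed. A compressed standalone sketch of the guarded teardown, using mock types (not the driver's structs):

  #include <stdlib.h>

  struct mock_udev { void *drvdata; };
  struct mock_dev  { struct mock_udev *udev; };

  /* Clear drvdata only when the caller guarantees udev is still
   * alive (reset == 1); on hub disconnect it may already be freed,
   * so it must not be touched at all. */
  static void free_address(struct mock_dev *dev, int reset)
  {
  	if (reset)
  		dev->udev->drvdata = NULL;
  	free(dev);
  }

  int main(void)
  {
  	struct mock_udev udev = { (void *)&udev };
  	struct mock_dev *dev = malloc(sizeof(*dev));

  	if (!dev)
  		return 1;
  	dev->udev = &udev;
  	free_address(dev, 1);	/* reset path: safe to touch udev */
  	return 0;
  }
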
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 49f7d72..bba9b19 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -566,8 +566,13 @@
 			if (interval < 3)
 				interval = 3;
 			if ((1 << interval) != 8*ep->desc.bInterval)
-				dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n",
-						ep->desc.bEndpointAddress, 1 << interval);
+				dev_warn(&udev->dev,
+						"ep %#x - rounding interval"
+						" to %d microframes, "
+						"ep desc says %d microframes\n",
+						ep->desc.bEndpointAddress,
+						1 << interval,
+						8*ep->desc.bInterval);
 		}
 		break;
 	default:
diff --git a/drivers/usb/host/xhci-hcd.c b/drivers/usb/host/xhci.c
similarity index 99%
rename from drivers/usb/host/xhci-hcd.c
rename to drivers/usb/host/xhci.c
index 4cb69e0..492a61c 100644
--- a/drivers/usb/host/xhci-hcd.c
+++ b/drivers/usb/host/xhci.c
@@ -1173,6 +1173,7 @@
 		cmd_completion = &virt_dev->cmd_completion;
 		cmd_status = &virt_dev->cmd_status;
 	}
+	init_completion(cmd_completion);
 
 	if (!ctx_change)
 		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
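
The single added line matters because the completion object is reused across commands: without re-initialization, a late complete() from an earlier timed-out command would satisfy the next wait immediately. A toy standalone model of that re-arming (the real init_completion()/wait_for_completion() primitives live in <linux/completion.h>):

  #include <stdio.h>

  /* Toy completion: complete() increments, wait consumes. If 'done'
   * is not reset before reuse, a stale completion from a timed-out
   * command satisfies the next wait prematurely -- the bug the added
   * init_completion() call prevents. */
  struct completion { int done; };

  static void reinit(struct completion *c)   { c->done = 0; }
  static void complete(struct completion *c) { c->done++; }
  static int try_wait(struct completion *c)
  {
  	if (c->done) {
  		c->done--;
  		return 1;
  	}
  	return 0;
  }

  int main(void)
  {
  	struct completion c = { 0 };

  	complete(&c);			/* late signal of an old command */
  	reinit(&c);			/* re-arm, as the patch does */
  	printf("%d\n", try_wait(&c));	/* 0: stale signal discarded */
  	return 0;
  }
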
diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c
index 4d2952f..3adab04 100644
--- a/drivers/usb/misc/appledisplay.c
+++ b/drivers/usb/misc/appledisplay.c
@@ -202,6 +202,7 @@
 static int appledisplay_probe(struct usb_interface *iface,
 	const struct usb_device_id *id)
 {
+	struct backlight_properties props;
 	struct appledisplay *pdata;
 	struct usb_device *udev = interface_to_usbdev(iface);
 	struct usb_host_interface *iface_desc;
@@ -279,16 +280,16 @@
 	/* Register backlight device */
 	snprintf(bl_name, sizeof(bl_name), "appledisplay%d",
 		atomic_inc_return(&count_displays) - 1);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 0xff;
 	pdata->bd = backlight_device_register(bl_name, NULL, pdata,
-						&appledisplay_bl_data);
+					      &appledisplay_bl_data, &props);
 	if (IS_ERR(pdata->bd)) {
 		dev_err(&iface->dev, "Backlight registration failed\n");
 		retval = PTR_ERR(pdata->bd);
 		goto error;
 	}
 
-	pdata->bd->props.max_brightness = 0xff;
-
 	/* Try to get brightness */
 	brightness = appledisplay_bl_get_brightness(pdata->bd);
 
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index b4bbf8f..0e8b8ab 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -379,7 +379,6 @@
 				u8 devctl, u8 power)
 {
 	irqreturn_t handled = IRQ_NONE;
-	void __iomem *mbase = musb->mregs;
 
 	DBG(3, "<== Power=%02x, DevCtl=%02x, int_usb=0x%x\n", power, devctl,
 		int_usb);
@@ -394,6 +393,8 @@
 
 		if (devctl & MUSB_DEVCTL_HM) {
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
+			void __iomem *mbase = musb->mregs;
+
 			switch (musb->xceiv->state) {
 			case OTG_STATE_A_SUSPEND:
 				/* remote wakeup?  later, GetPortStatus
@@ -471,6 +472,8 @@
 #ifdef CONFIG_USB_MUSB_HDRC_HCD
 	/* see manual for the order of the tests */
 	if (int_usb & MUSB_INTR_SESSREQ) {
+		void __iomem *mbase = musb->mregs;
+
 		DBG(1, "SESSION_REQUEST (%s)\n", otg_state_string(musb));
 
 		/* IRQ arrives from ID pin sense or (later, if VBUS power
@@ -519,6 +522,8 @@
 		case OTG_STATE_A_WAIT_BCON:
 		case OTG_STATE_A_WAIT_VRISE:
 			if (musb->vbuserr_retry) {
+				void __iomem *mbase = musb->mregs;
+
 				musb->vbuserr_retry--;
 				ignore = 1;
 				devctl |= MUSB_DEVCTL_SESSION;
@@ -622,6 +627,7 @@
 
 	if (int_usb & MUSB_INTR_CONNECT) {
 		struct usb_hcd *hcd = musb_to_hcd(musb);
+		void __iomem *mbase = musb->mregs;
 
 		handled = IRQ_HANDLED;
 		musb->is_active = 1;
@@ -2007,7 +2013,6 @@
 	/* host side needs more setup */
 	if (is_host_enabled(musb)) {
 		struct usb_hcd	*hcd = musb_to_hcd(musb);
-		u8 busctl;
 
 		otg_set_host(musb->xceiv, &hcd->self);
 
@@ -2018,9 +2023,9 @@
 
 		/* program PHY to use external vBus if required */
 		if (plat->extvbus) {
-			busctl = musb_readb(musb->mregs, MUSB_ULPI_BUSCONTROL);
+			u8 busctl = musb_read_ulpi_buscontrol(musb->mregs);
 			busctl |= MUSB_ULPI_USE_EXTVBUS;
-			musb_writeb(musb->mregs, MUSB_ULPI_BUSCONTROL, busctl);
+			musb_write_ulpi_buscontrol(musb->mregs, busctl);
 		}
 	}
 
diff --git a/drivers/usb/musb/musb_core.h b/drivers/usb/musb/musb_core.h
index d849fb8..cd9f4a9 100644
--- a/drivers/usb/musb/musb_core.h
+++ b/drivers/usb/musb/musb_core.h
@@ -469,7 +469,7 @@
 
 struct musb_context_registers {
 
-#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+#ifdef CONFIG_PM
 	u32 otg_sysconfig, otg_forcestandby;
 #endif
 	u8 power;
@@ -483,7 +483,7 @@
 	struct musb_csr_regs index_regs[MUSB_C_NUM_EPS];
 };
 
-#if defined(CONFIG_ARCH_OMAP34XX) || defined(CONFIG_ARCH_OMAP2430)
+#ifdef CONFIG_PM
 extern void musb_platform_save_context(struct musb *musb,
 		struct musb_context_registers *musb_context);
 extern void musb_platform_restore_context(struct musb *musb,
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 3421cf9..dec896e 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -1689,7 +1689,7 @@
 				dma->desired_mode = 1;
 			if (rx_count < hw_ep->max_packet_sz_rx) {
 				length = rx_count;
-				dma->bDesiredMode = 0;
+				dma->desired_mode = 0;
 			} else {
 				length = urb->transfer_buffer_length;
 			}
diff --git a/drivers/usb/musb/musb_regs.h b/drivers/usb/musb/musb_regs.h
index 8d8062b..fa55aac 100644
--- a/drivers/usb/musb/musb_regs.h
+++ b/drivers/usb/musb/musb_regs.h
@@ -326,6 +326,11 @@
 	musb_writew(mbase, MUSB_RXFIFOADD, c_off);
 }
 
+static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
+{
+	musb_writeb(mbase, MUSB_ULPI_BUSCONTROL, val);
+}
+
 static inline u8 musb_read_txfifosz(void __iomem *mbase)
 {
 	return musb_readb(mbase, MUSB_TXFIFOSZ);
@@ -346,6 +351,11 @@
 	return musb_readw(mbase, MUSB_RXFIFOADD);
 }
 
+static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
+{
+	return musb_readb(mbase, MUSB_ULPI_BUSCONTROL);
+}
+
 static inline u8 musb_read_configdata(void __iomem *mbase)
 {
 	musb_writeb(mbase, MUSB_INDEX, 0);
@@ -510,20 +520,33 @@
 {
 }
 
+static inline void musb_write_ulpi_buscontrol(void __iomem *mbase, u8 val)
+{
+}
+
 static inline u8 musb_read_txfifosz(void __iomem *mbase)
 {
+	return 0;
 }
 
 static inline u16 musb_read_txfifoadd(void __iomem *mbase)
 {
+	return 0;
 }
 
 static inline u8 musb_read_rxfifosz(void __iomem *mbase)
 {
+	return 0;
 }
 
 static inline u16  musb_read_rxfifoadd(void __iomem *mbase)
 {
+	return 0;
+}
+
+static inline u8 musb_read_ulpi_buscontrol(void __iomem *mbase)
+{
+	return 0;
 }
 
 static inline u8 musb_read_configdata(void __iomem *mbase)
@@ -577,22 +600,27 @@
 
 static inline u8 musb_read_rxfunaddr(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 static inline u8 musb_read_rxhubaddr(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 static inline u8 musb_read_rxhubport(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 static inline u8  musb_read_txfunaddr(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 static inline u8  musb_read_txhubaddr(void __iomem *mbase, u8 epnum)
 {
+	return 0;
 }
 
 static inline void  musb_read_txhubport(void __iomem *mbase, u8 epnum)
diff --git a/drivers/usb/serial/Kconfig b/drivers/usb/serial/Kconfig
index c78b255..a0ecb42 100644
--- a/drivers/usb/serial/Kconfig
+++ b/drivers/usb/serial/Kconfig
@@ -474,14 +474,14 @@
 
 config USB_SERIAL_QCAUX
 	tristate "USB Qualcomm Auxiliary Serial Port Driver"
-	---help---
+	help
 	  Say Y here if you want to use the auxiliary serial ports provided
 	  by many modems based on Qualcomm chipsets.  These ports often use
 	  a proprietary protocol called DM and cannot be used for AT- or
 	  PPP-based communication.
 
 	  To compile this driver as a module, choose M here: the
-	  module will be called moto_modem.  If unsure, choose N.
+	  module will be called qcaux.  If unsure, choose N.
 
 config USB_SERIAL_QUALCOMM
 	tristate "USB Qualcomm Serial modem"
diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
index b22ac32..f347da2 100644
--- a/drivers/usb/serial/console.c
+++ b/drivers/usb/serial/console.c
@@ -181,6 +181,7 @@
 	/* The console is special in terms of closing the device so
 	 * indicate this port is now acting as a system console. */
 	port->console = 1;
+	port->port.console = 1;
 
 	mutex_unlock(&serial->disc_mutex);
 	return retval;
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 507382b..ec9b044 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -313,11 +313,6 @@
 		return -EPROTO;
 	}
 
-	/* Single data value */
-	result = usb_control_msg(serial->dev,
-			usb_sndctrlpipe(serial->dev, 0),
-			request, REQTYPE_HOST_TO_DEVICE, data[0],
-			0, NULL, 0, 300);
 	return 0;
 }
 
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 6af0dfa..1d7c4fa 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -91,7 +91,7 @@
 	unsigned long tx_outstanding_bytes;
 	unsigned long tx_outstanding_urbs;
 	unsigned short max_packet_size;
-	struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() */
+	struct mutex cfg_lock; /* Avoid mess by parallel calls of config ioctl() and change_speed() */
 };
 
 /* struct ftdi_sio_quirk is used by devices requiring special attention. */
@@ -658,6 +658,7 @@
 	{ USB_DEVICE(EVOLUTION_VID, EVOLUTION_ER1_PID) },
 	{ USB_DEVICE(EVOLUTION_VID, EVO_HYBRID_PID) },
 	{ USB_DEVICE(EVOLUTION_VID, EVO_RCM4_PID) },
+	{ USB_DEVICE(CONTEC_VID, CONTEC_COM1USBH_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ARTEMIS_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16_PID) },
 	{ USB_DEVICE(FTDI_VID, FTDI_ATIK_ATK16C_PID) },
@@ -1272,8 +1273,8 @@
 	     (priv->flags & ASYNC_SPD_MASK)) ||
 	    (((priv->flags & ASYNC_SPD_MASK) == ASYNC_SPD_CUST) &&
 	     (old_priv.custom_divisor != priv->custom_divisor))) {
-		mutex_unlock(&priv->cfg_lock);
 		change_speed(tty, port);
+		mutex_unlock(&priv->cfg_lock);
 	}
 	else
 		mutex_unlock(&priv->cfg_lock);
@@ -2264,9 +2265,11 @@
 		clear_mctrl(port, TIOCM_DTR | TIOCM_RTS);
 	} else {
 		/* set the baudrate determined before */
+		mutex_lock(&priv->cfg_lock);
 		if (change_speed(tty, port))
 			dev_err(&port->dev, "%s urb failed to set baudrate\n",
 				__func__);
+		mutex_unlock(&priv->cfg_lock);
 		/* Ensure RTS and DTR are raised when baudrate changed from 0 */
 		if (!old_termios || (old_termios->c_cflag & CBAUD) == B0)
 			set_mctrl(port, TIOCM_DTR | TIOCM_RTS);
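
Both ftdi_sio hunks move change_speed() inside cfg_lock, so the divisor examined under the lock is the one actually programmed; previously the lock was dropped first and a concurrent caller could interleave with the divisor writes. The locking shape as a small pthread sketch (mock state, not the driver's):

  #include <pthread.h>
  #include <stdio.h>

  static pthread_mutex_t cfg_lock = PTHREAD_MUTEX_INITIALIZER;
  static int custom_divisor;	/* stands in for priv->custom_divisor */

  static void change_speed(int divisor)
  {
  	custom_divisor = divisor;	/* device register writes here */
  }

  /* Patched shape: the speed change runs inside the same critical
   * section that examined and updated the configuration. */
  static void set_termios_path(int divisor)
  {
  	pthread_mutex_lock(&cfg_lock);
  	change_speed(divisor);
  	pthread_mutex_unlock(&cfg_lock);
  }

  int main(void)
  {
  	set_termios_path(24);
  	printf("divisor=%d\n", custom_divisor);
  	return 0;
  }
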
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 0727e19..75482cb 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -501,6 +501,13 @@
 #define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */
 
 /*
+ * Contec products (http://www.contec.com)
+ * Submitted by Daniel Sangorrin
+ */
+#define CONTEC_VID		0x06CE	/* Vendor ID */
+#define CONTEC_COM1USBH_PID	0x8311	/* COM-1(USB)H */
+
+/*
  * Definitions for B&B Electronics products.
  */
 #define BANDB_VID		0x0856	/* B&B Electronics Vendor ID */
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 89fac36..f804acb 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -130,7 +130,7 @@
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	/* if we have a bulk endpoint, start reading from it */
-	if (serial->num_bulk_in) {
+	if (port->bulk_in_size) {
 		/* Start reading from the device */
 		usb_fill_bulk_urb(port->read_urb, serial->dev,
 				   usb_rcvbulkpipe(serial->dev,
@@ -159,10 +159,10 @@
 	dbg("%s - port %d", __func__, port->number);
 
 	if (serial->dev) {
-		/* shutdown any bulk reads that might be going on */
-		if (serial->num_bulk_out)
+		/* shutdown any bulk transfers that might be going on */
+		if (port->bulk_out_size)
 			usb_kill_urb(port->write_urb);
-		if (serial->num_bulk_in)
+		if (port->bulk_in_size)
 			usb_kill_urb(port->read_urb);
 	}
 }
@@ -333,15 +333,15 @@
 
 	dbg("%s - port %d", __func__, port->number);
 
+	/* only do something if we have a bulk out endpoint */
+	if (!port->bulk_out_size)
+		return -ENODEV;
+
 	if (count == 0) {
 		dbg("%s - write request of 0 bytes", __func__);
 		return 0;
 	}
 
-	/* only do something if we have a bulk out endpoint */
-	if (!serial->num_bulk_out)
-		return 0;
-
 	if (serial->type->max_in_flight_urbs)
 		return usb_serial_multi_urb_write(tty, port,
 						  buf, count);
@@ -364,14 +364,19 @@
 	int room = 0;
 
 	dbg("%s - port %d", __func__, port->number);
+
+	if (!port->bulk_out_size)
+		return 0;
+
 	spin_lock_irqsave(&port->lock, flags);
 	if (serial->type->max_in_flight_urbs) {
 		if (port->urbs_in_flight < serial->type->max_in_flight_urbs)
 			room = port->bulk_out_size *
 				(serial->type->max_in_flight_urbs -
 				 port->urbs_in_flight);
-	} else if (serial->num_bulk_out)
+	} else {
 		room = kfifo_avail(&port->write_fifo);
+	}
 	spin_unlock_irqrestore(&port->lock, flags);
 
 	dbg("%s - returns %d", __func__, room);
@@ -382,15 +387,18 @@
 {
 	struct usb_serial_port *port = tty->driver_data;
 	struct usb_serial *serial = port->serial;
-	int chars = 0;
 	unsigned long flags;
+	int chars;
 
 	dbg("%s - port %d", __func__, port->number);
 
+	if (!port->bulk_out_size)
+		return 0;
+
 	spin_lock_irqsave(&port->lock, flags);
 	if (serial->type->max_in_flight_urbs)
 		chars = port->tx_bytes_flight;
-	else if (serial->num_bulk_out)
+	else
 		chars = kfifo_len(&port->write_fifo);
 	spin_unlock_irqrestore(&port->lock, flags);
 
@@ -415,11 +423,13 @@
 			   ((serial->type->read_bulk_callback) ?
 			     serial->type->read_bulk_callback :
 			     usb_serial_generic_read_bulk_callback), port);
+
 	result = usb_submit_urb(urb, mem_flags);
-	if (result)
+	if (result && result != -EPERM) {
 		dev_err(&port->dev,
 			"%s - failed resubmitting read urb, error %d\n",
 							__func__, result);
+	}
 }
 EXPORT_SYMBOL_GPL(usb_serial_generic_resubmit_read_urb);
 
@@ -498,23 +508,18 @@
 		if (port->urbs_in_flight < 0)
 			port->urbs_in_flight = 0;
 		spin_unlock_irqrestore(&port->lock, flags);
-
-		if (status) {
-			dbg("%s - nonzero multi-urb write bulk status "
-				"received: %d", __func__, status);
-			return;
-		}
 	} else {
 		port->write_urb_busy = 0;
 
-		if (status) {
-			dbg("%s - nonzero multi-urb write bulk status "
-				"received: %d", __func__, status);
+		if (status)
 			kfifo_reset_out(&port->write_fifo);
-		} else
+		else
 			usb_serial_generic_write_start(port);
 	}
 
+	if (status)
+		dbg("%s - non-zero urb status: %d", __func__, status);
+
 	usb_serial_port_softint(port);
 }
 EXPORT_SYMBOL_GPL(usb_serial_generic_write_bulk_callback);
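
These generic-serial hunks switch from device-wide endpoint counts (serial->num_bulk_in/out) to per-port sizes, and make write() fail with -ENODEV instead of silently claiming success when the port has no bulk-out endpoint. The gatekeeping pattern, sketched standalone with a mock port:

  #include <errno.h>
  #include <stddef.h>
  #include <stdio.h>

  struct mock_port { size_t bulk_out_size; };	/* 0: no bulk-out ep */

  static int generic_write(struct mock_port *port,
  			 const char *buf, size_t count)
  {
  	(void)buf;
  	if (!port->bulk_out_size)
  		return -ENODEV;	/* patch: error, not a silent 0 */
  	if (count == 0)
  		return 0;
  	/* ... queue the data into the write fifo / urbs here ... */
  	return (int)count;
  }

  int main(void)
  {
  	struct mock_port no_out = { 0 }, out = { 64 };

  	/* prints "-19 1" on Linux, where ENODEV is 19 */
  	printf("%d %d\n", generic_write(&no_out, "x", 1),
  	       generic_write(&out, "x", 1));
  	return 0;
  }
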
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 847b805..950cb31 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -288,7 +288,9 @@
 
 #define QUALCOMM_VENDOR_ID			0x05C6
 
-#define MAXON_VENDOR_ID				0x16d8
+#define CMOTECH_VENDOR_ID			0x16d8
+#define CMOTECH_PRODUCT_6008			0x6008
+#define CMOTECH_PRODUCT_6280			0x6280
 
 #define TELIT_VENDOR_ID				0x1bc7
 #define TELIT_PRODUCT_UC864E			0x1003
@@ -309,6 +311,7 @@
 #define DLINK_VENDOR_ID				0x1186
 #define DLINK_PRODUCT_DWM_652			0x3e04
 #define DLINK_PRODUCT_DWM_652_U5		0xce16
+#define DLINK_PRODUCT_DWM_652_U5A		0xce1e
 
 #define QISDA_VENDOR_ID				0x1da5
 #define QISDA_PRODUCT_H21_4512			0x4512
@@ -332,6 +335,24 @@
 #define ALCATEL_VENDOR_ID			0x1bbb
 #define ALCATEL_PRODUCT_X060S			0x0000
 
+#define PIRELLI_VENDOR_ID			0x1266
+#define PIRELLI_PRODUCT_C100_1			0x1002
+#define PIRELLI_PRODUCT_C100_2			0x1003
+#define PIRELLI_PRODUCT_1004			0x1004
+#define PIRELLI_PRODUCT_1005			0x1005
+#define PIRELLI_PRODUCT_1006			0x1006
+#define PIRELLI_PRODUCT_1007			0x1007
+#define PIRELLI_PRODUCT_1008			0x1008
+#define PIRELLI_PRODUCT_1009			0x1009
+#define PIRELLI_PRODUCT_100A			0x100a
+#define PIRELLI_PRODUCT_100B			0x100b
+#define PIRELLI_PRODUCT_100C			0x100c
+#define PIRELLI_PRODUCT_100D			0x100d
+#define PIRELLI_PRODUCT_100E			0x100e
+#define PIRELLI_PRODUCT_100F			0x100f
+#define PIRELLI_PRODUCT_1011			0x1011
+#define PIRELLI_PRODUCT_1012			0x1012
+
 /* Airplus products */
 #define AIRPLUS_VENDOR_ID			0x1011
 #define AIRPLUS_PRODUCT_MCD650			0x3198
@@ -547,7 +568,8 @@
 	{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
 	{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
-	{ USB_DEVICE(MAXON_VENDOR_ID, 0x6280) }, /* BP3-USB & BP3-EXT HSDPA */
+	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */
+	{ USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864E) },
 	{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UC864G) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -659,6 +681,7 @@
 	{ USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
 	{ USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
 	{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
+	{ USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5A) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
 	{ USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
@@ -666,7 +689,6 @@
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_G450) },
 	{ USB_DEVICE(TOSHIBA_VENDOR_ID, TOSHIBA_PRODUCT_HSDPA_MINICARD ) }, /* Toshiba 3G HSDPA == Novatel Expedite EU870D MiniCard */
 	{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
-	{ USB_DEVICE(ALINK_VENDOR_ID, 0xce16) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S) },
 	{ USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) },
@@ -675,6 +697,24 @@
   	  .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
   	},
 	{ USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
+	/* Pirelli */
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1004)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1005)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1006)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1007)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1008)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1009)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100A)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100B) },
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100C) },
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100D) },
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100E) },
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_100F) },
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1011)},
+	{ USB_DEVICE(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_1012)},
+
 	{ } /* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, option_ids);
@@ -798,12 +838,19 @@
 			const struct usb_device_id *id)
 {
 	struct option_intf_private *data;
+
 	/* D-Link DWM 652 still exposes CD-Rom emulation interface in modem mode */
 	if (serial->dev->descriptor.idVendor == DLINK_VENDOR_ID &&
 		serial->dev->descriptor.idProduct == DLINK_PRODUCT_DWM_652 &&
 		serial->interface->cur_altsetting->desc.bInterfaceClass == 0x8)
 		return -ENODEV;
 
+	/* Bandrich and Pirelli modem/AT-command interfaces have class 0xff */
+	if ((serial->dev->descriptor.idVendor == BANDRICH_VENDOR_ID ||
+		serial->dev->descriptor.idVendor == PIRELLI_VENDOR_ID) &&
+		serial->interface->cur_altsetting->desc.bInterfaceClass != 0xff)
+		return -ENODEV;
+
 	data = serial->private = kzalloc(sizeof(struct option_intf_private), GFP_KERNEL);
 	if (!data)
 		return -ENOMEM;
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 310ff6e..53a2d5a 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -47,6 +47,35 @@
 	{USB_DEVICE(0x05c6, 0x9221)},	/* Generic Gobi QDL device */
 	{USB_DEVICE(0x05c6, 0x9231)},	/* Generic Gobi QDL device */
 	{USB_DEVICE(0x1f45, 0x0001)},	/* Unknown Gobi QDL device */
+	{USB_DEVICE(0x413c, 0x8185)},	/* Dell Gobi 2000 QDL device (N0218, VU936) */
+	{USB_DEVICE(0x413c, 0x8186)},	/* Dell Gobi 2000 Modem device (N0218, VU936) */
+	{USB_DEVICE(0x05c6, 0x9224)},	/* Sony Gobi 2000 QDL device (N0279, VU730) */
+	{USB_DEVICE(0x05c6, 0x9225)},	/* Sony Gobi 2000 Modem device (N0279, VU730) */
+	{USB_DEVICE(0x05c6, 0x9244)},	/* Samsung Gobi 2000 QDL device (VL176) */
+	{USB_DEVICE(0x05c6, 0x9245)},	/* Samsung Gobi 2000 Modem device (VL176) */
+	{USB_DEVICE(0x03f0, 0x241d)},	/* HP Gobi 2000 QDL device (VP412) */
+	{USB_DEVICE(0x03f0, 0x251d)},	/* HP Gobi 2000 Modem device (VP412) */
+	{USB_DEVICE(0x05c6, 0x9214)},	/* Acer Gobi 2000 QDL device (VP413) */
+	{USB_DEVICE(0x05c6, 0x9215)},	/* Acer Gobi 2000 Modem device (VP413) */
+	{USB_DEVICE(0x05c6, 0x9264)},	/* Asus Gobi 2000 QDL device (VR305) */
+	{USB_DEVICE(0x05c6, 0x9265)},	/* Asus Gobi 2000 Modem device (VR305) */
+	{USB_DEVICE(0x05c6, 0x9234)},	/* Top Global Gobi 2000 QDL device (VR306) */
+	{USB_DEVICE(0x05c6, 0x9235)},	/* Top Global Gobi 2000 Modem device (VR306) */
+	{USB_DEVICE(0x05c6, 0x9274)},	/* iRex Technologies Gobi 2000 QDL device (VR307) */
+	{USB_DEVICE(0x05c6, 0x9275)},	/* iRex Technologies Gobi 2000 Modem device (VR307) */
+	{USB_DEVICE(0x1199, 0x9000)},	/* Sierra Wireless Gobi 2000 QDL device (VT773) */
+	{USB_DEVICE(0x1199, 0x9001)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9002)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9003)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9004)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9005)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9006)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9007)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9008)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x9009)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x1199, 0x900a)},	/* Sierra Wireless Gobi 2000 Modem device (VT773) */
+	{USB_DEVICE(0x16d8, 0x8001)},	/* CMDTech Gobi 2000 QDL device (VU922) */
+	{USB_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */
 	{ }				/* Terminating entry */
 };
 MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 98b549b..ccf1dbb 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -374,6 +374,15 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_FIX_INQUIRY),
 
+/* Reported by Ondrej Zary <linux@rainbow-software.org>
+ * The device reports one sector more and breaks when that sector is accessed
+ */
+UNUSUAL_DEV(  0x04ce, 0x0002, 0x026c, 0x026c,
+		"ScanLogic",
+		"SL11R-IDE",
+		US_SC_DEVICE, US_PR_DEVICE, NULL,
+		US_FL_FIX_CAPACITY),
+
 /* Reported by Kriston Fincher <kriston@airmail.net>
  * Patch submitted by Sean Millichamp <sean@bruenor.org>
  * This is to support the Panasonic PalmCam PV-SD4090
@@ -1380,20 +1389,6 @@
 		US_SC_DEVICE, US_PR_DEVICE, NULL,
 		US_FL_IGNORE_RESIDUE ),
 
-/* Jeremy Katz <katzj@redhat.com>:
- * The Blackberry Pearl can run in two modes; a usb-storage only mode
- * and a mode that allows access via mass storage and to its database.
- * The berry_charge module will set the device to dual mode and thus we
- * should ignore its native mode if that module is built
- */
-#ifdef CONFIG_USB_BERRY_CHARGE
-UNUSUAL_DEV(  0x0fca, 0x0006, 0x0001, 0x0001,
-		"RIM",
-		"Blackberry Pearl",
-		US_SC_DEVICE, US_PR_DEVICE, NULL,
-		US_FL_IGNORE_DEVICE ),
-#endif
-
 /* Reported by Michael Stattmann <michael@stattmann.com> */
 UNUSUAL_DEV(  0x0fce, 0xd008, 0x0000, 0x0000,
 		"Sony Ericsson",
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index e7eeb63..b409c228f 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -891,7 +891,7 @@
 }
 
 /** USB device ID's that we handle */
-static struct usb_device_id hwarc_id_table[] = {
+static const struct usb_device_id hwarc_id_table[] = {
 	/* D-Link DUB-1210 */
 	{ USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3d02, 0xe0, 0x01, 0x02),
 	  .driver_info = WUSB_QUIRK_WHCI_CMD_EVT },
diff --git a/drivers/uwb/i1480/dfu/usb.c b/drivers/uwb/i1480/dfu/usb.c
index 0bb665a..a99e211 100644
--- a/drivers/uwb/i1480/dfu/usb.c
+++ b/drivers/uwb/i1480/dfu/usb.c
@@ -120,8 +120,7 @@
 		result = usb_control_msg(
 			i1480_usb->usb_dev, usb_sndctrlpipe(i1480_usb->usb_dev, 0),
 			0xf0, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			cpu_to_le16(memory_address & 0xffff),
-			cpu_to_le16((memory_address >> 16) & 0xffff),
+			memory_address,	(memory_address >> 16),
 			i1480->cmd_buf, buffer_size, 100 /* FIXME: arbitrary */);
 		if (result < 0)
 			break;
@@ -166,8 +165,7 @@
 		result = usb_control_msg(
 			i1480_usb->usb_dev, usb_rcvctrlpipe(i1480_usb->usb_dev, 0),
 			0xf0, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			cpu_to_le16(itr_addr & 0xffff),
-			cpu_to_le16((itr_addr >> 16) & 0xffff),
+			itr_addr, (itr_addr >> 16),
 			i1480->cmd_buf + itr, itr_size,
 			100 /* FIXME: arbitrary */);
 		if (result < 0) {
@@ -413,6 +411,10 @@
 	return result;
 }
 
+MODULE_FIRMWARE("i1480-pre-phy-0.0.bin");
+MODULE_FIRMWARE("i1480-usb-0.0.bin");
+MODULE_FIRMWARE("i1480-phy-0.0.bin");
+
 #define i1480_USB_DEV(v, p)				\
 {							\
 	.match_flags = USB_DEVICE_ID_MATCH_DEVICE	\
@@ -430,7 +432,7 @@
 
 
 /** USB device ID's that we handle */
-static struct usb_device_id i1480_usb_id_table[] = {
+static const struct usb_device_id i1480_usb_id_table[] = {
 	i1480_USB_DEV(0x8086, 0xdf3b),
 	i1480_USB_DEV(0x15a9, 0x0005),
 	i1480_USB_DEV(0x07d1, 0x3802),
diff --git a/drivers/uwb/wlp/messages.c b/drivers/uwb/wlp/messages.c
index aa42fce..7516486 100644
--- a/drivers/uwb/wlp/messages.c
+++ b/drivers/uwb/wlp/messages.c
@@ -259,6 +259,63 @@
 }
 
 
+static ssize_t wlp_get_attribute(struct wlp *wlp, u16 type_code,
+	struct wlp_attr_hdr *attr_hdr, void *value, ssize_t value_len,
+	ssize_t buflen)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	ssize_t attr_len = sizeof(*attr_hdr) + value_len;
+	if (buflen < 0)
+		return -EINVAL;
+	if (buflen < attr_len) {
+		dev_err(dev, "WLP: Not enough space in buffer to parse"
+			" attribute field. Need %d, received %zu\n",
+			(int)attr_len, buflen);
+		return -EIO;
+	}
+	if (wlp_check_attr_hdr(wlp, attr_hdr, type_code, value_len) < 0) {
+		dev_err(dev, "WLP: Header verification failed.\n");
+		return -EINVAL;
+	}
+	memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), value_len);
+	return attr_len;
+}
+
+static ssize_t wlp_vget_attribute(struct wlp *wlp, u16 type_code,
+	struct wlp_attr_hdr *attr_hdr, void *value, ssize_t max_value_len,
+	ssize_t buflen)
+{
+	struct device *dev = &wlp->rc->uwb_dev.dev;
+	size_t len;
+	if (buflen < 0)
+		return -EINVAL;
+	if (buflen < sizeof(*attr_hdr)) {
+		dev_err(dev, "WLP: Not enough space in buffer to parse"
+			" header.\n");
+		return -EIO;
+	}
+	if (le16_to_cpu(attr_hdr->type) != type_code) {
+		dev_err(dev, "WLP: Unexpected attribute type. Got %u, "
+			"expected %u.\n", le16_to_cpu(attr_hdr->type),
+			type_code);
+		return -EINVAL;
+	}
+	len = le16_to_cpu(attr_hdr->length);
+	if (len > max_value_len) {
+		dev_err(dev, "WLP: Attribute larger than maximum "
+			"allowed. Received %zu, max is %d.\n", len,
+			(int)max_value_len);
+		return -EFBIG;
+	}
+	if (buflen < sizeof(*attr_hdr) + len) {
+		dev_err(dev, "WLP: Not enough space in buffer to parse "
+			"variable data.\n");
+		return -EIO;
+	}
+	memcpy(value, (void *)attr_hdr + sizeof(*attr_hdr), len);
+	return sizeof(*attr_hdr) + len;
+}
+
 /**
  * Get value of attribute from fixed size attribute field.
  *
@@ -274,22 +331,8 @@
 ssize_t wlp_get_##name(struct wlp *wlp, struct wlp_attr_##name *attr,	\
 		      type *value, ssize_t buflen)			\
 {									\
-	struct device *dev = &wlp->rc->uwb_dev.dev;			\
-	if (buflen < 0)							\
-		return -EINVAL;						\
-	if (buflen < sizeof(*attr)) {					\
-		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
-			" attribute field. Need %d, received %zu\n",	\
-			(int)sizeof(*attr), buflen);			\
-		return -EIO;						\
-	}								\
-	if (wlp_check_attr_hdr(wlp, &attr->hdr, type_code,		\
-			       sizeof(attr->name)) < 0) {		\
-		dev_err(dev, "WLP: Header verification failed. \n");	\
-		return -EINVAL;						\
-	}								\
-	*value = attr->name;						\
-	return sizeof(*attr);						\
+	return wlp_get_attribute(wlp, (type_code), &attr->hdr,		\
+				 value, sizeof(*value), buflen);	\
 }
 
 #define wlp_get_sparse(type, type_code, name) \
@@ -313,35 +356,8 @@
 			      struct wlp_attr_##name *attr,		\
 			      type_val *value, ssize_t buflen)		\
 {									\
-	struct device *dev = &wlp->rc->uwb_dev.dev;			\
-	size_t len;							\
-	if (buflen < 0)							\
-		return -EINVAL;						\
-	if (buflen < sizeof(*attr)) {					\
-		dev_err(dev, "WLP: Not enough space in buffer to parse"	\
-			" header.\n");					\
-		return -EIO;						\
-	}								\
-	if (le16_to_cpu(attr->hdr.type) != type_code) {			\
-		dev_err(dev, "WLP: Unexpected attribute type. Got %u, "	\
-			"expected %u.\n", le16_to_cpu(attr->hdr.type),	\
-			type_code);					\
-		return -EINVAL;						\
-	}								\
-	len = le16_to_cpu(attr->hdr.length);				\
-	if (len > max) {						\
-		dev_err(dev, "WLP: Attribute larger than maximum "	\
-			"allowed. Received %zu, max is %d.\n", len,	\
-			(int)max);					\
-		return -EFBIG;						\
-	}								\
-	if (buflen < sizeof(*attr) + len) {				\
-		dev_err(dev, "WLP: Not enough space in buffer to parse "\
-			"variable data.\n");				\
-		return -EIO;						\
-	}								\
-	memcpy(value, (void *) attr + sizeof(*attr), len);		\
-	return sizeof(*attr) + len;					\
+	return wlp_vget_attribute(wlp, (type_code), &attr->hdr, 	\
+			      value, (max), buflen);			\
 }
 
 wlp_get(u8, WLP_ATTR_WLP_VER, version)
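
The WLP change is macro deduplication: the body every wlp_get_##name() expansion used to carry is hoisted into wlp_get_attribute()/wlp_vget_attribute(), leaving the macros as thin type-safe wrappers. The shape of that refactor in a self-contained sketch (simplified header type; the real bounds checks are in the patch above):

  #include <stdio.h>
  #include <string.h>

  struct hdr { unsigned short type, length; };

  /* Shared body: validate, then copy the fixed-size value out. */
  static long get_attr(unsigned short type_code, const struct hdr *h,
  		     void *value, long value_len, long buflen)
  {
  	if (buflen < (long)sizeof(*h) + value_len)
  		return -1;			/* truncated buffer */
  	if (h->type != type_code)
  		return -2;			/* wrong attribute */
  	memcpy(value, h + 1, (size_t)value_len);
  	return (long)sizeof(*h) + value_len;
  }

  /* Thin wrapper keeps per-type signatures without duplicated code. */
  #define def_get(type, type_code, name)				\
  static long get_##name(const struct hdr *h, type *value,	\
  		       long buflen)				\
  {								\
  	return get_attr((type_code), h, value,			\
  			sizeof(*value), buflen);		\
  }

  def_get(unsigned char, 1, version)

  int main(void)
  {
  	struct { struct hdr h; unsigned char value; } msg =
  		{ { 1, 1 }, 42 };
  	unsigned char v = 0;
  	long ret = get_version(&msg.h, &v, sizeof(msg));

  	printf("ret=%ld v=%u\n", ret, v);	/* ret=5 v=42 */
  	return 0;
  }
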
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index dabe804..6e16244 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -914,7 +914,7 @@
 
 config FB_XVR1000
 	bool "Sun XVR-1000 support"
-	depends on SPARC64
+	depends on (FB = y) && SPARC64
 	select FB_CFB_FILLRECT
 	select FB_CFB_COPYAREA
 	select FB_CFB_IMAGEBLIT
@@ -1881,7 +1881,7 @@
 
 config FB_SH_MOBILE_LCDC
 	tristate "SuperH Mobile LCDC framebuffer support"
-	depends on FB && SUPERH && HAVE_CLK
+	depends on FB && (SUPERH || ARCH_SHMOBILE) && HAVE_CLK
 	select FB_SYS_FILLRECT
 	select FB_SYS_COPYAREA
 	select FB_SYS_IMAGEBLIT
diff --git a/drivers/video/amba-clcd.c b/drivers/video/amba-clcd.c
index a21efcd..afe21e6 100644
--- a/drivers/video/amba-clcd.c
+++ b/drivers/video/amba-clcd.c
@@ -65,16 +65,16 @@
 	if (fb->board->disable)
 		fb->board->disable(fb);
 
-	val = readl(fb->regs + CLCD_CNTL);
+	val = readl(fb->regs + fb->off_cntl);
 	if (val & CNTL_LCDPWR) {
 		val &= ~CNTL_LCDPWR;
-		writel(val, fb->regs + CLCD_CNTL);
+		writel(val, fb->regs + fb->off_cntl);
 
 		clcdfb_sleep(20);
 	}
 	if (val & CNTL_LCDEN) {
 		val &= ~CNTL_LCDEN;
-		writel(val, fb->regs + CLCD_CNTL);
+		writel(val, fb->regs + fb->off_cntl);
 	}
 
 	/*
@@ -94,7 +94,7 @@
 	 * Bring up by first enabling..
 	 */
 	cntl |= CNTL_LCDEN;
-	writel(cntl, fb->regs + CLCD_CNTL);
+	writel(cntl, fb->regs + fb->off_cntl);
 
 	clcdfb_sleep(20);
 
@@ -102,7 +102,7 @@
 	 * and now apply power.
 	 */
 	cntl |= CNTL_LCDPWR;
-	writel(cntl, fb->regs + CLCD_CNTL);
+	writel(cntl, fb->regs + fb->off_cntl);
 
 	/*
 	 * finally, enable the interface.
@@ -233,7 +233,7 @@
 		readl(fb->regs + CLCD_TIM0), readl(fb->regs + CLCD_TIM1),
 		readl(fb->regs + CLCD_TIM2), readl(fb->regs + CLCD_TIM3),
 		readl(fb->regs + CLCD_UBAS), readl(fb->regs + CLCD_LBAS),
-		readl(fb->regs + CLCD_IENB), readl(fb->regs + CLCD_CNTL));
+		readl(fb->regs + fb->off_ienb), readl(fb->regs + fb->off_cntl));
 #endif
 
 	return 0;
@@ -345,6 +345,23 @@
 {
 	int ret;
 
+	/*
+	 * ARM PL111 always has IENB at 0x1c; it's only PL110
+	 * which is reversed on some platforms.
+	 */
+	if (amba_manf(fb->dev) == 0x41 && amba_part(fb->dev) == 0x111) {
+		fb->off_ienb = CLCD_PL111_IENB;
+		fb->off_cntl = CLCD_PL111_CNTL;
+	} else {
+#ifdef CONFIG_ARCH_VERSATILE
+		fb->off_ienb = CLCD_PL111_IENB;
+		fb->off_cntl = CLCD_PL111_CNTL;
+#else
+		fb->off_ienb = CLCD_PL110_IENB;
+		fb->off_cntl = CLCD_PL110_CNTL;
+#endif
+	}
+
 	fb->clk = clk_get(&fb->dev->dev, NULL);
 	if (IS_ERR(fb->clk)) {
 		ret = PTR_ERR(fb->clk);
@@ -416,7 +433,7 @@
 	/*
 	 * Ensure interrupts are disabled.
 	 */
-	writel(0, fb->regs + CLCD_IENB);
+	writel(0, fb->regs + fb->off_ienb);
 
 	fb_set_var(&fb->fb, &fb->fb.var);
 
diff --git a/drivers/video/atmel_lcdfb.c b/drivers/video/atmel_lcdfb.c
index 3d886c6..11de3bf 100644
--- a/drivers/video/atmel_lcdfb.c
+++ b/drivers/video/atmel_lcdfb.c
@@ -117,6 +117,7 @@
 
 static void init_backlight(struct atmel_lcdfb_info *sinfo)
 {
+	struct backlight_properties props;
 	struct backlight_device	*bl;
 
 	sinfo->bl_power = FB_BLANK_UNBLANK;
@@ -124,8 +125,10 @@
 	if (sinfo->backlight)
 		return;
 
-	bl = backlight_device_register("backlight", &sinfo->pdev->dev,
-			sinfo, &atmel_lcdc_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 0xff;
+	bl = backlight_device_register("backlight", &sinfo->pdev->dev, sinfo,
+				       &atmel_lcdc_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&sinfo->pdev->dev, "error %ld on backlight register\n",
 				PTR_ERR(bl));
@@ -135,7 +138,6 @@
 
 	bl->props.power = FB_BLANK_UNBLANK;
 	bl->props.fb_blank = FB_BLANK_UNBLANK;
-	bl->props.max_brightness = 0xff;
 	bl->props.brightness = atmel_bl_get_brightness(bl);
 }
 
diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
index 9ee67d6..a489be0 100644
--- a/drivers/video/aty/aty128fb.c
+++ b/drivers/video/aty/aty128fb.c
@@ -1802,6 +1802,7 @@
 
 static void aty128_bl_init(struct aty128fb_par *par)
 {
+	struct backlight_properties props;
 	struct fb_info *info = pci_get_drvdata(par->pdev);
 	struct backlight_device *bd;
 	char name[12];
@@ -1817,7 +1818,10 @@
 
 	snprintf(name, sizeof(name), "aty128bl%d", info->node);
 
-	bd = backlight_device_register(name, info->dev, par, &aty128_bl_data);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, info->dev, par, &aty128_bl_data,
+				       &props);
 	if (IS_ERR(bd)) {
 		info->bl_dev = NULL;
 		printk(KERN_WARNING "aty128: Backlight registration failed\n");
@@ -1829,7 +1833,6 @@
 		 63 * FB_BACKLIGHT_MAX / MAX_LEVEL,
 		219 * FB_BACKLIGHT_MAX / MAX_LEVEL);
 
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
index e45ab8d..29d7285 100644
--- a/drivers/video/aty/atyfb_base.c
+++ b/drivers/video/aty/atyfb_base.c
@@ -2232,6 +2232,7 @@
 
 static void aty_bl_init(struct atyfb_par *par)
 {
+	struct backlight_properties props;
 	struct fb_info *info = pci_get_drvdata(par->pdev);
 	struct backlight_device *bd;
 	char name[12];
@@ -2243,7 +2244,10 @@
 
 	snprintf(name, sizeof(name), "atybl%d", info->node);
 
-	bd = backlight_device_register(name, info->dev, par, &aty_bl_data);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, info->dev, par, &aty_bl_data,
+				       &props);
 	if (IS_ERR(bd)) {
 		info->bl_dev = NULL;
 		printk(KERN_WARNING "aty: Backlight registration failed\n");
@@ -2255,7 +2259,6 @@
 			    0x3F * FB_BACKLIGHT_MAX / MAX_LEVEL,
 			    0xFF * FB_BACKLIGHT_MAX / MAX_LEVEL);
 
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
diff --git a/drivers/video/aty/radeon_backlight.c b/drivers/video/aty/radeon_backlight.c
index fa1198c..9fc8c66 100644
--- a/drivers/video/aty/radeon_backlight.c
+++ b/drivers/video/aty/radeon_backlight.c
@@ -134,6 +134,7 @@
 
 void radeonfb_bl_init(struct radeonfb_info *rinfo)
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 	struct radeon_bl_privdata *pdata;
 	char name[12];
@@ -155,7 +156,10 @@
 
 	snprintf(name, sizeof(name), "radeonbl%d", rinfo->info->node);
 
-	bd = backlight_device_register(name, rinfo->info->dev, pdata, &radeon_bl_data);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, rinfo->info->dev, pdata,
+				       &radeon_bl_data, &props);
 	if (IS_ERR(bd)) {
 		rinfo->info->bl_dev = NULL;
 		printk("radeonfb: Backlight registration failed\n");
@@ -185,7 +189,6 @@
 		 63 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL,
 		217 * FB_BACKLIGHT_MAX / MAX_RADEON_LEVEL);
 
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
diff --git a/drivers/video/backlight/88pm860x_bl.c b/drivers/video/backlight/88pm860x_bl.c
index b8f705c..93e25c7 100644
--- a/drivers/video/backlight/88pm860x_bl.c
+++ b/drivers/video/backlight/88pm860x_bl.c
@@ -187,6 +187,7 @@
 	struct pm860x_backlight_data *data;
 	struct backlight_device *bl;
 	struct resource *res;
+	struct backlight_properties props;
 	unsigned char value;
 	char name[MFD_NAME_SIZE];
 	int ret;
@@ -223,14 +224,15 @@
 		return -EINVAL;
 	}
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = MAX_BRIGHTNESS;
 	bl = backlight_device_register(name, &pdev->dev, data,
-					&pm860x_backlight_ops);
+					&pm860x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		kfree(data);
 		return PTR_ERR(bl);
 	}
-	bl->props.max_brightness = MAX_BRIGHTNESS;
 	bl->props.brightness = MAX_BRIGHTNESS;
 
 	platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 0c77fc6..c025c84 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@
 	  Say y here to support the LCD panels usually found on SHARP
 	  corgi (C7x0) and spitz (Cxx00) models.
 
+config LCD_L4F00242T03
+	tristate "Epson L4F00242T03 LCD"
+	depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
+	help
+	  SPI driver for the Epson L4F00242T03 LCD. It provides basic support
+	  for initialization and for powering the LCD up/down via sysfs.
+
 config LCD_LMS283GF05
 	tristate "Samsung LMS283GF05 LCD"
 	depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 6c704d4..09d1f14 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
 obj-$(CONFIG_LCD_CLASS_DEVICE)     += lcd.o
 obj-$(CONFIG_LCD_CORGI)		   += corgi_lcd.o
 obj-$(CONFIG_LCD_HP700)		   += jornada720_lcd.o
+obj-$(CONFIG_LCD_L4F00242T03)	   += l4f00242t03.o
 obj-$(CONFIG_LCD_LMS283GF05)	   += lms283gf05.o
 obj-$(CONFIG_LCD_LTV350QV)	   += ltv350qv.o
 obj-$(CONFIG_LCD_ILI9320)	   += ili9320.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 86d95c22..5183f0e 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -278,6 +278,7 @@
 
 static int __devinit adp5520_bl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct backlight_device *bl;
 	struct adp5520_bl *data;
 	int ret = 0;
@@ -300,17 +301,17 @@
 
 	mutex_init(&data->lock);
 
-	bl = backlight_device_register(pdev->name, data->master,
-			data, &adp5520_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = ADP5020_MAX_BRIGHTNESS;
+	bl = backlight_device_register(pdev->name, data->master, data,
+				       &adp5520_bl_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		kfree(data);
 		return PTR_ERR(bl);
 	}
 
-	bl->props.max_brightness =
-		bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
-
+	bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
 	if (data->pdata->en_ambl_sens)
 		ret = sysfs_create_group(&bl->dev.kobj,
 			&adp5520_bl_attr_group);
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
index d769b0b..b0624b9 100644
--- a/drivers/video/backlight/adx_bl.c
+++ b/drivers/video/backlight/adx_bl.c
@@ -56,7 +56,7 @@
 	return brightness & 0xff;
 }
 
-static int adx_backlight_check_fb(struct fb_info *fb)
+static int adx_backlight_check_fb(struct backlight_device *bldev, struct fb_info *fb)
 {
 	return 1;
 }
@@ -70,6 +70,7 @@
 
 static int __devinit adx_backlight_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct backlight_device *bldev;
 	struct resource *res;
 	struct adxbl *bl;
@@ -101,14 +102,15 @@
 		goto out;
 	}
 
-	bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl,
-			&adx_backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 0xff;
+	bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
+					  bl, &adx_backlight_ops, &props);
 	if (!bldev) {
 		ret = -ENOMEM;
 		goto out;
 	}
 
-	bldev->props.max_brightness = 0xff;
 	bldev->props.brightness = 0xff;
 	bldev->props.power = FB_BLANK_UNBLANK;
 
diff --git a/drivers/video/backlight/atmel-pwm-bl.c b/drivers/video/backlight/atmel-pwm-bl.c
index f625ffc..2d97605 100644
--- a/drivers/video/backlight/atmel-pwm-bl.c
+++ b/drivers/video/backlight/atmel-pwm-bl.c
@@ -120,6 +120,7 @@
 
 static int atmel_pwm_bl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	const struct atmel_pwm_bl_platform_data *pdata;
 	struct backlight_device *bldev;
 	struct atmel_pwm_bl *pwmbl;
@@ -165,8 +166,10 @@
 			goto err_free_gpio;
 	}
 
-	bldev = backlight_device_register("atmel-pwm-bl",
-			&pdev->dev, pwmbl, &atmel_pwm_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
+	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
+					  &atmel_pwm_bl_ops, &props);
 	if (IS_ERR(bldev)) {
 		retval = PTR_ERR(bldev);
 		goto err_free_gpio;
@@ -178,7 +181,6 @@
 
 	/* Power up the backlight by default at middle intensity. */
 	bldev->props.power = FB_BLANK_UNBLANK;
-	bldev->props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
 	bldev->props.brightness = bldev->props.max_brightness / 2;
 
 	retval = atmel_pwm_bl_init_pwm(pwmbl);
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 18829cf..68bb838 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -38,7 +38,7 @@
 	mutex_lock(&bd->ops_lock);
 	if (bd->ops)
 		if (!bd->ops->check_fb ||
-		    bd->ops->check_fb(evdata->info)) {
+		    bd->ops->check_fb(bd, evdata->info)) {
 			bd->props.fb_blank = *(int *)evdata->data;
 			if (bd->props.fb_blank == FB_BLANK_UNBLANK)
 				bd->props.state &= ~BL_CORE_FBBLANK;
@@ -269,7 +269,8 @@
  * ERR_PTR() or a pointer to the newly allocated device.
  */
 struct backlight_device *backlight_device_register(const char *name,
-		struct device *parent, void *devdata, const struct backlight_ops *ops)
+	struct device *parent, void *devdata, const struct backlight_ops *ops,
+	const struct backlight_properties *props)
 {
 	struct backlight_device *new_bd;
 	int rc;
@@ -289,6 +290,11 @@
 	dev_set_name(&new_bd->dev, name);
 	dev_set_drvdata(&new_bd->dev, devdata);
 
+	/* Set default properties */
+	if (props)
+		memcpy(&new_bd->props, props,
+		       sizeof(struct backlight_properties));
+
 	rc = device_register(&new_bd->dev);
 	if (rc) {
 		kfree(new_bd);
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index b4bcf80..73bdd84 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -533,6 +533,7 @@
 
 static int __devinit corgi_lcd_probe(struct spi_device *spi)
 {
+	struct backlight_properties props;
 	struct corgi_lcd_platform_data *pdata = spi->dev.platform_data;
 	struct corgi_lcd *lcd;
 	int ret = 0;
@@ -559,13 +560,14 @@
 	lcd->power = FB_BLANK_POWERDOWN;
 	lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
 
-	lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev,
-					lcd, &corgi_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = pdata->max_intensity;
+	lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd,
+						&corgi_bl_ops, &props);
 	if (IS_ERR(lcd->bl_dev)) {
 		ret = PTR_ERR(lcd->bl_dev);
 		goto err_unregister_lcd;
 	}
-	lcd->bl_dev->props.max_brightness = pdata->max_intensity;
 	lcd->bl_dev->props.brightness = pdata->default_intensity;
 	lcd->bl_dev->props.power = FB_BLANK_UNBLANK;
 
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index da86db4..1cce6031 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -170,6 +170,7 @@
 
 static int cr_backlight_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct backlight_device *bdp;
 	struct lcd_device *ldp;
 	struct cr_panel *crp;
@@ -190,8 +191,9 @@
 		return -ENODEV;
 	}
 
-	bdp = backlight_device_register("cr-backlight",
-					&pdev->dev, NULL, &cr_backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	bdp = backlight_device_register("cr-backlight", &pdev->dev, NULL,
+					&cr_backlight_ops, &props);
 	if (IS_ERR(bdp)) {
 		pci_dev_put(lpc_dev);
 		return PTR_ERR(bdp);
@@ -220,9 +222,7 @@
 	crp->cr_lcd_device = ldp;
 	crp->cr_backlight_device->props.power = FB_BLANK_UNBLANK;
 	crp->cr_backlight_device->props.brightness = 0;
-	crp->cr_backlight_device->props.max_brightness = 0;
 	cr_backlight_set_intensity(crp->cr_backlight_device);
-
 	cr_lcd_set_power(crp->cr_lcd_device, FB_BLANK_UNBLANK);
 
 	platform_set_drvdata(pdev, crp);
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 74cdc64..686e4a7 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -105,6 +105,7 @@
 	struct da9034_backlight_pdata *pdata = pdev->dev.platform_data;
 	struct da903x_backlight_data *data;
 	struct backlight_device *bl;
+	struct backlight_properties props;
 	int max_brightness;
 
 	data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -134,15 +135,15 @@
 		da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
 				DA9034_WLED_ISET(pdata->output_current));
 
-	bl = backlight_device_register(pdev->name, data->da903x_dev,
-			data, &da903x_backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = max_brightness;
+	bl = backlight_device_register(pdev->name, data->da903x_dev, data,
+				       &da903x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		kfree(data);
 		return PTR_ERR(bl);
 	}
 
-	bl->props.max_brightness = max_brightness;
 	bl->props.brightness = max_brightness;
 
 	platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index e6d348e..312ca61 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -78,6 +78,7 @@
 
 static int genericbl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct generic_bl_info *machinfo = pdev->dev.platform_data;
 	const char *name = "generic-bl";
 	struct backlight_device *bd;
@@ -89,14 +90,15 @@
 	if (machinfo->name)
 		name = machinfo->name;
 
-	bd = backlight_device_register (name,
-		&pdev->dev, NULL, &genericbl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = machinfo->max_intensity;
+	bd = backlight_device_register(name, &pdev->dev, NULL, &genericbl_ops,
+				       &props);
 	if (IS_ERR (bd))
 		return PTR_ERR (bd);
 
 	platform_set_drvdata(pdev, bd);
 
-	bd->props.max_brightness = machinfo->max_intensity;
 	bd->props.power = FB_BLANK_UNBLANK;
 	bd->props.brightness = machinfo->default_intensity;
 	backlight_update_status(bd);
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index f7cc528..267d23f 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -105,16 +105,18 @@
 
 static int __devinit hp680bl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct backlight_device *bd;
 
-	bd = backlight_device_register ("hp680-bl", &pdev->dev, NULL,
-		    &hp680bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = HP680_MAX_INTENSITY;
+	bd = backlight_device_register("hp680-bl", &pdev->dev, NULL,
+				       &hp680bl_ops, &props);
 	if (IS_ERR(bd))
 		return PTR_ERR(bd);
 
 	platform_set_drvdata(pdev, bd);
 
-	bd->props.max_brightness = HP680_MAX_INTENSITY;
 	bd->props.brightness = HP680_DEFAULT_INTENSITY;
 	hp680bl_send_intensity(bd);
 
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index db9071f..2f177b3 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -101,10 +101,14 @@
 
 static int jornada_bl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	int ret;
 	struct backlight_device *bd;
 
-	bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL, &jornada_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = BL_MAX_BRIGHT;
+	bd = backlight_device_register(S1D_DEVICENAME, &pdev->dev, NULL,
+				       &jornada_bl_ops, &props);
 
 	if (IS_ERR(bd)) {
 		ret = PTR_ERR(bd);
@@ -117,7 +121,6 @@
 	/* note. make sure max brightness is set otherwise
 	   you will get seemingly non-related errors when
 	   trying to change brightness */
-	bd->props.max_brightness = BL_MAX_BRIGHT;
 	jornada_bl_update_status(bd);
 
 	platform_set_drvdata(pdev, bd);
diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
index 939e7b8..f439a86 100644
--- a/drivers/video/backlight/kb3886_bl.c
+++ b/drivers/video/backlight/kb3886_bl.c
@@ -141,20 +141,24 @@
 
 static int kb3886bl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct kb3886bl_machinfo *machinfo = pdev->dev.platform_data;
 
 	bl_machinfo = machinfo;
 	if (!machinfo->limit_mask)
 		machinfo->limit_mask = -1;
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = machinfo->max_intensity;
 	kb3886_backlight_device = backlight_device_register("kb3886-bl",
-		&pdev->dev, NULL, &kb3886bl_ops);
+							    &pdev->dev, NULL,
+							    &kb3886bl_ops,
+							    &props);
 	if (IS_ERR(kb3886_backlight_device))
 		return PTR_ERR(kb3886_backlight_device);
 
 	platform_set_drvdata(pdev, kb3886_backlight_device);
 
-	kb3886_backlight_device->props.max_brightness = machinfo->max_intensity;
 	kb3886_backlight_device->props.power = FB_BLANK_UNBLANK;
 	kb3886_backlight_device->props.brightness = machinfo->default_intensity;
 	backlight_update_status(kb3886_backlight_device);
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
new file mode 100644
index 0000000..74abd69
--- /dev/null
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -0,0 +1,257 @@
+/*
+ * l4f00242t03.c -- support for Epson L4F00242T03 LCD
+ *
+ * Copyright 2007-2009 Freescale Semiconductor, Inc. All Rights Reserved.
+ *
+ * Copyright (c) 2009 Alberto Panizzo <maramaopercheseimorto@gmail.com>
+ * 	Inspired by Marek Vasut's work in l4f00242t03.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/gpio.h>
+#include <linux/lcd.h>
+#include <linux/regulator/consumer.h>
+
+#include <linux/spi/spi.h>
+#include <linux/spi/l4f00242t03.h>
+
+struct l4f00242t03_priv {
+	struct spi_device	*spi;
+	struct lcd_device	*ld;
+	int lcd_on:1;
+	struct regulator *io_reg;
+	struct regulator *core_reg;
+};
+
+
+static void l4f00242t03_reset(unsigned int gpio)
+{
+	pr_debug("l4f00242t03_reset.\n");
+	gpio_set_value(gpio, 1);
+	mdelay(100);
+	gpio_set_value(gpio, 0);
+	mdelay(10);	/* tRES >= 100us */
+	gpio_set_value(gpio, 1);
+	mdelay(20);
+}
+
+#define param(x) ((x) | 0x100)
+
+static void l4f00242t03_lcd_init(struct spi_device *spi)
+{
+	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+	const u16 cmd[] = { 0x36, param(0), 0x3A, param(0x60) };
+
+	dev_dbg(&spi->dev, "initializing LCD\n");
+
+	if (priv->io_reg) {
+		regulator_set_voltage(priv->io_reg, 1800000, 1800000);
+		regulator_enable(priv->io_reg);
+	}
+
+	if (priv->core_reg) {
+		regulator_set_voltage(priv->core_reg, 2800000, 2800000);
+		regulator_enable(priv->core_reg);
+	}
+
+	gpio_set_value(pdata->data_enable_gpio, 1);
+	msleep(60);
+	spi_write(spi, (const u8 *)cmd, ARRAY_SIZE(cmd) * sizeof(u16));
+}
+
+static int l4f00242t03_lcd_power_set(struct lcd_device *ld, int power)
+{
+	struct l4f00242t03_priv *priv = lcd_get_data(ld);
+	struct spi_device *spi = priv->spi;
+
+	const u16 slpout = 0x11;
+	const u16 dison = 0x29;
+
+	const u16 slpin = 0x10;
+	const u16 disoff = 0x28;
+
+	if (power) {
+		if (priv->lcd_on)
+			return 0;
+
+		dev_dbg(&spi->dev, "turning on LCD\n");
+
+		spi_write(spi, (const u8 *)&slpout, sizeof(u16));
+		msleep(60);
+		spi_write(spi, (const u8 *)&dison, sizeof(u16));
+
+		priv->lcd_on = 1;
+	} else {
+		if (!priv->lcd_on)
+			return 0;
+
+		dev_dbg(&spi->dev, "turning off LCD\n");
+
+		spi_write(spi, (const u8 *)&disoff, sizeof(u16));
+		msleep(60);
+		spi_write(spi, (const u8 *)&slpin, sizeof(u16));
+
+		priv->lcd_on = 0;
+	}
+
+	return 0;
+}
+
+static struct lcd_ops l4f_ops = {
+	.set_power	= l4f00242t03_lcd_power_set,
+	.get_power	= NULL,
+};
+
+static int __devinit l4f00242t03_probe(struct spi_device *spi)
+{
+	struct l4f00242t03_priv *priv;
+	struct l4f00242t03_pdata *pdata = spi->dev.platform_data;
+	int ret;
+
+	if (pdata == NULL) {
+		dev_err(&spi->dev, "Uninitialized platform data.\n");
+		return -EINVAL;
+	}
+
+	priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL);
+
+	if (priv == NULL) {
+		dev_err(&spi->dev, "No memory for this device.\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+
+	dev_set_drvdata(&spi->dev, priv);
+	spi->bits_per_word = 9;
+	spi_setup(spi);
+
+	priv->spi = spi;
+
+	ret = gpio_request(pdata->reset_gpio, "lcd l4f00242t03 reset");
+	if (ret) {
+		dev_err(&spi->dev,
+			"Unable to get the lcd l4f00242t03 reset gpio.\n");
+		goto err;
+	}
+
+	ret = gpio_direction_output(pdata->reset_gpio, 1);
+	if (ret)
+		goto err2;
+
+	ret = gpio_request(pdata->data_enable_gpio,
+				"lcd l4f00242t03 data enable");
+	if (ret) {
+		dev_err(&spi->dev,
+			"Unable to get the lcd l4f00242t03 data en gpio.\n");
+		goto err2;
+	}
+
+	ret = gpio_direction_output(pdata->data_enable_gpio, 0);
+	if (ret)
+		goto err3;
+
+	if (pdata->io_supply) {
+		priv->io_reg = regulator_get(NULL, pdata->io_supply);
+
+		if (IS_ERR(priv->io_reg)) {
+			ret = PTR_ERR(priv->io_reg);
+			pr_err("%s: Unable to get the IO regulator\n", __func__);
+			goto err3;
+		}
+	}
+
+	if (pdata->core_supply) {
+		priv->core_reg = regulator_get(NULL, pdata->core_supply);
+
+		if (IS_ERR(priv->core_reg)) {
+			ret = PTR_ERR(priv->core_reg);
+			pr_err("%s: Unable to get the core regulator\n", __func__);
+			goto err4;
+		}
+	}
+
+	priv->ld = lcd_device_register("l4f00242t03",
+					&spi->dev, priv, &l4f_ops);
+	if (IS_ERR(priv->ld)) {
+		ret = PTR_ERR(priv->ld);
+		goto err5;
+	}
+
+	/* Init the LCD */
+	l4f00242t03_reset(pdata->reset_gpio);
+	l4f00242t03_lcd_init(spi);
+	l4f00242t03_lcd_power_set(priv->ld, 1);
+
+	dev_info(&spi->dev, "Epson l4f00242t03 lcd probed.\n");
+
+	return 0;
+
+err5:
+	if (priv->core_reg)
+		regulator_put(priv->core_reg);
+err4:
+	if (priv->io_reg)
+		regulator_put(priv->io_reg);
+err3:
+	gpio_free(pdata->data_enable_gpio);
+err2:
+	gpio_free(pdata->reset_gpio);
+err:
+	kfree(priv);
+
+	return ret;
+}
+
+static int __devexit l4f00242t03_remove(struct spi_device *spi)
+{
+	struct l4f00242t03_priv *priv = dev_get_drvdata(&spi->dev);
+	struct l4f00242t03_pdata *pdata = priv->spi->dev.platform_data;
+
+	l4f00242t03_lcd_power_set(priv->ld, 0);
+	lcd_device_unregister(priv->ld);
+
+	gpio_free(pdata->data_enable_gpio);
+	gpio_free(pdata->reset_gpio);
+
+	if (priv->io_reg)
+		regulator_put(priv->io_reg);
+	if (priv->core_reg)
+		regulator_put(priv->core_reg);
+
+	kfree(priv);
+
+	return 0;
+}
+
+static struct spi_driver l4f00242t03_driver = {
+	.driver = {
+		.name	= "l4f00242t03",
+		.owner	= THIS_MODULE,
+	},
+	.probe		= l4f00242t03_probe,
+	.remove		= __devexit_p(l4f00242t03_remove),
+};
+
+static int __init l4f00242t03_init(void)
+{
+	return spi_register_driver(&l4f00242t03_driver);
+}
+
+static void __exit l4f00242t03_exit(void)
+{
+	spi_unregister_driver(&l4f00242t03_driver);
+}
+
+module_init(l4f00242t03_init);
+module_exit(l4f00242t03_exit);
+
+MODULE_AUTHOR("Alberto Panizzo <maramaopercheseimorto@gmail.com>");
+MODULE_DESCRIPTION("EPSON L4F00242T03 LCD");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/locomolcd.c b/drivers/video/backlight/locomolcd.c
index 00a9591..7571bc2 100644
--- a/drivers/video/backlight/locomolcd.c
+++ b/drivers/video/backlight/locomolcd.c
@@ -167,6 +167,7 @@
 
 static int locomolcd_probe(struct locomo_dev *ldev)
 {
+	struct backlight_properties props;
 	unsigned long flags;
 
 	local_irq_save(flags);
@@ -182,13 +183,16 @@
 
 	local_irq_restore(flags);
 
-	locomolcd_bl_device = backlight_device_register("locomo-bl", &ldev->dev, NULL, &locomobl_data);
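+	/* max_brightness is now passed via backlight_properties at registration */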
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 4;
+	locomolcd_bl_device = backlight_device_register("locomo-bl",
+							&ldev->dev, NULL,
+							&locomobl_data, &props);
 
 	if (IS_ERR (locomolcd_bl_device))
 		return PTR_ERR (locomolcd_bl_device);
 
 	/* Set up frontlight so that screen is readable */
-	locomolcd_bl_device->props.max_brightness = 4,
 	locomolcd_bl_device->props.brightness = 2;
 	locomolcd_set_intensity(locomolcd_bl_device);
 
diff --git a/drivers/video/backlight/max8925_bl.c b/drivers/video/backlight/max8925_bl.c
index c267069..c91adaf 100644
--- a/drivers/video/backlight/max8925_bl.c
+++ b/drivers/video/backlight/max8925_bl.c
@@ -104,6 +104,7 @@
 	struct max8925_backlight_pdata *pdata = NULL;
 	struct max8925_backlight_data *data;
 	struct backlight_device *bl;
+	struct backlight_properties props;
 	struct resource *res;
 	char name[MAX8925_NAME_SIZE];
 	unsigned char value;
@@ -133,14 +134,15 @@
 	data->chip = chip;
 	data->current_brightness = 0;
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = MAX_BRIGHTNESS;
 	bl = backlight_device_register(name, &pdev->dev, data,
-					&max8925_backlight_ops);
+					&max8925_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		kfree(data);
 		return PTR_ERR(bl);
 	}
-	bl->props.max_brightness = MAX_BRIGHTNESS;
 	bl->props.brightness = MAX_BRIGHTNESS;
 
 	platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 2e78b07..1b5d3fe 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -139,6 +139,51 @@
 static const struct dmi_system_id __initdata mbp_device_table[] = {
 	{
 		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 1,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook1,1"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 2,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook2,1"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 3,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook3,1"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 4,1",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,1"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
+		.ident		= "MacBook 4,2",
+		.matches	= {
+			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "MacBook4,2"),
+		},
+		.driver_data	= (void *)&intel_chipset_data,
+	},
+	{
+		.callback	= mbp_dmi_match,
 		.ident		= "MacBookPro 3,1",
 		.matches	= {
 			DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -250,6 +295,7 @@
 
 static int __init mbp_init(void)
 {
+	struct backlight_properties props;
 	if (!dmi_check_system(mbp_device_table))
 		return -ENODEV;
 
@@ -257,14 +303,17 @@
 						"Macbook Pro backlight"))
 		return -ENXIO;
 
-	mbp_backlight_device = backlight_device_register("mbp_backlight",
-					NULL, NULL, &driver_data->backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 15;
+	mbp_backlight_device = backlight_device_register("mbp_backlight", NULL,
+							 NULL,
+							 &driver_data->backlight_ops,
+							 &props);
 	if (IS_ERR(mbp_backlight_device)) {
 		release_region(driver_data->iostart, driver_data->iolen);
 		return PTR_ERR(mbp_backlight_device);
 	}
 
-	mbp_backlight_device->props.max_brightness = 15;
 	mbp_backlight_device->props.brightness =
 		driver_data->backlight_ops.get_brightness(mbp_backlight_device);
 	backlight_update_status(mbp_backlight_device);
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index a3a7f89..333d28e 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -132,6 +132,7 @@
 
 static int omapbl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct backlight_device *dev;
 	struct omap_backlight *bl;
 	struct omap_backlight_config *pdata = pdev->dev.platform_data;
@@ -143,7 +144,10 @@
 	if (unlikely(!bl))
 		return -ENOMEM;
 
-	dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = OMAPBL_MAX_INTENSITY;
+	dev = backlight_device_register("omap-bl", &pdev->dev, bl, &omapbl_ops,
+					&props);
 	if (IS_ERR(dev)) {
 		kfree(bl);
 		return PTR_ERR(dev);
@@ -160,7 +164,6 @@
 	omap_cfg_reg(PWL);	/* Conflicts with UART3 */
 
 	dev->props.fb_blank = FB_BLANK_UNBLANK;
-	dev->props.max_brightness = OMAPBL_MAX_INTENSITY;
 	dev->props.brightness = pdata->default_intensity;
 	omapbl_update_status(dev);
 
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 075786e..809278c 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -61,8 +61,10 @@
 
 static int progearbl_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	u8 temp;
 	struct backlight_device *progear_backlight_device;
+	int ret;
 
 	pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
 	if (!pmu_dev) {
@@ -73,28 +75,37 @@
 	sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
 	if (!sb_dev) {
 		printk("ALI 1533 SB not found.\n");
-		pci_dev_put(pmu_dev);
-		return -ENODEV;
+		ret = -ENODEV;
+		goto put_pmu;
 	}
 
 	/*     Set SB_MPS1 to enable brightness control. */
 	pci_read_config_byte(sb_dev, SB_MPS1, &temp);
 	pci_write_config_byte(sb_dev, SB_MPS1, temp | 0x20);
 
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
 	progear_backlight_device = backlight_device_register("progear-bl",
 							     &pdev->dev, NULL,
-							     &progearbl_ops);
-	if (IS_ERR(progear_backlight_device))
-		return PTR_ERR(progear_backlight_device);
+							     &progearbl_ops,
+							     &props);
+	if (IS_ERR(progear_backlight_device)) {
+		ret = PTR_ERR(progear_backlight_device);
+		goto put_sb;
+	}
 
 	platform_set_drvdata(pdev, progear_backlight_device);
 
 	progear_backlight_device->props.power = FB_BLANK_UNBLANK;
 	progear_backlight_device->props.brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
-	progear_backlight_device->props.max_brightness = HW_LEVEL_MAX - HW_LEVEL_MIN;
 	progearbl_set_intensity(progear_backlight_device);
 
 	return 0;
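+/* Error paths: drop the PCI device references taken above */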
+put_sb:
+	pci_dev_put(sb_dev);
+put_pmu:
+	pci_dev_put(pmu_dev);
+	return ret;
 }
 
 static int progearbl_remove(struct platform_device *pdev)
diff --git a/drivers/video/backlight/pwm_bl.c b/drivers/video/backlight/pwm_bl.c
index 9d2ec2a..b89eebc 100644
--- a/drivers/video/backlight/pwm_bl.c
+++ b/drivers/video/backlight/pwm_bl.c
@@ -65,6 +65,7 @@
 
 static int pwm_backlight_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct platform_pwm_backlight_data *data = pdev->dev.platform_data;
 	struct backlight_device *bl;
 	struct pwm_bl_data *pb;
@@ -100,15 +101,16 @@
 	} else
 		dev_dbg(&pdev->dev, "got pwm for backlight\n");
 
-	bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev,
-			pb, &pwm_backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = data->max_brightness;
+	bl = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, pb,
+				       &pwm_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		ret = PTR_ERR(bl);
 		goto err_bl;
 	}
 
-	bl->props.max_brightness = data->max_brightness;
 	bl->props.brightness = data->dft_brightness;
 	backlight_update_status(bl);
 
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index e14ce4d..f57bbf1 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -80,6 +80,7 @@
 static int __devinit tosa_bl_probe(struct i2c_client *client,
 		const struct i2c_device_id *id)
 {
+	struct backlight_properties props;
 	struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL);
 	int ret = 0;
 	if (!data)
@@ -99,15 +100,16 @@
 	i2c_set_clientdata(client, data);
 	data->i2c = client;
 
-	data->bl = backlight_device_register("tosa-bl", &client->dev,
-			data, &bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 512 - 1;
+	data->bl = backlight_device_register("tosa-bl", &client->dev, data,
+					     &bl_ops, &props);
 	if (IS_ERR(data->bl)) {
 		ret = PTR_ERR(data->bl);
 		goto err_reg;
 	}
 
 	data->bl->props.brightness = 69;
-	data->bl->props.max_brightness = 512 - 1;
 	data->bl->props.power = FB_BLANK_UNBLANK;
 
 	backlight_update_status(data->bl);
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index e32add3..a431270 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -125,6 +125,7 @@
 	struct wm831x_backlight_pdata *pdata;
 	struct wm831x_backlight_data *data;
 	struct backlight_device *bl;
+	struct backlight_properties props;
 	int ret, i, max_isel, isink_reg, dcdc_cfg;
 
 	/* We need platform data */
@@ -191,15 +192,15 @@
 	data->current_brightness = 0;
 	data->isink_reg = isink_reg;
 
-	bl = backlight_device_register("wm831x", &pdev->dev,
-			data, &wm831x_backlight_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = max_isel;
+	bl = backlight_device_register("wm831x", &pdev->dev, data,
+				       &wm831x_backlight_ops, &props);
 	if (IS_ERR(bl)) {
 		dev_err(&pdev->dev, "failed to register backlight\n");
 		kfree(data);
 		return PTR_ERR(bl);
 	}
 
-	bl->props.max_brightness = max_isel;
 	bl->props.brightness = max_isel;
 
 	platform_set_drvdata(pdev, bl);
diff --git a/drivers/video/bf54x-lq043fb.c b/drivers/video/bf54x-lq043fb.c
index 814312a..23b2a8c 100644
--- a/drivers/video/bf54x-lq043fb.c
+++ b/drivers/video/bf54x-lq043fb.c
@@ -433,7 +433,7 @@
 	return 0;
 }
 
-static struct backlight_ops bfin_lq043fb_bl_ops = {
+static const struct backlight_ops bfin_lq043fb_bl_ops = {
 	.get_brightness = bl_get_brightness,
 };
 
@@ -501,6 +501,7 @@
 
 static int __devinit bfin_bf54x_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct bfin_bf54xfb_info *info;
 	struct fb_info *fbinfo;
 	int ret;
@@ -645,10 +646,16 @@
 		goto out8;
 	}
 #ifndef NO_BL_SUPPORT
-	bl_dev =
-	    backlight_device_register("bf54x-bl", NULL, NULL,
-				      &bfin_lq043fb_bl_ops);
-	bl_dev->props.max_brightness = 255;
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 255;
+	bl_dev = backlight_device_register("bf54x-bl", NULL, NULL,
+					   &bfin_lq043fb_bl_ops, &props);
+	if (IS_ERR(bl_dev)) {
+		printk(KERN_ERR DRIVER_NAME
+			": unable to register backlight.\n");
+		ret = PTR_ERR(bl_dev);
+		goto out9;
+	}
 
 	lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
 	lcd_dev->props.max_contrast = 255;
 	printk(KERN_INFO "Done.\n");
@@ -656,6 +663,8 @@
 
 	return 0;
 
+out9:
+	unregister_framebuffer(fbinfo);
 out8:
 	free_irq(info->irq, info);
 out7:
diff --git a/drivers/video/bfin-t350mcqb-fb.c b/drivers/video/bfin-t350mcqb-fb.c
index 5653d08..31a2dec 100644
--- a/drivers/video/bfin-t350mcqb-fb.c
+++ b/drivers/video/bfin-t350mcqb-fb.c
@@ -352,7 +352,7 @@
 	return 0;
 }
 
-static struct backlight_ops bfin_lq043fb_bl_ops = {
+static const struct backlight_ops bfin_lq043fb_bl_ops = {
 	.get_brightness = bl_get_brightness,
 };
 
@@ -419,6 +419,7 @@
 
 static int __devinit bfin_t350mcqb_probe(struct platform_device *pdev)
 {
+	struct backlight_properties props;
 	struct bfin_t350mcqbfb_info *info;
 	struct fb_info *fbinfo;
 	int ret;
@@ -540,10 +541,16 @@
 		goto out8;
 	}
 #ifndef NO_BL_SUPPORT
-	bl_dev =
-	    backlight_device_register("bf52x-bl", NULL, NULL,
-				      &bfin_lq043fb_bl_ops);
-	bl_dev->props.max_brightness = 255;
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = 255;
+	bl_dev = backlight_device_register("bf52x-bl", NULL, NULL,
+					   &bfin_lq043fb_bl_ops, &props);
+	if (IS_ERR(bl_dev)) {
+		printk(KERN_ERR DRIVER_NAME
+			": unable to register backlight.\n");
+		ret = PTR_ERR(bl_dev);
+		goto out9;
+	}
 
 	lcd_dev = lcd_device_register(DRIVER_NAME, &pdev->dev, NULL, &bfin_lcd_ops);
 	lcd_dev->props.max_contrast = 255;
 	printk(KERN_INFO "Done.\n");
@@ -551,6 +558,8 @@
 
 	return 0;
 
+out9:
+	unregister_framebuffer(fbinfo);
 out8:
 	free_irq(info->irq, info);
 out7:
diff --git a/drivers/video/nvidia/nv_backlight.c b/drivers/video/nvidia/nv_backlight.c
index 443e3c8..2fb552a 100644
--- a/drivers/video/nvidia/nv_backlight.c
+++ b/drivers/video/nvidia/nv_backlight.c
@@ -94,6 +94,7 @@
 
 void nvidia_bl_init(struct nvidia_par *par)
 {
+	struct backlight_properties props;
 	struct fb_info *info = pci_get_drvdata(par->pci_dev);
 	struct backlight_device *bd;
 	char name[12];
@@ -109,7 +110,10 @@
 
 	snprintf(name, sizeof(name), "nvidiabl%d", info->node);
 
-	bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops,
+				       &props);
 	if (IS_ERR(bd)) {
 		info->bl_dev = NULL;
 		printk(KERN_WARNING "nvidia: Backlight registration failed\n");
@@ -121,7 +125,6 @@
 		0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL,
 		0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL);
 
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index fcd6a61..59769e8 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -486,6 +486,7 @@
 
 static int taal_probe(struct omap_dss_device *dssdev)
 {
+	struct backlight_properties props;
 	struct taal_data *td;
 	struct backlight_device *bldev;
 	int r;
@@ -520,11 +521,16 @@
 
 	/* if no platform set_backlight() defined, presume DSI backlight
 	 * control */
+	memset(&props, 0, sizeof(struct backlight_properties));
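+	/* Choose the range up front: DSI control is 8-bit, the platform callback is 7-bit */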
 	if (!dssdev->set_backlight)
 		td->use_dsi_bl = true;
 
+	if (td->use_dsi_bl)
+		props.max_brightness = 255;
+	else
+		props.max_brightness = 127;
 	bldev = backlight_device_register("taal", &dssdev->dev, dssdev,
-			&taal_bl_ops);
+					  &taal_bl_ops, &props);
 	if (IS_ERR(bldev)) {
 		r = PTR_ERR(bldev);
 		goto err2;
@@ -534,13 +540,10 @@
 
 	bldev->props.fb_blank = FB_BLANK_UNBLANK;
 	bldev->props.power = FB_BLANK_UNBLANK;
-	if (td->use_dsi_bl) {
-		bldev->props.max_brightness = 255;
+	if (td->use_dsi_bl)
 		bldev->props.brightness = 255;
-	} else {
-		bldev->props.max_brightness = 127;
+	else
 		bldev->props.brightness = 127;
-	}
 
 	taal_bl_update_status(bldev);
 
diff --git a/drivers/video/riva/fbdev.c b/drivers/video/riva/fbdev.c
index d94c57f..618f36b 100644
--- a/drivers/video/riva/fbdev.c
+++ b/drivers/video/riva/fbdev.c
@@ -338,6 +338,7 @@
 
 static void riva_bl_init(struct riva_par *par)
 {
+	struct backlight_properties props;
 	struct fb_info *info = pci_get_drvdata(par->pdev);
 	struct backlight_device *bd;
 	char name[12];
@@ -353,7 +354,10 @@
 
 	snprintf(name, sizeof(name), "rivabl%d", info->node);
 
-	bd = backlight_device_register(name, info->dev, par, &riva_bl_ops);
+	memset(&props, 0, sizeof(struct backlight_properties));
+	props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
+	bd = backlight_device_register(name, info->dev, par, &riva_bl_ops,
+				       &props);
 	if (IS_ERR(bd)) {
 		info->bl_dev = NULL;
 		printk(KERN_WARNING "riva: Backlight registration failed\n");
@@ -365,7 +369,6 @@
 		MIN_LEVEL * FB_BACKLIGHT_MAX / MAX_LEVEL,
 		FB_BACKLIGHT_MAX);
 
-	bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1;
 	bd->props.brightness = bd->props.max_brightness;
 	bd->props.power = FB_BLANK_UNBLANK;
 	backlight_update_status(bd);