Merge branch 'qed-firmware-TLV'

Sudarsana Reddy Kalluru says:

====================
qed*: Add support for management firmware TLV request.

Management firmware (MFW) requires config and state information from
the driver. It queries this via a TLV (type-length-value) request in
which the MFW specifies the list of required TLVs. The driver fills in
the TLV data and responds to the MFW.
This patch series adds the qed/qede/qedf/qedi driver implementation for
supporting these TLV queries from the MFW.
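
For reference, the framing this series implements looks roughly like the
sketch below: each TLV in the request buffer carries a type, a length in
dwords and a flags byte, and the driver marks the entries it was able to
fill. This is a minimal illustration only; the struct and function names
here are made up for the sketch (the real header is struct qed_drv_tlv_hdr
added to qed_mcp.h).

#include <linux/types.h>

/* Hypothetical helper: returns true when the driver can supply a value
 * for this TLV type and has copied it into 'val' (at most 'len' dwords).
 */
static bool drv_fill_value(u8 type, void *val, u8 len)
{
	return false;	/* placeholder; a real driver fills known TLVs here */
}

/* Each TLV entry starts with a 4-byte header; 'length' is the value size
 * in dwords and does not include the header itself.
 */
struct tlv_hdr_sketch {
	u8 type;	/* one of enum tlvs (DRV_TLV_*) */
	u8 length;	/* value length in dwords, header excluded */
	u8 reserved;
	u8 flags;	/* bit 0: set by the driver once the value is filled */
};

static void fill_tlv_request(u8 *buf, u32 size)
{
	u32 off = 0;

	while (off < size) {
		struct tlv_hdr_sketch *hdr = (struct tlv_hdr_sketch *)&buf[off];
		void *val = &buf[off + sizeof(*hdr)];

		if (drv_fill_value(hdr->type, val, hdr->length))
			hdr->flags |= 0x01;	/* mark the TLV as updated */

		off += sizeof(*hdr) + sizeof(u32) * hdr->length;
	}
}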

Changes from previous versions:
-------------------------------
v2: Split patch (2) into multiple simpler patches.
v2: Update qed_tlv_parsed_buf->p_val datatype to a void pointer to avoid
    a bunch of unnecessary typecasts.

Please consider applying this series to "net-next".
====================

Signed-off-by: David S. Miller <davem@davemloft.net>
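
The qed core only gathers these values; the protocol drivers supply them
through the get_generic_tlv_data()/get_protocol_tlv_data() callbacks that
qed_mfw_fill_tlv_data() invokes below. A minimal sketch of what such a
client callback might look like, assuming a net_device is registered as
the ops cookie (illustrative only, not the actual qede implementation):

#include <linux/netdevice.h>
#include <linux/qed/qed_if.h>

/* Field names follow the struct qed_mfw_tlv_eth accessors used by
 * qed_mfw_get_eth_tlv_value(); the values are examples only.
 */
static void example_get_eth_tlv_data(void *dev, void *data)
{
	struct qed_mfw_tlv_eth *etlv = data;
	struct net_device *ndev = dev;	/* cookie registered with qed */

	etlv->prom_mode = !!(ndev->flags & IFF_PROMISC);
	etlv->prom_mode_set = true;

	etlv->lso_maxoff_size = 0xffff;	/* example value only */
	etlv->lso_maxoff_size_set = true;

	/* Fields whose *_set flag stays false are skipped by qed: the
	 * corresponding getter returns -1 and the TLV is left unchanged.
	 */
}
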
diff --git a/drivers/net/ethernet/qlogic/qed/Makefile b/drivers/net/ethernet/qlogic/qed/Makefile
index c70cf2a..a0acb94 100644
--- a/drivers/net/ethernet/qlogic/qed/Makefile
+++ b/drivers/net/ethernet/qlogic/qed/Makefile
@@ -3,7 +3,7 @@
 
 qed-y := qed_cxt.o qed_dev.o qed_hw.o qed_init_fw_funcs.o qed_init_ops.o \
 	 qed_int.o qed_main.o qed_mcp.o qed_sp_commands.o qed_spq.o qed_l2.o \
-	 qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o
+	 qed_selftest.o qed_dcbx.o qed_debug.o qed_ptp.o qed_mng_tlv.o
 qed-$(CONFIG_QED_SRIOV) += qed_sriov.o qed_vf.o
 qed-$(CONFIG_QED_LL2) += qed_ll2.o
 qed-$(CONFIG_QED_RDMA) += qed_roce.o qed_rdma.o qed_iwarp.o
diff --git a/drivers/net/ethernet/qlogic/qed/qed.h b/drivers/net/ethernet/qlogic/qed/qed.h
index adcff49..00db340 100644
--- a/drivers/net/ethernet/qlogic/qed/qed.h
+++ b/drivers/net/ethernet/qlogic/qed/qed.h
@@ -92,6 +92,8 @@
 struct qed_dev_info;
 union qed_mcp_protocol_stats;
 enum qed_mcp_protocol_type;
+enum qed_mfw_tlv_type;
+union qed_mfw_tlv_data;
 
 /* helpers */
 #define QED_MFW_GET_FIELD(name, field) \
@@ -513,6 +515,10 @@
 	void	(*func)(void *);
 };
 
+enum qed_slowpath_wq_flag {
+	QED_SLOWPATH_MFW_TLV_REQ,
+};
+
 struct qed_hwfn {
 	struct qed_dev			*cdev;
 	u8				my_id;          /* ID inside the PF */
@@ -642,6 +648,9 @@
 #endif
 
 	struct z_stream_s		*stream;
+	struct workqueue_struct *slowpath_wq;
+	struct delayed_work slowpath_task;
+	unsigned long slowpath_task_flags;
 };
 
 struct pci_params {
@@ -906,5 +915,9 @@
 			    union qed_mcp_protocol_stats *stats);
 int qed_slowpath_irq_req(struct qed_hwfn *hwfn);
 void qed_slowpath_irq_sync(struct qed_hwfn *p_hwfn);
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn);
 
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn,
+			  enum qed_mfw_tlv_type type,
+			  union qed_mfw_tlv_data *tlv_data);
 #endif /* _QED_H */
diff --git a/drivers/net/ethernet/qlogic/qed/qed_hsi.h b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
index b5f70ef..8e1e6e1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_hsi.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_hsi.h
@@ -11863,6 +11863,8 @@
 	u32 running_bundle_id;
 	s32 external_temperature;
 	u32 mdump_reason;
+	u32 data_ptr;
+	u32 data_size;
 };
 
 struct fw_flr_mb {
@@ -12322,6 +12324,7 @@
 #define DRV_MSG_CODE_BIST_TEST			0x001e0000
 #define DRV_MSG_CODE_SET_LED_MODE		0x00200000
 #define DRV_MSG_CODE_RESOURCE_CMD	0x00230000
+#define DRV_MSG_CODE_GET_TLV_DONE		0x002f0000
 
 #define RESOURCE_CMD_REQ_RESC_MASK		0x0000001F
 #define RESOURCE_CMD_REQ_RESC_SHIFT		0
@@ -12523,6 +12526,7 @@
 	MFW_DRV_MSG_TRANSCEIVER_STATE_CHANGE,
 	MFW_DRV_MSG_BW_UPDATE11,
 	MFW_DRV_MSG_OEM_CFG_UPDATE,
+	MFW_DRV_MSG_GET_TLV_REQ,
 	MFW_DRV_MSG_MAX
 };
 
@@ -12558,6 +12562,233 @@
 	struct public_func func[MCP_GLOB_FUNC_MAX];
 };
 
+/* OCBB definitions */
+enum tlvs {
+	/* Category 1: Device Properties */
+	DRV_TLV_CLP_STR,
+	DRV_TLV_CLP_STR_CTD,
+	/* Category 6: Device Configuration */
+	DRV_TLV_SCSI_TO,
+	DRV_TLV_R_T_TOV,
+	DRV_TLV_R_A_TOV,
+	DRV_TLV_E_D_TOV,
+	DRV_TLV_CR_TOV,
+	DRV_TLV_BOOT_TYPE,
+	/* Category 8: Port Configuration */
+	DRV_TLV_NPIV_ENABLED,
+	/* Category 10: Function Configuration */
+	DRV_TLV_FEATURE_FLAGS,
+	DRV_TLV_LOCAL_ADMIN_ADDR,
+	DRV_TLV_ADDITIONAL_MAC_ADDR_1,
+	DRV_TLV_ADDITIONAL_MAC_ADDR_2,
+	DRV_TLV_LSO_MAX_OFFLOAD_SIZE,
+	DRV_TLV_LSO_MIN_SEGMENT_COUNT,
+	DRV_TLV_PROMISCUOUS_MODE,
+	DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG,
+	DRV_TLV_FLEX_NIC_OUTER_VLAN_ID,
+	DRV_TLV_OS_DRIVER_STATES,
+	DRV_TLV_PXE_BOOT_PROGRESS,
+	/* Category 12: FC/FCoE Configuration */
+	DRV_TLV_NPIV_STATE,
+	DRV_TLV_NUM_OF_NPIV_IDS,
+	DRV_TLV_SWITCH_NAME,
+	DRV_TLV_SWITCH_PORT_NUM,
+	DRV_TLV_SWITCH_PORT_ID,
+	DRV_TLV_VENDOR_NAME,
+	DRV_TLV_SWITCH_MODEL,
+	DRV_TLV_SWITCH_FW_VER,
+	DRV_TLV_QOS_PRIORITY_PER_802_1P,
+	DRV_TLV_PORT_ALIAS,
+	DRV_TLV_PORT_STATE,
+	DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_LINK_FAILURE_COUNT,
+	DRV_TLV_FCOE_BOOT_PROGRESS,
+	/* Category 13: iSCSI Configuration */
+	DRV_TLV_TARGET_LLMNR_ENABLED,
+	DRV_TLV_HEADER_DIGEST_FLAG_ENABLED,
+	DRV_TLV_DATA_DIGEST_FLAG_ENABLED,
+	DRV_TLV_AUTHENTICATION_METHOD,
+	DRV_TLV_ISCSI_BOOT_TARGET_PORTAL,
+	DRV_TLV_MAX_FRAME_SIZE,
+	DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE,
+	DRV_TLV_ISCSI_BOOT_PROGRESS,
+	/* Category 20: Device Data */
+	DRV_TLV_PCIE_BUS_RX_UTILIZATION,
+	DRV_TLV_PCIE_BUS_TX_UTILIZATION,
+	DRV_TLV_DEVICE_CPU_CORES_UTILIZATION,
+	DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED,
+	DRV_TLV_NCSI_RX_BYTES_RECEIVED,
+	DRV_TLV_NCSI_TX_BYTES_SENT,
+	/* Category 22: Base Port Data */
+	DRV_TLV_RX_DISCARDS,
+	DRV_TLV_RX_ERRORS,
+	DRV_TLV_TX_ERRORS,
+	DRV_TLV_TX_DISCARDS,
+	DRV_TLV_RX_FRAMES_RECEIVED,
+	DRV_TLV_TX_FRAMES_SENT,
+	/* Category 23: FC/FCoE Port Data */
+	DRV_TLV_RX_BROADCAST_PACKETS,
+	DRV_TLV_TX_BROADCAST_PACKETS,
+	/* Category 28: Base Function Data */
+	DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4,
+	DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6,
+	DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+	DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+	DRV_TLV_PF_RX_FRAMES_RECEIVED,
+	DRV_TLV_RX_BYTES_RECEIVED,
+	DRV_TLV_PF_TX_FRAMES_SENT,
+	DRV_TLV_TX_BYTES_SENT,
+	DRV_TLV_IOV_OFFLOAD,
+	DRV_TLV_PCI_ERRORS_CAP_ID,
+	DRV_TLV_UNCORRECTABLE_ERROR_STATUS,
+	DRV_TLV_UNCORRECTABLE_ERROR_MASK,
+	DRV_TLV_CORRECTABLE_ERROR_STATUS,
+	DRV_TLV_CORRECTABLE_ERROR_MASK,
+	DRV_TLV_PCI_ERRORS_AECC_REGISTER,
+	DRV_TLV_TX_QUEUES_EMPTY,
+	DRV_TLV_RX_QUEUES_EMPTY,
+	DRV_TLV_TX_QUEUES_FULL,
+	DRV_TLV_RX_QUEUES_FULL,
+	/* Category 29: FC/FCoE Function Data */
+	DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+	DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+	DRV_TLV_FCOE_RX_FRAMES_RECEIVED,
+	DRV_TLV_FCOE_RX_BYTES_RECEIVED,
+	DRV_TLV_FCOE_TX_FRAMES_SENT,
+	DRV_TLV_FCOE_TX_BYTES_SENT,
+	DRV_TLV_CRC_ERROR_COUNT,
+	DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_CRC_ERROR_1_TIMESTAMP,
+	DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_CRC_ERROR_2_TIMESTAMP,
+	DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_CRC_ERROR_3_TIMESTAMP,
+	DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_CRC_ERROR_4_TIMESTAMP,
+	DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_CRC_ERROR_5_TIMESTAMP,
+	DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT,
+	DRV_TLV_LOSS_OF_SIGNAL_ERRORS,
+	DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT,
+	DRV_TLV_DISPARITY_ERROR_COUNT,
+	DRV_TLV_CODE_VIOLATION_ERROR_COUNT,
+	DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1,
+	DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2,
+	DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3,
+	DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4,
+	DRV_TLV_LAST_FLOGI_TIMESTAMP,
+	DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1,
+	DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2,
+	DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3,
+	DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4,
+	DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP,
+	DRV_TLV_LAST_FLOGI_RJT,
+	DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP,
+	DRV_TLV_FDISCS_SENT_COUNT,
+	DRV_TLV_FDISC_ACCS_RECEIVED,
+	DRV_TLV_FDISC_RJTS_RECEIVED,
+	DRV_TLV_PLOGI_SENT_COUNT,
+	DRV_TLV_PLOGI_ACCS_RECEIVED,
+	DRV_TLV_PLOGI_RJTS_RECEIVED,
+	DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID,
+	DRV_TLV_PLOGI_1_TIMESTAMP,
+	DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID,
+	DRV_TLV_PLOGI_2_TIMESTAMP,
+	DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID,
+	DRV_TLV_PLOGI_3_TIMESTAMP,
+	DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID,
+	DRV_TLV_PLOGI_4_TIMESTAMP,
+	DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID,
+	DRV_TLV_PLOGI_5_TIMESTAMP,
+	DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_PLOGI_1_ACC_TIMESTAMP,
+	DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_PLOGI_2_ACC_TIMESTAMP,
+	DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_PLOGI_3_ACC_TIMESTAMP,
+	DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_PLOGI_4_ACC_TIMESTAMP,
+	DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_PLOGI_5_ACC_TIMESTAMP,
+	DRV_TLV_LOGOS_ISSUED,
+	DRV_TLV_LOGO_ACCS_RECEIVED,
+	DRV_TLV_LOGO_RJTS_RECEIVED,
+	DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_LOGO_1_TIMESTAMP,
+	DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_LOGO_2_TIMESTAMP,
+	DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_LOGO_3_TIMESTAMP,
+	DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_LOGO_4_TIMESTAMP,
+	DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID,
+	DRV_TLV_LOGO_5_TIMESTAMP,
+	DRV_TLV_LOGOS_RECEIVED,
+	DRV_TLV_ACCS_ISSUED,
+	DRV_TLV_PRLIS_ISSUED,
+	DRV_TLV_ACCS_RECEIVED,
+	DRV_TLV_ABTS_SENT_COUNT,
+	DRV_TLV_ABTS_ACCS_RECEIVED,
+	DRV_TLV_ABTS_RJTS_RECEIVED,
+	DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID,
+	DRV_TLV_ABTS_1_TIMESTAMP,
+	DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID,
+	DRV_TLV_ABTS_2_TIMESTAMP,
+	DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID,
+	DRV_TLV_ABTS_3_TIMESTAMP,
+	DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID,
+	DRV_TLV_ABTS_4_TIMESTAMP,
+	DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID,
+	DRV_TLV_ABTS_5_TIMESTAMP,
+	DRV_TLV_RSCNS_RECEIVED,
+	DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1,
+	DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2,
+	DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3,
+	DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4,
+	DRV_TLV_LUN_RESETS_ISSUED,
+	DRV_TLV_ABORT_TASK_SETS_ISSUED,
+	DRV_TLV_TPRLOS_SENT,
+	DRV_TLV_NOS_SENT_COUNT,
+	DRV_TLV_NOS_RECEIVED_COUNT,
+	DRV_TLV_OLS_COUNT,
+	DRV_TLV_LR_COUNT,
+	DRV_TLV_LRR_COUNT,
+	DRV_TLV_LIP_SENT_COUNT,
+	DRV_TLV_LIP_RECEIVED_COUNT,
+	DRV_TLV_EOFA_COUNT,
+	DRV_TLV_EOFNI_COUNT,
+	DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT,
+	DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT,
+	DRV_TLV_SCSI_STATUS_BUSY_COUNT,
+	DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT,
+	DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT,
+	DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT,
+	DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT,
+	DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT,
+	DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT,
+	DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ,
+	DRV_TLV_SCSI_CHECK_1_TIMESTAMP,
+	DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ,
+	DRV_TLV_SCSI_CHECK_2_TIMESTAMP,
+	DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ,
+	DRV_TLV_SCSI_CHECK_3_TIMESTAMP,
+	DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ,
+	DRV_TLV_SCSI_CHECK_4_TIMESTAMP,
+	DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ,
+	DRV_TLV_SCSI_CHECK_5_TIMESTAMP,
+	/* Category 30: iSCSI Function Data */
+	DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH,
+	DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH,
+	DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED,
+	DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED,
+	DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT,
+	DRV_TLV_ISCSI_PDU_TX_BYTES_SENT
+};
+
 struct nvm_cfg_mac_address {
 	u32 mac_addr_hi;
 #define NVM_CFG_MAC_ADDRESS_HI_MASK	0x0000FFFF
diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c
index 9feed3b..68c4399 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_main.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_main.c
@@ -946,6 +946,68 @@
 	}
 }
 
+static void qed_slowpath_wq_stop(struct qed_dev *cdev)
+{
+	int i;
+
+	if (IS_VF(cdev))
+		return;
+
+	for_each_hwfn(cdev, i) {
+		if (!cdev->hwfns[i].slowpath_wq)
+			continue;
+
+		flush_workqueue(cdev->hwfns[i].slowpath_wq);
+		destroy_workqueue(cdev->hwfns[i].slowpath_wq);
+	}
+}
+
+static void qed_slowpath_task(struct work_struct *work)
+{
+	struct qed_hwfn *hwfn = container_of(work, struct qed_hwfn,
+					     slowpath_task.work);
+	struct qed_ptt *ptt = qed_ptt_acquire(hwfn);
+
+	if (!ptt) {
+		queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+		return;
+	}
+
+	if (test_and_clear_bit(QED_SLOWPATH_MFW_TLV_REQ,
+			       &hwfn->slowpath_task_flags))
+		qed_mfw_process_tlv_req(hwfn, ptt);
+
+	qed_ptt_release(hwfn, ptt);
+}
+
+static int qed_slowpath_wq_start(struct qed_dev *cdev)
+{
+	struct qed_hwfn *hwfn;
+	char name[NAME_SIZE];
+	int i;
+
+	if (IS_VF(cdev))
+		return 0;
+
+	for_each_hwfn(cdev, i) {
+		hwfn = &cdev->hwfns[i];
+
+		snprintf(name, NAME_SIZE, "slowpath-%02x:%02x.%02x",
+			 cdev->pdev->bus->number,
+			 PCI_SLOT(cdev->pdev->devfn), hwfn->abs_pf_id);
+
+		hwfn->slowpath_wq = alloc_workqueue(name, 0, 0);
+		if (!hwfn->slowpath_wq) {
+			DP_NOTICE(hwfn, "Cannot create slowpath workqueue\n");
+			return -ENOMEM;
+		}
+
+		INIT_DELAYED_WORK(&hwfn->slowpath_task, qed_slowpath_task);
+	}
+
+	return 0;
+}
+
 static int qed_slowpath_start(struct qed_dev *cdev,
 			      struct qed_slowpath_params *params)
 {
@@ -961,6 +1023,9 @@
 	if (qed_iov_wq_start(cdev))
 		goto err;
 
+	if (qed_slowpath_wq_start(cdev))
+		goto err;
+
 	if (IS_PF(cdev)) {
 		rc = request_firmware(&cdev->firmware, QED_FW_FILE_NAME,
 				      &cdev->pdev->dev);
@@ -1095,6 +1160,8 @@
 
 	qed_iov_wq_stop(cdev, false);
 
+	qed_slowpath_wq_stop(cdev);
+
 	return rc;
 }
 
@@ -1103,6 +1170,8 @@
 	if (!cdev)
 		return -ENODEV;
 
+	qed_slowpath_wq_stop(cdev);
+
 	qed_ll2_dealloc_if(cdev);
 
 	if (IS_PF(cdev)) {
@@ -2088,3 +2157,89 @@
 		return;
 	}
 }
+
+int qed_mfw_tlv_req(struct qed_hwfn *hwfn)
+{
+	DP_VERBOSE(hwfn->cdev, NETIF_MSG_DRV,
+		   "Scheduling slowpath task [Flag: %d]\n",
+		   QED_SLOWPATH_MFW_TLV_REQ);
+	smp_mb__before_atomic();
+	set_bit(QED_SLOWPATH_MFW_TLV_REQ, &hwfn->slowpath_task_flags);
+	smp_mb__after_atomic();
+	queue_delayed_work(hwfn->slowpath_wq, &hwfn->slowpath_task, 0);
+
+	return 0;
+}
+
+static void
+qed_fill_generic_tlv_data(struct qed_dev *cdev, struct qed_mfw_tlv_generic *tlv)
+{
+	struct qed_common_cb_ops *op = cdev->protocol_ops.common;
+	struct qed_eth_stats_common *p_common;
+	struct qed_generic_tlvs gen_tlvs;
+	struct qed_eth_stats stats;
+	int i;
+
+	memset(&gen_tlvs, 0, sizeof(gen_tlvs));
+	op->get_generic_tlv_data(cdev->ops_cookie, &gen_tlvs);
+
+	if (gen_tlvs.feat_flags & QED_TLV_IP_CSUM)
+		tlv->flags.ipv4_csum_offload = true;
+	if (gen_tlvs.feat_flags & QED_TLV_LSO)
+		tlv->flags.lso_supported = true;
+	tlv->flags.b_set = true;
+
+	for (i = 0; i < QED_TLV_MAC_COUNT; i++) {
+		if (is_valid_ether_addr(gen_tlvs.mac[i])) {
+			ether_addr_copy(tlv->mac[i], gen_tlvs.mac[i]);
+			tlv->mac_set[i] = true;
+		}
+	}
+
+	qed_get_vport_stats(cdev, &stats);
+	p_common = &stats.common;
+	tlv->rx_frames = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
+			 p_common->rx_bcast_pkts;
+	tlv->rx_frames_set = true;
+	tlv->rx_bytes = p_common->rx_ucast_bytes + p_common->rx_mcast_bytes +
+			p_common->rx_bcast_bytes;
+	tlv->rx_bytes_set = true;
+	tlv->tx_frames = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
+			 p_common->tx_bcast_pkts;
+	tlv->tx_frames_set = true;
+	tlv->tx_bytes = p_common->tx_ucast_bytes + p_common->tx_mcast_bytes +
+			p_common->tx_bcast_bytes;
+	tlv->tx_bytes_set = true;
+}
+
+int qed_mfw_fill_tlv_data(struct qed_hwfn *hwfn, enum qed_mfw_tlv_type type,
+			  union qed_mfw_tlv_data *tlv_buf)
+{
+	struct qed_dev *cdev = hwfn->cdev;
+	struct qed_common_cb_ops *ops;
+
+	ops = cdev->protocol_ops.common;
+	if (!ops || !ops->get_protocol_tlv_data || !ops->get_generic_tlv_data) {
+		DP_NOTICE(hwfn, "Can't collect TLV management info\n");
+		return -EINVAL;
+	}
+
+	switch (type) {
+	case QED_MFW_TLV_GENERIC:
+		qed_fill_generic_tlv_data(hwfn->cdev, &tlv_buf->generic);
+		break;
+	case QED_MFW_TLV_ETH:
+		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->eth);
+		break;
+	case QED_MFW_TLV_FCOE:
+		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->fcoe);
+		break;
+	case QED_MFW_TLV_ISCSI:
+		ops->get_protocol_tlv_data(cdev->ops_cookie, &tlv_buf->iscsi);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index e80f5e7..2612e3e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -1622,6 +1622,9 @@
 		case MFW_DRV_MSG_S_TAG_UPDATE:
 			qed_mcp_update_stag(p_hwfn, p_ptt);
 			break;
+		case MFW_DRV_MSG_GET_TLV_REQ:
+			qed_mfw_tlv_req(p_hwfn);
+			break;
 		default:
 			DP_INFO(p_hwfn, "Unimplemented MFW message %d\n", i);
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.h b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
index 250579b..632a838 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.h
@@ -213,6 +213,44 @@
 	QED_OV_WOL_ENABLED
 };
 
+enum qed_mfw_tlv_type {
+	QED_MFW_TLV_GENERIC = 0x1,	/* Core driver TLVs */
+	QED_MFW_TLV_ETH = 0x2,		/* L2 driver TLVs */
+	QED_MFW_TLV_FCOE = 0x4,		/* FCoE protocol TLVs */
+	QED_MFW_TLV_ISCSI = 0x8,	/* iSCSI protocol TLVs */
+	QED_MFW_TLV_MAX = 0x16,
+};
+
+struct qed_mfw_tlv_generic {
+#define QED_MFW_TLV_FLAGS_SIZE	2
+	struct {
+		u8 ipv4_csum_offload;
+		u8 lso_supported;
+		bool b_set;
+	} flags;
+
+#define QED_MFW_TLV_MAC_COUNT 3
+	/* First entry for primary MAC, 2 secondary MACs possible */
+	u8 mac[QED_MFW_TLV_MAC_COUNT][6];
+	bool mac_set[QED_MFW_TLV_MAC_COUNT];
+
+	u64 rx_frames;
+	bool rx_frames_set;
+	u64 rx_bytes;
+	bool rx_bytes_set;
+	u64 tx_frames;
+	bool tx_frames_set;
+	u64 tx_bytes;
+	bool tx_bytes_set;
+};
+
+union qed_mfw_tlv_data {
+	struct qed_mfw_tlv_generic generic;
+	struct qed_mfw_tlv_eth eth;
+	struct qed_mfw_tlv_fcoe fcoe;
+	struct qed_mfw_tlv_iscsi iscsi;
+};
+
 /**
  * @brief - returns the link params of the hw function
  *
@@ -561,6 +599,17 @@
 				   struct bist_nvm_image_att *p_image_att,
 				   u32 image_index);
 
+/**
+ * @brief - Processes the TLV request from the MFW, i.e., gets the required TLV
+ *          info from the qed client and sends it to the MFW.
+ *
+ * @param p_hwfn
+ * @param p_ptt
+ *
+ * @return 0 upon success.
+ */
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
+
 /* Using hwfn number (and not pf_num) is required since in CMT mode,
  * same pf_num may be used by two different hwfn
  * TODO - this shouldn't really be in .h file, but until all fields
@@ -621,6 +670,14 @@
 	u32			mcp_param;
 };
 
+struct qed_drv_tlv_hdr {
+	u8 tlv_type;
+	u8 tlv_length;	/* In dwords - not including this header */
+	u8 tlv_reserved;
+#define QED_DRV_TLV_FLAGS_CHANGED 0x01
+	u8 tlv_flags;
+};
+
 /**
  * @brief Initialize the interface with the MCP
  *
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
new file mode 100644
index 0000000..6c16158
--- /dev/null
+++ b/drivers/net/ethernet/qlogic/qed/qed_mng_tlv.c
@@ -0,0 +1,1337 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/bug.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/vmalloc.h>
+#include "qed.h"
+#include "qed_hw.h"
+#include "qed_mcp.h"
+#include "qed_reg_addr.h"
+
+#define TLV_TYPE(p)     (p[0])
+#define TLV_LENGTH(p)   (p[1])
+#define TLV_FLAGS(p)    (p[3])
+
+#define QED_TLV_DATA_MAX (14)
+struct qed_tlv_parsed_buf {
+	/* To be filled with the address to set in Value field */
+	void *p_val;
+
+	/* To be used internally in case the value has to be modified */
+	u8 data[QED_TLV_DATA_MAX];
+};
+
+static int qed_mfw_get_tlv_group(u8 tlv_type, u8 *tlv_group)
+{
+	switch (tlv_type) {
+	case DRV_TLV_FEATURE_FLAGS:
+	case DRV_TLV_LOCAL_ADMIN_ADDR:
+	case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+	case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+	case DRV_TLV_OS_DRIVER_STATES:
+	case DRV_TLV_PXE_BOOT_PROGRESS:
+	case DRV_TLV_RX_FRAMES_RECEIVED:
+	case DRV_TLV_RX_BYTES_RECEIVED:
+	case DRV_TLV_TX_FRAMES_SENT:
+	case DRV_TLV_TX_BYTES_SENT:
+	case DRV_TLV_NPIV_ENABLED:
+	case DRV_TLV_PCIE_BUS_RX_UTILIZATION:
+	case DRV_TLV_PCIE_BUS_TX_UTILIZATION:
+	case DRV_TLV_DEVICE_CPU_CORES_UTILIZATION:
+	case DRV_TLV_LAST_VALID_DCC_TLV_RECEIVED:
+	case DRV_TLV_NCSI_RX_BYTES_RECEIVED:
+	case DRV_TLV_NCSI_TX_BYTES_SENT:
+		*tlv_group |= QED_MFW_TLV_GENERIC;
+		break;
+	case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+	case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+	case DRV_TLV_PROMISCUOUS_MODE:
+	case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+	case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+	case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+	case DRV_TLV_IOV_OFFLOAD:
+	case DRV_TLV_TX_QUEUES_EMPTY:
+	case DRV_TLV_RX_QUEUES_EMPTY:
+	case DRV_TLV_TX_QUEUES_FULL:
+	case DRV_TLV_RX_QUEUES_FULL:
+		*tlv_group |= QED_MFW_TLV_ETH;
+		break;
+	case DRV_TLV_SCSI_TO:
+	case DRV_TLV_R_T_TOV:
+	case DRV_TLV_R_A_TOV:
+	case DRV_TLV_E_D_TOV:
+	case DRV_TLV_CR_TOV:
+	case DRV_TLV_BOOT_TYPE:
+	case DRV_TLV_NPIV_STATE:
+	case DRV_TLV_NUM_OF_NPIV_IDS:
+	case DRV_TLV_SWITCH_NAME:
+	case DRV_TLV_SWITCH_PORT_NUM:
+	case DRV_TLV_SWITCH_PORT_ID:
+	case DRV_TLV_VENDOR_NAME:
+	case DRV_TLV_SWITCH_MODEL:
+	case DRV_TLV_SWITCH_FW_VER:
+	case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+	case DRV_TLV_PORT_ALIAS:
+	case DRV_TLV_PORT_STATE:
+	case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_LINK_FAILURE_COUNT:
+	case DRV_TLV_FCOE_BOOT_PROGRESS:
+	case DRV_TLV_RX_BROADCAST_PACKETS:
+	case DRV_TLV_TX_BROADCAST_PACKETS:
+	case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+	case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+	case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+	case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+	case DRV_TLV_FCOE_TX_FRAMES_SENT:
+	case DRV_TLV_FCOE_TX_BYTES_SENT:
+	case DRV_TLV_CRC_ERROR_COUNT:
+	case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+	case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+	case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+	case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+	case DRV_TLV_DISPARITY_ERROR_COUNT:
+	case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+	case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+	case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+	case DRV_TLV_LAST_FLOGI_RJT:
+	case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+	case DRV_TLV_FDISCS_SENT_COUNT:
+	case DRV_TLV_FDISC_ACCS_RECEIVED:
+	case DRV_TLV_FDISC_RJTS_RECEIVED:
+	case DRV_TLV_PLOGI_SENT_COUNT:
+	case DRV_TLV_PLOGI_ACCS_RECEIVED:
+	case DRV_TLV_PLOGI_RJTS_RECEIVED:
+	case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_1_TIMESTAMP:
+	case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_2_TIMESTAMP:
+	case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_3_TIMESTAMP:
+	case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_4_TIMESTAMP:
+	case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_5_TIMESTAMP:
+	case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+	case DRV_TLV_LOGOS_ISSUED:
+	case DRV_TLV_LOGO_ACCS_RECEIVED:
+	case DRV_TLV_LOGO_RJTS_RECEIVED:
+	case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_1_TIMESTAMP:
+	case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_2_TIMESTAMP:
+	case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_3_TIMESTAMP:
+	case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_4_TIMESTAMP:
+	case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_5_TIMESTAMP:
+	case DRV_TLV_LOGOS_RECEIVED:
+	case DRV_TLV_ACCS_ISSUED:
+	case DRV_TLV_PRLIS_ISSUED:
+	case DRV_TLV_ACCS_RECEIVED:
+	case DRV_TLV_ABTS_SENT_COUNT:
+	case DRV_TLV_ABTS_ACCS_RECEIVED:
+	case DRV_TLV_ABTS_RJTS_RECEIVED:
+	case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_1_TIMESTAMP:
+	case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_2_TIMESTAMP:
+	case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_3_TIMESTAMP:
+	case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_4_TIMESTAMP:
+	case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_5_TIMESTAMP:
+	case DRV_TLV_RSCNS_RECEIVED:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+	case DRV_TLV_LUN_RESETS_ISSUED:
+	case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+	case DRV_TLV_TPRLOS_SENT:
+	case DRV_TLV_NOS_SENT_COUNT:
+	case DRV_TLV_NOS_RECEIVED_COUNT:
+	case DRV_TLV_OLS_COUNT:
+	case DRV_TLV_LR_COUNT:
+	case DRV_TLV_LRR_COUNT:
+	case DRV_TLV_LIP_SENT_COUNT:
+	case DRV_TLV_LIP_RECEIVED_COUNT:
+	case DRV_TLV_EOFA_COUNT:
+	case DRV_TLV_EOFNI_COUNT:
+	case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+	case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+	case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+	case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+	case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+	case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+	case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+	case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+	case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+	case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+		*tlv_group = QED_MFW_TLV_FCOE;
+		break;
+	case DRV_TLV_TARGET_LLMNR_ENABLED:
+	case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+	case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+	case DRV_TLV_AUTHENTICATION_METHOD:
+	case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+	case DRV_TLV_MAX_FRAME_SIZE:
+	case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+	case DRV_TLV_ISCSI_BOOT_PROGRESS:
+	case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+	case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+	case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+	case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+	case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+	case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+		*tlv_group |= QED_MFW_TLV_ISCSI;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Returns the size of the data buffer or -1 in case the TLV data is not available. */
+static int
+qed_mfw_get_gen_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+			  struct qed_mfw_tlv_generic *p_drv_buf,
+			  struct qed_tlv_parsed_buf *p_buf)
+{
+	switch (p_tlv->tlv_type) {
+	case DRV_TLV_FEATURE_FLAGS:
+		if (p_drv_buf->flags.b_set) {
+			memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+			p_buf->data[0] = p_drv_buf->flags.ipv4_csum_offload ?
+			    1 : 0;
+			p_buf->data[0] |= (p_drv_buf->flags.lso_supported ?
+					   1 : 0) << 1;
+			p_buf->p_val = p_buf->data;
+			return QED_MFW_TLV_FLAGS_SIZE;
+		}
+		break;
+
+	case DRV_TLV_LOCAL_ADMIN_ADDR:
+	case DRV_TLV_ADDITIONAL_MAC_ADDR_1:
+	case DRV_TLV_ADDITIONAL_MAC_ADDR_2:
+		{
+			int idx = p_tlv->tlv_type - DRV_TLV_LOCAL_ADMIN_ADDR;
+
+			if (p_drv_buf->mac_set[idx]) {
+				p_buf->p_val = p_drv_buf->mac[idx];
+				return ETH_ALEN;
+			}
+			break;
+		}
+
+	case DRV_TLV_RX_FRAMES_RECEIVED:
+		if (p_drv_buf->rx_frames_set) {
+			p_buf->p_val = &p_drv_buf->rx_frames;
+			return sizeof(p_drv_buf->rx_frames);
+		}
+		break;
+	case DRV_TLV_RX_BYTES_RECEIVED:
+		if (p_drv_buf->rx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->rx_bytes;
+			return sizeof(p_drv_buf->rx_bytes);
+		}
+		break;
+	case DRV_TLV_TX_FRAMES_SENT:
+		if (p_drv_buf->tx_frames_set) {
+			p_buf->p_val = &p_drv_buf->tx_frames;
+			return sizeof(p_drv_buf->tx_frames);
+		}
+		break;
+	case DRV_TLV_TX_BYTES_SENT:
+		if (p_drv_buf->tx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->tx_bytes;
+			return sizeof(p_drv_buf->tx_bytes);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static int
+qed_mfw_get_eth_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+			  struct qed_mfw_tlv_eth *p_drv_buf,
+			  struct qed_tlv_parsed_buf *p_buf)
+{
+	switch (p_tlv->tlv_type) {
+	case DRV_TLV_LSO_MAX_OFFLOAD_SIZE:
+		if (p_drv_buf->lso_maxoff_size_set) {
+			p_buf->p_val = &p_drv_buf->lso_maxoff_size;
+			return sizeof(p_drv_buf->lso_maxoff_size);
+		}
+		break;
+	case DRV_TLV_LSO_MIN_SEGMENT_COUNT:
+		if (p_drv_buf->lso_minseg_size_set) {
+			p_buf->p_val = &p_drv_buf->lso_minseg_size;
+			return sizeof(p_drv_buf->lso_minseg_size);
+		}
+		break;
+	case DRV_TLV_PROMISCUOUS_MODE:
+		if (p_drv_buf->prom_mode_set) {
+			p_buf->p_val = &p_drv_buf->prom_mode;
+			return sizeof(p_drv_buf->prom_mode);
+		}
+		break;
+	case DRV_TLV_TX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->tx_descr_size_set) {
+			p_buf->p_val = &p_drv_buf->tx_descr_size;
+			return sizeof(p_drv_buf->tx_descr_size);
+		}
+		break;
+	case DRV_TLV_RX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->rx_descr_size_set) {
+			p_buf->p_val = &p_drv_buf->rx_descr_size;
+			return sizeof(p_drv_buf->rx_descr_size);
+		}
+		break;
+	case DRV_TLV_NUM_OF_NET_QUEUE_VMQ_CFG:
+		if (p_drv_buf->netq_count_set) {
+			p_buf->p_val = &p_drv_buf->netq_count;
+			return sizeof(p_drv_buf->netq_count);
+		}
+		break;
+	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV4:
+		if (p_drv_buf->tcp4_offloads_set) {
+			p_buf->p_val = &p_drv_buf->tcp4_offloads;
+			return sizeof(p_drv_buf->tcp4_offloads);
+		}
+		break;
+	case DRV_TLV_NUM_OFFLOADED_CONNECTIONS_TCP_IPV6:
+		if (p_drv_buf->tcp6_offloads_set) {
+			p_buf->p_val = &p_drv_buf->tcp6_offloads;
+			return sizeof(p_drv_buf->tcp6_offloads);
+		}
+		break;
+	case DRV_TLV_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->tx_descr_qdepth_set) {
+			p_buf->p_val = &p_drv_buf->tx_descr_qdepth;
+			return sizeof(p_drv_buf->tx_descr_qdepth);
+		}
+		break;
+	case DRV_TLV_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->rx_descr_qdepth_set) {
+			p_buf->p_val = &p_drv_buf->rx_descr_qdepth;
+			return sizeof(p_drv_buf->rx_descr_qdepth);
+		}
+		break;
+	case DRV_TLV_IOV_OFFLOAD:
+		if (p_drv_buf->iov_offload_set) {
+			p_buf->p_val = &p_drv_buf->iov_offload;
+			return sizeof(p_drv_buf->iov_offload);
+		}
+		break;
+	case DRV_TLV_TX_QUEUES_EMPTY:
+		if (p_drv_buf->txqs_empty_set) {
+			p_buf->p_val = &p_drv_buf->txqs_empty;
+			return sizeof(p_drv_buf->txqs_empty);
+		}
+		break;
+	case DRV_TLV_RX_QUEUES_EMPTY:
+		if (p_drv_buf->rxqs_empty_set) {
+			p_buf->p_val = &p_drv_buf->rxqs_empty;
+			return sizeof(p_drv_buf->rxqs_empty);
+		}
+		break;
+	case DRV_TLV_TX_QUEUES_FULL:
+		if (p_drv_buf->num_txqs_full_set) {
+			p_buf->p_val = &p_drv_buf->num_txqs_full;
+			return sizeof(p_drv_buf->num_txqs_full);
+		}
+		break;
+	case DRV_TLV_RX_QUEUES_FULL:
+		if (p_drv_buf->num_rxqs_full_set) {
+			p_buf->p_val = &p_drv_buf->num_rxqs_full;
+			return sizeof(p_drv_buf->num_rxqs_full);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static int
+qed_mfw_get_tlv_time_value(struct qed_mfw_tlv_time *p_time,
+			   struct qed_tlv_parsed_buf *p_buf)
+{
+	if (!p_time->b_set)
+		return -1;
+
+	/* Validate numbers */
+	if (p_time->month > 12)
+		p_time->month = 0;
+	if (p_time->day > 31)
+		p_time->day = 0;
+	if (p_time->hour > 23)
+		p_time->hour = 0;
+	if (p_time->min > 59)
+		p_time->min = 0;
+	if (p_time->msec > 999)
+		p_time->msec = 0;
+	if (p_time->usec > 999)
+		p_time->usec = 0;
+
+	memset(p_buf->data, 0, sizeof(u8) * QED_TLV_DATA_MAX);
+	snprintf(p_buf->data, 14, "%d%d%d%d%d%d",
+		 p_time->month, p_time->day,
+		 p_time->hour, p_time->min, p_time->msec, p_time->usec);
+
+	p_buf->p_val = p_buf->data;
+
+	return QED_MFW_TLV_TIME_SIZE;
+}
+
+static int
+qed_mfw_get_fcoe_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+			   struct qed_mfw_tlv_fcoe *p_drv_buf,
+			   struct qed_tlv_parsed_buf *p_buf)
+{
+	struct qed_mfw_tlv_time *p_time;
+	u8 idx;
+
+	switch (p_tlv->tlv_type) {
+	case DRV_TLV_SCSI_TO:
+		if (p_drv_buf->scsi_timeout_set) {
+			p_buf->p_val = &p_drv_buf->scsi_timeout;
+			return sizeof(p_drv_buf->scsi_timeout);
+		}
+		break;
+	case DRV_TLV_R_T_TOV:
+		if (p_drv_buf->rt_tov_set) {
+			p_buf->p_val = &p_drv_buf->rt_tov;
+			return sizeof(p_drv_buf->rt_tov);
+		}
+		break;
+	case DRV_TLV_R_A_TOV:
+		if (p_drv_buf->ra_tov_set) {
+			p_buf->p_val = &p_drv_buf->ra_tov;
+			return sizeof(p_drv_buf->ra_tov);
+		}
+		break;
+	case DRV_TLV_E_D_TOV:
+		if (p_drv_buf->ed_tov_set) {
+			p_buf->p_val = &p_drv_buf->ed_tov;
+			return sizeof(p_drv_buf->ed_tov);
+		}
+		break;
+	case DRV_TLV_CR_TOV:
+		if (p_drv_buf->cr_tov_set) {
+			p_buf->p_val = &p_drv_buf->cr_tov;
+			return sizeof(p_drv_buf->cr_tov);
+		}
+		break;
+	case DRV_TLV_BOOT_TYPE:
+		if (p_drv_buf->boot_type_set) {
+			p_buf->p_val = &p_drv_buf->boot_type;
+			return sizeof(p_drv_buf->boot_type);
+		}
+		break;
+	case DRV_TLV_NPIV_STATE:
+		if (p_drv_buf->npiv_state_set) {
+			p_buf->p_val = &p_drv_buf->npiv_state;
+			return sizeof(p_drv_buf->npiv_state);
+		}
+		break;
+	case DRV_TLV_NUM_OF_NPIV_IDS:
+		if (p_drv_buf->num_npiv_ids_set) {
+			p_buf->p_val = &p_drv_buf->num_npiv_ids;
+			return sizeof(p_drv_buf->num_npiv_ids);
+		}
+		break;
+	case DRV_TLV_SWITCH_NAME:
+		if (p_drv_buf->switch_name_set) {
+			p_buf->p_val = &p_drv_buf->switch_name;
+			return sizeof(p_drv_buf->switch_name);
+		}
+		break;
+	case DRV_TLV_SWITCH_PORT_NUM:
+		if (p_drv_buf->switch_portnum_set) {
+			p_buf->p_val = &p_drv_buf->switch_portnum;
+			return sizeof(p_drv_buf->switch_portnum);
+		}
+		break;
+	case DRV_TLV_SWITCH_PORT_ID:
+		if (p_drv_buf->switch_portid_set) {
+			p_buf->p_val = &p_drv_buf->switch_portid;
+			return sizeof(p_drv_buf->switch_portid);
+		}
+		break;
+	case DRV_TLV_VENDOR_NAME:
+		if (p_drv_buf->vendor_name_set) {
+			p_buf->p_val = &p_drv_buf->vendor_name;
+			return sizeof(p_drv_buf->vendor_name);
+		}
+		break;
+	case DRV_TLV_SWITCH_MODEL:
+		if (p_drv_buf->switch_model_set) {
+			p_buf->p_val = &p_drv_buf->switch_model;
+			return sizeof(p_drv_buf->switch_model);
+		}
+		break;
+	case DRV_TLV_SWITCH_FW_VER:
+		if (p_drv_buf->switch_fw_version_set) {
+			p_buf->p_val = &p_drv_buf->switch_fw_version;
+			return sizeof(p_drv_buf->switch_fw_version);
+		}
+		break;
+	case DRV_TLV_QOS_PRIORITY_PER_802_1P:
+		if (p_drv_buf->qos_pri_set) {
+			p_buf->p_val = &p_drv_buf->qos_pri;
+			return sizeof(p_drv_buf->qos_pri);
+		}
+		break;
+	case DRV_TLV_PORT_ALIAS:
+		if (p_drv_buf->port_alias_set) {
+			p_buf->p_val = &p_drv_buf->port_alias;
+			return sizeof(p_drv_buf->port_alias);
+		}
+		break;
+	case DRV_TLV_PORT_STATE:
+		if (p_drv_buf->port_state_set) {
+			p_buf->p_val = &p_drv_buf->port_state;
+			return sizeof(p_drv_buf->port_state);
+		}
+		break;
+	case DRV_TLV_FIP_TX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->fip_tx_descr_size_set) {
+			p_buf->p_val = &p_drv_buf->fip_tx_descr_size;
+			return sizeof(p_drv_buf->fip_tx_descr_size);
+		}
+		break;
+	case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->fip_rx_descr_size_set) {
+			p_buf->p_val = &p_drv_buf->fip_rx_descr_size;
+			return sizeof(p_drv_buf->fip_rx_descr_size);
+		}
+		break;
+	case DRV_TLV_LINK_FAILURE_COUNT:
+		if (p_drv_buf->link_failures_set) {
+			p_buf->p_val = &p_drv_buf->link_failures;
+			return sizeof(p_drv_buf->link_failures);
+		}
+		break;
+	case DRV_TLV_FCOE_BOOT_PROGRESS:
+		if (p_drv_buf->fcoe_boot_progress_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_boot_progress;
+			return sizeof(p_drv_buf->fcoe_boot_progress);
+		}
+		break;
+	case DRV_TLV_RX_BROADCAST_PACKETS:
+		if (p_drv_buf->rx_bcast_set) {
+			p_buf->p_val = &p_drv_buf->rx_bcast;
+			return sizeof(p_drv_buf->rx_bcast);
+		}
+		break;
+	case DRV_TLV_TX_BROADCAST_PACKETS:
+		if (p_drv_buf->tx_bcast_set) {
+			p_buf->p_val = &p_drv_buf->tx_bcast;
+			return sizeof(p_drv_buf->tx_bcast);
+		}
+		break;
+	case DRV_TLV_FCOE_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->fcoe_txq_depth_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_txq_depth;
+			return sizeof(p_drv_buf->fcoe_txq_depth);
+		}
+		break;
+	case DRV_TLV_FCOE_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->fcoe_rxq_depth_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_rxq_depth;
+			return sizeof(p_drv_buf->fcoe_rxq_depth);
+		}
+		break;
+	case DRV_TLV_FCOE_RX_FRAMES_RECEIVED:
+		if (p_drv_buf->fcoe_rx_frames_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_rx_frames;
+			return sizeof(p_drv_buf->fcoe_rx_frames);
+		}
+		break;
+	case DRV_TLV_FCOE_RX_BYTES_RECEIVED:
+		if (p_drv_buf->fcoe_rx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_rx_bytes;
+			return sizeof(p_drv_buf->fcoe_rx_bytes);
+		}
+		break;
+	case DRV_TLV_FCOE_TX_FRAMES_SENT:
+		if (p_drv_buf->fcoe_tx_frames_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_tx_frames;
+			return sizeof(p_drv_buf->fcoe_tx_frames);
+		}
+		break;
+	case DRV_TLV_FCOE_TX_BYTES_SENT:
+		if (p_drv_buf->fcoe_tx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->fcoe_tx_bytes;
+			return sizeof(p_drv_buf->fcoe_tx_bytes);
+		}
+		break;
+	case DRV_TLV_CRC_ERROR_COUNT:
+		if (p_drv_buf->crc_count_set) {
+			p_buf->p_val = &p_drv_buf->crc_count;
+			return sizeof(p_drv_buf->crc_count);
+		}
+		break;
+	case DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_2_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_3_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_4_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_CRC_ERROR_5_RECEIVED_SOURCE_FC_ID:
+		idx = (p_tlv->tlv_type -
+		       DRV_TLV_CRC_ERROR_1_RECEIVED_SOURCE_FC_ID) / 2;
+
+		if (p_drv_buf->crc_err_src_fcid_set[idx]) {
+			p_buf->p_val = &p_drv_buf->crc_err_src_fcid[idx];
+			return sizeof(p_drv_buf->crc_err_src_fcid[idx]);
+		}
+		break;
+	case DRV_TLV_CRC_ERROR_1_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_2_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_3_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_4_TIMESTAMP:
+	case DRV_TLV_CRC_ERROR_5_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_CRC_ERROR_1_TIMESTAMP) / 2;
+
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->crc_err[idx],
+						  p_buf);
+	case DRV_TLV_LOSS_OF_SYNC_ERROR_COUNT:
+		if (p_drv_buf->losync_err_set) {
+			p_buf->p_val = &p_drv_buf->losync_err;
+			return sizeof(p_drv_buf->losync_err);
+		}
+		break;
+	case DRV_TLV_LOSS_OF_SIGNAL_ERRORS:
+		if (p_drv_buf->losig_err_set) {
+			p_buf->p_val = &p_drv_buf->losig_err;
+			return sizeof(p_drv_buf->losig_err);
+		}
+		break;
+	case DRV_TLV_PRIMITIVE_SEQUENCE_PROTOCOL_ERROR_COUNT:
+		if (p_drv_buf->primtive_err_set) {
+			p_buf->p_val = &p_drv_buf->primtive_err;
+			return sizeof(p_drv_buf->primtive_err);
+		}
+		break;
+	case DRV_TLV_DISPARITY_ERROR_COUNT:
+		if (p_drv_buf->disparity_err_set) {
+			p_buf->p_val = &p_drv_buf->disparity_err;
+			return sizeof(p_drv_buf->disparity_err);
+		}
+		break;
+	case DRV_TLV_CODE_VIOLATION_ERROR_COUNT:
+		if (p_drv_buf->code_violation_err_set) {
+			p_buf->p_val = &p_drv_buf->code_violation_err;
+			return sizeof(p_drv_buf->code_violation_err);
+		}
+		break;
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_2:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_3:
+	case DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_4:
+		idx = p_tlv->tlv_type -
+			DRV_TLV_LAST_FLOGI_ISSUED_COMMON_PARAMETERS_WORD_1;
+		if (p_drv_buf->flogi_param_set[idx]) {
+			p_buf->p_val = &p_drv_buf->flogi_param[idx];
+			return sizeof(p_drv_buf->flogi_param[idx]);
+		}
+		break;
+	case DRV_TLV_LAST_FLOGI_TIMESTAMP:
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_tstamp,
+						  p_buf);
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_2:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_3:
+	case DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_4:
+		idx = p_tlv->tlv_type -
+			DRV_TLV_LAST_FLOGI_ACC_COMMON_PARAMETERS_WORD_1;
+
+		if (p_drv_buf->flogi_acc_param_set[idx]) {
+			p_buf->p_val = &p_drv_buf->flogi_acc_param[idx];
+			return sizeof(p_drv_buf->flogi_acc_param[idx]);
+		}
+		break;
+	case DRV_TLV_LAST_FLOGI_ACC_TIMESTAMP:
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_acc_tstamp,
+						  p_buf);
+	case DRV_TLV_LAST_FLOGI_RJT:
+		if (p_drv_buf->flogi_rjt_set) {
+			p_buf->p_val = &p_drv_buf->flogi_rjt;
+			return sizeof(p_drv_buf->flogi_rjt);
+		}
+		break;
+	case DRV_TLV_LAST_FLOGI_RJT_TIMESTAMP:
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->flogi_rjt_tstamp,
+						  p_buf);
+	case DRV_TLV_FDISCS_SENT_COUNT:
+		if (p_drv_buf->fdiscs_set) {
+			p_buf->p_val = &p_drv_buf->fdiscs;
+			return sizeof(p_drv_buf->fdiscs);
+		}
+		break;
+	case DRV_TLV_FDISC_ACCS_RECEIVED:
+		if (p_drv_buf->fdisc_acc_set) {
+			p_buf->p_val = &p_drv_buf->fdisc_acc;
+			return sizeof(p_drv_buf->fdisc_acc);
+		}
+		break;
+	case DRV_TLV_FDISC_RJTS_RECEIVED:
+		if (p_drv_buf->fdisc_rjt_set) {
+			p_buf->p_val = &p_drv_buf->fdisc_rjt;
+			return sizeof(p_drv_buf->fdisc_rjt);
+		}
+		break;
+	case DRV_TLV_PLOGI_SENT_COUNT:
+		if (p_drv_buf->plogi_set) {
+			p_buf->p_val = &p_drv_buf->plogi;
+			return sizeof(p_drv_buf->plogi);
+		}
+		break;
+	case DRV_TLV_PLOGI_ACCS_RECEIVED:
+		if (p_drv_buf->plogi_acc_set) {
+			p_buf->p_val = &p_drv_buf->plogi_acc;
+			return sizeof(p_drv_buf->plogi_acc);
+		}
+		break;
+	case DRV_TLV_PLOGI_RJTS_RECEIVED:
+		if (p_drv_buf->plogi_rjt_set) {
+			p_buf->p_val = &p_drv_buf->plogi_rjt;
+			return sizeof(p_drv_buf->plogi_rjt);
+		}
+		break;
+	case DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_2_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_3_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_4_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_PLOGI_5_SENT_DESTINATION_FC_ID:
+		idx = (p_tlv->tlv_type -
+		       DRV_TLV_PLOGI_1_SENT_DESTINATION_FC_ID) / 2;
+
+		if (p_drv_buf->plogi_dst_fcid_set[idx]) {
+			p_buf->p_val = &p_drv_buf->plogi_dst_fcid[idx];
+			return sizeof(p_drv_buf->plogi_dst_fcid[idx]);
+		}
+		break;
+	case DRV_TLV_PLOGI_1_TIMESTAMP:
+	case DRV_TLV_PLOGI_2_TIMESTAMP:
+	case DRV_TLV_PLOGI_3_TIMESTAMP:
+	case DRV_TLV_PLOGI_4_TIMESTAMP:
+	case DRV_TLV_PLOGI_5_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_TIMESTAMP) / 2;
+
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->plogi_tstamp[idx],
+						  p_buf);
+	case DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_2_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_3_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_4_ACC_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_PLOGI_5_ACC_RECEIVED_SOURCE_FC_ID:
+		idx = (p_tlv->tlv_type -
+		       DRV_TLV_PLOGI_1_ACC_RECEIVED_SOURCE_FC_ID) / 2;
+
+		if (p_drv_buf->plogi_acc_src_fcid_set[idx]) {
+			p_buf->p_val = &p_drv_buf->plogi_acc_src_fcid[idx];
+			return sizeof(p_drv_buf->plogi_acc_src_fcid[idx]);
+		}
+		break;
+	case DRV_TLV_PLOGI_1_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_2_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_3_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_4_ACC_TIMESTAMP:
+	case DRV_TLV_PLOGI_5_ACC_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_PLOGI_1_ACC_TIMESTAMP) / 2;
+		p_time = &p_drv_buf->plogi_acc_tstamp[idx];
+
+		return qed_mfw_get_tlv_time_value(p_time, p_buf);
+	case DRV_TLV_LOGOS_ISSUED:
+		if (p_drv_buf->tx_plogos_set) {
+			p_buf->p_val = &p_drv_buf->tx_plogos;
+			return sizeof(p_drv_buf->tx_plogos);
+		}
+		break;
+	case DRV_TLV_LOGO_ACCS_RECEIVED:
+		if (p_drv_buf->plogo_acc_set) {
+			p_buf->p_val = &p_drv_buf->plogo_acc;
+			return sizeof(p_drv_buf->plogo_acc);
+		}
+		break;
+	case DRV_TLV_LOGO_RJTS_RECEIVED:
+		if (p_drv_buf->plogo_rjt_set) {
+			p_buf->p_val = &p_drv_buf->plogo_rjt;
+			return sizeof(p_drv_buf->plogo_rjt);
+		}
+		break;
+	case DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_2_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_3_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_4_RECEIVED_SOURCE_FC_ID:
+	case DRV_TLV_LOGO_5_RECEIVED_SOURCE_FC_ID:
+		idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_RECEIVED_SOURCE_FC_ID) /
+			2;
+
+		if (p_drv_buf->plogo_src_fcid_set[idx]) {
+			p_buf->p_val = &p_drv_buf->plogo_src_fcid[idx];
+			return sizeof(p_drv_buf->plogo_src_fcid[idx]);
+		}
+		break;
+	case DRV_TLV_LOGO_1_TIMESTAMP:
+	case DRV_TLV_LOGO_2_TIMESTAMP:
+	case DRV_TLV_LOGO_3_TIMESTAMP:
+	case DRV_TLV_LOGO_4_TIMESTAMP:
+	case DRV_TLV_LOGO_5_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_LOGO_1_TIMESTAMP) / 2;
+
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->plogo_tstamp[idx],
+						  p_buf);
+	case DRV_TLV_LOGOS_RECEIVED:
+		if (p_drv_buf->rx_logos_set) {
+			p_buf->p_val = &p_drv_buf->rx_logos;
+			return sizeof(p_drv_buf->rx_logos);
+		}
+		break;
+	case DRV_TLV_ACCS_ISSUED:
+		if (p_drv_buf->tx_accs_set) {
+			p_buf->p_val = &p_drv_buf->tx_accs;
+			return sizeof(p_drv_buf->tx_accs);
+		}
+		break;
+	case DRV_TLV_PRLIS_ISSUED:
+		if (p_drv_buf->tx_prlis_set) {
+			p_buf->p_val = &p_drv_buf->tx_prlis;
+			return sizeof(p_drv_buf->tx_prlis);
+		}
+		break;
+	case DRV_TLV_ACCS_RECEIVED:
+		if (p_drv_buf->rx_accs_set) {
+			p_buf->p_val = &p_drv_buf->rx_accs;
+			return sizeof(p_drv_buf->rx_accs);
+		}
+		break;
+	case DRV_TLV_ABTS_SENT_COUNT:
+		if (p_drv_buf->tx_abts_set) {
+			p_buf->p_val = &p_drv_buf->tx_abts;
+			return sizeof(p_drv_buf->tx_abts);
+		}
+		break;
+	case DRV_TLV_ABTS_ACCS_RECEIVED:
+		if (p_drv_buf->rx_abts_acc_set) {
+			p_buf->p_val = &p_drv_buf->rx_abts_acc;
+			return sizeof(p_drv_buf->rx_abts_acc);
+		}
+		break;
+	case DRV_TLV_ABTS_RJTS_RECEIVED:
+		if (p_drv_buf->rx_abts_rjt_set) {
+			p_buf->p_val = &p_drv_buf->rx_abts_rjt;
+			return sizeof(p_drv_buf->rx_abts_rjt);
+		}
+		break;
+	case DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_2_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_3_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_4_SENT_DESTINATION_FC_ID:
+	case DRV_TLV_ABTS_5_SENT_DESTINATION_FC_ID:
+		idx = (p_tlv->tlv_type -
+		       DRV_TLV_ABTS_1_SENT_DESTINATION_FC_ID) / 2;
+
+		if (p_drv_buf->abts_dst_fcid_set[idx]) {
+			p_buf->p_val = &p_drv_buf->abts_dst_fcid[idx];
+			return sizeof(p_drv_buf->abts_dst_fcid[idx]);
+		}
+		break;
+	case DRV_TLV_ABTS_1_TIMESTAMP:
+	case DRV_TLV_ABTS_2_TIMESTAMP:
+	case DRV_TLV_ABTS_3_TIMESTAMP:
+	case DRV_TLV_ABTS_4_TIMESTAMP:
+	case DRV_TLV_ABTS_5_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_ABTS_1_TIMESTAMP) / 2;
+
+		return qed_mfw_get_tlv_time_value(&p_drv_buf->abts_tstamp[idx],
+						  p_buf);
+	case DRV_TLV_RSCNS_RECEIVED:
+		if (p_drv_buf->rx_rscn_set) {
+			p_buf->p_val = &p_drv_buf->rx_rscn;
+			return sizeof(p_drv_buf->rx_rscn);
+		}
+		break;
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_2:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_3:
+	case DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_4:
+		idx = p_tlv->tlv_type - DRV_TLV_LAST_RSCN_RECEIVED_N_PORT_1;
+
+		if (p_drv_buf->rx_rscn_nport_set[idx]) {
+			p_buf->p_val = &p_drv_buf->rx_rscn_nport[idx];
+			return sizeof(p_drv_buf->rx_rscn_nport[idx]);
+		}
+		break;
+	case DRV_TLV_LUN_RESETS_ISSUED:
+		if (p_drv_buf->tx_lun_rst_set) {
+			p_buf->p_val = &p_drv_buf->tx_lun_rst;
+			return sizeof(p_drv_buf->tx_lun_rst);
+		}
+		break;
+	case DRV_TLV_ABORT_TASK_SETS_ISSUED:
+		if (p_drv_buf->abort_task_sets_set) {
+			p_buf->p_val = &p_drv_buf->abort_task_sets;
+			return sizeof(p_drv_buf->abort_task_sets);
+		}
+		break;
+	case DRV_TLV_TPRLOS_SENT:
+		if (p_drv_buf->tx_tprlos_set) {
+			p_buf->p_val = &p_drv_buf->tx_tprlos;
+			return sizeof(p_drv_buf->tx_tprlos);
+		}
+		break;
+	case DRV_TLV_NOS_SENT_COUNT:
+		if (p_drv_buf->tx_nos_set) {
+			p_buf->p_val = &p_drv_buf->tx_nos;
+			return sizeof(p_drv_buf->tx_nos);
+		}
+		break;
+	case DRV_TLV_NOS_RECEIVED_COUNT:
+		if (p_drv_buf->rx_nos_set) {
+			p_buf->p_val = &p_drv_buf->rx_nos;
+			return sizeof(p_drv_buf->rx_nos);
+		}
+		break;
+	case DRV_TLV_OLS_COUNT:
+		if (p_drv_buf->ols_set) {
+			p_buf->p_val = &p_drv_buf->ols;
+			return sizeof(p_drv_buf->ols);
+		}
+		break;
+	case DRV_TLV_LR_COUNT:
+		if (p_drv_buf->lr_set) {
+			p_buf->p_val = &p_drv_buf->lr;
+			return sizeof(p_drv_buf->lr);
+		}
+		break;
+	case DRV_TLV_LRR_COUNT:
+		if (p_drv_buf->lrr_set) {
+			p_buf->p_val = &p_drv_buf->lrr;
+			return sizeof(p_drv_buf->lrr);
+		}
+		break;
+	case DRV_TLV_LIP_SENT_COUNT:
+		if (p_drv_buf->tx_lip_set) {
+			p_buf->p_val = &p_drv_buf->tx_lip;
+			return sizeof(p_drv_buf->tx_lip);
+		}
+		break;
+	case DRV_TLV_LIP_RECEIVED_COUNT:
+		if (p_drv_buf->rx_lip_set) {
+			p_buf->p_val = &p_drv_buf->rx_lip;
+			return sizeof(p_drv_buf->rx_lip);
+		}
+		break;
+	case DRV_TLV_EOFA_COUNT:
+		if (p_drv_buf->eofa_set) {
+			p_buf->p_val = &p_drv_buf->eofa;
+			return sizeof(p_drv_buf->eofa);
+		}
+		break;
+	case DRV_TLV_EOFNI_COUNT:
+		if (p_drv_buf->eofni_set) {
+			p_buf->p_val = &p_drv_buf->eofni;
+			return sizeof(p_drv_buf->eofni);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_CHECK_CONDITION_COUNT:
+		if (p_drv_buf->scsi_chks_set) {
+			p_buf->p_val = &p_drv_buf->scsi_chks;
+			return sizeof(p_drv_buf->scsi_chks);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_CONDITION_MET_COUNT:
+		if (p_drv_buf->scsi_cond_met_set) {
+			p_buf->p_val = &p_drv_buf->scsi_cond_met;
+			return sizeof(p_drv_buf->scsi_cond_met);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_BUSY_COUNT:
+		if (p_drv_buf->scsi_busy_set) {
+			p_buf->p_val = &p_drv_buf->scsi_busy;
+			return sizeof(p_drv_buf->scsi_busy);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_INTERMEDIATE_COUNT:
+		if (p_drv_buf->scsi_inter_set) {
+			p_buf->p_val = &p_drv_buf->scsi_inter;
+			return sizeof(p_drv_buf->scsi_inter);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_INTERMEDIATE_CONDITION_MET_COUNT:
+		if (p_drv_buf->scsi_inter_cond_met_set) {
+			p_buf->p_val = &p_drv_buf->scsi_inter_cond_met;
+			return sizeof(p_drv_buf->scsi_inter_cond_met);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_RESERVATION_CONFLICT_COUNT:
+		if (p_drv_buf->scsi_rsv_conflicts_set) {
+			p_buf->p_val = &p_drv_buf->scsi_rsv_conflicts;
+			return sizeof(p_drv_buf->scsi_rsv_conflicts);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_TASK_SET_FULL_COUNT:
+		if (p_drv_buf->scsi_tsk_full_set) {
+			p_buf->p_val = &p_drv_buf->scsi_tsk_full;
+			return sizeof(p_drv_buf->scsi_tsk_full);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_ACA_ACTIVE_COUNT:
+		if (p_drv_buf->scsi_aca_active_set) {
+			p_buf->p_val = &p_drv_buf->scsi_aca_active;
+			return sizeof(p_drv_buf->scsi_aca_active);
+		}
+		break;
+	case DRV_TLV_SCSI_STATUS_TASK_ABORTED_COUNT:
+		if (p_drv_buf->scsi_tsk_abort_set) {
+			p_buf->p_val = &p_drv_buf->scsi_tsk_abort;
+			return sizeof(p_drv_buf->scsi_tsk_abort);
+		}
+		break;
+	case DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_CONDITION_2_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_CONDITION_3_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_CONDITION_4_RECEIVED_SK_ASC_ASCQ:
+	case DRV_TLV_SCSI_CHECK_CONDITION_5_RECEIVED_SK_ASC_ASCQ:
+		idx = (p_tlv->tlv_type -
+		       DRV_TLV_SCSI_CHECK_CONDITION_1_RECEIVED_SK_ASC_ASCQ) / 2;
+
+		if (p_drv_buf->scsi_rx_chk_set[idx]) {
+			p_buf->p_val = &p_drv_buf->scsi_rx_chk[idx];
+			return sizeof(p_drv_buf->scsi_rx_chk[idx]);
+		}
+		break;
+	case DRV_TLV_SCSI_CHECK_1_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_2_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_3_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_4_TIMESTAMP:
+	case DRV_TLV_SCSI_CHECK_5_TIMESTAMP:
+		idx = (p_tlv->tlv_type - DRV_TLV_SCSI_CHECK_1_TIMESTAMP) / 2;
+		p_time = &p_drv_buf->scsi_chk_tstamp[idx];
+
+		return qed_mfw_get_tlv_time_value(p_time, p_buf);
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static int
+qed_mfw_get_iscsi_tlv_value(struct qed_drv_tlv_hdr *p_tlv,
+			    struct qed_mfw_tlv_iscsi *p_drv_buf,
+			    struct qed_tlv_parsed_buf *p_buf)
+{
+	switch (p_tlv->tlv_type) {
+	case DRV_TLV_TARGET_LLMNR_ENABLED:
+		if (p_drv_buf->target_llmnr_set) {
+			p_buf->p_val = &p_drv_buf->target_llmnr;
+			return sizeof(p_drv_buf->target_llmnr);
+		}
+		break;
+	case DRV_TLV_HEADER_DIGEST_FLAG_ENABLED:
+		if (p_drv_buf->header_digest_set) {
+			p_buf->p_val = &p_drv_buf->header_digest;
+			return sizeof(p_drv_buf->header_digest);
+		}
+		break;
+	case DRV_TLV_DATA_DIGEST_FLAG_ENABLED:
+		if (p_drv_buf->data_digest_set) {
+			p_buf->p_val = &p_drv_buf->data_digest;
+			return sizeof(p_drv_buf->data_digest);
+		}
+		break;
+	case DRV_TLV_AUTHENTICATION_METHOD:
+		if (p_drv_buf->auth_method_set) {
+			p_buf->p_val = &p_drv_buf->auth_method;
+			return sizeof(p_drv_buf->auth_method);
+		}
+		break;
+	case DRV_TLV_ISCSI_BOOT_TARGET_PORTAL:
+		if (p_drv_buf->boot_taget_portal_set) {
+			p_buf->p_val = &p_drv_buf->boot_taget_portal;
+			return sizeof(p_drv_buf->boot_taget_portal);
+		}
+		break;
+	case DRV_TLV_MAX_FRAME_SIZE:
+		if (p_drv_buf->frame_size_set) {
+			p_buf->p_val = &p_drv_buf->frame_size;
+			return sizeof(p_drv_buf->frame_size);
+		}
+		break;
+	case DRV_TLV_PDU_TX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->tx_desc_size_set) {
+			p_buf->p_val = &p_drv_buf->tx_desc_size;
+			return sizeof(p_drv_buf->tx_desc_size);
+		}
+		break;
+	case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_SIZE:
+		if (p_drv_buf->rx_desc_size_set) {
+			p_buf->p_val = &p_drv_buf->rx_desc_size;
+			return sizeof(p_drv_buf->rx_desc_size);
+		}
+		break;
+	case DRV_TLV_ISCSI_BOOT_PROGRESS:
+		if (p_drv_buf->boot_progress_set) {
+			p_buf->p_val = &p_drv_buf->boot_progress;
+			return sizeof(p_drv_buf->boot_progress);
+		}
+		break;
+	case DRV_TLV_PDU_TX_DESCRIPTOR_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->tx_desc_qdepth_set) {
+			p_buf->p_val = &p_drv_buf->tx_desc_qdepth;
+			return sizeof(p_drv_buf->tx_desc_qdepth);
+		}
+		break;
+	case DRV_TLV_PDU_RX_DESCRIPTORS_QUEUE_AVG_DEPTH:
+		if (p_drv_buf->rx_desc_qdepth_set) {
+			p_buf->p_val = &p_drv_buf->rx_desc_qdepth;
+			return sizeof(p_drv_buf->rx_desc_qdepth);
+		}
+		break;
+	case DRV_TLV_ISCSI_PDU_RX_FRAMES_RECEIVED:
+		if (p_drv_buf->rx_frames_set) {
+			p_buf->p_val = &p_drv_buf->rx_frames;
+			return sizeof(p_drv_buf->rx_frames);
+		}
+		break;
+	case DRV_TLV_ISCSI_PDU_RX_BYTES_RECEIVED:
+		if (p_drv_buf->rx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->rx_bytes;
+			return sizeof(p_drv_buf->rx_bytes);
+		}
+		break;
+	case DRV_TLV_ISCSI_PDU_TX_FRAMES_SENT:
+		if (p_drv_buf->tx_frames_set) {
+			p_buf->p_val = &p_drv_buf->tx_frames;
+			return sizeof(p_drv_buf->tx_frames);
+		}
+		break;
+	case DRV_TLV_ISCSI_PDU_TX_BYTES_SENT:
+		if (p_drv_buf->tx_bytes_set) {
+			p_buf->p_val = &p_drv_buf->tx_bytes;
+			return sizeof(p_drv_buf->tx_bytes);
+		}
+		break;
+	default:
+		break;
+	}
+
+	return -1;
+}
+
+static int qed_mfw_update_tlvs(struct qed_hwfn *p_hwfn,
+			       u8 tlv_group, u8 *p_mfw_buf, u32 size)
+{
+	union qed_mfw_tlv_data *p_tlv_data;
+	struct qed_tlv_parsed_buf buffer;
+	struct qed_drv_tlv_hdr tlv;
+	int len = 0;
+	u32 offset;
+	u8 *p_tlv;
+
+	p_tlv_data = vzalloc(sizeof(*p_tlv_data));
+	if (!p_tlv_data)
+		return -ENOMEM;
+
+	if (qed_mfw_fill_tlv_data(p_hwfn, tlv_group, p_tlv_data)) {
+		vfree(p_tlv_data);
+		return -EINVAL;
+	}
+
+	memset(&tlv, 0, sizeof(tlv));
+	for (offset = 0; offset < size;
+	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+		p_tlv = &p_mfw_buf[offset];
+		tlv.tlv_type = TLV_TYPE(p_tlv);
+		tlv.tlv_length = TLV_LENGTH(p_tlv);
+		tlv.tlv_flags = TLV_FLAGS(p_tlv);
+
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Type %d length = %d flags = 0x%x\n", tlv.tlv_type,
+			   tlv.tlv_length, tlv.tlv_flags);
+
+		if (tlv_group == QED_MFW_TLV_GENERIC)
+			len = qed_mfw_get_gen_tlv_value(&tlv,
+							&p_tlv_data->generic,
+							&buffer);
+		else if (tlv_group == QED_MFW_TLV_ETH)
+			len = qed_mfw_get_eth_tlv_value(&tlv,
+							&p_tlv_data->eth,
+							&buffer);
+		else if (tlv_group == QED_MFW_TLV_FCOE)
+			len = qed_mfw_get_fcoe_tlv_value(&tlv,
+							 &p_tlv_data->fcoe,
+							 &buffer);
+		else
+			len = qed_mfw_get_iscsi_tlv_value(&tlv,
+							  &p_tlv_data->iscsi,
+							  &buffer);
+
+		if (len > 0) {
+			WARN(len > 4 * tlv.tlv_length,
+			     "Incorrect MFW TLV length %d, it shouldn't be greater than %d\n",
+			     len, 4 * tlv.tlv_length);
+			len = min_t(int, len, 4 * tlv.tlv_length);
+			tlv.tlv_flags |= QED_DRV_TLV_FLAGS_CHANGED;
+			TLV_FLAGS(p_tlv) = tlv.tlv_flags;
+			memcpy(p_mfw_buf + offset + sizeof(tlv),
+			       buffer.p_val, len);
+		}
+	}
+
+	vfree(p_tlv_data);
+
+	return 0;
+}
+
+int qed_mfw_process_tlv_req(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+	u32 addr, size, offset, resp, param, val, global_offsize, global_addr;
+	u8 tlv_group = 0, id, *p_mfw_buf = NULL, *p_temp;
+	struct qed_drv_tlv_hdr tlv;
+	int rc;
+
+	addr = SECTION_OFFSIZE_ADDR(p_hwfn->mcp_info->public_base,
+				    PUBLIC_GLOBAL);
+	global_offsize = qed_rd(p_hwfn, p_ptt, addr);
+	global_addr = SECTION_ADDR(global_offsize, 0);
+	addr = global_addr + offsetof(struct public_global, data_ptr);
+	addr = qed_rd(p_hwfn, p_ptt, addr);
+	size = qed_rd(p_hwfn, p_ptt, global_addr +
+		      offsetof(struct public_global, data_size));
+
+	if (!size) {
+		DP_NOTICE(p_hwfn, "Invalid TLV req size = %d\n", size);
+		goto drv_done;
+	}
+
+	p_mfw_buf = vzalloc(size);
+	if (!p_mfw_buf) {
+		DP_NOTICE(p_hwfn, "Failed allocate memory for p_mfw_buf\n");
+		goto drv_done;
+	}
+
+	/* Read the TLV request into a local buffer. MFW lays the TLVs out in
+	 * little-endian format, but the mcp read returns each dword in big
+	 * endian. Convert each dword back to little endian before the memcpy
+	 * so that the driver buffer preserves the original MFW TLV layout.
+	 */
+	for (offset = 0; offset < size; offset += sizeof(u32)) {
+		val = qed_rd(p_hwfn, p_ptt, addr + offset);
+		val = be32_to_cpu(val);
+		memcpy(&p_mfw_buf[offset], &val, sizeof(u32));
+	}
+
+	/* Parse the headers to enumerate the requested TLV groups */
+	for (offset = 0; offset < size;
+	     offset += sizeof(tlv) + sizeof(u32) * tlv.tlv_length) {
+		p_temp = &p_mfw_buf[offset];
+		tlv.tlv_type = TLV_TYPE(p_temp);
+		tlv.tlv_length = TLV_LENGTH(p_temp);
+		if (qed_mfw_get_tlv_group(tlv.tlv_type, &tlv_group))
+			DP_VERBOSE(p_hwfn, NETIF_MSG_DRV,
+				   "Un recognized TLV %d\n", tlv.tlv_type);
+	}
+
+	/* Sanitize the TLV groups according to personality */
+	if ((tlv_group & QED_MFW_TLV_ETH) && !QED_IS_L2_PERSONALITY(p_hwfn)) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Skipping L2 TLVs for non-L2 function\n");
+		tlv_group &= ~QED_MFW_TLV_ETH;
+	}
+
+	if ((tlv_group & QED_MFW_TLV_FCOE) &&
+	    p_hwfn->hw_info.personality != QED_PCI_FCOE) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Skipping FCoE TLVs for non-FCoE function\n");
+		tlv_group &= ~QED_MFW_TLV_FCOE;
+	}
+
+	if ((tlv_group & QED_MFW_TLV_ISCSI) &&
+	    p_hwfn->hw_info.personality != QED_PCI_ISCSI) {
+		DP_VERBOSE(p_hwfn, QED_MSG_SP,
+			   "Skipping iSCSI TLVs for non-iSCSI function\n");
+		tlv_group &= ~QED_MFW_TLV_ISCSI;
+	}
+
+	/* Update the TLV values in the local buffer */
+	for (id = QED_MFW_TLV_GENERIC; id < QED_MFW_TLV_MAX; id <<= 1) {
+		if (tlv_group & id)
+			if (qed_mfw_update_tlvs(p_hwfn, id, p_mfw_buf, size))
+				goto drv_done;
+	}
+
+	/* Write the TLV data back to shared memory. Each 4-byte chunk is first
+	 * memcpy'd into a u32 (preserving the little-endian layout) and then
+	 * converted to big endian, as required by the mcp write.
+	 */
+	for (offset = 0; offset < size; offset += sizeof(u32)) {
+		memcpy(&val, &p_mfw_buf[offset], sizeof(u32));
+		val = cpu_to_be32(val);
+		qed_wr(p_hwfn, p_ptt, addr + offset, val);
+	}
+
+drv_done:
+	rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_GET_TLV_DONE, 0, &resp,
+			 &param);
+
+	vfree(p_mfw_buf);
+
+	return rc;
+}
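
For illustration, the loops above treat the MFW buffer as a stream of small TLV headers whose length field counts dwords, and mark every entry the driver filled with QED_DRV_TLV_FLAGS_CHANGED. Below is a standalone user-space sketch of that walk; the header field layout and the flag value are assumptions made for the sketch, not the exact qed definitions:

#include <stdint.h>
#include <stdio.h>

/* Illustrative header; the driver extracts these fields with the
 * TLV_TYPE/TLV_LENGTH/TLV_FLAGS accessors instead.
 */
struct tlv_hdr {
	uint8_t type;
	uint8_t length;			/* value length in dwords */
	uint8_t flags;
	uint8_t rsvd;
};

#define TLV_FLAG_CHANGED	0x1	/* assumed bit, for illustration */

static void walk_tlv_stream(uint8_t *buf, uint32_t size)
{
	uint32_t offset = 0;

	while (offset + sizeof(struct tlv_hdr) <= size) {
		struct tlv_hdr *hdr = (struct tlv_hdr *)&buf[offset];

		printf("type %d, length %d dwords, flags 0x%x\n",
		       hdr->type, hdr->length, hdr->flags);

		/* A driver that fills the value would also mark it: */
		hdr->flags |= TLV_FLAG_CHANGED;

		/* Same step as the kernel loops above: header plus
		 * length dwords of value.
		 */
		offset += sizeof(*hdr) + sizeof(uint32_t) * hdr->length;
	}
}

int main(void)
{
	uint8_t buf[] = {
		1, 1, 0, 0, 0xaa, 0xbb, 0xcc, 0xdd,	/* one-dword value */
		2, 2, 0, 0, 1, 2, 3, 4, 5, 6, 7, 8,	/* two-dword value */
	};

	walk_tlv_stream(buf, sizeof(buf));
	return 0;
}

The real qed_mfw_process_tlv_req() additionally converts every dword between the big-endian form returned by the mcp read/write helpers and the little-endian layout the MFW expects, as its comments describe.
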
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 7abaf27..9e70f71 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -133,6 +133,9 @@
 static void qede_remove(struct pci_dev *pdev);
 static void qede_shutdown(struct pci_dev *pdev);
 static void qede_link_update(void *dev, struct qed_link_output *link);
+static void qede_get_eth_tlv_data(void *edev, void *data);
+static void qede_get_generic_tlv_data(void *edev,
+				      struct qed_generic_tlvs *data);
 
 /* The qede lock is used to protect driver state change and driver flows that
  * are not reentrant.
@@ -228,6 +231,8 @@
 		.arfs_filter_op = qede_arfs_filter_op,
 #endif
 		.link_update = qede_link_update,
+		.get_generic_tlv_data = qede_get_generic_tlv_data,
+		.get_protocol_tlv_data = qede_get_eth_tlv_data,
 	},
 	.force_mac = qede_force_mac,
 	.ports_update = qede_udp_ports_update,
@@ -2131,3 +2136,99 @@
 		}
 	}
 }
+
+static bool qede_is_txq_full(struct qede_dev *edev, struct qede_tx_queue *txq)
+{
+	struct netdev_queue *netdev_txq;
+
+	netdev_txq = netdev_get_tx_queue(edev->ndev, txq->index);
+	if (netif_xmit_stopped(netdev_txq))
+		return true;
+
+	return false;
+}
+
+static void qede_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+	struct qede_dev *edev = dev;
+	struct netdev_hw_addr *ha;
+	int i;
+
+	if (edev->ndev->features & NETIF_F_IP_CSUM)
+		data->feat_flags |= QED_TLV_IP_CSUM;
+	if (edev->ndev->features & NETIF_F_TSO)
+		data->feat_flags |= QED_TLV_LSO;
+
+	ether_addr_copy(data->mac[0], edev->ndev->dev_addr);
+	memset(data->mac[1], 0, ETH_ALEN);
+	memset(data->mac[2], 0, ETH_ALEN);
+	/* Copy the first two UC macs */
+	netif_addr_lock_bh(edev->ndev);
+	i = 1;
+	netdev_for_each_uc_addr(ha, edev->ndev) {
+		ether_addr_copy(data->mac[i++], ha->addr);
+		if (i == QED_TLV_MAC_COUNT)
+			break;
+	}
+
+	netif_addr_unlock_bh(edev->ndev);
+}
+
+static void qede_get_eth_tlv_data(void *dev, void *data)
+{
+	struct qed_mfw_tlv_eth *etlv = data;
+	struct qede_dev *edev = dev;
+	struct qede_fastpath *fp;
+	int i;
+
+	etlv->lso_maxoff_size = 0xFFFF;
+	etlv->lso_maxoff_size_set = true;
+	etlv->lso_minseg_size = (u16)ETH_TX_LSO_WINDOW_MIN_LEN;
+	etlv->lso_minseg_size_set = true;
+	etlv->prom_mode = !!(edev->ndev->flags & IFF_PROMISC);
+	etlv->prom_mode_set = true;
+	etlv->tx_descr_size = QEDE_TSS_COUNT(edev);
+	etlv->tx_descr_size_set = true;
+	etlv->rx_descr_size = QEDE_RSS_COUNT(edev);
+	etlv->rx_descr_size_set = true;
+	etlv->iov_offload = QED_MFW_TLV_IOV_OFFLOAD_VEB;
+	etlv->iov_offload_set = true;
+
+	/* Fill information regarding queues; Should be done under the qede
+	 * lock to guarantee those don't change beneath our feet.
+	 */
+	etlv->txqs_empty = true;
+	etlv->rxqs_empty = true;
+	etlv->num_txqs_full = 0;
+	etlv->num_rxqs_full = 0;
+
+	__qede_lock(edev);
+	for_each_queue(i) {
+		fp = &edev->fp_array[i];
+		if (fp->type & QEDE_FASTPATH_TX) {
+			if (fp->txq->sw_tx_cons != fp->txq->sw_tx_prod)
+				etlv->txqs_empty = false;
+			if (qede_is_txq_full(edev, fp->txq))
+				etlv->num_txqs_full++;
+		}
+		if (fp->type & QEDE_FASTPATH_RX) {
+			if (qede_has_rx_work(fp->rxq))
+				etlv->rxqs_empty = false;
+
+			/* This one is a bit tricky; the firmware might stop
+			 * placing packets even before the ring is completely
+			 * full, so give an approximation.
+			 */
+			if (le16_to_cpu(*fp->rxq->hw_cons_ptr) -
+			    qed_chain_get_cons_idx(&fp->rxq->rx_comp_ring) >
+			    RX_RING_SIZE - 100)
+				etlv->num_rxqs_full++;
+		}
+	}
+	__qede_unlock(edev);
+
+	etlv->txqs_empty_set = true;
+	etlv->rxqs_empty_set = true;
+	etlv->num_txqs_full_set = true;
+	etlv->num_rxqs_full_set = true;
+}
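
The rx-side check above approximates "queue is full" by comparing the distance between the hardware completion index and the driver's consumer index against RX_RING_SIZE minus a headroom of 100 entries. A standalone sketch of that style of check follows; the ring size, the headroom and the explicit 16-bit wraparound handling are assumptions of the sketch, not taken from qede:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define RING_SIZE	8192	/* illustrative; qede uses RX_RING_SIZE */
#define HEADROOM	100

/* Treat the queue as (nearly) full once more than RING_SIZE - HEADROOM
 * entries are outstanding. The u16 subtraction keeps the distance
 * correct when the indices wrap around.
 */
static bool rxq_nearly_full(uint16_t hw_idx, uint16_t sw_cons)
{
	uint16_t outstanding = (uint16_t)(hw_idx - sw_cons);

	return outstanding > RING_SIZE - HEADROOM;
}

int main(void)
{
	printf("%d\n", rxq_nearly_full(4000, 3900));	/* 100 outstanding -> 0 */
	printf("%d\n", rxq_nearly_full(8100, 0));	/* 8100 outstanding -> 1 */
	printf("%d\n", rxq_nearly_full(2564, 60000));	/* wrapped, 8100 -> 1 */
	return 0;
}

The headroom exists because, as the comment above notes, the firmware may stop placing packets before the ring is completely full, so the count is only an approximation.
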
diff --git a/drivers/scsi/qedf/qedf.h b/drivers/scsi/qedf/qedf.h
index c105a2e..cabb6af 100644
--- a/drivers/scsi/qedf/qedf.h
+++ b/drivers/scsi/qedf/qedf.h
@@ -383,11 +383,16 @@
 	u32 flogi_failed;
 
 	/* Used for fc statistics */
+	struct mutex stats_mutex;
 	u64 input_requests;
 	u64 output_requests;
 	u64 control_requests;
 	u64 packet_aborts;
 	u64 alloc_failures;
+	u8 lun_resets;
+	u8 target_resets;
+	u8 task_set_fulls;
+	u8 busy;
 };
 
 struct io_bdt {
@@ -496,7 +501,9 @@
 extern void qedf_process_seq_cleanup_compl(struct qedf_ctx *qedf,
 	struct fcoe_cqe *cqe, struct qedf_ioreq *io_req);
 extern int qedf_send_flogi(struct qedf_ctx *qedf);
+extern void qedf_get_protocol_tlv_data(void *dev, void *data);
 extern void qedf_fp_io_handler(struct work_struct *work);
+extern void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data);
 
 #define FCOE_WORD_TO_BYTE  4
 #define QEDF_MAX_TASK_NUM	0xFFFF
diff --git a/drivers/scsi/qedf/qedf_debugfs.c b/drivers/scsi/qedf/qedf_debugfs.c
index c539a7a..5789ce1 100644
--- a/drivers/scsi/qedf/qedf_debugfs.c
+++ b/drivers/scsi/qedf/qedf_debugfs.c
@@ -439,7 +439,6 @@
 	return single_open(file, qedf_offload_stats_show, qedf);
 }
 
-
 const struct file_operations qedf_dbg_fops[] = {
 	qedf_dbg_fileops(qedf, fp_int),
 	qedf_dbg_fileops_seq(qedf, io_trace),
diff --git a/drivers/scsi/qedf/qedf_io.c b/drivers/scsi/qedf/qedf_io.c
index 50a50c4..3fe579d 100644
--- a/drivers/scsi/qedf/qedf_io.c
+++ b/drivers/scsi/qedf/qedf_io.c
@@ -1200,6 +1200,12 @@
 					fcport->retry_delay_timestamp =
 					    jiffies + (qualifier * HZ / 10);
 				}
+				/* Record stats */
+				if (io_req->cdb_status ==
+				    SAM_STAT_TASK_SET_FULL)
+					qedf->task_set_fulls++;
+				else
+					qedf->busy++;
 			}
 		}
 		if (io_req->fcp_resid)
@@ -1866,6 +1872,11 @@
 		goto reset_tmf_err;
 	}
 
+	if (tm_flags == FCP_TMF_LUN_RESET)
+		qedf->lun_resets++;
+	else if (tm_flags == FCP_TMF_TGT_RESET)
+		qedf->target_resets++;
+
 	/* Initialize rest of io_req fields */
 	io_req->sc_cmd = sc_cmd;
 	io_req->fcport = fcport;
diff --git a/drivers/scsi/qedf/qedf_main.c b/drivers/scsi/qedf/qedf_main.c
index 6c19015..d3f73d8 100644
--- a/drivers/scsi/qedf/qedf_main.c
+++ b/drivers/scsi/qedf/qedf_main.c
@@ -566,6 +566,8 @@
 	{
 		.link_update = qedf_link_update,
 		.dcbx_aen = qedf_dcbx_handler,
+		.get_generic_tlv_data = qedf_get_generic_tlv_data,
+		.get_protocol_tlv_data = qedf_get_protocol_tlv_data,
 	}
 };
 
@@ -1746,6 +1748,8 @@
 		goto out;
 	}
 
+	mutex_lock(&qedf->stats_mutex);
+
 	/* Query firmware for offload stats */
 	qed_ops->get_stats(qedf->cdev, fw_fcoe_stats);
 
@@ -1779,6 +1783,7 @@
 	qedf_stats->fcp_packet_aborts += qedf->packet_aborts;
 	qedf_stats->fcp_frame_alloc_failures += qedf->alloc_failures;
 
+	mutex_unlock(&qedf->stats_mutex);
 	kfree(fw_fcoe_stats);
 out:
 	return qedf_stats;
@@ -2948,6 +2953,7 @@
 		qedf->stop_io_on_error = false;
 		pci_set_drvdata(pdev, qedf);
 		init_completion(&qedf->fipvlan_compl);
+		mutex_init(&qedf->stats_mutex);
 
 		QEDF_INFO(&(qedf->dbg_ctx), QEDF_LOG_INFO,
 		   "QLogic FastLinQ FCoE Module qedf %s, "
@@ -3393,6 +3399,104 @@
 }
 
 /*
+ * Protocol TLV handler
+ */
+void qedf_get_protocol_tlv_data(void *dev, void *data)
+{
+	struct qedf_ctx *qedf = dev;
+	struct qed_mfw_tlv_fcoe *fcoe = data;
+	struct fc_lport *lport = qedf->lport;
+	struct Scsi_Host *host = lport->host;
+	struct fc_host_attrs *fc_host = shost_to_fc_host(host);
+	struct fc_host_statistics *hst;
+
+	/* Force a refresh of the fc_host stats including offload stats */
+	hst = qedf_fc_get_host_stats(host);
+
+	fcoe->qos_pri_set = true;
+	fcoe->qos_pri = 3; /* Hard coded to 3 in driver */
+
+	fcoe->ra_tov_set = true;
+	fcoe->ra_tov = lport->r_a_tov;
+
+	fcoe->ed_tov_set = true;
+	fcoe->ed_tov = lport->e_d_tov;
+
+	fcoe->npiv_state_set = true;
+	fcoe->npiv_state = 1; /* NPIV always enabled */
+
+	fcoe->num_npiv_ids_set = true;
+	fcoe->num_npiv_ids = fc_host->npiv_vports_inuse;
+
+	/* Certain attributes we only want to set if we've selected an FCF */
+	if (qedf->ctlr.sel_fcf) {
+		fcoe->switch_name_set = true;
+		u64_to_wwn(qedf->ctlr.sel_fcf->switch_name, fcoe->switch_name);
+	}
+
+	fcoe->port_state_set = true;
+	/* For qedf we're either link down or fabric attach */
+	if (lport->link_up)
+		fcoe->port_state = QED_MFW_TLV_PORT_STATE_FABRIC;
+	else
+		fcoe->port_state = QED_MFW_TLV_PORT_STATE_OFFLINE;
+
+	fcoe->link_failures_set = true;
+	fcoe->link_failures = (u16)hst->link_failure_count;
+
+	fcoe->fcoe_txq_depth_set = true;
+	fcoe->fcoe_rxq_depth_set = true;
+	fcoe->fcoe_rxq_depth = FCOE_PARAMS_NUM_TASKS;
+	fcoe->fcoe_txq_depth = FCOE_PARAMS_NUM_TASKS;
+
+	fcoe->fcoe_rx_frames_set = true;
+	fcoe->fcoe_rx_frames = hst->rx_frames;
+
+	fcoe->fcoe_tx_frames_set = true;
+	fcoe->fcoe_tx_frames = hst->tx_frames;
+
+	fcoe->fcoe_rx_bytes_set = true;
+	fcoe->fcoe_rx_bytes = hst->fcp_input_megabytes * 1000000;
+
+	fcoe->fcoe_tx_bytes_set = true;
+	fcoe->fcoe_tx_bytes = hst->fcp_output_megabytes * 1000000;
+
+	fcoe->crc_count_set = true;
+	fcoe->crc_count = hst->invalid_crc_count;
+
+	fcoe->tx_abts_set = true;
+	fcoe->tx_abts = hst->fcp_packet_aborts;
+
+	fcoe->tx_lun_rst_set = true;
+	fcoe->tx_lun_rst = qedf->lun_resets;
+
+	fcoe->abort_task_sets_set = true;
+	fcoe->abort_task_sets = qedf->packet_aborts;
+
+	fcoe->scsi_busy_set = true;
+	fcoe->scsi_busy = qedf->busy;
+
+	fcoe->scsi_tsk_full_set = true;
+	fcoe->scsi_tsk_full = qedf->task_set_fulls;
+}
+
+/* Generic TLV data callback */
+void qedf_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+	struct qedf_ctx *qedf;
+
+	if (!dev) {
+		QEDF_INFO(NULL, QEDF_LOG_EVT,
+			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
+		return;
+	}
+	qedf = (struct qedf_ctx *)dev;
+
+	memset(data, 0, sizeof(struct qed_generic_tlvs));
+	ether_addr_copy(data->mac[0], qedf->mac);
+}
+
+/*
  * Module Init/Remove
  */
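
qedf above and qedi below register the same pair of callbacks that qede uses: a generic one that fills feature flags and MAC slots, and a protocol one that receives a void * which the driver casts to its protocol-specific TLV structure. The sketch below models that calling convention in isolation; the structure layouts and names are simplified stand-ins for the qed definitions:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TLV_MAC_COUNT	3
#define MAC_LEN		6

/* Simplified stand-ins for struct qed_generic_tlvs and a protocol
 * TLV structure such as struct qed_mfw_tlv_fcoe.
 */
struct generic_tlvs {
	uint16_t feat_flags;
	uint8_t mac[TLV_MAC_COUNT][MAC_LEN];
};

struct fcoe_tlvs {
	uint8_t qos_pri;
	bool qos_pri_set;
};

/* Callback table modelled on qed_common_cb_ops */
struct cb_ops {
	void (*get_generic_tlv_data)(void *dev, struct generic_tlvs *data);
	void (*get_protocol_tlv_data)(void *dev, void *data);
};

struct fcoe_dev {
	uint8_t mac[MAC_LEN];
	uint8_t prio;
};

static void fcoe_get_generic(void *dev, struct generic_tlvs *data)
{
	struct fcoe_dev *fdev = dev;

	memset(data, 0, sizeof(*data));
	memcpy(data->mac[0], fdev->mac, MAC_LEN);
}

static void fcoe_get_protocol(void *dev, void *data)
{
	struct fcoe_dev *fdev = dev;
	struct fcoe_tlvs *fcoe = data;	/* the protocol driver knows the type */

	fcoe->qos_pri = fdev->prio;
	fcoe->qos_pri_set = true;
}

int main(void)
{
	struct cb_ops ops = {
		.get_generic_tlv_data = fcoe_get_generic,
		.get_protocol_tlv_data = fcoe_get_protocol,
	};
	struct fcoe_dev dev = { .mac = { 0, 1, 2, 3, 4, 5 }, .prio = 3 };
	struct generic_tlvs gen;
	struct fcoe_tlvs fcoe = { 0 };

	/* A caller checks the callbacks and hands over the TLV structure
	 * matching the function's protocol.
	 */
	if (ops.get_generic_tlv_data)
		ops.get_generic_tlv_data(&dev, &gen);
	if (ops.get_protocol_tlv_data)
		ops.get_protocol_tlv_data(&dev, &fcoe);

	printf("mac ends %02x:%02x, qos_pri %u (set=%d)\n",
	       gen.mac[0][4], gen.mac[0][5], fcoe.qos_pri, fcoe.qos_pri_set);
	return 0;
}

Keeping the protocol argument a void * lets the common ops table stay protocol-agnostic while each upper driver fills only the structure that matches its personality.
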
 
diff --git a/drivers/scsi/qedi/qedi.h b/drivers/scsi/qedi/qedi.h
index b8b22ce..fc3babc 100644
--- a/drivers/scsi/qedi/qedi.h
+++ b/drivers/scsi/qedi/qedi.h
@@ -353,6 +353,9 @@
 #define IPV6_LEN	41
 #define IPV4_LEN	17
 	struct iscsi_boot_kset *boot_kset;
+
+	/* Used for iscsi statistics */
+	struct mutex stats_lock;
 };
 
 struct qedi_work {
diff --git a/drivers/scsi/qedi/qedi_iscsi.h b/drivers/scsi/qedi/qedi_iscsi.h
index ea13151..1126077 100644
--- a/drivers/scsi/qedi/qedi_iscsi.h
+++ b/drivers/scsi/qedi/qedi_iscsi.h
@@ -223,6 +223,12 @@
 	struct work_struct *ptr_tmf_work;
 };
 
+struct qedi_boot_target {
+	char ip_addr[64];
+	char iscsi_name[255];
+	u32 ipv6_en;
+};
+
 #define qedi_set_itt(task_id, itt) ((u32)(((task_id) & 0xffff) | ((itt) << 16)))
 #define qedi_get_itt(cqe) (cqe.iscsi_hdr.cmd.itt >> 16)
 
diff --git a/drivers/scsi/qedi/qedi_main.c b/drivers/scsi/qedi/qedi_main.c
index 4da3592..32ee7f6 100644
--- a/drivers/scsi/qedi/qedi_main.c
+++ b/drivers/scsi/qedi/qedi_main.c
@@ -55,6 +55,7 @@
 static struct qedi_cmd *qedi_get_cmd_from_tid(struct qedi_ctx *qedi, u32 tid);
 static void qedi_reset_uio_rings(struct qedi_uio_dev *udev);
 static void qedi_ll2_free_skbs(struct qedi_ctx *qedi);
+static struct nvm_iscsi_block *qedi_get_nvram_block(struct qedi_ctx *qedi);
 
 static int qedi_iscsi_event_cb(void *context, u8 fw_event_code, void *fw_handle)
 {
@@ -879,6 +880,201 @@
 	kfree(qedi->global_queues);
 }
 
+static void qedi_get_boot_tgt_info(struct nvm_iscsi_block *block,
+				   struct qedi_boot_target *tgt, u8 index)
+{
+	u32 ipv6_en;
+
+	ipv6_en = !!(block->generic.ctrl_flags &
+		     NVM_ISCSI_CFG_GEN_IPV6_ENABLED);
+
+	snprintf(tgt->iscsi_name, NVM_ISCSI_CFG_ISCSI_NAME_MAX_LEN, "%s\n",
+		 block->target[index].target_name.byte);
+
+	tgt->ipv6_en = ipv6_en;
+
+	if (ipv6_en)
+		snprintf(tgt->ip_addr, IPV6_LEN, "%pI6\n",
+			 block->target[index].ipv6_addr.byte);
+	else
+		snprintf(tgt->ip_addr, IPV4_LEN, "%pI4\n",
+			 block->target[index].ipv4_addr.byte);
+}
+
+static int qedi_find_boot_info(struct qedi_ctx *qedi,
+			       struct qed_mfw_tlv_iscsi *iscsi,
+			       struct nvm_iscsi_block *block)
+{
+	struct qedi_boot_target *pri_tgt = NULL, *sec_tgt = NULL;
+	u32 pri_ctrl_flags = 0, sec_ctrl_flags = 0, found = 0;
+	struct iscsi_cls_session *cls_sess;
+	struct iscsi_cls_conn *cls_conn;
+	struct qedi_conn *qedi_conn;
+	struct iscsi_session *sess;
+	struct iscsi_conn *conn;
+	char ep_ip_addr[64];
+	int i, ret = 0;
+
+	pri_ctrl_flags = !!(block->target[0].ctrl_flags &
+					NVM_ISCSI_CFG_TARGET_ENABLED);
+	if (pri_ctrl_flags) {
+		pri_tgt = kzalloc(sizeof(*pri_tgt), GFP_KERNEL);
+		if (!pri_tgt)
+			return -1;
+		qedi_get_boot_tgt_info(block, pri_tgt, 0);
+	}
+
+	sec_ctrl_flags = !!(block->target[1].ctrl_flags &
+					NVM_ISCSI_CFG_TARGET_ENABLED);
+	if (sec_ctrl_flags) {
+		sec_tgt = kzalloc(sizeof(*sec_tgt), GFP_KERNEL);
+		if (!sec_tgt) {
+			ret = -1;
+			goto free_tgt;
+		}
+		qedi_get_boot_tgt_info(block, sec_tgt, 1);
+	}
+
+	for (i = 0; i < qedi->max_active_conns; i++) {
+		qedi_conn = qedi_get_conn_from_id(qedi, i);
+		if (!qedi_conn)
+			continue;
+
+		if (qedi_conn->ep->ip_type == TCP_IPV4)
+			snprintf(ep_ip_addr, IPV4_LEN, "%pI4\n",
+				 qedi_conn->ep->dst_addr);
+		else
+			snprintf(ep_ip_addr, IPV6_LEN, "%pI6\n",
+				 qedi_conn->ep->dst_addr);
+
+		cls_conn = qedi_conn->cls_conn;
+		conn = cls_conn->dd_data;
+		cls_sess = iscsi_conn_to_session(cls_conn);
+		sess = cls_sess->dd_data;
+
+		if (pri_ctrl_flags) {
+			if (!strcmp(pri_tgt->iscsi_name, sess->targetname) &&
+			    !strcmp(pri_tgt->ip_addr, ep_ip_addr)) {
+				found = 1;
+				break;
+			}
+		}
+
+		if (sec_ctrl_flags) {
+			if (!strcmp(sec_tgt->iscsi_name, sess->targetname) &&
+			    !strcmp(sec_tgt->ip_addr, ep_ip_addr)) {
+				found = 1;
+				break;
+			}
+		}
+	}
+
+	if (found) {
+		if (conn->hdrdgst_en) {
+			iscsi->header_digest_set = true;
+			iscsi->header_digest = 1;
+		}
+
+		if (conn->datadgst_en) {
+			iscsi->data_digest_set = true;
+			iscsi->data_digest = 1;
+		}
+		iscsi->boot_target_portal_set = true;
+		iscsi->boot_target_portal = sess->tpgt;
+
+	} else {
+		ret = -1;
+	}
+
+	if (sec_ctrl_flags)
+		kfree(sec_tgt);
+free_tgt:
+	if (pri_ctrl_flags)
+		kfree(pri_tgt);
+
+	return ret;
+}
+
+static void qedi_get_generic_tlv_data(void *dev, struct qed_generic_tlvs *data)
+{
+	struct qedi_ctx *qedi;
+
+	if (!dev) {
+		QEDI_INFO(NULL, QEDI_LOG_EVT,
+			  "dev is NULL so ignoring get_generic_tlv_data request.\n");
+		return;
+	}
+	qedi = (struct qedi_ctx *)dev;
+
+	memset(data, 0, sizeof(struct qed_generic_tlvs));
+	ether_addr_copy(data->mac[0], qedi->mac);
+}
+
+/*
+ * Protocol TLV handler
+ */
+static void qedi_get_protocol_tlv_data(void *dev, void *data)
+{
+	struct qed_mfw_tlv_iscsi *iscsi = data;
+	struct qed_iscsi_stats *fw_iscsi_stats;
+	struct nvm_iscsi_block *block = NULL;
+	u32 chap_en = 0, mchap_en = 0;
+	struct qedi_ctx *qedi = dev;
+	int rval = 0;
+
+	fw_iscsi_stats = kmalloc(sizeof(*fw_iscsi_stats), GFP_KERNEL);
+	if (!fw_iscsi_stats) {
+		QEDI_ERR(&qedi->dbg_ctx,
+			 "Could not allocate memory for fw_iscsi_stats.\n");
+		goto exit_get_data;
+	}
+
+	mutex_lock(&qedi->stats_lock);
+	/* Query firmware for offload stats */
+	qedi_ops->get_stats(qedi->cdev, fw_iscsi_stats);
+	mutex_unlock(&qedi->stats_lock);
+
+	iscsi->rx_frames_set = true;
+	iscsi->rx_frames = fw_iscsi_stats->iscsi_rx_packet_cnt;
+	iscsi->rx_bytes_set = true;
+	iscsi->rx_bytes = fw_iscsi_stats->iscsi_rx_bytes_cnt;
+	iscsi->tx_frames_set = true;
+	iscsi->tx_frames = fw_iscsi_stats->iscsi_tx_packet_cnt;
+	iscsi->tx_bytes_set = true;
+	iscsi->tx_bytes = fw_iscsi_stats->iscsi_tx_bytes_cnt;
+	iscsi->frame_size_set = true;
+	iscsi->frame_size = qedi->ll2_mtu;
+	block = qedi_get_nvram_block(qedi);
+	if (block) {
+		chap_en = !!(block->generic.ctrl_flags &
+			     NVM_ISCSI_CFG_GEN_CHAP_ENABLED);
+		mchap_en = !!(block->generic.ctrl_flags &
+			      NVM_ISCSI_CFG_GEN_CHAP_MUTUAL_ENABLED);
+
+		iscsi->auth_method_set = (chap_en || mchap_en) ? true : false;
+		iscsi->auth_method = 1;
+		if (chap_en)
+			iscsi->auth_method = 2;
+		if (mchap_en)
+			iscsi->auth_method = 3;
+
+		iscsi->tx_desc_size_set = true;
+		iscsi->tx_desc_size = QEDI_SQ_SIZE;
+		iscsi->rx_desc_size_set = true;
+		iscsi->rx_desc_size = QEDI_CQ_SIZE;
+
+		/* tpgt, hdr digest, data digest */
+		rval = qedi_find_boot_info(qedi, iscsi, block);
+		if (rval)
+			QEDI_INFO(&qedi->dbg_ctx, QEDI_LOG_INFO,
+				  "Boot target not set");
+	}
+
+	kfree(fw_iscsi_stats);
+exit_get_data:
+	return;
+}
+
 static void qedi_link_update(void *dev, struct qed_link_output *link)
 {
 	struct qedi_ctx *qedi = (struct qedi_ctx *)dev;
@@ -896,6 +1092,8 @@
 static struct qed_iscsi_cb_ops qedi_cb_ops = {
 	{
 		.link_update =		qedi_link_update,
+		.get_protocol_tlv_data = qedi_get_protocol_tlv_data,
+		.get_generic_tlv_data = qedi_get_generic_tlv_data,
 	}
 };
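
The structures added to qed_if.h below pair every reported value with a *_set flag: the protocol driver sets the flag when it fills the value, and the getters in qed_mng_tlv.c above (for example the DRV_TLV_MAX_FRAME_SIZE case) copy a value into the TLV response only when its flag is set, returning -1 otherwise so the TLV is left untouched. A minimal sketch of that convention, with illustrative names:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* One optional value: the producer sets both the value and its
 * companion flag; the consumer reports it only when the flag is set.
 */
struct proto_tlvs {
	uint16_t frame_size;
	bool frame_size_set;
};

/* Returns the number of valid bytes, or -1 when the producer did not
 * fill the field (mirroring the qed getters).
 */
static int get_frame_size(struct proto_tlvs *t, const void **p_val)
{
	if (t->frame_size_set) {
		*p_val = &t->frame_size;
		return sizeof(t->frame_size);
	}

	return -1;
}

int main(void)
{
	struct proto_tlvs tlvs = { 0 };
	const void *val;

	printf("unset -> %d\n", get_frame_size(&tlvs, &val));

	tlvs.frame_size = 9000;
	tlvs.frame_size_set = true;
	printf("set   -> %d bytes\n", get_frame_size(&tlvs, &val));
	return 0;
}
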
 
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index 907976f..44af652 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -182,6 +182,272 @@
 	QED_LED_MODE_RESTORE
 };
 
+struct qed_mfw_tlv_eth {
+	u16 lso_maxoff_size;
+	bool lso_maxoff_size_set;
+	u16 lso_minseg_size;
+	bool lso_minseg_size_set;
+	u8 prom_mode;
+	bool prom_mode_set;
+	u16 tx_descr_size;
+	bool tx_descr_size_set;
+	u16 rx_descr_size;
+	bool rx_descr_size_set;
+	u16 netq_count;
+	bool netq_count_set;
+	u32 tcp4_offloads;
+	bool tcp4_offloads_set;
+	u32 tcp6_offloads;
+	bool tcp6_offloads_set;
+	u16 tx_descr_qdepth;
+	bool tx_descr_qdepth_set;
+	u16 rx_descr_qdepth;
+	bool rx_descr_qdepth_set;
+	u8 iov_offload;
+#define QED_MFW_TLV_IOV_OFFLOAD_NONE            (0)
+#define QED_MFW_TLV_IOV_OFFLOAD_MULTIQUEUE      (1)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEB             (2)
+#define QED_MFW_TLV_IOV_OFFLOAD_VEPA            (3)
+	bool iov_offload_set;
+	u8 txqs_empty;
+	bool txqs_empty_set;
+	u8 rxqs_empty;
+	bool rxqs_empty_set;
+	u8 num_txqs_full;
+	bool num_txqs_full_set;
+	u8 num_rxqs_full;
+	bool num_rxqs_full_set;
+};
+
+#define QED_MFW_TLV_TIME_SIZE	14
+struct qed_mfw_tlv_time {
+	bool b_set;
+	u8 month;
+	u8 day;
+	u8 hour;
+	u8 min;
+	u16 msec;
+	u16 usec;
+};
+
+struct qed_mfw_tlv_fcoe {
+	u8 scsi_timeout;
+	bool scsi_timeout_set;
+	u32 rt_tov;
+	bool rt_tov_set;
+	u32 ra_tov;
+	bool ra_tov_set;
+	u32 ed_tov;
+	bool ed_tov_set;
+	u32 cr_tov;
+	bool cr_tov_set;
+	u8 boot_type;
+	bool boot_type_set;
+	u8 npiv_state;
+	bool npiv_state_set;
+	u32 num_npiv_ids;
+	bool num_npiv_ids_set;
+	u8 switch_name[8];
+	bool switch_name_set;
+	u16 switch_portnum;
+	bool switch_portnum_set;
+	u8 switch_portid[3];
+	bool switch_portid_set;
+	u8 vendor_name[8];
+	bool vendor_name_set;
+	u8 switch_model[8];
+	bool switch_model_set;
+	u8 switch_fw_version[8];
+	bool switch_fw_version_set;
+	u8 qos_pri;
+	bool qos_pri_set;
+	u8 port_alias[3];
+	bool port_alias_set;
+	u8 port_state;
+#define QED_MFW_TLV_PORT_STATE_OFFLINE  (0)
+#define QED_MFW_TLV_PORT_STATE_LOOP             (1)
+#define QED_MFW_TLV_PORT_STATE_P2P              (2)
+#define QED_MFW_TLV_PORT_STATE_FABRIC           (3)
+	bool port_state_set;
+	u16 fip_tx_descr_size;
+	bool fip_tx_descr_size_set;
+	u16 fip_rx_descr_size;
+	bool fip_rx_descr_size_set;
+	u16 link_failures;
+	bool link_failures_set;
+	u8 fcoe_boot_progress;
+	bool fcoe_boot_progress_set;
+	u64 rx_bcast;
+	bool rx_bcast_set;
+	u64 tx_bcast;
+	bool tx_bcast_set;
+	u16 fcoe_txq_depth;
+	bool fcoe_txq_depth_set;
+	u16 fcoe_rxq_depth;
+	bool fcoe_rxq_depth_set;
+	u64 fcoe_rx_frames;
+	bool fcoe_rx_frames_set;
+	u64 fcoe_rx_bytes;
+	bool fcoe_rx_bytes_set;
+	u64 fcoe_tx_frames;
+	bool fcoe_tx_frames_set;
+	u64 fcoe_tx_bytes;
+	bool fcoe_tx_bytes_set;
+	u16 crc_count;
+	bool crc_count_set;
+	u32 crc_err_src_fcid[5];
+	bool crc_err_src_fcid_set[5];
+	struct qed_mfw_tlv_time crc_err[5];
+	u16 losync_err;
+	bool losync_err_set;
+	u16 losig_err;
+	bool losig_err_set;
+	u16 primtive_err;
+	bool primtive_err_set;
+	u16 disparity_err;
+	bool disparity_err_set;
+	u16 code_violation_err;
+	bool code_violation_err_set;
+	u32 flogi_param[4];
+	bool flogi_param_set[4];
+	struct qed_mfw_tlv_time flogi_tstamp;
+	u32 flogi_acc_param[4];
+	bool flogi_acc_param_set[4];
+	struct qed_mfw_tlv_time flogi_acc_tstamp;
+	u32 flogi_rjt;
+	bool flogi_rjt_set;
+	struct qed_mfw_tlv_time flogi_rjt_tstamp;
+	u32 fdiscs;
+	bool fdiscs_set;
+	u8 fdisc_acc;
+	bool fdisc_acc_set;
+	u8 fdisc_rjt;
+	bool fdisc_rjt_set;
+	u8 plogi;
+	bool plogi_set;
+	u8 plogi_acc;
+	bool plogi_acc_set;
+	u8 plogi_rjt;
+	bool plogi_rjt_set;
+	u32 plogi_dst_fcid[5];
+	bool plogi_dst_fcid_set[5];
+	struct qed_mfw_tlv_time plogi_tstamp[5];
+	u32 plogi_acc_src_fcid[5];
+	bool plogi_acc_src_fcid_set[5];
+	struct qed_mfw_tlv_time plogi_acc_tstamp[5];
+	u8 tx_plogos;
+	bool tx_plogos_set;
+	u8 plogo_acc;
+	bool plogo_acc_set;
+	u8 plogo_rjt;
+	bool plogo_rjt_set;
+	u32 plogo_src_fcid[5];
+	bool plogo_src_fcid_set[5];
+	struct qed_mfw_tlv_time plogo_tstamp[5];
+	u8 rx_logos;
+	bool rx_logos_set;
+	u8 tx_accs;
+	bool tx_accs_set;
+	u8 tx_prlis;
+	bool tx_prlis_set;
+	u8 rx_accs;
+	bool rx_accs_set;
+	u8 tx_abts;
+	bool tx_abts_set;
+	u8 rx_abts_acc;
+	bool rx_abts_acc_set;
+	u8 rx_abts_rjt;
+	bool rx_abts_rjt_set;
+	u32 abts_dst_fcid[5];
+	bool abts_dst_fcid_set[5];
+	struct qed_mfw_tlv_time abts_tstamp[5];
+	u8 rx_rscn;
+	bool rx_rscn_set;
+	u32 rx_rscn_nport[4];
+	bool rx_rscn_nport_set[4];
+	u8 tx_lun_rst;
+	bool tx_lun_rst_set;
+	u8 abort_task_sets;
+	bool abort_task_sets_set;
+	u8 tx_tprlos;
+	bool tx_tprlos_set;
+	u8 tx_nos;
+	bool tx_nos_set;
+	u8 rx_nos;
+	bool rx_nos_set;
+	u8 ols;
+	bool ols_set;
+	u8 lr;
+	bool lr_set;
+	u8 lrr;
+	bool lrr_set;
+	u8 tx_lip;
+	bool tx_lip_set;
+	u8 rx_lip;
+	bool rx_lip_set;
+	u8 eofa;
+	bool eofa_set;
+	u8 eofni;
+	bool eofni_set;
+	u8 scsi_chks;
+	bool scsi_chks_set;
+	u8 scsi_cond_met;
+	bool scsi_cond_met_set;
+	u8 scsi_busy;
+	bool scsi_busy_set;
+	u8 scsi_inter;
+	bool scsi_inter_set;
+	u8 scsi_inter_cond_met;
+	bool scsi_inter_cond_met_set;
+	u8 scsi_rsv_conflicts;
+	bool scsi_rsv_conflicts_set;
+	u8 scsi_tsk_full;
+	bool scsi_tsk_full_set;
+	u8 scsi_aca_active;
+	bool scsi_aca_active_set;
+	u8 scsi_tsk_abort;
+	bool scsi_tsk_abort_set;
+	u32 scsi_rx_chk[5];
+	bool scsi_rx_chk_set[5];
+	struct qed_mfw_tlv_time scsi_chk_tstamp[5];
+};
+
+struct qed_mfw_tlv_iscsi {
+	u8 target_llmnr;
+	bool target_llmnr_set;
+	u8 header_digest;
+	bool header_digest_set;
+	u8 data_digest;
+	bool data_digest_set;
+	u8 auth_method;
+#define QED_MFW_TLV_AUTH_METHOD_NONE            (1)
+#define QED_MFW_TLV_AUTH_METHOD_CHAP            (2)
+#define QED_MFW_TLV_AUTH_METHOD_MUTUAL_CHAP     (3)
+	bool auth_method_set;
+	u16 boot_target_portal;
+	bool boot_target_portal_set;
+	u16 frame_size;
+	bool frame_size_set;
+	u16 tx_desc_size;
+	bool tx_desc_size_set;
+	u16 rx_desc_size;
+	bool rx_desc_size_set;
+	u8 boot_progress;
+	bool boot_progress_set;
+	u16 tx_desc_qdepth;
+	bool tx_desc_qdepth_set;
+	u16 rx_desc_qdepth;
+	bool rx_desc_qdepth_set;
+	u64 rx_frames;
+	bool rx_frames_set;
+	u64 rx_bytes;
+	bool rx_bytes_set;
+	u64 tx_frames;
+	bool tx_frames_set;
+	u64 tx_bytes;
+	bool tx_bytes_set;
+};
+
 #define DIRECT_REG_WR(reg_addr, val) writel((u32)val, \
 					    (void __iomem *)(reg_addr))
 
@@ -485,6 +751,14 @@
 	u8			used_cnt;
 };
 
+struct qed_generic_tlvs {
+#define QED_TLV_IP_CSUM         BIT(0)
+#define QED_TLV_LSO             BIT(1)
+	u16 feat_flags;
+#define QED_TLV_MAC_COUNT	3
+	u8 mac[QED_TLV_MAC_COUNT][ETH_ALEN];
+};
+
 #define QED_NVM_SIGNATURE 0x12435687
 
 enum qed_nvm_flash_cmd {
@@ -499,6 +773,8 @@
 	void	(*link_update)(void			*dev,
 			       struct qed_link_output	*link);
 	void	(*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
+	void (*get_generic_tlv_data)(void *dev, struct qed_generic_tlvs *data);
+	void (*get_protocol_tlv_data)(void *dev, void *data);
 };
 
 struct qed_selftest_ops {