[SCSI] bfa: Revised Fabric Assigned Address (FAA) feature implementation.

Made changes to the Fabric Assigned Address (FAA) feature implementation.
Introduced an IOCFC state machine, which now handles the FAA logic and
the enablement of the IOC and the BFA sub-modules.
Removed the unneeded FAA enable/disable routines; FAA is enabled by default.
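
For reference, the events consumed by the new IOCFC state machine, as
exercised by the transitions in this patch (a sketch only; the actual
enum is declared in a header outside this diff, and the explicit values
shown here are illustrative):

	enum iocfc_event {
		IOCFC_E_INIT		= 1,	/* IOCFC init request */
		IOCFC_E_START		= 2,	/* IOCFC mod start request */
		IOCFC_E_STOP		= 3,	/* IOCFC stop request */
		IOCFC_E_ENABLE		= 4,	/* IOCFC enable request */
		IOCFC_E_DISABLE		= 5,	/* IOCFC disable request */
		IOCFC_E_IOC_ENABLED	= 6,	/* IOC enable completed */
		IOCFC_E_IOC_DISABLED	= 7,	/* IOC disable completed */
		IOCFC_E_IOC_FAILED	= 8,	/* failure notice from the IOC */
		IOCFC_E_DCONF_DONE	= 9,	/* dconf read/write done */
		IOCFC_E_CFG_DONE	= 10,	/* IOCFC config complete */
	};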

Signed-off-by: Krishna Gudipati <kgudipat@brocade.com>
Signed-off-by: James Bottomley <JBottomley@Parallels.com>
diff --git a/drivers/scsi/bfa/bfa_core.c b/drivers/scsi/bfa/bfa_core.c
index 035c9d5..456e576 100644
--- a/drivers/scsi/bfa/bfa_core.c
+++ b/drivers/scsi/bfa/bfa_core.c
@@ -200,13 +200,431 @@
 #define DEF_CFG_NUM_SBOOT_LUNS		16
 
 /*
+ * IOCFC state machine definitions/declarations
+ */
+bfa_fsm_state_decl(bfa_iocfc, stopped, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, initing, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_read, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_wait,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_cfg_done,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, operational,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, dconf_write,
+		   struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, stopping, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, enabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, cfg_wait, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabling, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, disabled, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, failed, struct bfa_iocfc_s, enum iocfc_event);
+bfa_fsm_state_decl(bfa_iocfc, init_failed,
+		   struct bfa_iocfc_s, enum iocfc_event);
+
+/*
  * forward declaration for IOC FC functions
  */
+static void bfa_iocfc_start_submod(struct bfa_s *bfa);
+static void bfa_iocfc_disable_submod(struct bfa_s *bfa);
+static void bfa_iocfc_send_cfg(void *bfa_arg);
 static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
 static void bfa_iocfc_disable_cbfn(void *bfa_arg);
 static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
 static void bfa_iocfc_reset_cbfn(void *bfa_arg);
 static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
+static void bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete);
+static void bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl);
+static void bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl);
+
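+/*
+ * IOCFC is in the stopped state; wait for an init or enable request.
+ */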
+static void
+bfa_iocfc_sm_stopped_entry(struct bfa_iocfc_s *iocfc)
+{
+}
+
+static void
+bfa_iocfc_sm_stopped(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_INIT:
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_initing);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
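+/*
+ * Init/enable request received; bring up the IOC first.
+ */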
+static void
+bfa_iocfc_sm_initing_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_initing(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
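+/*
+ * IOC is enabled; read back the driver configuration (dconf).
+ */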
+static void
+bfa_iocfc_sm_dconf_read_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modinit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_read(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
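+/*
+ * Dconf read is done; send the IOCFC configuration to firmware and
+ * wait for the config response.
+ */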
+static void
+bfa_iocfc_sm_init_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_cfg_done);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_init_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
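+/*
+ * Initial configuration is complete; report init success and wait for
+ * the start request.
+ */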
+static void
+bfa_iocfc_sm_init_cfg_done_entry(struct bfa_iocfc_s *iocfc)
+{
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_cfg_done(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_START:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		break;
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
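+/*
+ * Configuration exchange is done; initialize the FC port and start the
+ * BFA sub-modules.
+ */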
+static void
+bfa_iocfc_sm_operational_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_fcport_init(iocfc->bfa);
+	bfa_iocfc_start_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_operational(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
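+/*
+ * Stop requested; write back the driver configuration (dconf) before
+ * disabling the IOC.
+ */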
+static void
+bfa_iocfc_sm_dconf_write_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_dconf_modexit(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_dconf_write(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_DCONF_DONE:
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
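+/*
+ * Disable the IOC and, once it is down, tear down the sub-modules and
+ * complete the stop.
+ */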
+static void
+bfa_iocfc_sm_stopping_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_stopping(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_isr_disable(iocfc->bfa);
+		bfa_iocfc_disable_submod(iocfc->bfa);
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.stop_hcb_qe,
+			     bfa_iocfc_stop_cb, iocfc->bfa);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
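+/*
+ * Enable request while disabled; re-enable the IOC.
+ */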
+static void
+bfa_iocfc_sm_enabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_enable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_enabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
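+/*
+ * IOC is up again; resend the IOCFC configuration and notify any
+ * pending enable request once the config response arrives.
+ */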
+static void
+bfa_iocfc_sm_cfg_wait_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_iocfc_send_cfg(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_cfg_wait(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_CFG_DONE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_operational);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	case IOCFC_E_IOC_FAILED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_failed);
+		if (iocfc->bfa->iocfc.cb_reqd == BFA_FALSE)
+			break;
+
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.en_hcb_qe,
+			     bfa_iocfc_enable_cb, iocfc->bfa);
+		iocfc->bfa->iocfc.cb_reqd = BFA_FALSE;
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
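+/*
+ * Disable request; bring the IOC down.
+ */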
+static void
+bfa_iocfc_sm_disabling_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_ioc_disable(&iocfc->bfa->ioc);
+}
+
+static void
+bfa_iocfc_sm_disabling(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabled);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
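+/*
+ * IOC is disabled; quiesce interrupts and the sub-modules and report
+ * disable completion.
+ */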
+static void
+bfa_iocfc_sm_disabled_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+		     bfa_iocfc_disable_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_disabled(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_ENABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_enabling);
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
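+/*
+ * IOC or configuration failure; quiesce interrupts and the sub-modules,
+ * then wait for IOC recovery or a stop/disable request.
+ */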
+static void
+bfa_iocfc_sm_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	bfa_iocfc_disable_submod(iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_write);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_disabling);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_cfg_wait);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
+
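+/*
+ * Failure during initial bring-up; report the init failure and wait
+ * for a stop/disable request or IOC recovery.
+ */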
+static void
+bfa_iocfc_sm_init_failed_entry(struct bfa_iocfc_s *iocfc)
+{
+	bfa_isr_disable(iocfc->bfa);
+	iocfc->bfa->iocfc.op_status = BFA_STATUS_FAILED;
+	bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.init_hcb_qe,
+		     bfa_iocfc_init_cb, iocfc->bfa);
+}
+
+static void
+bfa_iocfc_sm_init_failed(struct bfa_iocfc_s *iocfc, enum iocfc_event event)
+{
+	bfa_trc(iocfc->bfa, event);
+
+	switch (event) {
+	case IOCFC_E_STOP:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopping);
+		break;
+	case IOCFC_E_DISABLE:
+		bfa_ioc_disable(&iocfc->bfa->ioc);
+		break;
+	case IOCFC_E_IOC_ENABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_dconf_read);
+		break;
+	case IOCFC_E_IOC_DISABLED:
+		bfa_fsm_set_state(iocfc, bfa_iocfc_sm_stopped);
+		iocfc->bfa->iocfc.op_status = BFA_STATUS_OK;
+		bfa_cb_queue(iocfc->bfa, &iocfc->bfa->iocfc.dis_hcb_qe,
+			     bfa_iocfc_disable_cb, iocfc->bfa);
+		break;
+	case IOCFC_E_IOC_FAILED:
+		break;
+	default:
+		bfa_sm_fault(iocfc->bfa, event);
+		break;
+	}
+}
 
 /*
  * BFA Interrupt handling functions
@@ -532,11 +950,9 @@
 	 * Enable interrupt coalescing if it is driver init path
 	 * and not ioc disable/enable path.
 	 */
-	if (!iocfc->cfgdone)
+	if (bfa_fsm_cmp_state(iocfc, bfa_iocfc_sm_init_cfg_wait))
 		cfg_info->intr_attr.coalesce = BFA_TRUE;
 
-	iocfc->cfgdone = BFA_FALSE;
-
 	/*
 	 * dma map IOC configuration itself
 	 */
@@ -556,8 +972,6 @@
 
 	bfa->bfad = bfad;
 	iocfc->bfa = bfa;
-	iocfc->action = BFA_IOCFC_ACT_NONE;
-
 	iocfc->cfg = *cfg;
 
 	/*
@@ -690,6 +1104,8 @@
 
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->start(bfa);
+
+	bfa->iocfc.submod_enabled = BFA_TRUE;
 }
 
 /*
@@ -700,8 +1116,13 @@
 {
 	int		i;
 
+	if (bfa->iocfc.submod_enabled == BFA_FALSE)
+		return;
+
 	for (i = 0; hal_mods[i]; i++)
 		hal_mods[i]->iocdisable(bfa);
+
+	bfa->iocfc.submod_enabled = BFA_FALSE;
 }
 
 static void
@@ -709,15 +1130,8 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	if (complete) {
-		if (bfa->iocfc.cfgdone && BFA_DCONF_MOD(bfa)->flashdone)
-			bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
-		else
-			bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
-	} else {
-		if (bfa->iocfc.cfgdone)
-			bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
-	}
+	if (complete)
+		bfa_cb_init(bfa->bfad, bfa->iocfc.op_status);
 }
 
 static void
@@ -728,8 +1142,6 @@
 
 	if (compl)
 		complete(&bfad->comp);
-	else
-		bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
 }
 
 static void
@@ -801,8 +1213,6 @@
 	fwcfg->num_uf_bufs    = be16_to_cpu(fwcfg->num_uf_bufs);
 	fwcfg->num_rports     = be16_to_cpu(fwcfg->num_rports);
 
-	iocfc->cfgdone = BFA_TRUE;
-
 	/*
 	 * configure queue register offsets as learnt from firmware
 	 */
@@ -818,22 +1228,13 @@
 	 */
 	bfa_msix_queue_install(bfa);
 
-	/*
-	 * Configuration is complete - initialize/start submodules
-	 */
-	bfa_fcport_init(bfa);
-
-	if (iocfc->action == BFA_IOCFC_ACT_INIT) {
-		if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-			bfa_cb_queue(bfa, &iocfc->init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-	} else {
-		if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-					bfa_iocfc_enable_cb, bfa);
-		bfa_iocfc_start_submod(bfa);
+	if (bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn != 0) {
+		bfa->ioc.attr->pwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_pwwn;
+		bfa->ioc.attr->nwwn = bfa->iocfc.cfgrsp->pbc_cfg.pbc_nwwn;
+		bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
 	}
 }
+
 void
 bfa_iocfc_reset_queues(struct bfa_s *bfa)
 {
@@ -847,6 +1248,23 @@
 	}
 }
 
+/*
+ *	Process FAA pwwn msg from fw.
+ */
+static void
+bfa_iocfc_process_faa_addr(struct bfa_s *bfa, struct bfi_faa_addr_msg_s *msg)
+{
+	struct bfa_iocfc_s		*iocfc   = &bfa->iocfc;
+	struct bfi_iocfc_cfgrsp_s	*cfgrsp  = iocfc->cfgrsp;
+
+	cfgrsp->pbc_cfg.pbc_pwwn = msg->pwwn;
+	cfgrsp->pbc_cfg.pbc_nwwn = msg->nwwn;
+
+	bfa->ioc.attr->pwwn = msg->pwwn;
+	bfa->ioc.attr->nwwn = msg->nwwn;
+	bfa_fsm_send_event(iocfc, IOCFC_E_CFG_DONE);
+}
+
 /* Fabric Assigned Address specific functions */
 
 /*
@@ -862,84 +1280,13 @@
 		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
 			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
 	} else {
-		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
-			return BFA_STATUS_IOC_NON_OP;
+		return BFA_STATUS_IOC_NON_OP;
 	}
 
 	return BFA_STATUS_OK;
 }
 
 bfa_status_t
-bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
-{
-	struct bfi_faa_en_dis_s faa_enable_req;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	bfa_status_t		status;
-
-	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-	status = bfa_faa_validate_request(bfa);
-	if (status != BFA_STATUS_OK)
-		return status;
-
-	if (iocfc->faa_args.busy == BFA_TRUE)
-		return BFA_STATUS_DEVBUSY;
-
-	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
-		return BFA_STATUS_FAA_ENABLED;
-
-	if (bfa_fcport_is_trunk_enabled(bfa))
-		return BFA_STATUS_ERROR_TRUNK_ENABLED;
-
-	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
-	iocfc->faa_args.busy = BFA_TRUE;
-
-	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
-		BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
-
-	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
-			sizeof(struct bfi_faa_en_dis_s));
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
-bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
-		void *cbarg)
-{
-	struct bfi_faa_en_dis_s faa_disable_req;
-	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
-	bfa_status_t		status;
-
-	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
-	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
-
-	status = bfa_faa_validate_request(bfa);
-	if (status != BFA_STATUS_OK)
-		return status;
-
-	if (iocfc->faa_args.busy == BFA_TRUE)
-		return BFA_STATUS_DEVBUSY;
-
-	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
-		return BFA_STATUS_FAA_DISABLED;
-
-	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
-	iocfc->faa_args.busy = BFA_TRUE;
-
-	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
-	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
-		BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
-
-	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
-		sizeof(struct bfi_faa_en_dis_s));
-
-	return BFA_STATUS_OK;
-}
-
-bfa_status_t
 bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
 		bfa_cb_iocfc_t cbfn, void *cbarg)
 {
@@ -970,38 +1317,6 @@
 }
 
 /*
- *	FAA enable response
- */
-static void
-bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
-		struct bfi_faa_en_dis_rsp_s *rsp)
-{
-	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-	bfa_status_t	status = rsp->status;
-
-	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-	iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
- *	FAA disable response
- */
-static void
-bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
-		struct bfi_faa_en_dis_rsp_s *rsp)
-{
-	void	*cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
-	bfa_status_t	status = rsp->status;
-
-	WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
-
-	iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
-	iocfc->faa_args.busy = BFA_FALSE;
-}
-
-/*
  *	FAA query response
  */
 static void
@@ -1030,25 +1345,10 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
-		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-		return;
-	}
-
-	if (status != BFA_STATUS_OK) {
-		bfa_isr_disable(bfa);
-		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				     bfa_iocfc_init_cb, bfa);
-		else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
-			bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
-					bfa_iocfc_enable_cb, bfa);
-		return;
-	}
-
-	bfa_iocfc_send_cfg(bfa);
-	bfa_dconf_modinit(bfa);
+	if (status == BFA_STATUS_OK)
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_ENABLED);
+	else
+		bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1059,17 +1359,7 @@
 {
 	struct bfa_s	*bfa = bfa_arg;
 
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
-		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
-			     bfa);
-	else {
-		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
-		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
-			     bfa);
-	}
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_DISABLED);
 }
 
 /*
@@ -1081,13 +1371,7 @@
 	struct bfa_s	*bfa = bfa_arg;
 
 	bfa->queue_process = BFA_FALSE;
-
-	bfa_isr_disable(bfa);
-	bfa_iocfc_disable_submod(bfa);
-
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
-		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
-			     bfa);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_IOC_FAILED);
 }
 
 /*
@@ -1102,7 +1386,6 @@
 	bfa_isr_enable(bfa);
 }
 
-
 /*
  * Query IOC memory requirement information.
  */
@@ -1178,6 +1461,12 @@
 	INIT_LIST_HEAD(&bfa->comp_q);
 	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
 		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
+
+	bfa->iocfc.cb_reqd = BFA_FALSE;
+	bfa->iocfc.op_status = BFA_STATUS_OK;
+	bfa->iocfc.submod_enabled = BFA_FALSE;
+
+	bfa_fsm_set_state(&bfa->iocfc, bfa_iocfc_sm_stopped);
 }
 
 /*
@@ -1186,8 +1475,7 @@
 void
 bfa_iocfc_init(struct bfa_s *bfa)
 {
-	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
-	bfa_ioc_enable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_INIT);
 }
 
 /*
@@ -1197,8 +1485,7 @@
 void
 bfa_iocfc_start(struct bfa_s *bfa)
 {
-	if (bfa->iocfc.cfgdone)
-		bfa_iocfc_start_submod(bfa);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_START);
 }
 
 /*
@@ -1208,12 +1495,8 @@
 void
 bfa_iocfc_stop(struct bfa_s *bfa)
 {
-	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
-
 	bfa->queue_process = BFA_FALSE;
-	bfa_dconf_modexit(bfa);
-	if (BFA_DCONF_MOD(bfa)->flashdone == BFA_TRUE)
-		bfa_ioc_disable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_STOP);
 }
 
 void
@@ -1233,13 +1516,9 @@
 	case BFI_IOCFC_I2H_UPDATEQ_RSP:
 		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
 		break;
-	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
-		bfa_faa_enable_reply(iocfc,
-			(struct bfi_faa_en_dis_rsp_s *)msg);
-		break;
-	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
-		bfa_faa_disable_reply(iocfc,
-			(struct bfi_faa_en_dis_rsp_s *)msg);
+	case BFI_IOCFC_I2H_ADDR_MSG:
+		bfa_iocfc_process_faa_addr(bfa,
+				(struct bfi_faa_addr_msg_s *)msg);
 		break;
 	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
 		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
@@ -1313,8 +1592,8 @@
 {
 	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
 		     "IOC Enable");
-	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
-	bfa_ioc_enable(&bfa->ioc);
+	bfa->iocfc.cb_reqd = BFA_TRUE;
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_ENABLE);
 }
 
 void
@@ -1322,17 +1601,16 @@
 {
 	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
 		     "IOC Disable");
-	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
 
 	bfa->queue_process = BFA_FALSE;
-	bfa_ioc_disable(&bfa->ioc);
+	bfa_fsm_send_event(&bfa->iocfc, IOCFC_E_DISABLE);
 }
 
-
 bfa_boolean_t
 bfa_iocfc_is_operational(struct bfa_s *bfa)
 {
-	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
+	return bfa_ioc_is_operational(&bfa->ioc) &&
+		bfa_fsm_cmp_state(&bfa->iocfc, bfa_iocfc_sm_operational);
 }
 
 /*
@@ -1574,16 +1852,6 @@
 	}
 }
 
-void
-bfa_iocfc_cb_dconf_modinit(struct bfa_s *bfa, bfa_status_t status)
-{
-	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT) {
-		if (bfa->iocfc.cfgdone == BFA_TRUE)
-			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
-				bfa_iocfc_init_cb, bfa);
-	}
-}
-
 /*
  * Return the list of PCI vendor/device id lists supported by this
  * BFA instance.