Rasesh Mody8b230ed2010-08-23 20:24:12 -07001/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18
19#include "bfa_ioc.h"
20#include "cna.h"
21#include "bfi.h"
22#include "bfi_ctreg.h"
23#include "bfa_defs.h"
24
25/**
26 * IOC local definitions
27 */
28
Rasesh Mody8b230ed2010-08-23 20:24:12 -070029/**
30 * ASIC-specific macros: see bfa_ioc_ct.c for details.
31 */
32
33#define bfa_ioc_firmware_lock(__ioc) \
34 ((__ioc)->ioc_hwif->ioc_firmware_lock(__ioc))
35#define bfa_ioc_firmware_unlock(__ioc) \
36 ((__ioc)->ioc_hwif->ioc_firmware_unlock(__ioc))
37#define bfa_ioc_reg_init(__ioc) ((__ioc)->ioc_hwif->ioc_reg_init(__ioc))
38#define bfa_ioc_map_port(__ioc) ((__ioc)->ioc_hwif->ioc_map_port(__ioc))
Rasesh Mody1d32f762010-12-23 21:45:09 +000039#define bfa_ioc_notify_fail(__ioc) \
40 ((__ioc)->ioc_hwif->ioc_notify_fail(__ioc))
41#define bfa_ioc_sync_join(__ioc) \
42 ((__ioc)->ioc_hwif->ioc_sync_join(__ioc))
43#define bfa_ioc_sync_leave(__ioc) \
44 ((__ioc)->ioc_hwif->ioc_sync_leave(__ioc))
45#define bfa_ioc_sync_ack(__ioc) \
46 ((__ioc)->ioc_hwif->ioc_sync_ack(__ioc))
47#define bfa_ioc_sync_complete(__ioc) \
48 ((__ioc)->ioc_hwif->ioc_sync_complete(__ioc))
Rasesh Mody8b230ed2010-08-23 20:24:12 -070049
Rasesh Mody8b230ed2010-08-23 20:24:12 -070050#define bfa_ioc_mbox_cmd_pending(__ioc) \
51 (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
52 readl((__ioc)->ioc_regs.hfn_mbox_cmd))
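/*
 * Illustrative sketch (not part of the driver): the macros above simply
 * indirect through the per-ASIC ops table that bfa_nw_ioc_set_ct_hwif()
 * installs in ioc->ioc_hwif.  The member names below are taken from the
 * macro bodies; the struct tag and the example_* functions are assumptions.
 */
#if 0
static struct bfa_ioc_hwif example_hwif = {
	.ioc_firmware_lock	= example_firmware_lock,
	.ioc_firmware_unlock	= example_firmware_unlock,
	.ioc_reg_init		= example_reg_init,
	.ioc_map_port		= example_map_port,
	.ioc_notify_fail	= example_notify_fail,
	.ioc_sync_join		= example_sync_join,
	.ioc_sync_leave		= example_sync_leave,
	.ioc_sync_ack		= example_sync_ack,
	.ioc_sync_complete	= example_sync_complete,
};
#endif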
53
Rasesh Modyb7ee31c52010-10-05 15:46:05 +000054static bool bfa_nw_auto_recover = true;
Rasesh Mody8b230ed2010-08-23 20:24:12 -070055
56/*
57 * forward declarations
58 */
59static void bfa_ioc_hw_sem_get(struct bfa_ioc *ioc);
60static void bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc);
61static void bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force);
62static void bfa_ioc_send_enable(struct bfa_ioc *ioc);
63static void bfa_ioc_send_disable(struct bfa_ioc *ioc);
64static void bfa_ioc_send_getattr(struct bfa_ioc *ioc);
65static void bfa_ioc_hb_monitor(struct bfa_ioc *ioc);
66static void bfa_ioc_hb_stop(struct bfa_ioc *ioc);
67static void bfa_ioc_reset(struct bfa_ioc *ioc, bool force);
68static void bfa_ioc_mbox_poll(struct bfa_ioc *ioc);
69static void bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc);
70static void bfa_ioc_recover(struct bfa_ioc *ioc);
71static void bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc);
72static void bfa_ioc_disable_comp(struct bfa_ioc *ioc);
73static void bfa_ioc_lpu_stop(struct bfa_ioc *ioc);
Rasesh Mody1d32f762010-12-23 21:45:09 +000074static void bfa_ioc_fail_notify(struct bfa_ioc *ioc);
75static void bfa_ioc_pf_enabled(struct bfa_ioc *ioc);
76static void bfa_ioc_pf_disabled(struct bfa_ioc *ioc);
77static void bfa_ioc_pf_initfailed(struct bfa_ioc *ioc);
78static void bfa_ioc_pf_failed(struct bfa_ioc *ioc);
79static void bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc);
Rasesh Mody8a891422010-08-25 23:00:27 -070080static void bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type,
81 u32 boot_param);
82static u32 bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr);
83static u32 bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr);
84static void bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc,
85 char *serial_num);
86static void bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc,
87 char *fw_ver);
88static void bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc,
89 char *chip_rev);
90static void bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc,
91 char *optrom_ver);
92static void bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc,
93 char *manufacturer);
94static void bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model);
95static u64 bfa_ioc_get_pwwn(struct bfa_ioc *ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -070096
97/**
Rasesh Mody1d32f762010-12-23 21:45:09 +000098 * IOC state machine definitions/declarations
Rasesh Mody8b230ed2010-08-23 20:24:12 -070099 */
100enum ioc_event {
Rasesh Mody1d32f762010-12-23 21:45:09 +0000101 IOC_E_RESET = 1, /*!< IOC reset request */
102 IOC_E_ENABLE = 2, /*!< IOC enable request */
103 IOC_E_DISABLE = 3, /*!< IOC disable request */
104 IOC_E_DETACH = 4, /*!< driver detach cleanup */
105 IOC_E_ENABLED = 5, /*!< f/w enabled */
106 IOC_E_FWRSP_GETATTR = 6, /*!< IOC get attribute response */
107 IOC_E_DISABLED = 7, /*!< f/w disabled */
108 IOC_E_INITFAILED = 8, /*!< failure notice by iocpf sm */
109 IOC_E_PFAILED = 9, /*!< failure notice by iocpf sm */
110 IOC_E_HBFAIL = 10, /*!< heartbeat failure */
111 IOC_E_HWERROR = 11, /*!< hardware error interrupt */
112 IOC_E_TIMEOUT = 12, /*!< timeout */
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700113};
114
Rasesh Mody1d32f762010-12-23 21:45:09 +0000115bfa_fsm_state_decl(bfa_ioc, uninit, struct bfa_ioc, enum ioc_event);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700116bfa_fsm_state_decl(bfa_ioc, reset, struct bfa_ioc, enum ioc_event);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700117bfa_fsm_state_decl(bfa_ioc, enabling, struct bfa_ioc, enum ioc_event);
118bfa_fsm_state_decl(bfa_ioc, getattr, struct bfa_ioc, enum ioc_event);
119bfa_fsm_state_decl(bfa_ioc, op, struct bfa_ioc, enum ioc_event);
Rasesh Mody1d32f762010-12-23 21:45:09 +0000120bfa_fsm_state_decl(bfa_ioc, fail_retry, struct bfa_ioc, enum ioc_event);
121bfa_fsm_state_decl(bfa_ioc, fail, struct bfa_ioc, enum ioc_event);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700122bfa_fsm_state_decl(bfa_ioc, disabling, struct bfa_ioc, enum ioc_event);
123bfa_fsm_state_decl(bfa_ioc, disabled, struct bfa_ioc, enum ioc_event);
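/*
 * Reading aid (sketch, not new driver code): each bfa_fsm_state_decl() line
 * above declares a handler/entry-action pair with a fixed naming scheme.
 * The "reset" declaration, for instance, corresponds to the two functions
 * defined later in this file:
 */
#if 0
static void bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc);	/* entry action */
static void bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event); /* event handler */
#endif
/*
 * bfa_fsm_set_state() installs the new handler and runs its _entry() action;
 * bfa_fsm_send_event() dispatches an event to whichever handler is current.
 * (Behaviour inferred from usage in this file; the macros themselves are
 * defined in the bfa/cna headers.)
 */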
124
125static struct bfa_sm_table ioc_sm_table[] = {
Rasesh Mody1d32f762010-12-23 21:45:09 +0000126 {BFA_SM(bfa_ioc_sm_uninit), BFA_IOC_UNINIT},
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700127 {BFA_SM(bfa_ioc_sm_reset), BFA_IOC_RESET},
Rasesh Mody1d32f762010-12-23 21:45:09 +0000128 {BFA_SM(bfa_ioc_sm_enabling), BFA_IOC_ENABLING},
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700129 {BFA_SM(bfa_ioc_sm_getattr), BFA_IOC_GETATTR},
130 {BFA_SM(bfa_ioc_sm_op), BFA_IOC_OPERATIONAL},
Rasesh Mody1d32f762010-12-23 21:45:09 +0000131 {BFA_SM(bfa_ioc_sm_fail_retry), BFA_IOC_INITFAIL},
132 {BFA_SM(bfa_ioc_sm_fail), BFA_IOC_FAIL},
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700133 {BFA_SM(bfa_ioc_sm_disabling), BFA_IOC_DISABLING},
134 {BFA_SM(bfa_ioc_sm_disabled), BFA_IOC_DISABLED},
135};
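/*
 * Illustrative sketch (hypothetical helper, not part of the driver): the
 * table above maps a state-machine handler back to an externally visible
 * BFA_IOC_xxx state for reporting.  A lookup simply walks the table until
 * the handler matches; the .sm/.state member names and the bfa_sm_t type
 * are assumptions taken from how the BFA_SM() entries are built here.
 */
#if 0
static int
example_sm_to_state(struct bfa_sm_table *smt, int nentries, bfa_sm_t sm)
{
	int i;

	for (i = 0; i < nentries; i++)
		if (smt[i].sm == sm)
			return smt[i].state;
	return BFA_IOC_UNINIT;
}
#endif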
136
137/**
Rasesh Mody1d32f762010-12-23 21:45:09 +0000138 * IOCPF state machine definitions/declarations
139 */
140
141/*
142 * Forward declarations for iocpf state machine
143 */
144static void bfa_iocpf_enable(struct bfa_ioc *ioc);
145static void bfa_iocpf_disable(struct bfa_ioc *ioc);
146static void bfa_iocpf_fail(struct bfa_ioc *ioc);
147static void bfa_iocpf_initfail(struct bfa_ioc *ioc);
148static void bfa_iocpf_getattrfail(struct bfa_ioc *ioc);
149static void bfa_iocpf_stop(struct bfa_ioc *ioc);
150
151/**
152 * IOCPF state machine events
153 */
154enum iocpf_event {
155 IOCPF_E_ENABLE = 1, /*!< IOCPF enable request */
156 IOCPF_E_DISABLE = 2, /*!< IOCPF disable request */
157 IOCPF_E_STOP = 3, /*!< stop on driver detach */
158 IOCPF_E_FWREADY = 4, /*!< f/w initialization done */
159 IOCPF_E_FWRSP_ENABLE = 5, /*!< enable f/w response */
160 IOCPF_E_FWRSP_DISABLE = 6, /*!< disable f/w response */
161 IOCPF_E_FAIL = 7, /*!< failure notice by ioc sm */
162 IOCPF_E_INITFAIL = 8, /*!< init fail notice by ioc sm */
163 IOCPF_E_GETATTRFAIL = 9, /*!< getattr fail notice by ioc sm */
164 IOCPF_E_SEMLOCKED = 10, /*!< h/w semaphore is locked */
165 IOCPF_E_TIMEOUT = 11, /*!< f/w response timeout */
166};
167
168/**
169 * IOCPF states
170 */
171enum bfa_iocpf_state {
172 BFA_IOCPF_RESET = 1, /*!< IOC is in reset state */
173 BFA_IOCPF_SEMWAIT = 2, /*!< Waiting for IOC h/w semaphore */
174 BFA_IOCPF_HWINIT = 3, /*!< IOC h/w is being initialized */
175 BFA_IOCPF_READY = 4, /*!< IOCPF is initialized */
176 BFA_IOCPF_INITFAIL = 5, /*!< IOCPF initialization failed */
177 BFA_IOCPF_FAIL = 6, /*!< IOCPF failed */
178 BFA_IOCPF_DISABLING = 7, /*!< IOCPF is being disabled */
179 BFA_IOCPF_DISABLED = 8, /*!< IOCPF is disabled */
180 BFA_IOCPF_FWMISMATCH = 9, /*!< IOC f/w different from drivers */
181};
182
183bfa_fsm_state_decl(bfa_iocpf, reset, struct bfa_iocpf, enum iocpf_event);
184bfa_fsm_state_decl(bfa_iocpf, fwcheck, struct bfa_iocpf, enum iocpf_event);
185bfa_fsm_state_decl(bfa_iocpf, mismatch, struct bfa_iocpf, enum iocpf_event);
186bfa_fsm_state_decl(bfa_iocpf, semwait, struct bfa_iocpf, enum iocpf_event);
187bfa_fsm_state_decl(bfa_iocpf, hwinit, struct bfa_iocpf, enum iocpf_event);
188bfa_fsm_state_decl(bfa_iocpf, enabling, struct bfa_iocpf, enum iocpf_event);
189bfa_fsm_state_decl(bfa_iocpf, ready, struct bfa_iocpf, enum iocpf_event);
190bfa_fsm_state_decl(bfa_iocpf, initfail_sync, struct bfa_iocpf,
191 enum iocpf_event);
192bfa_fsm_state_decl(bfa_iocpf, initfail, struct bfa_iocpf, enum iocpf_event);
193bfa_fsm_state_decl(bfa_iocpf, fail_sync, struct bfa_iocpf, enum iocpf_event);
194bfa_fsm_state_decl(bfa_iocpf, fail, struct bfa_iocpf, enum iocpf_event);
195bfa_fsm_state_decl(bfa_iocpf, disabling, struct bfa_iocpf, enum iocpf_event);
196bfa_fsm_state_decl(bfa_iocpf, disabling_sync, struct bfa_iocpf,
197 enum iocpf_event);
198bfa_fsm_state_decl(bfa_iocpf, disabled, struct bfa_iocpf, enum iocpf_event);
199
200static struct bfa_sm_table iocpf_sm_table[] = {
201 {BFA_SM(bfa_iocpf_sm_reset), BFA_IOCPF_RESET},
202 {BFA_SM(bfa_iocpf_sm_fwcheck), BFA_IOCPF_FWMISMATCH},
203 {BFA_SM(bfa_iocpf_sm_mismatch), BFA_IOCPF_FWMISMATCH},
204 {BFA_SM(bfa_iocpf_sm_semwait), BFA_IOCPF_SEMWAIT},
205 {BFA_SM(bfa_iocpf_sm_hwinit), BFA_IOCPF_HWINIT},
206 {BFA_SM(bfa_iocpf_sm_enabling), BFA_IOCPF_HWINIT},
207 {BFA_SM(bfa_iocpf_sm_ready), BFA_IOCPF_READY},
208 {BFA_SM(bfa_iocpf_sm_initfail_sync), BFA_IOCPF_INITFAIL},
209 {BFA_SM(bfa_iocpf_sm_initfail), BFA_IOCPF_INITFAIL},
210 {BFA_SM(bfa_iocpf_sm_fail_sync), BFA_IOCPF_FAIL},
211 {BFA_SM(bfa_iocpf_sm_fail), BFA_IOCPF_FAIL},
212 {BFA_SM(bfa_iocpf_sm_disabling), BFA_IOCPF_DISABLING},
213 {BFA_SM(bfa_iocpf_sm_disabling_sync), BFA_IOCPF_DISABLING},
214 {BFA_SM(bfa_iocpf_sm_disabled), BFA_IOCPF_DISABLED},
215};
216
217/**
218 * IOC State Machine
219 */
220
221/**
222 * Beginning state. IOC uninit state.
223 */
224static void
225bfa_ioc_sm_uninit_entry(struct bfa_ioc *ioc)
226{
227}
228
229/**
230 * IOC is in uninit state.
231 */
232static void
233bfa_ioc_sm_uninit(struct bfa_ioc *ioc, enum ioc_event event)
234{
235 switch (event) {
236 case IOC_E_RESET:
237 bfa_fsm_set_state(ioc, bfa_ioc_sm_reset);
238 break;
239
240 default:
241 bfa_sm_fault(ioc, event);
242 }
243}
244
245/**
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700246 * Reset entry actions -- initialize state machine
247 */
248static void
249bfa_ioc_sm_reset_entry(struct bfa_ioc *ioc)
250{
Rasesh Mody1d32f762010-12-23 21:45:09 +0000251 bfa_fsm_set_state(&ioc->iocpf, bfa_iocpf_sm_reset);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700252}
253
254/**
Rasesh Mody1d32f762010-12-23 21:45:09 +0000255 * IOC is in reset state.
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700256 */
257static void
258bfa_ioc_sm_reset(struct bfa_ioc *ioc, enum ioc_event event)
259{
260 switch (event) {
261 case IOC_E_ENABLE:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000262 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700263 break;
264
265 case IOC_E_DISABLE:
266 bfa_ioc_disable_comp(ioc);
267 break;
268
269 case IOC_E_DETACH:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000270 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
271 break;
272
273 default:
274 bfa_sm_fault(ioc, event);
275 }
276}
277
278static void
279bfa_ioc_sm_enabling_entry(struct bfa_ioc *ioc)
280{
281 bfa_iocpf_enable(ioc);
282}
283
284/**
285 * Host IOC function is being enabled, awaiting response from firmware.
286 * Semaphore is acquired.
287 */
288static void
289bfa_ioc_sm_enabling(struct bfa_ioc *ioc, enum ioc_event event)
290{
291 switch (event) {
292 case IOC_E_ENABLED:
293 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
294 break;
295
296 case IOC_E_PFAILED:
297 /* !!! fall through !!! */
298 case IOC_E_HWERROR:
299 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
300 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
301 if (event != IOC_E_PFAILED)
302 bfa_iocpf_initfail(ioc);
303 break;
304
305 case IOC_E_DISABLE:
306 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
307 break;
308
309 case IOC_E_DETACH:
310 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
311 bfa_iocpf_stop(ioc);
312 break;
313
314 case IOC_E_ENABLE:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700315 break;
316
317 default:
318 bfa_sm_fault(ioc, event);
319 }
320}
321
322/**
323 * IOC attributes are being queried from firmware; start the getattr timer.
324 */
325static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700326bfa_ioc_sm_getattr_entry(struct bfa_ioc *ioc)
327{
Rasesh Mody1d32f762010-12-23 21:45:09 +0000328 mod_timer(&ioc->ioc_timer, jiffies +
329 msecs_to_jiffies(BFA_IOC_TOV));
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700330 bfa_ioc_send_getattr(ioc);
331}
332
333/**
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700334 * IOC configuration in progress. Timer is active.
335 */
336static void
337bfa_ioc_sm_getattr(struct bfa_ioc *ioc, enum ioc_event event)
338{
339 switch (event) {
340 case IOC_E_FWRSP_GETATTR:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000341 del_timer(&ioc->ioc_timer);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700342 bfa_ioc_check_attr_wwns(ioc);
343 bfa_fsm_set_state(ioc, bfa_ioc_sm_op);
344 break;
345
Rasesh Mody1d32f762010-12-23 21:45:09 +0000346 case IOC_E_PFAILED:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700347 case IOC_E_HWERROR:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000348 del_timer(&ioc->ioc_timer);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700349 /* fall through */
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700350 case IOC_E_TIMEOUT:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000351 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
352 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
353 if (event != IOC_E_PFAILED)
354 bfa_iocpf_getattrfail(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700355 break;
356
357 case IOC_E_DISABLE:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000358 del_timer(&ioc->ioc_timer);
359 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
360 break;
361
362 case IOC_E_ENABLE:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700363 break;
364
365 default:
366 bfa_sm_fault(ioc, event);
367 }
368}
369
370static void
371bfa_ioc_sm_op_entry(struct bfa_ioc *ioc)
372{
373 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_OK);
374 bfa_ioc_hb_monitor(ioc);
375}
376
377static void
378bfa_ioc_sm_op(struct bfa_ioc *ioc, enum ioc_event event)
379{
380 switch (event) {
381 case IOC_E_ENABLE:
382 break;
383
384 case IOC_E_DISABLE:
385 bfa_ioc_hb_stop(ioc);
386 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
387 break;
388
Rasesh Mody1d32f762010-12-23 21:45:09 +0000389 case IOC_E_PFAILED:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700390 case IOC_E_HWERROR:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700391 bfa_ioc_hb_stop(ioc);
392 /* !!! fall through !!! */
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700393 case IOC_E_HBFAIL:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000394 bfa_ioc_fail_notify(ioc);
395 if (ioc->iocpf.auto_recover)
396 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail_retry);
397 else
398 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
399
400 if (event != IOC_E_PFAILED)
401 bfa_iocpf_fail(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700402 break;
403
404 default:
405 bfa_sm_fault(ioc, event);
406 }
407}
408
409static void
410bfa_ioc_sm_disabling_entry(struct bfa_ioc *ioc)
411{
Rasesh Mody1d32f762010-12-23 21:45:09 +0000412 bfa_iocpf_disable(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700413}
414
415/**
416 * IOC is being disabled
417 */
418static void
419bfa_ioc_sm_disabling(struct bfa_ioc *ioc, enum ioc_event event)
420{
421 switch (event) {
Rasesh Mody1d32f762010-12-23 21:45:09 +0000422 case IOC_E_DISABLED:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700423 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabled);
424 break;
425
426 case IOC_E_HWERROR:
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700427 /*
Rasesh Mody1d32f762010-12-23 21:45:09 +0000428 * No state change. Will move to disabled state
429 * after iocpf sm completes failure processing and
430 * moves to disabled state.
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700431 */
Rasesh Mody1d32f762010-12-23 21:45:09 +0000432 bfa_iocpf_fail(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700433 break;
434
435 default:
436 bfa_sm_fault(ioc, event);
437 }
438}
439
440/**
441 * IOC disable completion entry.
442 */
443static void
444bfa_ioc_sm_disabled_entry(struct bfa_ioc *ioc)
445{
446 bfa_ioc_disable_comp(ioc);
447}
448
449static void
450bfa_ioc_sm_disabled(struct bfa_ioc *ioc, enum ioc_event event)
451{
452 switch (event) {
453 case IOC_E_ENABLE:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000454 bfa_fsm_set_state(ioc, bfa_ioc_sm_enabling);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700455 break;
456
457 case IOC_E_DISABLE:
458 ioc->cbfn->disable_cbfn(ioc->bfa);
459 break;
460
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700461 case IOC_E_DETACH:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000462 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
463 bfa_iocpf_stop(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700464 break;
465
466 default:
467 bfa_sm_fault(ioc, event);
468 }
469}
470
471static void
Rasesh Mody1d32f762010-12-23 21:45:09 +0000472bfa_ioc_sm_fail_retry_entry(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700473{
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700474}
475
476/**
Rasesh Mody1d32f762010-12-23 21:45:09 +0000477 * Hardware initialization retry.
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700478 */
479static void
Rasesh Mody1d32f762010-12-23 21:45:09 +0000480bfa_ioc_sm_fail_retry(struct bfa_ioc *ioc, enum ioc_event event)
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700481{
482 switch (event) {
Rasesh Mody1d32f762010-12-23 21:45:09 +0000483 case IOC_E_ENABLED:
484 bfa_fsm_set_state(ioc, bfa_ioc_sm_getattr);
485 break;
486
487 case IOC_E_PFAILED:
488 case IOC_E_HWERROR:
489 /**
490 * Initialization retry failed.
491 */
492 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
493 if (event != IOC_E_PFAILED)
494 bfa_iocpf_initfail(ioc);
495 break;
496
497 case IOC_E_INITFAILED:
498 bfa_fsm_set_state(ioc, bfa_ioc_sm_fail);
499 break;
500
501 case IOC_E_ENABLE:
502 break;
503
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700504 case IOC_E_DISABLE:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000505 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700506 break;
507
508 case IOC_E_DETACH:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000509 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
510 bfa_iocpf_stop(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700511 break;
512
513 default:
514 bfa_sm_fault(ioc, event);
515 }
516}
517
518static void
Rasesh Mody1d32f762010-12-23 21:45:09 +0000519bfa_ioc_sm_fail_entry(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700520{
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700521}
522
523/**
Rasesh Mody1d32f762010-12-23 21:45:09 +0000524 * IOC failure.
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700525 */
526static void
Rasesh Mody1d32f762010-12-23 21:45:09 +0000527bfa_ioc_sm_fail(struct bfa_ioc *ioc, enum ioc_event event)
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700528{
529 switch (event) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700530 case IOC_E_ENABLE:
531 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
532 break;
533
534 case IOC_E_DISABLE:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000535 bfa_fsm_set_state(ioc, bfa_ioc_sm_disabling);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700536 break;
537
Rasesh Mody1d32f762010-12-23 21:45:09 +0000538 case IOC_E_DETACH:
539 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
540 bfa_iocpf_stop(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700541 break;
542
543 case IOC_E_HWERROR:
Rasesh Mody1d32f762010-12-23 21:45:09 +0000544 /* HB failure notification, ignore. */
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700545 break;
Rasesh Mody1d32f762010-12-23 21:45:09 +0000546
Rasesh Mody8b230ed2010-08-23 20:24:12 -0700547 default:
548 bfa_sm_fault(ioc, event);
549 }
550}
551
552/**
Rasesh Mody1d32f762010-12-23 21:45:09 +0000553 * IOCPF State Machine
554 */
555
556/**
557 * Reset entry actions -- initialize state machine
558 */
559static void
560bfa_iocpf_sm_reset_entry(struct bfa_iocpf *iocpf)
561{
562 iocpf->retry_count = 0;
563 iocpf->auto_recover = bfa_nw_auto_recover;
564}
565
566/**
567 * Beginning state. IOC is in reset state.
568 */
569static void
570bfa_iocpf_sm_reset(struct bfa_iocpf *iocpf, enum iocpf_event event)
571{
572 switch (event) {
573 case IOCPF_E_ENABLE:
574 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
575 break;
576
577 case IOCPF_E_STOP:
578 break;
579
580 default:
581 bfa_sm_fault(iocpf->ioc, event);
582 }
583}
584
585/**
586 * Semaphore should be acquired for version check.
587 */
588static void
589bfa_iocpf_sm_fwcheck_entry(struct bfa_iocpf *iocpf)
590{
591 bfa_ioc_hw_sem_get(iocpf->ioc);
592}
593
594/**
595 * Awaiting h/w semaphore to continue with version check.
596 */
597static void
598bfa_iocpf_sm_fwcheck(struct bfa_iocpf *iocpf, enum iocpf_event event)
599{
600 struct bfa_ioc *ioc = iocpf->ioc;
601
602 switch (event) {
603 case IOCPF_E_SEMLOCKED:
604 if (bfa_ioc_firmware_lock(ioc)) {
605 if (bfa_ioc_sync_complete(ioc)) {
606 iocpf->retry_count = 0;
607 bfa_ioc_sync_join(ioc);
608 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
609 } else {
610 bfa_ioc_firmware_unlock(ioc);
611 bfa_nw_ioc_hw_sem_release(ioc);
612 mod_timer(&ioc->sem_timer, jiffies +
613 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
614 }
615 } else {
616 bfa_nw_ioc_hw_sem_release(ioc);
617 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_mismatch);
618 }
619 break;
620
621 case IOCPF_E_DISABLE:
622 bfa_ioc_hw_sem_get_cancel(ioc);
623 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
624 bfa_ioc_pf_disabled(ioc);
625 break;
626
627 case IOCPF_E_STOP:
628 bfa_ioc_hw_sem_get_cancel(ioc);
629 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
630 break;
631
632 default:
633 bfa_sm_fault(ioc, event);
634 }
635}
636
637/**
638 * Notify enable completion callback
639 */
640static void
641bfa_iocpf_sm_mismatch_entry(struct bfa_iocpf *iocpf)
642{
643 /* Call only the first time sm enters fwmismatch state. */
644 if (iocpf->retry_count == 0)
645 bfa_ioc_pf_fwmismatch(iocpf->ioc);
646
647 iocpf->retry_count++;
648 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
649 msecs_to_jiffies(BFA_IOC_TOV));
650}
651
652/**
653 * Awaiting firmware version match.
654 */
655static void
656bfa_iocpf_sm_mismatch(struct bfa_iocpf *iocpf, enum iocpf_event event)
657{
658 struct bfa_ioc *ioc = iocpf->ioc;
659
660 switch (event) {
661 case IOCPF_E_TIMEOUT:
662 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fwcheck);
663 break;
664
665 case IOCPF_E_DISABLE:
666 del_timer(&ioc->iocpf_timer);
667 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
668 bfa_ioc_pf_disabled(ioc);
669 break;
670
671 case IOCPF_E_STOP:
672 del_timer(&ioc->iocpf_timer);
673 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
674 break;
675
676 default:
677 bfa_sm_fault(ioc, event);
678 }
679}
680
681/**
682 * Request for semaphore.
683 */
684static void
685bfa_iocpf_sm_semwait_entry(struct bfa_iocpf *iocpf)
686{
687 bfa_ioc_hw_sem_get(iocpf->ioc);
688}
689
690/**
691 * Awaiting semaphore for h/w initialization.
692 */
693static void
694bfa_iocpf_sm_semwait(struct bfa_iocpf *iocpf, enum iocpf_event event)
695{
696 struct bfa_ioc *ioc = iocpf->ioc;
697
698 switch (event) {
699 case IOCPF_E_SEMLOCKED:
700 if (bfa_ioc_sync_complete(ioc)) {
701 bfa_ioc_sync_join(ioc);
702 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
703 } else {
704 bfa_nw_ioc_hw_sem_release(ioc);
705 mod_timer(&ioc->sem_timer, jiffies +
706 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
707 }
708 break;
709
710 case IOCPF_E_DISABLE:
711 bfa_ioc_hw_sem_get_cancel(ioc);
712 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
713 break;
714
715 default:
716 bfa_sm_fault(ioc, event);
717 }
718}
719
720static void
721bfa_iocpf_sm_hwinit_entry(struct bfa_iocpf *iocpf)
722{
723 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
724 msecs_to_jiffies(BFA_IOC_TOV));
725 bfa_ioc_reset(iocpf->ioc, 0);
726}
727
728/**
729 * Hardware is being initialized. Interrupts are enabled.
730 * Holding hardware semaphore lock.
731 */
732static void
733bfa_iocpf_sm_hwinit(struct bfa_iocpf *iocpf, enum iocpf_event event)
734{
735 struct bfa_ioc *ioc = iocpf->ioc;
736
737 switch (event) {
738 case IOCPF_E_FWREADY:
739 del_timer(&ioc->iocpf_timer);
740 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_enabling);
741 break;
742
743 case IOCPF_E_INITFAIL:
744 del_timer(&ioc->iocpf_timer);
745 /*
746 * !!! fall through !!!
747 */
748
749 case IOCPF_E_TIMEOUT:
750 bfa_nw_ioc_hw_sem_release(ioc);
751 if (event == IOCPF_E_TIMEOUT)
752 bfa_ioc_pf_failed(ioc);
753 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
754 break;
755
756 case IOCPF_E_DISABLE:
757 del_timer(&ioc->iocpf_timer);
758 bfa_ioc_sync_leave(ioc);
759 bfa_nw_ioc_hw_sem_release(ioc);
760 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
761 break;
762
763 default:
764 bfa_sm_fault(ioc, event);
765 }
766}
767
768static void
769bfa_iocpf_sm_enabling_entry(struct bfa_iocpf *iocpf)
770{
771 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
772 msecs_to_jiffies(BFA_IOC_TOV));
773 bfa_ioc_send_enable(iocpf->ioc);
774}
775
776/**
777 * Host IOC function is being enabled, awaiting response from firmware.
778 * Semaphore is acquired.
779 */
780static void
781bfa_iocpf_sm_enabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
782{
783 struct bfa_ioc *ioc = iocpf->ioc;
784
785 switch (event) {
786 case IOCPF_E_FWRSP_ENABLE:
787 del_timer(&ioc->iocpf_timer);
788 bfa_nw_ioc_hw_sem_release(ioc);
789 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_ready);
790 break;
791
792 case IOCPF_E_INITFAIL:
793 del_timer(&ioc->iocpf_timer);
794 /*
795 * !!! fall through !!!
796 */
797 case IOCPF_E_TIMEOUT:
798 bfa_nw_ioc_hw_sem_release(ioc);
799 if (event == IOCPF_E_TIMEOUT)
800 bfa_ioc_pf_failed(ioc);
801 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
802 break;
803
804 case IOCPF_E_DISABLE:
805 del_timer(&ioc->iocpf_timer);
806 bfa_nw_ioc_hw_sem_release(ioc);
807 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
808 break;
809
810 case IOCPF_E_FWREADY:
811 bfa_ioc_send_enable(ioc);
812 break;
813
814 default:
815 bfa_sm_fault(ioc, event);
816 }
817}
818
819static bool
820bfa_nw_ioc_is_operational(struct bfa_ioc *ioc)
821{
822 return bfa_fsm_cmp_state(ioc, bfa_ioc_sm_op);
823}
824
825static void
826bfa_iocpf_sm_ready_entry(struct bfa_iocpf *iocpf)
827{
828 bfa_ioc_pf_enabled(iocpf->ioc);
829}
830
831static void
832bfa_iocpf_sm_ready(struct bfa_iocpf *iocpf, enum iocpf_event event)
833{
834 struct bfa_ioc *ioc = iocpf->ioc;
835
836 switch (event) {
837 case IOCPF_E_DISABLE:
838 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling);
839 break;
840
841 case IOCPF_E_GETATTRFAIL:
842 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
843 break;
844
845 case IOCPF_E_FAIL:
846 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
847 break;
848
849 case IOCPF_E_FWREADY:
850 bfa_ioc_pf_failed(ioc);
851 if (bfa_nw_ioc_is_operational(ioc))
852 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail_sync);
853 else
854 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail_sync);
855 break;
856
857 default:
858 bfa_sm_fault(ioc, event);
859 }
860}
861
862static void
863bfa_iocpf_sm_disabling_entry(struct bfa_iocpf *iocpf)
864{
865 mod_timer(&(iocpf->ioc)->iocpf_timer, jiffies +
866 msecs_to_jiffies(BFA_IOC_TOV));
867 bfa_ioc_send_disable(iocpf->ioc);
868}
869
870/**
871 * IOC is being disabled
872 */
873static void
874bfa_iocpf_sm_disabling(struct bfa_iocpf *iocpf, enum iocpf_event event)
875{
876 struct bfa_ioc *ioc = iocpf->ioc;
877
878 switch (event) {
879 case IOCPF_E_FWRSP_DISABLE:
880 case IOCPF_E_FWREADY:
881 del_timer(&ioc->iocpf_timer);
882 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
883 break;
884
885 case IOCPF_E_FAIL:
886 del_timer(&ioc->iocpf_timer);
887 /*
888 * !!! fall through !!!
889 */
890
891 case IOCPF_E_TIMEOUT:
892 writel(BFI_IOC_FAIL, ioc->ioc_regs.ioc_fwstate);
893 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
894 break;
895
896 case IOCPF_E_FWRSP_ENABLE:
897 break;
898
899 default:
900 bfa_sm_fault(ioc, event);
901 }
902}
903
904static void
905bfa_iocpf_sm_disabling_sync_entry(struct bfa_iocpf *iocpf)
906{
907 bfa_ioc_hw_sem_get(iocpf->ioc);
908}
909
910/**
911 * Awaiting the h/w semaphore; once it is held the IOC leaves the sync group and completes the disable.
912 */
913static void
914bfa_iocpf_sm_disabling_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
915{
916 struct bfa_ioc *ioc = iocpf->ioc;
917
918 switch (event) {
919 case IOCPF_E_SEMLOCKED:
920 bfa_ioc_sync_leave(ioc);
921 bfa_nw_ioc_hw_sem_release(ioc);
922 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
923 break;
924
925 case IOCPF_E_FAIL:
926 break;
927
928 default:
929 bfa_sm_fault(ioc, event);
930 }
931}
932
933/**
934 * IOC disable completion entry.
935 */
936static void
937bfa_iocpf_sm_disabled_entry(struct bfa_iocpf *iocpf)
938{
939 bfa_ioc_pf_disabled(iocpf->ioc);
940}
941
942static void
943bfa_iocpf_sm_disabled(struct bfa_iocpf *iocpf, enum iocpf_event event)
944{
945 struct bfa_ioc *ioc = iocpf->ioc;
946
947 switch (event) {
948 case IOCPF_E_ENABLE:
949 iocpf->retry_count = 0;
950 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
951 break;
952
953 case IOCPF_E_STOP:
954 bfa_ioc_firmware_unlock(ioc);
955 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
956 break;
957
958 default:
959 bfa_sm_fault(ioc, event);
960 }
961}
962
963static void
964bfa_iocpf_sm_initfail_sync_entry(struct bfa_iocpf *iocpf)
965{
966 bfa_ioc_hw_sem_get(iocpf->ioc);
967}
968
969/**
970 * Hardware initialization failed.
971 */
972static void
973bfa_iocpf_sm_initfail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
974{
975 struct bfa_ioc *ioc = iocpf->ioc;
976
977 switch (event) {
978 case IOCPF_E_SEMLOCKED:
979 bfa_ioc_notify_fail(ioc);
980 bfa_ioc_sync_ack(ioc);
981 iocpf->retry_count++;
982 if (iocpf->retry_count >= BFA_IOC_HWINIT_MAX) {
983 bfa_ioc_sync_leave(ioc);
984 bfa_nw_ioc_hw_sem_release(ioc);
985 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_initfail);
986 } else {
987 if (bfa_ioc_sync_complete(ioc))
988 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
989 else {
990 bfa_nw_ioc_hw_sem_release(ioc);
991 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
992 }
993 }
994 break;
995
996 case IOCPF_E_DISABLE:
997 bfa_ioc_hw_sem_get_cancel(ioc);
998 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
999 break;
1000
1001 case IOCPF_E_STOP:
1002 bfa_ioc_hw_sem_get_cancel(ioc);
1003 bfa_ioc_firmware_unlock(ioc);
1004 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1005 break;
1006
1007 case IOCPF_E_FAIL:
1008 break;
1009
1010 default:
1011 bfa_sm_fault(ioc, event);
1012 }
1013}
1014
1015static void
1016bfa_iocpf_sm_initfail_entry(struct bfa_iocpf *iocpf)
1017{
1018 bfa_ioc_pf_initfailed(iocpf->ioc);
1019}
1020
1021/**
1022 * Hardware initialization failed.
1023 */
1024static void
1025bfa_iocpf_sm_initfail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1026{
1027 struct bfa_ioc *ioc = iocpf->ioc;
1028
1029 switch (event) {
1030 case IOCPF_E_DISABLE:
1031 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1032 break;
1033
1034 case IOCPF_E_STOP:
1035 bfa_ioc_firmware_unlock(ioc);
1036 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_reset);
1037 break;
1038
1039 default:
1040 bfa_sm_fault(ioc, event);
1041 }
1042}
1043
1044static void
1045bfa_iocpf_sm_fail_sync_entry(struct bfa_iocpf *iocpf)
1046{
1047 /**
1048 * Mark IOC as failed in hardware and stop firmware.
1049 */
1050 bfa_ioc_lpu_stop(iocpf->ioc);
1051
1052 /**
1053 * Flush any queued up mailbox requests.
1054 */
1055 bfa_ioc_mbox_hbfail(iocpf->ioc);
1056 bfa_ioc_hw_sem_get(iocpf->ioc);
1057}
1058
1059/**
1060 * IOC is in failed state.
1061 */
1062static void
1063bfa_iocpf_sm_fail_sync(struct bfa_iocpf *iocpf, enum iocpf_event event)
1064{
1065 struct bfa_ioc *ioc = iocpf->ioc;
1066
1067 switch (event) {
1068 case IOCPF_E_SEMLOCKED:
1069 iocpf->retry_count = 0;
1070 bfa_ioc_sync_ack(ioc);
1071 bfa_ioc_notify_fail(ioc);
1072 if (!iocpf->auto_recover) {
1073 bfa_ioc_sync_leave(ioc);
1074 bfa_nw_ioc_hw_sem_release(ioc);
1075 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_fail);
1076 } else {
1077 if (bfa_ioc_sync_complete(ioc))
1078 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_hwinit);
1079 else {
1080 bfa_nw_ioc_hw_sem_release(ioc);
1081 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_semwait);
1082 }
1083 }
1084 break;
1085
1086 case IOCPF_E_DISABLE:
1087 bfa_ioc_hw_sem_get_cancel(ioc);
1088 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabling_sync);
1089 break;
1090
1091 case IOCPF_E_FAIL:
1092 break;
1093
1094 default:
1095 bfa_sm_fault(ioc, event);
1096 }
1097}
1098
1099static void
1100bfa_iocpf_sm_fail_entry(struct bfa_iocpf *iocpf)
1101{
1102}
1103
1104/**
1105 * @brief
1106 * IOC is in failed state.
1107 */
1108static void
1109bfa_iocpf_sm_fail(struct bfa_iocpf *iocpf, enum iocpf_event event)
1110{
1111 switch (event) {
1112 case IOCPF_E_DISABLE:
1113 bfa_fsm_set_state(iocpf, bfa_iocpf_sm_disabled);
1114 break;
1115
1116 default:
1117 bfa_sm_fault(iocpf->ioc, event);
1118 }
1119}
1120
1121/**
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001122 * BFA IOC private functions
1123 */
1124
1125static void
1126bfa_ioc_disable_comp(struct bfa_ioc *ioc)
1127{
1128 struct list_head *qe;
1129 struct bfa_ioc_hbfail_notify *notify;
1130
1131 ioc->cbfn->disable_cbfn(ioc->bfa);
1132
1133 /**
1134 * Notify common modules registered for notification.
1135 */
1136 list_for_each(qe, &ioc->hb_notify_q) {
1137 notify = (struct bfa_ioc_hbfail_notify *) qe;
1138 notify->cbfn(notify->cbarg);
1139 }
1140}
1141
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001142bool
Rasesh Mody8a891422010-08-25 23:00:27 -07001143bfa_nw_ioc_sem_get(void __iomem *sem_reg)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001144{
1145 u32 r32;
1146 int cnt = 0;
1147#define BFA_SEM_SPINCNT 3000
1148
1149 r32 = readl(sem_reg);
1150
1151 while (r32 && (cnt < BFA_SEM_SPINCNT)) {
1152 cnt++;
1153 udelay(2);
1154 r32 = readl(sem_reg);
1155 }
1156
1157 if (r32 == 0)
1158 return true;
1159
1160 BUG_ON(!(cnt < BFA_SEM_SPINCNT));
1161 return false;
1162}
1163
1164void
Rasesh Mody8a891422010-08-25 23:00:27 -07001165bfa_nw_ioc_sem_release(void __iomem *sem_reg)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001166{
1167 writel(1, sem_reg);
1168}
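/*
 * Usage sketch (mirrors bfa_ioc_pll_init() further down; not new driver
 * code): the init semaphore serializes chip-wide setup between functions.
 * bfa_nw_ioc_sem_get() spins briefly (up to BFA_SEM_SPINCNT reads, 2us
 * apart) and the matching release is a single register write.
 */
#if 0
static void example_chip_setup(struct bfa_ioc *ioc)
{
	if (bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg)) {
		/* ... touch resources shared by both functions ... */
		bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
	}
}
#endif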
1169
1170static void
1171bfa_ioc_hw_sem_get(struct bfa_ioc *ioc)
1172{
1173 u32 r32;
1174
1175 /**
1176 * First read to the semaphore register will return 0, subsequent reads
1177 * will return 1. Semaphore is released by writing 1 to the register
1178 */
1179 r32 = readl(ioc->ioc_regs.ioc_sem_reg);
1180 if (r32 == 0) {
Rasesh Mody1d32f762010-12-23 21:45:09 +00001181 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_SEMLOCKED);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001182 return;
1183 }
1184
1185 mod_timer(&ioc->sem_timer, jiffies +
1186 msecs_to_jiffies(BFA_IOC_HWSEM_TOV));
1187}
1188
1189void
Rasesh Mody8a891422010-08-25 23:00:27 -07001190bfa_nw_ioc_hw_sem_release(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001191{
1192 writel(1, ioc->ioc_regs.ioc_sem_reg);
1193}
1194
1195static void
1196bfa_ioc_hw_sem_get_cancel(struct bfa_ioc *ioc)
1197{
1198 del_timer(&ioc->sem_timer);
1199}
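/*
 * Retry-path sketch: when the semaphore read above returns non-zero the lock
 * is held elsewhere, so sem_timer is armed instead of posting
 * IOCPF_E_SEMLOCKED.  The timer callback installed by the enclosing driver
 * (the name below is hypothetical) just retries until the lock is obtained.
 */
#if 0
static void example_sem_timeout(void *ioc_arg)
{
	struct bfa_ioc *ioc = ioc_arg;

	bfa_ioc_hw_sem_get(ioc);	/* posts IOCPF_E_SEMLOCKED on success */
}
#endif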
1200
1201/**
1202 * @brief
1203 * Initialize LPU local memory (aka secondary memory / SRAM)
1204 */
1205static void
1206bfa_ioc_lmem_init(struct bfa_ioc *ioc)
1207{
1208 u32 pss_ctl;
1209 int i;
1210#define PSS_LMEM_INIT_TIME 10000
1211
1212 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1213 pss_ctl &= ~__PSS_LMEM_RESET;
1214 pss_ctl |= __PSS_LMEM_INIT_EN;
1215
1216 /*
1217 * i2c workaround 12.5khz clock
1218 */
1219 pss_ctl |= __PSS_I2C_CLK_DIV(3UL);
1220 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1221
1222 /**
1223 * wait for memory initialization to be complete
1224 */
1225 i = 0;
1226 do {
1227 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1228 i++;
1229 } while (!(pss_ctl & __PSS_LMEM_INIT_DONE) && (i < PSS_LMEM_INIT_TIME));
1230
1231 /**
1232 * If memory initialization is not successful, IOC timeout will catch
1233 * such failures.
1234 */
1235 BUG_ON(!(pss_ctl & __PSS_LMEM_INIT_DONE));
1236
1237 pss_ctl &= ~(__PSS_LMEM_INIT_DONE | __PSS_LMEM_INIT_EN);
1238 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1239}
1240
1241static void
1242bfa_ioc_lpu_start(struct bfa_ioc *ioc)
1243{
1244 u32 pss_ctl;
1245
1246 /**
1247 * Take processor out of reset.
1248 */
1249 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1250 pss_ctl &= ~__PSS_LPU0_RESET;
1251
1252 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1253}
1254
1255static void
1256bfa_ioc_lpu_stop(struct bfa_ioc *ioc)
1257{
1258 u32 pss_ctl;
1259
1260 /**
1261 * Put processors in reset.
1262 */
1263 pss_ctl = readl(ioc->ioc_regs.pss_ctl_reg);
1264 pss_ctl |= (__PSS_LPU0_RESET | __PSS_LPU1_RESET);
1265
1266 writel(pss_ctl, ioc->ioc_regs.pss_ctl_reg);
1267}
1268
1269/**
1270 * Get driver and firmware versions.
1271 */
1272void
Rasesh Mody8a891422010-08-25 23:00:27 -07001273bfa_nw_ioc_fwver_get(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001274{
David S. Miller58598542011-04-17 16:51:36 -07001275 u32 pgnum;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001276 u32 loff = 0;
1277 int i;
1278 u32 *fwsig = (u32 *) fwhdr;
1279
1280 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001281 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1282
1283 for (i = 0; i < (sizeof(struct bfi_ioc_image_hdr) / sizeof(u32));
1284 i++) {
1285 fwsig[i] =
1286 swab32(readl((loff) + (ioc->ioc_regs.smem_page_start)));
1287 loff += sizeof(u32);
1288 }
1289}
1290
1291/**
1292 * Returns TRUE if same.
1293 */
1294bool
Rasesh Mody8a891422010-08-25 23:00:27 -07001295bfa_nw_ioc_fwver_cmp(struct bfa_ioc *ioc, struct bfi_ioc_image_hdr *fwhdr)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001296{
1297 struct bfi_ioc_image_hdr *drv_fwhdr;
1298 int i;
1299
1300 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1301 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1302
1303 for (i = 0; i < BFI_IOC_MD5SUM_SZ; i++) {
1304 if (fwhdr->md5sum[i] != drv_fwhdr->md5sum[i])
1305 return false;
1306 }
1307
1308 return true;
1309}
1310
1311/**
1312 * Return true if current running version is valid. Firmware signature and
1313 * execution context (driver/bios) must match.
1314 */
1315static bool
1316bfa_ioc_fwver_valid(struct bfa_ioc *ioc)
1317{
1318 struct bfi_ioc_image_hdr fwhdr, *drv_fwhdr;
1319
Rasesh Mody8a891422010-08-25 23:00:27 -07001320 bfa_nw_ioc_fwver_get(ioc, &fwhdr);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001321 drv_fwhdr = (struct bfi_ioc_image_hdr *)
1322 bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), 0);
1323
1324 if (fwhdr.signature != drv_fwhdr->signature)
1325 return false;
1326
1327 if (fwhdr.exec != drv_fwhdr->exec)
1328 return false;
1329
Rasesh Mody8a891422010-08-25 23:00:27 -07001330 return bfa_nw_ioc_fwver_cmp(ioc, &fwhdr);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001331}
1332
1333/**
1334 * Conditionally flush any pending message from firmware at start.
1335 */
1336static void
1337bfa_ioc_msgflush(struct bfa_ioc *ioc)
1338{
1339 u32 r32;
1340
1341 r32 = readl(ioc->ioc_regs.lpu_mbox_cmd);
1342 if (r32)
1343 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1344}
1345
1346/**
1347 * @img ioc_init_logic.jpg
1348 */
1349static void
1350bfa_ioc_hwinit(struct bfa_ioc *ioc, bool force)
1351{
1352 enum bfi_ioc_state ioc_fwstate;
1353 bool fwvalid;
1354
1355 ioc_fwstate = readl(ioc->ioc_regs.ioc_fwstate);
1356
1357 if (force)
1358 ioc_fwstate = BFI_IOC_UNINIT;
1359
1360 /**
1361 * check if firmware is valid
1362 */
1363 fwvalid = (ioc_fwstate == BFI_IOC_UNINIT) ?
1364 false : bfa_ioc_fwver_valid(ioc);
1365
1366 if (!fwvalid) {
1367 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
1368 return;
1369 }
1370
1371 /**
1372 * If hardware initialization is in progress (initialized by other IOC),
1373 * just wait for an initialization completion interrupt.
1374 */
1375 if (ioc_fwstate == BFI_IOC_INITING) {
1376 ioc->cbfn->reset_cbfn(ioc->bfa);
1377 return;
1378 }
1379
1380 /**
1381 * If IOC function is disabled and firmware version is same,
1382 * just re-enable IOC.
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001383 */
Rasesh Mody2c7d3822010-12-23 21:45:06 +00001384 if (ioc_fwstate == BFI_IOC_DISABLED || ioc_fwstate == BFI_IOC_OP) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001385 /**
1386 * When using MSI-X any pending firmware ready event should
1387 * be flushed. Otherwise MSI-X interrupts are not delivered.
1388 */
1389 bfa_ioc_msgflush(ioc);
1390 ioc->cbfn->reset_cbfn(ioc->bfa);
Rasesh Mody1d32f762010-12-23 21:45:09 +00001391 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FWREADY);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001392 return;
1393 }
1394
1395 /**
1396 * Initialize the h/w for any other states.
1397 */
1398 bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
1399}
1400
1401void
Rasesh Mody8a891422010-08-25 23:00:27 -07001402bfa_nw_ioc_timeout(void *ioc_arg)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001403{
1404 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
1405
1406 bfa_fsm_send_event(ioc, IOC_E_TIMEOUT);
1407}
1408
Rasesh Mody8a891422010-08-25 23:00:27 -07001409static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001410bfa_ioc_mbox_send(struct bfa_ioc *ioc, void *ioc_msg, int len)
1411{
1412 u32 *msgp = (u32 *) ioc_msg;
1413 u32 i;
1414
1415 BUG_ON(!(len <= BFI_IOC_MSGLEN_MAX));
1416
1417 /*
1418 * first write msg to mailbox registers
1419 */
1420 for (i = 0; i < len / sizeof(u32); i++)
1421 writel(cpu_to_le32(msgp[i]),
1422 ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1423
1424 for (; i < BFI_IOC_MSGLEN_MAX / sizeof(u32); i++)
1425 writel(0, ioc->ioc_regs.hfn_mbox + i * sizeof(u32));
1426
1427 /*
1428 * write 1 to mailbox CMD to trigger LPU event
1429 */
1430 writel(1, ioc->ioc_regs.hfn_mbox_cmd);
1431 (void) readl(ioc->ioc_regs.hfn_mbox_cmd);
1432}
1433
1434static void
1435bfa_ioc_send_enable(struct bfa_ioc *ioc)
1436{
1437 struct bfi_ioc_ctrl_req enable_req;
1438 struct timeval tv;
1439
1440 bfi_h2i_set(enable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_ENABLE_REQ,
1441 bfa_ioc_portid(ioc));
1442 enable_req.ioc_class = ioc->ioc_mc;
1443 do_gettimeofday(&tv);
1444 enable_req.tv_sec = ntohl(tv.tv_sec);
1445 bfa_ioc_mbox_send(ioc, &enable_req, sizeof(struct bfi_ioc_ctrl_req));
1446}
1447
1448static void
1449bfa_ioc_send_disable(struct bfa_ioc *ioc)
1450{
1451 struct bfi_ioc_ctrl_req disable_req;
1452
1453 bfi_h2i_set(disable_req.mh, BFI_MC_IOC, BFI_IOC_H2I_DISABLE_REQ,
1454 bfa_ioc_portid(ioc));
1455 bfa_ioc_mbox_send(ioc, &disable_req, sizeof(struct bfi_ioc_ctrl_req));
1456}
1457
1458static void
1459bfa_ioc_send_getattr(struct bfa_ioc *ioc)
1460{
1461 struct bfi_ioc_getattr_req attr_req;
1462
1463 bfi_h2i_set(attr_req.mh, BFI_MC_IOC, BFI_IOC_H2I_GETATTR_REQ,
1464 bfa_ioc_portid(ioc));
1465 bfa_dma_be_addr_set(attr_req.attr_addr, ioc->attr_dma.pa);
1466 bfa_ioc_mbox_send(ioc, &attr_req, sizeof(attr_req));
1467}
1468
1469void
Rasesh Mody8a891422010-08-25 23:00:27 -07001470bfa_nw_ioc_hb_check(void *cbarg)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001471{
1472 struct bfa_ioc *ioc = cbarg;
1473 u32 hb_count;
1474
1475 hb_count = readl(ioc->ioc_regs.heartbeat);
1476 if (ioc->hb_count == hb_count) {
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001477 bfa_ioc_recover(ioc);
1478 return;
1479 } else {
1480 ioc->hb_count = hb_count;
1481 }
1482
1483 bfa_ioc_mbox_poll(ioc);
1484 mod_timer(&ioc->hb_timer, jiffies +
1485 msecs_to_jiffies(BFA_IOC_HB_TOV));
1486}
1487
1488static void
1489bfa_ioc_hb_monitor(struct bfa_ioc *ioc)
1490{
1491 ioc->hb_count = readl(ioc->ioc_regs.heartbeat);
1492 mod_timer(&ioc->hb_timer, jiffies +
1493 msecs_to_jiffies(BFA_IOC_HB_TOV));
1494}
1495
1496static void
1497bfa_ioc_hb_stop(struct bfa_ioc *ioc)
1498{
1499 del_timer(&ioc->hb_timer);
1500}
1501
1502/**
1503 * @brief
1504 * Initiate a full firmware download.
1505 */
1506static void
1507bfa_ioc_download_fw(struct bfa_ioc *ioc, u32 boot_type,
1508 u32 boot_param)
1509{
1510 u32 *fwimg;
David S. Miller58598542011-04-17 16:51:36 -07001511 u32 pgnum;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001512 u32 loff = 0;
1513 u32 chunkno = 0;
1514 u32 i;
1515
1516 /**
1517 * Initialize LMEM first before code download
1518 */
1519 bfa_ioc_lmem_init(ioc);
1520
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001521 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc), chunkno);
1522
1523 pgnum = bfa_ioc_smem_pgnum(ioc, loff);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001524
1525 writel(pgnum, ioc->ioc_regs.host_page_num_fn);
1526
1527 for (i = 0; i < bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)); i++) {
1528 if (BFA_IOC_FLASH_CHUNK_NO(i) != chunkno) {
1529 chunkno = BFA_IOC_FLASH_CHUNK_NO(i);
1530 fwimg = bfa_cb_image_get_chunk(BFA_IOC_FWIMG_TYPE(ioc),
1531 BFA_IOC_FLASH_CHUNK_ADDR(chunkno));
1532 }
1533
1534 /**
1535 * write smem
1536 */
1537 writel((swab32(fwimg[BFA_IOC_FLASH_OFFSET_IN_CHUNK(i)])),
1538 ((ioc->ioc_regs.smem_page_start) + (loff)));
1539
1540 loff += sizeof(u32);
1541
1542 /**
1543 * handle page offset wrap around
1544 */
1545 loff = PSS_SMEM_PGOFF(loff);
1546 if (loff == 0) {
1547 pgnum++;
1548 writel(pgnum,
1549 ioc->ioc_regs.host_page_num_fn);
1550 }
1551 }
1552
1553 writel(bfa_ioc_smem_pgnum(ioc, 0),
1554 ioc->ioc_regs.host_page_num_fn);
1555
1556 /*
1557 * Set boot type and boot param at the end.
1558 */
1559 writel((swab32(swab32(boot_type))), ((ioc->ioc_regs.smem_page_start)
1560 + (BFI_BOOT_TYPE_OFF)));
1561 writel((swab32(swab32(boot_param))), ((ioc->ioc_regs.smem_page_start)
1562 + (BFI_BOOT_PARAM_OFF)));
1563}
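/*
 * Addressing sketch (hypothetical helper): smem is reached through a sliding
 * page window -- bfa_ioc_smem_pgnum()/bfa_ioc_smem_pgoff() split a flash
 * offset into a page number (programmed into host_page_num_fn) and an offset
 * within the window, and the loop above bumps the page whenever the offset
 * wraps to zero.  Writing a single word at an arbitrary offset follows the
 * same pattern:
 */
#if 0
static void example_smem_write_word(struct bfa_ioc *ioc, u32 fmaddr, u32 word)
{
	writel(bfa_ioc_smem_pgnum(ioc, fmaddr), ioc->ioc_regs.host_page_num_fn);
	writel(swab32(word),
	       ioc->ioc_regs.smem_page_start + bfa_ioc_smem_pgoff(ioc, fmaddr));
	/* restore the default window, as the download code does */
	writel(bfa_ioc_smem_pgnum(ioc, 0), ioc->ioc_regs.host_page_num_fn);
}
#endif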
1564
1565static void
1566bfa_ioc_reset(struct bfa_ioc *ioc, bool force)
1567{
1568 bfa_ioc_hwinit(ioc, force);
1569}
1570
1571/**
1572 * @brief
1573 * Update BFA configuration from firmware configuration.
1574 */
1575static void
1576bfa_ioc_getattr_reply(struct bfa_ioc *ioc)
1577{
1578 struct bfi_ioc_attr *attr = ioc->attr;
1579
1580 attr->adapter_prop = ntohl(attr->adapter_prop);
1581 attr->card_type = ntohl(attr->card_type);
1582 attr->maxfrsize = ntohs(attr->maxfrsize);
1583
1584 bfa_fsm_send_event(ioc, IOC_E_FWRSP_GETATTR);
1585}
1586
1587/**
1588 * Attach time initialization of mbox logic.
1589 */
1590static void
1591bfa_ioc_mbox_attach(struct bfa_ioc *ioc)
1592{
1593 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1594 int mc;
1595
1596 INIT_LIST_HEAD(&mod->cmd_q);
1597 for (mc = 0; mc < BFI_MC_MAX; mc++) {
1598 mod->mbhdlr[mc].cbfn = NULL;
1599 mod->mbhdlr[mc].cbarg = ioc->bfa;
1600 }
1601}
1602
1603/**
1604 * Mbox poll timer -- restarts any pending mailbox requests.
1605 */
1606static void
1607bfa_ioc_mbox_poll(struct bfa_ioc *ioc)
1608{
1609 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1610 struct bfa_mbox_cmd *cmd;
1611 u32 stat;
1612
1613 /**
1614 * If no command pending, do nothing
1615 */
1616 if (list_empty(&mod->cmd_q))
1617 return;
1618
1619 /**
1620 * If previous command is not yet fetched by firmware, do nothing
1621 */
1622 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1623 if (stat)
1624 return;
1625
1626 /**
1627 * Enqueue command to firmware.
1628 */
1629 bfa_q_deq(&mod->cmd_q, &cmd);
1630 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1631}
1632
1633/**
1634 * Cleanup any pending requests.
1635 */
1636static void
1637bfa_ioc_mbox_hbfail(struct bfa_ioc *ioc)
1638{
1639 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1640 struct bfa_mbox_cmd *cmd;
1641
1642 while (!list_empty(&mod->cmd_q))
1643 bfa_q_deq(&mod->cmd_q, &cmd);
1644}
1645
Rasesh Mody1d32f762010-12-23 21:45:09 +00001646static void
1647bfa_ioc_fail_notify(struct bfa_ioc *ioc)
1648{
1649 struct list_head *qe;
1650 struct bfa_ioc_hbfail_notify *notify;
1651
1652 /**
1653 * Notify driver and common modules registered for notification.
1654 */
1655 ioc->cbfn->hbfail_cbfn(ioc->bfa);
1656 list_for_each(qe, &ioc->hb_notify_q) {
1657 notify = (struct bfa_ioc_hbfail_notify *) qe;
1658 notify->cbfn(notify->cbarg);
1659 }
1660}
1661
1662static void
1663bfa_ioc_pf_enabled(struct bfa_ioc *ioc)
1664{
1665 bfa_fsm_send_event(ioc, IOC_E_ENABLED);
1666}
1667
1668static void
1669bfa_ioc_pf_disabled(struct bfa_ioc *ioc)
1670{
1671 bfa_fsm_send_event(ioc, IOC_E_DISABLED);
1672}
1673
1674static void
1675bfa_ioc_pf_initfailed(struct bfa_ioc *ioc)
1676{
1677 bfa_fsm_send_event(ioc, IOC_E_INITFAILED);
1678}
1679
1680static void
1681bfa_ioc_pf_failed(struct bfa_ioc *ioc)
1682{
1683 bfa_fsm_send_event(ioc, IOC_E_PFAILED);
1684}
1685
1686static void
1687bfa_ioc_pf_fwmismatch(struct bfa_ioc *ioc)
1688{
1689 /**
1690 * Provide enable completion callback and AEN notification.
1691 */
1692 ioc->cbfn->enable_cbfn(ioc->bfa, BFA_STATUS_IOC_FAILURE);
1693}
1694
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001695/**
1696 * IOC public
1697 */
Rasesh Mody8a891422010-08-25 23:00:27 -07001698static enum bfa_status
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001699bfa_ioc_pll_init(struct bfa_ioc *ioc)
1700{
1701 /*
1702 * Hold semaphore so that nobody can access the chip during init.
1703 */
Rasesh Mody8a891422010-08-25 23:00:27 -07001704 bfa_nw_ioc_sem_get(ioc->ioc_regs.ioc_init_sem_reg);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001705
1706 bfa_ioc_pll_init_asic(ioc);
1707
1708 ioc->pllinit = true;
1709 /*
1710 * release semaphore.
1711 */
Rasesh Mody8a891422010-08-25 23:00:27 -07001712 bfa_nw_ioc_sem_release(ioc->ioc_regs.ioc_init_sem_reg);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001713
1714 return BFA_STATUS_OK;
1715}
1716
1717/**
1718 * Interface used by diag module to do firmware boot with memory test
1719 * as the entry vector.
1720 */
Rasesh Mody8a891422010-08-25 23:00:27 -07001721static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001722bfa_ioc_boot(struct bfa_ioc *ioc, u32 boot_type, u32 boot_param)
1723{
1724 void __iomem *rb;
1725
1726 bfa_ioc_stats(ioc, ioc_boots);
1727
1728 if (bfa_ioc_pll_init(ioc) != BFA_STATUS_OK)
1729 return;
1730
1731 /**
1732 * Initialize IOC state of all functions on a chip reset.
1733 */
1734 rb = ioc->pcidev.pci_bar_kva;
1735 if (boot_param == BFI_BOOT_TYPE_MEMTEST) {
1736 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC0_STATE_REG));
1737 writel(BFI_IOC_MEMTEST, (rb + BFA_IOC1_STATE_REG));
1738 } else {
1739 writel(BFI_IOC_INITING, (rb + BFA_IOC0_STATE_REG));
1740 writel(BFI_IOC_INITING, (rb + BFA_IOC1_STATE_REG));
1741 }
1742
1743 bfa_ioc_msgflush(ioc);
1744 bfa_ioc_download_fw(ioc, boot_type, boot_param);
1745
1746 /**
1747 * Enable interrupts just before starting LPU
1748 */
1749 ioc->cbfn->reset_cbfn(ioc->bfa);
1750 bfa_ioc_lpu_start(ioc);
1751}
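/*
 * Usage sketch: callers in this file boot with BFI_BOOT_TYPE_NORMAL and pass
 * the PCI device id as the boot parameter; a diagnostic memory-test boot
 * would pass BFI_BOOT_TYPE_MEMTEST instead, which makes the code above
 * pre-set both IOC state registers to BFI_IOC_MEMTEST before the download.
 * The boot parameter used by the diag path is not shown in this file and is
 * left symbolic below.
 */
#if 0
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_NORMAL, ioc->pcidev.device_id);
	bfa_ioc_boot(ioc, BFI_BOOT_TYPE_MEMTEST, memtest_boot_param);
#endif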
1752
1753/**
1754 * Enable/disable IOC failure auto recovery.
1755 */
1756void
Rasesh Mody8a891422010-08-25 23:00:27 -07001757bfa_nw_ioc_auto_recover(bool auto_recover)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001758{
Rasesh Mody8a891422010-08-25 23:00:27 -07001759 bfa_nw_auto_recover = auto_recover;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001760}
1761
Rasesh Mody8a891422010-08-25 23:00:27 -07001762static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001763bfa_ioc_msgget(struct bfa_ioc *ioc, void *mbmsg)
1764{
1765 u32 *msgp = mbmsg;
1766 u32 r32;
1767 int i;
1768
1769 /**
1770 * read the MBOX msg
1771 */
1772 for (i = 0; i < (sizeof(union bfi_ioc_i2h_msg_u) / sizeof(u32));
1773 i++) {
1774 r32 = readl(ioc->ioc_regs.lpu_mbox +
1775 i * sizeof(u32));
1776 msgp[i] = htonl(r32);
1777 }
1778
1779 /**
1780 * turn off mailbox interrupt by clearing mailbox status
1781 */
1782 writel(1, ioc->ioc_regs.lpu_mbox_cmd);
1783 readl(ioc->ioc_regs.lpu_mbox_cmd);
1784}
1785
Rasesh Mody8a891422010-08-25 23:00:27 -07001786static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001787bfa_ioc_isr(struct bfa_ioc *ioc, struct bfi_mbmsg *m)
1788{
1789 union bfi_ioc_i2h_msg_u *msg;
Rasesh Mody1d32f762010-12-23 21:45:09 +00001790 struct bfa_iocpf *iocpf = &ioc->iocpf;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001791
1792 msg = (union bfi_ioc_i2h_msg_u *) m;
1793
1794 bfa_ioc_stats(ioc, ioc_isrs);
1795
1796 switch (msg->mh.msg_id) {
1797 case BFI_IOC_I2H_HBEAT:
1798 break;
1799
1800 case BFI_IOC_I2H_READY_EVENT:
Rasesh Mody1d32f762010-12-23 21:45:09 +00001801 bfa_fsm_send_event(iocpf, IOCPF_E_FWREADY);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001802 break;
1803
1804 case BFI_IOC_I2H_ENABLE_REPLY:
Rasesh Mody1d32f762010-12-23 21:45:09 +00001805 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_ENABLE);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001806 break;
1807
1808 case BFI_IOC_I2H_DISABLE_REPLY:
Rasesh Mody1d32f762010-12-23 21:45:09 +00001809 bfa_fsm_send_event(iocpf, IOCPF_E_FWRSP_DISABLE);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001810 break;
1811
1812 case BFI_IOC_I2H_GETATTR_REPLY:
1813 bfa_ioc_getattr_reply(ioc);
1814 break;
1815
1816 default:
1817 BUG_ON(1);
1818 }
1819}
1820
1821/**
1822 * IOC attach time initialization and setup.
1823 *
1824 * @param[in] ioc memory for IOC
1825 * @param[in] bfa driver instance structure
1826 */
1827void
Rasesh Mody8a891422010-08-25 23:00:27 -07001828bfa_nw_ioc_attach(struct bfa_ioc *ioc, void *bfa, struct bfa_ioc_cbfn *cbfn)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001829{
1830 ioc->bfa = bfa;
1831 ioc->cbfn = cbfn;
1832 ioc->fcmode = false;
1833 ioc->pllinit = false;
1834 ioc->dbg_fwsave_once = true;
Rasesh Mody1d32f762010-12-23 21:45:09 +00001835 ioc->iocpf.ioc = ioc;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001836
1837 bfa_ioc_mbox_attach(ioc);
1838 INIT_LIST_HEAD(&ioc->hb_notify_q);
1839
Rasesh Mody1d32f762010-12-23 21:45:09 +00001840 bfa_fsm_set_state(ioc, bfa_ioc_sm_uninit);
1841 bfa_fsm_send_event(ioc, IOC_E_RESET);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001842}
1843
1844/**
1845 * Driver detach time IOC cleanup.
1846 */
1847void
Rasesh Mody8a891422010-08-25 23:00:27 -07001848bfa_nw_ioc_detach(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001849{
1850 bfa_fsm_send_event(ioc, IOC_E_DETACH);
1851}
1852
1853/**
1854 * Setup IOC PCI properties.
1855 *
1856 * @param[in] pcidev PCI device information for this IOC
1857 */
1858void
Rasesh Mody8a891422010-08-25 23:00:27 -07001859bfa_nw_ioc_pci_init(struct bfa_ioc *ioc, struct bfa_pcidev *pcidev,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001860 enum bfi_mclass mc)
1861{
1862 ioc->ioc_mc = mc;
1863 ioc->pcidev = *pcidev;
1864 ioc->ctdev = bfa_asic_id_ct(ioc->pcidev.device_id);
1865 ioc->cna = ioc->ctdev && !ioc->fcmode;
1866
Rasesh Mody8a891422010-08-25 23:00:27 -07001867 bfa_nw_ioc_set_ct_hwif(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001868
1869 bfa_ioc_map_port(ioc);
1870 bfa_ioc_reg_init(ioc);
1871}
1872
1873/**
1874 * Initialize IOC dma memory
1875 *
1876 * @param[in] dm_kva kernel virtual address of IOC dma memory
1877 * @param[in] dm_pa physical address of IOC dma memory
1878 */
1879void
Rasesh Mody8a891422010-08-25 23:00:27 -07001880bfa_nw_ioc_mem_claim(struct bfa_ioc *ioc, u8 *dm_kva, u64 dm_pa)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001881{
1882 /**
1883 * dma memory for firmware attribute
1884 */
1885 ioc->attr_dma.kva = dm_kva;
1886 ioc->attr_dma.pa = dm_pa;
1887 ioc->attr = (struct bfi_ioc_attr *) dm_kva;
1888}
1889
1890/**
1891 * Return size of dma memory required.
1892 */
1893u32
Rasesh Mody8a891422010-08-25 23:00:27 -07001894bfa_nw_ioc_meminfo(void)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001895{
1896 return roundup(sizeof(struct bfi_ioc_attr), BFA_DMA_ALIGN_SZ);
1897}
1898
1899void
Rasesh Mody8a891422010-08-25 23:00:27 -07001900bfa_nw_ioc_enable(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001901{
1902 bfa_ioc_stats(ioc, ioc_enables);
1903 ioc->dbg_fwsave_once = true;
1904
1905 bfa_fsm_send_event(ioc, IOC_E_ENABLE);
1906}
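/*
 * Bring-up order sketch: how a consumer (e.g. the Ethernet driver) is
 * expected to use this API, pieced together from the functions above.  The
 * BFI_MC_LL message class and the dma_alloc_coherent() allocation are
 * assumptions of this sketch, not taken from this file.
 */
#if 0
static int example_ioc_bringup(struct bfa_ioc *ioc, void *bfad,
			       struct bfa_ioc_cbfn *cbfn,
			       struct bfa_pcidev *pcidev, struct pci_dev *pdev)
{
	u32 sz = bfa_nw_ioc_meminfo();
	dma_addr_t pa;
	u8 *kva = dma_alloc_coherent(&pdev->dev, sz, &pa, GFP_KERNEL);

	if (!kva)
		return -ENOMEM;

	bfa_nw_ioc_attach(ioc, bfad, cbfn);		/* FSM enters reset */
	bfa_nw_ioc_pci_init(ioc, pcidev, BFI_MC_LL);	/* mclass assumed */
	bfa_nw_ioc_mem_claim(ioc, kva, pa);		/* attr DMA area */
	bfa_nw_ioc_enable(ioc);		/* completion via cbfn->enable_cbfn() */
	return 0;
}
#endif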
1907
1908void
Rasesh Mody8a891422010-08-25 23:00:27 -07001909bfa_nw_ioc_disable(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001910{
1911 bfa_ioc_stats(ioc, ioc_disables);
1912 bfa_fsm_send_event(ioc, IOC_E_DISABLE);
1913}
1914
Rasesh Mody8a891422010-08-25 23:00:27 -07001915static u32
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001916bfa_ioc_smem_pgnum(struct bfa_ioc *ioc, u32 fmaddr)
1917{
1918 return PSS_SMEM_PGNUM(ioc->ioc_regs.smem_pg0, fmaddr);
1919}
1920
Rasesh Mody8a891422010-08-25 23:00:27 -07001921static u32
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001922bfa_ioc_smem_pgoff(struct bfa_ioc *ioc, u32 fmaddr)
1923{
1924 return PSS_SMEM_PGOFF(fmaddr);
1925}
1926
1927/**
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001928 * Register mailbox message handler function, to be called by common modules
1929 */
1930void
Rasesh Mody8a891422010-08-25 23:00:27 -07001931bfa_nw_ioc_mbox_regisr(struct bfa_ioc *ioc, enum bfi_mclass mc,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001932 bfa_ioc_mbox_mcfunc_t cbfn, void *cbarg)
1933{
1934 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1935
1936 mod->mbhdlr[mc].cbfn = cbfn;
1937 mod->mbhdlr[mc].cbarg = cbarg;
1938}
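/*
 * Usage sketch (illustrative only): a dependent module registers one handler
 * per message class it owns; the handler is later invoked from
 * bfa_nw_ioc_mbox_isr() with the received message. The handler name is a
 * placeholder; BFI_MC_LL is the class the Ethernet driver uses.
 */
#if 0
static void sample_ll_isr(void *cbarg, struct bfi_mbmsg *msg)
{
	/* dispatch on msg->mh.msg_id and complete the waiting request */
}

static void sample_mbox_setup(struct bfa_ioc *ioc, void *cbarg)
{
	bfa_nw_ioc_mbox_regisr(ioc, BFI_MC_LL, sample_ll_isr, cbarg);
}
#endif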
1939
1940/**
1941 * Queue a mailbox command request to firmware. If the mailbox is busy, the
1942 * command is queued and sent later by the poll timer. Callers must serialize.
1943 *
1944 * @param[in] ioc IOC instance
1945 * @param[in] cmd Mailbox command
1946 */
1947void
Rasesh Mody8a891422010-08-25 23:00:27 -07001948bfa_nw_ioc_mbox_queue(struct bfa_ioc *ioc, struct bfa_mbox_cmd *cmd)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001949{
1950 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1951 u32 stat;
1952
1953 /**
1954 * If a previous command is pending, queue new command
1955 */
1956 if (!list_empty(&mod->cmd_q)) {
1957 list_add_tail(&cmd->qe, &mod->cmd_q);
1958 return;
1959 }
1960
1961 /**
1962 * If mailbox is busy, queue command for poll timer
1963 */
1964 stat = readl(ioc->ioc_regs.hfn_mbox_cmd);
1965 if (stat) {
1966 list_add_tail(&cmd->qe, &mod->cmd_q);
1967 return;
1968 }
1969
1970 /**
1971 * mailbox is free -- queue command to firmware
1972 */
1973 bfa_ioc_mbox_send(ioc, cmd->msg, sizeof(cmd->msg));
1974}
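/*
 * Usage sketch (illustrative only): a caller builds a BFI request in the
 * msg[] payload of a bfa_mbox_cmd and queues it. Because the command may sit
 * on the pending queue until the mailbox frees up, the bfa_mbox_cmd must
 * outlive this call (it normally lives in a persistent driver structure, not
 * on the stack). bfi_h2i_set() and the opcode value are assumed from bfi.h.
 */
#if 0
static struct bfa_mbox_cmd sample_cmd;	/* persistent, one outstanding cmd */

static void sample_post_request(struct bfa_ioc *ioc, u8 opcode)
{
	struct bfi_mhdr *mh = (struct bfi_mhdr *)&sample_cmd.msg[0];

	bfi_h2i_set(*mh, BFI_MC_LL, opcode, 0);
	/* ... fill in the remainder of the request payload ... */
	bfa_nw_ioc_mbox_queue(ioc, &sample_cmd);
}
#endif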
1975
1976/**
1977 * Handle mailbox interrupts
1978 */
1979void
Rasesh Mody8a891422010-08-25 23:00:27 -07001980bfa_nw_ioc_mbox_isr(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001981{
1982 struct bfa_ioc_mbox_mod *mod = &ioc->mbox_mod;
1983 struct bfi_mbmsg m;
1984 int mc;
1985
1986 bfa_ioc_msgget(ioc, &m);
1987
1988 /**
1989 * Treat IOC message class as special.
1990 */
1991 mc = m.mh.msg_class;
1992 if (mc == BFI_MC_IOC) {
1993 bfa_ioc_isr(ioc, &m);
1994 return;
1995 }
1996
Dan Carpenter07465562010-09-19 11:25:54 -07001997 if ((mc >= BFI_MC_MAX) || (mod->mbhdlr[mc].cbfn == NULL))
Rasesh Mody8b230ed2010-08-23 20:24:12 -07001998 return;
1999
2000 mod->mbhdlr[mc].cbfn(mod->mbhdlr[mc].cbarg, &m);
2001}
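/*
 * Usage sketch (illustrative only): the Ethernet driver's interrupt handler
 * calls bfa_nw_ioc_mbox_isr() once it has determined that the mailbox
 * interrupt bit is asserted. The wrapper below is a hypothetical example,
 * not the driver's real ISR.
 */
#if 0
static irqreturn_t sample_mbox_irq_handler(int irq, void *data)
{
	struct bfa_ioc *ioc = data;

	bfa_nw_ioc_mbox_isr(ioc);
	return IRQ_HANDLED;
}
#endif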
2002
2003void
Rasesh Mody8a891422010-08-25 23:00:27 -07002004bfa_nw_ioc_error_isr(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002005{
2006 bfa_fsm_send_event(ioc, IOC_E_HWERROR);
2007}
2008
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002009/**
2010 * Add to IOC heartbeat failure notification queue. To be used by common
2011 * modules such as cee, port, diag.
2012 */
2013void
Rasesh Mody8a891422010-08-25 23:00:27 -07002014bfa_nw_ioc_hbfail_register(struct bfa_ioc *ioc,
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002015 struct bfa_ioc_hbfail_notify *notify)
2016{
2017 list_add_tail(&notify->qe, &ioc->hb_notify_q);
2018}
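/*
 * Usage sketch (illustrative only): a dependent module fills a
 * bfa_ioc_hbfail_notify element with its failure callback and registers it.
 * bfa_ioc_hbfail_init() is assumed from bfa_ioc.h; the module structure and
 * callback names are placeholders.
 */
#if 0
static void sample_module_hbfail(void *cbarg)
{
	/* fail any requests this module has outstanding with firmware */
}

static void sample_module_init(struct sample_module *mod, struct bfa_ioc *ioc)
{
	bfa_ioc_hbfail_init(&mod->hbfail, sample_module_hbfail, mod);
	bfa_nw_ioc_hbfail_register(ioc, &mod->hbfail);
}
#endif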
2019
2020#define BFA_MFG_NAME "Brocade"
Rasesh Mody8a891422010-08-25 23:00:27 -07002021static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002022bfa_ioc_get_adapter_attr(struct bfa_ioc *ioc,
2023 struct bfa_adapter_attr *ad_attr)
2024{
2025 struct bfi_ioc_attr *ioc_attr;
2026
2027 ioc_attr = ioc->attr;
2028
2029 bfa_ioc_get_adapter_serial_num(ioc, ad_attr->serial_num);
2030 bfa_ioc_get_adapter_fw_ver(ioc, ad_attr->fw_ver);
2031 bfa_ioc_get_adapter_optrom_ver(ioc, ad_attr->optrom_ver);
2032 bfa_ioc_get_adapter_manufacturer(ioc, ad_attr->manufacturer);
2033 memcpy(&ad_attr->vpd, &ioc_attr->vpd,
2034 sizeof(struct bfa_mfg_vpd));
2035
2036 ad_attr->nports = bfa_ioc_get_nports(ioc);
2037 ad_attr->max_speed = bfa_ioc_speed_sup(ioc);
2038
2039 bfa_ioc_get_adapter_model(ioc, ad_attr->model);
2040 /* For now, model descr uses same model string */
2041 bfa_ioc_get_adapter_model(ioc, ad_attr->model_descr);
2042
2043 ad_attr->card_type = ioc_attr->card_type;
2044 ad_attr->is_mezz = bfa_mfg_is_mezz(ioc_attr->card_type);
2045
2046 if (BFI_ADAPTER_IS_SPECIAL(ioc_attr->adapter_prop))
2047 ad_attr->prototype = 1;
2048 else
2049 ad_attr->prototype = 0;
2050
2051 ad_attr->pwwn = bfa_ioc_get_pwwn(ioc);
Rasesh Mody8a891422010-08-25 23:00:27 -07002052 ad_attr->mac = bfa_nw_ioc_get_mac(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002053
2054 ad_attr->pcie_gen = ioc_attr->pcie_gen;
2055 ad_attr->pcie_lanes = ioc_attr->pcie_lanes;
2056 ad_attr->pcie_lanes_orig = ioc_attr->pcie_lanes_orig;
2057 ad_attr->asic_rev = ioc_attr->asic_rev;
2058
2059 bfa_ioc_get_pci_chip_rev(ioc, ad_attr->hw_ver);
2060
2061 ad_attr->cna_capable = ioc->cna;
2062 ad_attr->trunk_capable = (ad_attr->nports > 1) && !ioc->cna;
2063}
2064
Rasesh Mody8a891422010-08-25 23:00:27 -07002065static enum bfa_ioc_type
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002066bfa_ioc_get_type(struct bfa_ioc *ioc)
2067{
2068 if (!ioc->ctdev || ioc->fcmode)
2069 return BFA_IOC_TYPE_FC;
2070 else if (ioc->ioc_mc == BFI_MC_IOCFC)
2071 return BFA_IOC_TYPE_FCoE;
2072 else if (ioc->ioc_mc == BFI_MC_LL)
2073 return BFA_IOC_TYPE_LL;
2074 else {
2075 BUG_ON(!(ioc->ioc_mc == BFI_MC_LL));
2076 return BFA_IOC_TYPE_LL;
2077 }
2078}
2079
Rasesh Mody8a891422010-08-25 23:00:27 -07002080static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002081bfa_ioc_get_adapter_serial_num(struct bfa_ioc *ioc, char *serial_num)
2082{
2083 memset(serial_num, 0, BFA_ADAPTER_SERIAL_NUM_LEN);
2084 memcpy(serial_num,
2085 (void *)ioc->attr->brcd_serialnum,
2086 BFA_ADAPTER_SERIAL_NUM_LEN);
2087}
2088
Rasesh Mody8a891422010-08-25 23:00:27 -07002089static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002090bfa_ioc_get_adapter_fw_ver(struct bfa_ioc *ioc, char *fw_ver)
2091{
2092 memset(fw_ver, 0, BFA_VERSION_LEN);
2093 memcpy(fw_ver, ioc->attr->fw_version, BFA_VERSION_LEN);
2094}
2095
Rasesh Mody8a891422010-08-25 23:00:27 -07002096static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002097bfa_ioc_get_pci_chip_rev(struct bfa_ioc *ioc, char *chip_rev)
2098{
2099 BUG_ON(!(chip_rev));
2100
2101 memset(chip_rev, 0, BFA_IOC_CHIP_REV_LEN);
2102
2103 chip_rev[0] = 'R';
2104 chip_rev[1] = 'e';
2105 chip_rev[2] = 'v';
2106 chip_rev[3] = '-';
2107 chip_rev[4] = ioc->attr->asic_rev;
2108 chip_rev[5] = '\0';
2109}
2110
Rasesh Mody8a891422010-08-25 23:00:27 -07002111static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002112bfa_ioc_get_adapter_optrom_ver(struct bfa_ioc *ioc, char *optrom_ver)
2113{
2114 memset(optrom_ver, 0, BFA_VERSION_LEN);
2115 memcpy(optrom_ver, ioc->attr->optrom_version,
2116 BFA_VERSION_LEN);
2117}
2118
Rasesh Mody8a891422010-08-25 23:00:27 -07002119static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002120bfa_ioc_get_adapter_manufacturer(struct bfa_ioc *ioc, char *manufacturer)
2121{
2122 memset(manufacturer, 0, BFA_ADAPTER_MFG_NAME_LEN);
2123 memcpy(manufacturer, BFA_MFG_NAME, BFA_ADAPTER_MFG_NAME_LEN);
2124}
2125
Rasesh Mody8a891422010-08-25 23:00:27 -07002126static void
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002127bfa_ioc_get_adapter_model(struct bfa_ioc *ioc, char *model)
2128{
2129 struct bfi_ioc_attr *ioc_attr;
2130
2131 BUG_ON(!(model));
2132 memset(model, 0, BFA_ADAPTER_MODEL_NAME_LEN);
2133
2134 ioc_attr = ioc->attr;
2135
2136 /**
2137 * model name
2138 */
2139 snprintf(model, BFA_ADAPTER_MODEL_NAME_LEN, "%s-%u",
2140 BFA_MFG_NAME, ioc_attr->card_type);
2141}
2142
Rasesh Mody8a891422010-08-25 23:00:27 -07002143static enum bfa_ioc_state
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002144bfa_ioc_get_state(struct bfa_ioc *ioc)
2145{
Rasesh Mody1d32f762010-12-23 21:45:09 +00002146 enum bfa_iocpf_state iocpf_st;
2147 enum bfa_ioc_state ioc_st = bfa_sm_to_state(ioc_sm_table, ioc->fsm);
2148
2149 if (ioc_st == BFA_IOC_ENABLING ||
2150 ioc_st == BFA_IOC_FAIL || ioc_st == BFA_IOC_INITFAIL) {
2151
2152 iocpf_st = bfa_sm_to_state(iocpf_sm_table, ioc->iocpf.fsm);
2153
2154 switch (iocpf_st) {
2155 case BFA_IOCPF_SEMWAIT:
2156 ioc_st = BFA_IOC_SEMWAIT;
2157 break;
2158
2159 case BFA_IOCPF_HWINIT:
2160 ioc_st = BFA_IOC_HWINIT;
2161 break;
2162
2163 case BFA_IOCPF_FWMISMATCH:
2164 ioc_st = BFA_IOC_FWMISMATCH;
2165 break;
2166
2167 case BFA_IOCPF_FAIL:
2168 ioc_st = BFA_IOC_FAIL;
2169 break;
2170
2171 case BFA_IOCPF_INITFAIL:
2172 ioc_st = BFA_IOC_INITFAIL;
2173 break;
2174
2175 default:
2176 break;
2177 }
2178 }
2179 return ioc_st;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002180}
2181
2182void
Rasesh Mody8a891422010-08-25 23:00:27 -07002183bfa_nw_ioc_get_attr(struct bfa_ioc *ioc, struct bfa_ioc_attr *ioc_attr)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002184{
2185 memset((void *)ioc_attr, 0, sizeof(struct bfa_ioc_attr));
2186
2187 ioc_attr->state = bfa_ioc_get_state(ioc);
2188 ioc_attr->port_id = ioc->port_id;
2189
2190 ioc_attr->ioc_type = bfa_ioc_get_type(ioc);
2191
2192 bfa_ioc_get_adapter_attr(ioc, &ioc_attr->adapter_attr);
2193
2194 ioc_attr->pci_attr.device_id = ioc->pcidev.device_id;
2195 ioc_attr->pci_attr.pcifn = ioc->pcidev.pci_func;
2196 bfa_ioc_get_pci_chip_rev(ioc, ioc_attr->pci_attr.chip_rev);
2197}
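/*
 * Usage sketch (illustrative only): callers typically allocate the fairly
 * large bfa_ioc_attr snapshot from the heap, fill it here, and report fields
 * such as the firmware version through ethtool. Names are placeholders.
 */
#if 0
static void sample_report_fw_ver(struct bfa_ioc *ioc, char *buf, size_t len)
{
	struct bfa_ioc_attr *attr;

	attr = kzalloc(sizeof(*attr), GFP_KERNEL);
	if (!attr)
		return;

	bfa_nw_ioc_get_attr(ioc, attr);
	strlcpy(buf, attr->adapter_attr.fw_ver, len);
	kfree(attr);
}
#endif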
2198
2199/**
2200 * WWN public
2201 */
Rasesh Mody8a891422010-08-25 23:00:27 -07002202static u64
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002203bfa_ioc_get_pwwn(struct bfa_ioc *ioc)
2204{
2205 return ioc->attr->pwwn;
2206}
2207
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002208mac_t
Rasesh Mody8a891422010-08-25 23:00:27 -07002209bfa_nw_ioc_get_mac(struct bfa_ioc *ioc)
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002210{
Rasesh Mody2c7d3822010-12-23 21:45:06 +00002211 return ioc->attr->mac;
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002212}
2213
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002214/**
2215 * Firmware failure detected. Start recovery actions.
2216 */
2217static void
2218bfa_ioc_recover(struct bfa_ioc *ioc)
2219{
Rasesh Mody1e581482011-04-04 08:29:59 +00002220 pr_crit("Heart Beat of IOC has failed\n");
2221 bfa_ioc_stats(ioc, ioc_hbfails);
2222 bfa_fsm_send_event(ioc, IOC_E_HBFAIL);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002223}
2224
2225static void
2226bfa_ioc_check_attr_wwns(struct bfa_ioc *ioc)
2227{
2228 if (bfa_ioc_get_type(ioc) == BFA_IOC_TYPE_LL)
2229 return;
Rasesh Mody1d32f762010-12-23 21:45:09 +00002230}
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002231
Rasesh Mody1d32f762010-12-23 21:45:09 +00002232/**
2233 * @dg hal_iocpf_pvt BFA IOC PF private functions
2234 * @{
2235 */
2236
2237static void
2238bfa_iocpf_enable(struct bfa_ioc *ioc)
2239{
2240 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_ENABLE);
2241}
2242
2243static void
2244bfa_iocpf_disable(struct bfa_ioc *ioc)
2245{
2246 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_DISABLE);
2247}
2248
2249static void
2250bfa_iocpf_fail(struct bfa_ioc *ioc)
2251{
2252 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_FAIL);
2253}
2254
2255static void
2256bfa_iocpf_initfail(struct bfa_ioc *ioc)
2257{
2258 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_INITFAIL);
2259}
2260
2261static void
2262bfa_iocpf_getattrfail(struct bfa_ioc *ioc)
2263{
2264 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_GETATTRFAIL);
2265}
2266
2267static void
2268bfa_iocpf_stop(struct bfa_ioc *ioc)
2269{
2270 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_STOP);
2271}
2272
2273void
2274bfa_nw_iocpf_timeout(void *ioc_arg)
2275{
2276 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2277
2278 bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
2279}
2280
2281void
2282bfa_nw_iocpf_sem_timeout(void *ioc_arg)
2283{
2284 struct bfa_ioc *ioc = (struct bfa_ioc *) ioc_arg;
2285
2286 bfa_ioc_hw_sem_get(ioc);
Rasesh Mody8b230ed2010-08-23 20:24:12 -07002287}
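/*
 * Usage sketch (illustrative only): bfa_nw_iocpf_timeout() and
 * bfa_nw_iocpf_sem_timeout() are meant to be driven by kernel timers owned by
 * the Ethernet driver, typically armed with the IOC timeout value and run
 * under the driver's lock. The wrapper below and the BFA_IOC_TOV constant
 * are assumptions, using the timer API of this driver's era.
 */
#if 0
static void sample_iocpf_timeout(unsigned long data)
{
	struct bfa_ioc *ioc = (struct bfa_ioc *)data;

	bfa_nw_iocpf_timeout(ioc);
}

static void sample_start_iocpf_timer(struct bfa_ioc *ioc,
				     struct timer_list *timer)
{
	setup_timer(timer, sample_iocpf_timeout, (unsigned long)ioc);
	mod_timer(timer, jiffies + msecs_to_jiffies(BFA_IOC_TOV));
}
#endif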