blob: aa1dc749b28195eca325ee97921f91bdeaa95c42 [file] [log] [blame]
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001/*
2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
3 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
18#include "bfa_os_inc.h"
19#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
22#include "bfad_drv.h"
23
24BFA_TRC_FILE(HAL, FCXP);
25BFA_MODULE(fcxp);
26BFA_MODULE(sgpg);
27BFA_MODULE(lps);
28BFA_MODULE(fcport);
29BFA_MODULE(rport);
30BFA_MODULE(uf);
31
32/**
33 * LPS related definitions
34 */
35#define BFA_LPS_MIN_LPORTS (1)
36#define BFA_LPS_MAX_LPORTS (256)
37
38/*
39 * Maximum Vports supported per physical port or vf.
40 */
41#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
42#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
43
44/**
45 * lps_pvt BFA LPS private functions
46 */
47
/*
 * Events consumed by the LPS (logical port service) state machine.
 */
enum bfa_lps_event {
	BFA_LPS_SM_LOGIN = 1,	/* login request from user */
	BFA_LPS_SM_LOGOUT = 2,	/* logout request from user */
	BFA_LPS_SM_FWRSP = 3,	/* f/w response to login/logout */
	BFA_LPS_SM_RESUME = 4,	/* space present in reqq queue */
	BFA_LPS_SM_DELETE = 5,	/* lps delete from user */
	BFA_LPS_SM_OFFLINE = 6,	/* Link is offline */
	BFA_LPS_SM_RX_CVL = 7,	/* Rx clear virtual link */
};
57
58/**
59 * FC PORT related definitions
60 */
/*
 * The port is considered disabled if the corresponding physical port or
 * the IOC has been disabled explicitly.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
	(bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
68
69
70/**
71 * BFA port state machine events
72 */
/*
 * Events consumed by the BFA FC port state machine.
 */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START = 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP = 2,		/* stop port state machine */
	BFA_FCPORT_SM_ENABLE = 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE = 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP = 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP = 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN = 7,	/* firmware linkdown event */
	BFA_FCPORT_SM_QRESUME = 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL = 9,	/* IOC h/w failure */
};
84
85/**
86 * BFA port link notification state machine events
87 */
88
/*
 * Events consumed by the FC port link-notification state machine.
 */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP = 1,		/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN = 2,		/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION = 3	/* done notification */
};
94
95/**
96 * RPORT related definitions
97 */
/*
 * Deliver an rport offline notification: synchronously when the FCS is
 * driver-resident ((__rp)->bfa->fcs is set), otherwise deferred through
 * the hcb callback queue.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)

/*
 * Deliver an rport online notification: synchronously when the FCS is
 * driver-resident, otherwise deferred through the hcb callback queue.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
115
116
/*
 * Events consumed by the BFA rport state machine.
 */
enum bfa_rport_event {
	BFA_RPORT_SM_CREATE = 1,	/* rport create event */
	BFA_RPORT_SM_DELETE = 2,	/* deleting an existing rport */
	BFA_RPORT_SM_ONLINE = 3,	/* rport is online */
	BFA_RPORT_SM_OFFLINE = 4,	/* rport is offline */
	BFA_RPORT_SM_FWRSP = 5,		/* firmware response */
	BFA_RPORT_SM_HWFAIL = 6,	/* IOC h/w failure */
	BFA_RPORT_SM_QOS_SCN = 7,	/* QoS SCN from firmware */
	BFA_RPORT_SM_SET_SPEED = 8,	/* Set Rport Speed */
	BFA_RPORT_SM_QRESUME = 9,	/* space in requeue queue */
};
128
129/**
130 * forward declarations FCXP related functions
131 */
132static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
133static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
134 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
135static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
136 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
137static void bfa_fcxp_qresume(void *cbarg);
138static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
139 struct bfi_fcxp_send_req_s *send_req);
140
141/**
142 * forward declarations for LPS functions
143 */
144static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
145 u32 *dm_len);
146static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
147 struct bfa_iocfc_cfg_s *cfg,
148 struct bfa_meminfo_s *meminfo,
149 struct bfa_pcidev_s *pcidev);
150static void bfa_lps_detach(struct bfa_s *bfa);
151static void bfa_lps_start(struct bfa_s *bfa);
152static void bfa_lps_stop(struct bfa_s *bfa);
153static void bfa_lps_iocdisable(struct bfa_s *bfa);
154static void bfa_lps_login_rsp(struct bfa_s *bfa,
155 struct bfi_lps_login_rsp_s *rsp);
156static void bfa_lps_logout_rsp(struct bfa_s *bfa,
157 struct bfi_lps_logout_rsp_s *rsp);
158static void bfa_lps_reqq_resume(void *lps_arg);
159static void bfa_lps_free(struct bfa_lps_s *lps);
160static void bfa_lps_send_login(struct bfa_lps_s *lps);
161static void bfa_lps_send_logout(struct bfa_lps_s *lps);
162static void bfa_lps_login_comp(struct bfa_lps_s *lps);
163static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
164static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
165
166/**
167 * forward declaration for LPS state machine
168 */
169static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
170static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
171static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
172 event);
173static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
174static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
175static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
176 event);
177
178/**
179 * forward declaration for FC Port functions
180 */
181static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
182static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
183static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
184static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
185static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
186static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
187static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
188 enum bfa_port_linkstate event, bfa_boolean_t trunk);
189static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
190 enum bfa_port_linkstate event);
191static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
192static void bfa_fcport_stats_get_timeout(void *cbarg);
193static void bfa_fcport_stats_clr_timeout(void *cbarg);
194static void bfa_trunk_iocdisable(struct bfa_s *bfa);
195
196/**
197 * forward declaration for FC PORT state machine
198 */
199static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
200 enum bfa_fcport_sm_event event);
201static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
202 enum bfa_fcport_sm_event event);
203static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
204 enum bfa_fcport_sm_event event);
205static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
206 enum bfa_fcport_sm_event event);
207static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
208 enum bfa_fcport_sm_event event);
209static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
210 enum bfa_fcport_sm_event event);
211static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
212 enum bfa_fcport_sm_event event);
213static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
214 enum bfa_fcport_sm_event event);
215static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
216 enum bfa_fcport_sm_event event);
217static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
218 enum bfa_fcport_sm_event event);
219static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
220 enum bfa_fcport_sm_event event);
221static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
222 enum bfa_fcport_sm_event event);
223
224static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
225 enum bfa_fcport_ln_sm_event event);
226static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
227 enum bfa_fcport_ln_sm_event event);
228static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
229 enum bfa_fcport_ln_sm_event event);
230static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
231 enum bfa_fcport_ln_sm_event event);
232static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
233 enum bfa_fcport_ln_sm_event event);
234static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
235 enum bfa_fcport_ln_sm_event event);
236static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
237 enum bfa_fcport_ln_sm_event event);
238
/*
 * Lookup table mapping each FC port state-machine handler to the
 * externally visible BFA_PORT_ST_* state value. Note that both
 * iocdown and iocfail report BFA_PORT_ST_IOCDOWN.
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
253
254
255/**
256 * forward declaration for RPORT related functions
257 */
258static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
259static void bfa_rport_free(struct bfa_rport_s *rport);
260static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
261static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
262static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
263static void __bfa_cb_rport_online(void *cbarg,
264 bfa_boolean_t complete);
265static void __bfa_cb_rport_offline(void *cbarg,
266 bfa_boolean_t complete);
267
268/**
269 * forward declaration for RPORT state machine
270 */
271static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
272 enum bfa_rport_event event);
273static void bfa_rport_sm_created(struct bfa_rport_s *rp,
274 enum bfa_rport_event event);
275static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
276 enum bfa_rport_event event);
277static void bfa_rport_sm_online(struct bfa_rport_s *rp,
278 enum bfa_rport_event event);
279static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
280 enum bfa_rport_event event);
281static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
282 enum bfa_rport_event event);
283static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
284 enum bfa_rport_event event);
285static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
286 enum bfa_rport_event event);
287static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
288 enum bfa_rport_event event);
289static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
290 enum bfa_rport_event event);
291static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
292 enum bfa_rport_event event);
293static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
294 enum bfa_rport_event event);
295static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
296 enum bfa_rport_event event);
297
298/**
299 * PLOG related definitions
300 */
301static int
302plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
303{
304 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
305 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
306 return 1;
307
308 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
309 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
310 return 1;
311
312 return 0;
313}
314
315static void
316bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
317{
318 u16 tail;
319 struct bfa_plog_rec_s *pl_recp;
320
321 if (plog->plog_enabled == 0)
322 return;
323
324 if (plkd_validate_logrec(pl_rec)) {
325 bfa_assert(0);
326 return;
327 }
328
329 tail = plog->tail;
330
331 pl_recp = &(plog->plog_recs[tail]);
332
333 bfa_os_memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
334
335 pl_recp->tv = bfa_os_get_log_time();
336 BFA_PL_LOG_REC_INCR(plog->tail);
337
338 if (plog->head == plog->tail)
339 BFA_PL_LOG_REC_INCR(plog->head);
340}
341
342void
343bfa_plog_init(struct bfa_plog_s *plog)
344{
345 bfa_os_memset((char *)plog, 0, sizeof(struct bfa_plog_s));
346
347 bfa_os_memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
348 plog->head = plog->tail = 0;
349 plog->plog_enabled = 1;
350}
351
352void
353bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
354 enum bfa_plog_eid event,
355 u16 misc, char *log_str)
356{
357 struct bfa_plog_rec_s lp;
358
359 if (plog->plog_enabled) {
360 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
361 lp.mid = mid;
362 lp.eid = event;
363 lp.log_type = BFA_PL_LOG_TYPE_STRING;
364 lp.misc = misc;
365 strncpy(lp.log_entry.string_log, log_str,
366 BFA_PL_STRING_LOG_SZ - 1);
367 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
368 bfa_plog_add(plog, &lp);
369 }
370}
371
372void
373bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
374 enum bfa_plog_eid event,
375 u16 misc, u32 *intarr, u32 num_ints)
376{
377 struct bfa_plog_rec_s lp;
378 u32 i;
379
380 if (num_ints > BFA_PL_INT_LOG_SZ)
381 num_ints = BFA_PL_INT_LOG_SZ;
382
383 if (plog->plog_enabled) {
384 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
385 lp.mid = mid;
386 lp.eid = event;
387 lp.log_type = BFA_PL_LOG_TYPE_INT;
388 lp.misc = misc;
389
390 for (i = 0; i < num_ints; i++)
391 bfa_os_assign(lp.log_entry.int_log[i],
392 intarr[i]);
393
394 lp.log_num_ints = (u8) num_ints;
395
396 bfa_plog_add(plog, &lp);
397 }
398}
399
400void
401bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
402 enum bfa_plog_eid event,
403 u16 misc, struct fchs_s *fchdr)
404{
405 struct bfa_plog_rec_s lp;
406 u32 *tmp_int = (u32 *) fchdr;
407 u32 ints[BFA_PL_INT_LOG_SZ];
408
409 if (plog->plog_enabled) {
410 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
411
412 ints[0] = tmp_int[0];
413 ints[1] = tmp_int[1];
414 ints[2] = tmp_int[4];
415
416 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
417 }
418}
419
420void
421bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
422 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
423 u32 pld_w0)
424{
425 struct bfa_plog_rec_s lp;
426 u32 *tmp_int = (u32 *) fchdr;
427 u32 ints[BFA_PL_INT_LOG_SZ];
428
429 if (plog->plog_enabled) {
430 bfa_os_memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
431
432 ints[0] = tmp_int[0];
433 ints[1] = tmp_int[1];
434 ints[2] = tmp_int[4];
435 ints[3] = pld_w0;
436
437 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
438 }
439}
440
441void
442bfa_plog_clear(struct bfa_plog_s *plog)
443{
444 plog->head = plog->tail = 0;
445}
446
447void
448bfa_plog_enable(struct bfa_plog_s *plog)
449{
450 plog->plog_enabled = 1;
451}
452
453void
454bfa_plog_disable(struct bfa_plog_s *plog)
455{
456 plog->plog_enabled = 0;
457}
458
459bfa_boolean_t
460bfa_plog_get_setting(struct bfa_plog_s *plog)
461{
462 return (bfa_boolean_t)plog->plog_enabled;
463}
464
465/**
466 * fcxp_pvt BFA FCXP private functions
467 */
468
469static void
470claim_fcxp_req_rsp_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
471{
472 u8 *dm_kva = NULL;
473 u64 dm_pa;
474 u32 buf_pool_sz;
475
476 dm_kva = bfa_meminfo_dma_virt(mi);
477 dm_pa = bfa_meminfo_dma_phys(mi);
478
479 buf_pool_sz = mod->req_pld_sz * mod->num_fcxps;
480
481 /*
482 * Initialize the fcxp req payload list
483 */
484 mod->req_pld_list_kva = dm_kva;
485 mod->req_pld_list_pa = dm_pa;
486 dm_kva += buf_pool_sz;
487 dm_pa += buf_pool_sz;
488 bfa_os_memset(mod->req_pld_list_kva, 0, buf_pool_sz);
489
490 /*
491 * Initialize the fcxp rsp payload list
492 */
493 buf_pool_sz = mod->rsp_pld_sz * mod->num_fcxps;
494 mod->rsp_pld_list_kva = dm_kva;
495 mod->rsp_pld_list_pa = dm_pa;
496 dm_kva += buf_pool_sz;
497 dm_pa += buf_pool_sz;
498 bfa_os_memset(mod->rsp_pld_list_kva, 0, buf_pool_sz);
499
500 bfa_meminfo_dma_virt(mi) = dm_kva;
501 bfa_meminfo_dma_phys(mi) = dm_pa;
502}
503
504static void
505claim_fcxps_mem(struct bfa_fcxp_mod_s *mod, struct bfa_meminfo_s *mi)
506{
507 u16 i;
508 struct bfa_fcxp_s *fcxp;
509
510 fcxp = (struct bfa_fcxp_s *) bfa_meminfo_kva(mi);
511 bfa_os_memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
512
513 INIT_LIST_HEAD(&mod->fcxp_free_q);
514 INIT_LIST_HEAD(&mod->fcxp_active_q);
515
516 mod->fcxp_list = fcxp;
517
518 for (i = 0; i < mod->num_fcxps; i++) {
519 fcxp->fcxp_mod = mod;
520 fcxp->fcxp_tag = i;
521
522 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
523 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
524 fcxp->reqq_waiting = BFA_FALSE;
525
526 fcxp = fcxp + 1;
527 }
528
529 bfa_meminfo_kva(mi) = (void *)fcxp;
530}
531
532static void
533bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
534 u32 *dm_len)
535{
536 u16 num_fcxp_reqs = cfg->fwcfg.num_fcxp_reqs;
537
538 if (num_fcxp_reqs == 0)
539 return;
540
541 /*
542 * Account for req/rsp payload
543 */
544 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
545 if (cfg->drvcfg.min_cfg)
546 *dm_len += BFA_FCXP_MAX_IBUF_SZ * num_fcxp_reqs;
547 else
548 *dm_len += BFA_FCXP_MAX_LBUF_SZ * num_fcxp_reqs;
549
550 /*
551 * Account for fcxp structs
552 */
553 *ndm_len += sizeof(struct bfa_fcxp_s) * num_fcxp_reqs;
554}
555
556static void
557bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
558 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
559{
560 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
561
562 bfa_os_memset(mod, 0, sizeof(struct bfa_fcxp_mod_s));
563 mod->bfa = bfa;
564 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
565
566 /**
567 * Initialize FCXP request and response payload sizes.
568 */
569 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
570 if (!cfg->drvcfg.min_cfg)
571 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
572
573 INIT_LIST_HEAD(&mod->wait_q);
574
575 claim_fcxp_req_rsp_mem(mod, meminfo);
576 claim_fcxps_mem(mod, meminfo);
577}
578
/*
 * Module hook: nothing to tear down for the FCXP module.
 */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
583
/*
 * Module hook: no start-time work for the FCXP module.
 */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
588
/*
 * Module hook: no stop-time work for the FCXP module.
 */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
593
594static void
595bfa_fcxp_iocdisable(struct bfa_s *bfa)
596{
597 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
598 struct bfa_fcxp_s *fcxp;
599 struct list_head *qe, *qen;
600
601 list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
602 fcxp = (struct bfa_fcxp_s *) qe;
603 if (fcxp->caller == NULL) {
604 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
605 BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
606 bfa_fcxp_free(fcxp);
607 } else {
608 fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
609 bfa_cb_queue(bfa, &fcxp->hcb_qe,
610 __bfa_fcxp_send_cbfn, fcxp);
611 }
612 }
613}
614
615static struct bfa_fcxp_s *
616bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
617{
618 struct bfa_fcxp_s *fcxp;
619
620 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
621
622 if (fcxp)
623 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
624
625 return fcxp;
626}
627
628static void
629bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
630 struct bfa_s *bfa,
631 u8 *use_ibuf,
632 u32 *nr_sgles,
633 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
634 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
635 struct list_head *r_sgpg_q,
636 int n_sgles,
637 bfa_fcxp_get_sgaddr_t sga_cbfn,
638 bfa_fcxp_get_sglen_t sglen_cbfn)
639{
640
641 bfa_assert(bfa != NULL);
642
643 bfa_trc(bfa, fcxp->fcxp_tag);
644
645 if (n_sgles == 0) {
646 *use_ibuf = 1;
647 } else {
648 bfa_assert(*sga_cbfn != NULL);
649 bfa_assert(*sglen_cbfn != NULL);
650
651 *use_ibuf = 0;
652 *r_sga_cbfn = sga_cbfn;
653 *r_sglen_cbfn = sglen_cbfn;
654
655 *nr_sgles = n_sgles;
656
657 /*
658 * alloc required sgpgs
659 */
660 if (n_sgles > BFI_SGE_INLINE)
661 bfa_assert(0);
662 }
663
664}
665
666static void
667bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
668 void *caller, struct bfa_s *bfa, int nreq_sgles,
669 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
670 bfa_fcxp_get_sglen_t req_sglen_cbfn,
671 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
672 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
673{
674
675 bfa_assert(bfa != NULL);
676
677 bfa_trc(bfa, fcxp->fcxp_tag);
678
679 fcxp->caller = caller;
680
681 bfa_fcxp_init_reqrsp(fcxp, bfa,
682 &fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
683 &fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
684 nreq_sgles, req_sga_cbfn, req_sglen_cbfn);
685
686 bfa_fcxp_init_reqrsp(fcxp, bfa,
687 &fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
688 &fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
689 nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
690
691}
692
693static void
694bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
695{
696 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
697 struct bfa_fcxp_wqe_s *wqe;
698
699 bfa_q_deq(&mod->wait_q, &wqe);
700 if (wqe) {
701 bfa_trc(mod->bfa, fcxp->fcxp_tag);
702
703 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
704 wqe->nrsp_sgles, wqe->req_sga_cbfn,
705 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
706 wqe->rsp_sglen_cbfn);
707
708 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
709 return;
710 }
711
712 bfa_assert(bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
713 list_del(&fcxp->qe);
714 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
715}
716
/*
 * Completion stub installed when bfa_fcxp_send() is given a NULL cbfn;
 * the response for such a discarded fcxp is dropped here.
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		bfa_status_t req_status, u32 rsp_len,
		u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
724
725static void
726__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
727{
728 struct bfa_fcxp_s *fcxp = cbarg;
729
730 if (complete) {
731 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
732 fcxp->rsp_status, fcxp->rsp_len,
733 fcxp->residue_len, &fcxp->rsp_fchs);
734 } else {
735 bfa_fcxp_free(fcxp);
736 }
737}
738
/*
 * Firmware response handler for an fcxp send request.
 *
 * Byte-swaps the response fields, logs the received frame, and then
 * completes the exchange: FCS-owned fcxps (caller == NULL) complete
 * synchronously and are freed; driver-owned ones stash the response in
 * the fcxp and complete through the deferred hcb queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	u16 fcxp_tag = bfa_os_ntohs(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = bfa_os_ntohl(fcxp_rsp->rsp_len);

	/**
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = bfa_os_ntohl(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	bfa_assert(fcxp->send_cbfn != NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	/*
	 * NOTE(review): with the assert above, the else branch below is
	 * unreachable in debug builds; it survives as a release-build guard.
	 */
	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* save the response for the deferred callback */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
788
789static void
790hal_fcxp_set_local_sges(struct bfi_sge_s *sge, u32 reqlen, u64 req_pa)
791{
792 union bfi_addr_u sga_zero = { {0} };
793
794 sge->sg_len = reqlen;
795 sge->flags = BFI_SGE_DATA_LAST;
796 bfa_dma_addr_set(sge[0].sga, req_pa);
797 bfa_sge_to_be(sge);
798 sge++;
799
800 sge->sga = sga_zero;
801 sge->sg_len = reqlen;
802 sge->flags = BFI_SGE_PGDLEN;
803 bfa_sge_to_be(sge);
804}
805
806static void
807hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
808 struct fchs_s *fchs)
809{
810 /*
811 * TODO: TX ox_id
812 */
813 if (reqlen > 0) {
814 if (fcxp->use_ireqbuf) {
815 u32 pld_w0 =
816 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
817
818 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
819 BFA_PL_EID_TX,
820 reqlen + sizeof(struct fchs_s), fchs,
821 pld_w0);
822 } else {
823 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
824 BFA_PL_EID_TX,
825 reqlen + sizeof(struct fchs_s),
826 fchs);
827 }
828 } else {
829 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
830 reqlen + sizeof(struct fchs_s), fchs);
831 }
832}
833
834static void
835hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
836 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
837{
838 if (fcxp_rsp->rsp_len > 0) {
839 if (fcxp->use_irspbuf) {
840 u32 pld_w0 =
841 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
842
843 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
844 BFA_PL_EID_RX,
845 (u16) fcxp_rsp->rsp_len,
846 &fcxp_rsp->fchs, pld_w0);
847 } else {
848 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
849 BFA_PL_EID_RX,
850 (u16) fcxp_rsp->rsp_len,
851 &fcxp_rsp->fchs);
852 }
853 } else {
854 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
855 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
856 }
857}
858
/**
 * Handler to resume sending an fcxp when space is available in the CPE queue.
 */
862static void
863bfa_fcxp_qresume(void *cbarg)
864{
865 struct bfa_fcxp_s *fcxp = cbarg;
866 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
867 struct bfi_fcxp_send_req_s *send_req;
868
869 fcxp->reqq_waiting = BFA_FALSE;
870 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
871 bfa_fcxp_queue(fcxp, send_req);
872}
873
/**
 * Queue an fcxp send request to the firmware.
 */
877static void
878bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
879{
880 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
881 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
882 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
883 struct bfa_rport_s *rport = reqi->bfa_rport;
884
885 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
886 bfa_lpuid(bfa));
887
888 send_req->fcxp_tag = bfa_os_htons(fcxp->fcxp_tag);
889 if (rport) {
890 send_req->rport_fw_hndl = rport->fw_handle;
891 send_req->max_frmsz = bfa_os_htons(rport->rport_info.max_frmsz);
892 if (send_req->max_frmsz == 0)
893 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
894 } else {
895 send_req->rport_fw_hndl = 0;
896 send_req->max_frmsz = bfa_os_htons(FC_MAX_PDUSZ);
897 }
898
899 send_req->vf_id = bfa_os_htons(reqi->vf_id);
900 send_req->lp_tag = reqi->lp_tag;
901 send_req->class = reqi->class;
902 send_req->rsp_timeout = rspi->rsp_timeout;
903 send_req->cts = reqi->cts;
904 send_req->fchs = reqi->fchs;
905
906 send_req->req_len = bfa_os_htonl(reqi->req_tot_len);
907 send_req->rsp_maxlen = bfa_os_htonl(rspi->rsp_maxlen);
908
909 /*
910 * setup req sgles
911 */
912 if (fcxp->use_ireqbuf == 1) {
913 hal_fcxp_set_local_sges(send_req->req_sge, reqi->req_tot_len,
914 BFA_FCXP_REQ_PLD_PA(fcxp));
915 } else {
916 if (fcxp->nreq_sgles > 0) {
917 bfa_assert(fcxp->nreq_sgles == 1);
918 hal_fcxp_set_local_sges(send_req->req_sge,
919 reqi->req_tot_len,
920 fcxp->req_sga_cbfn(fcxp->caller,
921 0));
922 } else {
923 bfa_assert(reqi->req_tot_len == 0);
924 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
925 }
926 }
927
928 /*
929 * setup rsp sgles
930 */
931 if (fcxp->use_irspbuf == 1) {
932 bfa_assert(rspi->rsp_maxlen <= BFA_FCXP_MAX_LBUF_SZ);
933
934 hal_fcxp_set_local_sges(send_req->rsp_sge, rspi->rsp_maxlen,
935 BFA_FCXP_RSP_PLD_PA(fcxp));
936
937 } else {
938 if (fcxp->nrsp_sgles > 0) {
939 bfa_assert(fcxp->nrsp_sgles == 1);
940 hal_fcxp_set_local_sges(send_req->rsp_sge,
941 rspi->rsp_maxlen,
942 fcxp->rsp_sga_cbfn(fcxp->caller,
943 0));
944 } else {
945 bfa_assert(rspi->rsp_maxlen == 0);
946 hal_fcxp_set_local_sges(send_req->rsp_sge, 0, 0);
947 }
948 }
949
950 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
951
952 bfa_reqq_produce(bfa, BFA_REQQ_FCXP);
953
954 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
955 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
956}
957
958/**
959 * hal_fcxp_api BFA FCXP API
960 */
961
962/**
963 * Allocate an FCXP instance to send a response or to send a request
964 * that has a response. Request/response buffers are allocated by caller.
965 *
966 * @param[in] bfa BFA bfa instance
967 * @param[in] nreq_sgles Number of SG elements required for request
968 * buffer. 0, if fcxp internal buffers are used.
969 * Use bfa_fcxp_get_reqbuf() to get the
970 * internal req buffer.
971 * @param[in] req_sgles SG elements describing request buffer. Will be
972 * copied in by BFA and hence can be freed on
973 * return from this function.
974 * @param[in] get_req_sga function ptr to be called to get a request SG
975 * Address (given the sge index).
976 * @param[in] get_req_sglen function ptr to be called to get a request SG
977 * len (given the sge index).
978 * @param[in] get_rsp_sga function ptr to be called to get a response SG
979 * Address (given the sge index).
980 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
981 * len (given the sge index).
982 *
983 * @return FCXP instance. NULL on failure.
984 */
985struct bfa_fcxp_s *
986bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
987 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
988 bfa_fcxp_get_sglen_t req_sglen_cbfn,
989 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
990 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
991{
992 struct bfa_fcxp_s *fcxp = NULL;
993
994 bfa_assert(bfa != NULL);
995
996 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
997 if (fcxp == NULL)
998 return NULL;
999
1000 bfa_trc(bfa, fcxp->fcxp_tag);
1001
1002 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
1003 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
1004
1005 return fcxp;
1006}
1007
1008/**
1009 * Get the internal request buffer pointer
1010 *
1011 * @param[in] fcxp BFA fcxp pointer
1012 *
1013 * @return pointer to the internal request buffer
1014 */
1015void *
1016bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
1017{
1018 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1019 void *reqbuf;
1020
1021 bfa_assert(fcxp->use_ireqbuf == 1);
1022 reqbuf = ((u8 *)mod->req_pld_list_kva) +
1023 fcxp->fcxp_tag * mod->req_pld_sz;
1024 return reqbuf;
1025}
1026
1027u32
1028bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
1029{
1030 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1031
1032 return mod->req_pld_sz;
1033}
1034
/**
 * Get the internal response buffer pointer
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return	pointer to the internal response buffer
 */
1042void *
1043bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
1044{
1045 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1046 void *rspbuf;
1047
1048 bfa_assert(fcxp->use_irspbuf == 1);
1049
1050 rspbuf = ((u8 *)mod->rsp_pld_list_kva) +
1051 fcxp->fcxp_tag * mod->rsp_pld_sz;
1052 return rspbuf;
1053}
1054
1055/**
1056 * Free the BFA FCXP
1057 *
1058 * @param[in] fcxp BFA fcxp pointer
1059 *
1060 * @return void
1061 */
1062void
1063bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
1064{
1065 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
1066
1067 bfa_assert(fcxp != NULL);
1068 bfa_trc(mod->bfa, fcxp->fcxp_tag);
1069 bfa_fcxp_put(fcxp);
1070}
1071
1072/**
1073 * Send a FCXP request
1074 *
1075 * @param[in] fcxp BFA fcxp pointer
1076 * @param[in] rport BFA rport pointer. Could be left NULL for WKA rports
1077 * @param[in] vf_id virtual Fabric ID
1078 * @param[in] lp_tag lport tag
 * @param[in]	cts	use Continuous sequence
1080 * @param[in] cos fc Class of Service
1081 * @param[in] reqlen request length, does not include FCHS length
1082 * @param[in] fchs fc Header Pointer. The header content will be copied
1083 * in by BFA.
1084 *
1085 * @param[in] cbfn call back function to be called on receiving
1086 * the response
1087 * @param[in] cbarg arg for cbfn
1088 * @param[in] rsp_timeout
1089 * response timeout
1090 *
1091 * @return bfa_status_t
1092 */
1093void
1094bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
1095 u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
1096 u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
1097 void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
1098{
1099 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
1100 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
1101 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
1102 struct bfi_fcxp_send_req_s *send_req;
1103
1104 bfa_trc(bfa, fcxp->fcxp_tag);
1105
1106 /**
1107 * setup request/response info
1108 */
1109 reqi->bfa_rport = rport;
1110 reqi->vf_id = vf_id;
1111 reqi->lp_tag = lp_tag;
1112 reqi->class = cos;
1113 rspi->rsp_timeout = rsp_timeout;
1114 reqi->cts = cts;
1115 reqi->fchs = *fchs;
1116 reqi->req_tot_len = reqlen;
1117 rspi->rsp_maxlen = rsp_maxlen;
1118 fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
1119 fcxp->send_cbarg = cbarg;
1120
1121 /**
1122 * If no room in CPE queue, wait for space in request queue
1123 */
1124 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
1125 if (!send_req) {
1126 bfa_trc(bfa, fcxp->fcxp_tag);
1127 fcxp->reqq_waiting = BFA_TRUE;
1128 bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
1129 return;
1130 }
1131
1132 bfa_fcxp_queue(fcxp, send_req);
1133}
1134
1135/**
1136 * Abort a BFA FCXP
1137 *
1138 * @param[in] fcxp BFA fcxp pointer
1139 *
1140 * @return void
1141 */
1142bfa_status_t
1143bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
1144{
1145 bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
1146 bfa_assert(0);
1147 return BFA_STATUS_OK;
1148}
1149
/**
 * Wait for a free fcxp: record the allocation request and its scatter/gather
 * callbacks in the wait element and park it on the module wait queue. The
 * caller is notified through alloc_cbfn when an fcxp becomes available.
 * Must only be called when the free list is actually empty (asserted).
 */
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
	       bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
	       void *caller, int nreq_sgles,
	       int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	       bfa_fcxp_get_sglen_t req_sglen_cbfn,
	       bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	       bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_assert(list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1176
/**
 * Cancel a pending fcxp allocation wait; the wait element must currently
 * be linked on the module wait queue (asserted).
 */
void
bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->wait_q, wqe));
	list_del(&wqe->qe);
}
1185
/**
 * Discard an fcxp: if it is still waiting for request-queue space, cancel
 * the wait and free it immediately; otherwise silence its completion by
 * pointing the callback at the no-op handler so a later firmware response
 * is ignored.
 */
void
bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
{
	/**
	 * If waiting for room in request queue, cancel reqq wait
	 * and free fcxp.
	 */
	if (fcxp->reqq_waiting) {
		fcxp->reqq_waiting = BFA_FALSE;
		bfa_reqq_wcancel(&fcxp->reqq_wqe);
		bfa_fcxp_free(fcxp);
		return;
	}

	fcxp->send_cbfn = bfa_fcxp_null_comp;
}
1202
1203
1204
1205/**
1206 * hal_fcxp_public BFA FCXP public functions
1207 */
1208
/**
 * FCXP firmware message dispatcher: routes BFI_FCXP_I2H_SEND_RSP to the
 * send-completion handler; any other message id is a firmware/driver
 * protocol error (asserted).
 */
void
bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	switch (msg->mhdr.msg_id) {
	case BFI_FCXP_I2H_SEND_RSP:
		hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
1222
1223u32
1224bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1225{
1226 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1227
1228 return mod->rsp_pld_sz;
1229}
1230
1231
1232/**
1233 * BFA LPS state machine functions
1234 */
1235
1236/**
1237 * Init state -- no login
1238 */
1239static void
1240bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
1241{
1242 bfa_trc(lps->bfa, lps->lp_tag);
1243 bfa_trc(lps->bfa, event);
1244
1245 switch (event) {
1246 case BFA_LPS_SM_LOGIN:
1247 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1248 bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
1249 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1250 } else {
1251 bfa_sm_set_state(lps, bfa_lps_sm_login);
1252 bfa_lps_send_login(lps);
1253 }
1254
1255 if (lps->fdisc)
1256 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1257 BFA_PL_EID_LOGIN, 0, "FDISC Request");
1258 else
1259 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1260 BFA_PL_EID_LOGIN, 0, "FLOGI Request");
1261 break;
1262
1263 case BFA_LPS_SM_LOGOUT:
1264 bfa_lps_logout_comp(lps);
1265 break;
1266
1267 case BFA_LPS_SM_DELETE:
1268 bfa_lps_free(lps);
1269 break;
1270
1271 case BFA_LPS_SM_RX_CVL:
1272 case BFA_LPS_SM_OFFLINE:
1273 break;
1274
1275 case BFA_LPS_SM_FWRSP:
1276 /*
1277 * Could happen when fabric detects loopback and discards
1278 * the lps request. Fw will eventually sent out the timeout
1279 * Just ignore
1280 */
1281 break;
1282
1283 default:
1284 bfa_sm_fault(lps->bfa, event);
1285 }
1286}
1287
1288/**
1289 * login is in progress -- awaiting response from firmware
1290 */
1291static void
1292bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
1293{
1294 bfa_trc(lps->bfa, lps->lp_tag);
1295 bfa_trc(lps->bfa, event);
1296
1297 switch (event) {
1298 case BFA_LPS_SM_FWRSP:
1299 if (lps->status == BFA_STATUS_OK) {
1300 bfa_sm_set_state(lps, bfa_lps_sm_online);
1301 if (lps->fdisc)
1302 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1303 BFA_PL_EID_LOGIN, 0, "FDISC Accept");
1304 else
1305 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1306 BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
1307 } else {
1308 bfa_sm_set_state(lps, bfa_lps_sm_init);
1309 if (lps->fdisc)
1310 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1311 BFA_PL_EID_LOGIN, 0,
1312 "FDISC Fail (RJT or timeout)");
1313 else
1314 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1315 BFA_PL_EID_LOGIN, 0,
1316 "FLOGI Fail (RJT or timeout)");
1317 }
1318 bfa_lps_login_comp(lps);
1319 break;
1320
1321 case BFA_LPS_SM_OFFLINE:
1322 bfa_sm_set_state(lps, bfa_lps_sm_init);
1323 break;
1324
1325 default:
1326 bfa_sm_fault(lps->bfa, event);
1327 }
1328}
1329
1330/**
1331 * login pending - awaiting space in request queue
1332 */
1333static void
1334bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1335{
1336 bfa_trc(lps->bfa, lps->lp_tag);
1337 bfa_trc(lps->bfa, event);
1338
1339 switch (event) {
1340 case BFA_LPS_SM_RESUME:
1341 bfa_sm_set_state(lps, bfa_lps_sm_login);
1342 break;
1343
1344 case BFA_LPS_SM_OFFLINE:
1345 bfa_sm_set_state(lps, bfa_lps_sm_init);
1346 bfa_reqq_wcancel(&lps->wqe);
1347 break;
1348
1349 case BFA_LPS_SM_RX_CVL:
1350 /*
1351 * Login was not even sent out; so when getting out
1352 * of this state, it will appear like a login retry
1353 * after Clear virtual link
1354 */
1355 break;
1356
1357 default:
1358 bfa_sm_fault(lps->bfa, event);
1359 }
1360}
1361
1362/**
1363 * login complete
1364 */
1365static void
1366bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
1367{
1368 bfa_trc(lps->bfa, lps->lp_tag);
1369 bfa_trc(lps->bfa, event);
1370
1371 switch (event) {
1372 case BFA_LPS_SM_LOGOUT:
1373 if (bfa_reqq_full(lps->bfa, lps->reqq)) {
1374 bfa_sm_set_state(lps, bfa_lps_sm_logowait);
1375 bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
1376 } else {
1377 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1378 bfa_lps_send_logout(lps);
1379 }
1380 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1381 BFA_PL_EID_LOGO, 0, "Logout");
1382 break;
1383
1384 case BFA_LPS_SM_RX_CVL:
1385 bfa_sm_set_state(lps, bfa_lps_sm_init);
1386
1387 /* Let the vport module know about this event */
1388 bfa_lps_cvl_event(lps);
1389 bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
1390 BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
1391 break;
1392
1393 case BFA_LPS_SM_OFFLINE:
1394 case BFA_LPS_SM_DELETE:
1395 bfa_sm_set_state(lps, bfa_lps_sm_init);
1396 break;
1397
1398 default:
1399 bfa_sm_fault(lps->bfa, event);
1400 }
1401}
1402
1403/**
1404 * logout in progress - awaiting firmware response
1405 */
1406static void
1407bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
1408{
1409 bfa_trc(lps->bfa, lps->lp_tag);
1410 bfa_trc(lps->bfa, event);
1411
1412 switch (event) {
1413 case BFA_LPS_SM_FWRSP:
1414 bfa_sm_set_state(lps, bfa_lps_sm_init);
1415 bfa_lps_logout_comp(lps);
1416 break;
1417
1418 case BFA_LPS_SM_OFFLINE:
1419 bfa_sm_set_state(lps, bfa_lps_sm_init);
1420 break;
1421
1422 default:
1423 bfa_sm_fault(lps->bfa, event);
1424 }
1425}
1426
1427/**
1428 * logout pending -- awaiting space in request queue
1429 */
1430static void
1431bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
1432{
1433 bfa_trc(lps->bfa, lps->lp_tag);
1434 bfa_trc(lps->bfa, event);
1435
1436 switch (event) {
1437 case BFA_LPS_SM_RESUME:
1438 bfa_sm_set_state(lps, bfa_lps_sm_logout);
1439 bfa_lps_send_logout(lps);
1440 break;
1441
1442 case BFA_LPS_SM_OFFLINE:
1443 bfa_sm_set_state(lps, bfa_lps_sm_init);
1444 bfa_reqq_wcancel(&lps->wqe);
1445 break;
1446
1447 default:
1448 bfa_sm_fault(lps->bfa, event);
1449 }
1450}
1451
1452
1453
1454/**
1455 * lps_pvt BFA LPS private functions
1456 */
1457
1458/**
1459 * return memory requirement
1460 */
1461static void
1462bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
1463 u32 *dm_len)
1464{
1465 if (cfg->drvcfg.min_cfg)
1466 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS;
1467 else
1468 *ndm_len += sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS;
1469}
1470
1471/**
1472 * bfa module attach at initialization time
1473 */
1474static void
1475bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
1476 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
1477{
1478 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1479 struct bfa_lps_s *lps;
1480 int i;
1481
1482 bfa_os_memset(mod, 0, sizeof(struct bfa_lps_mod_s));
1483 mod->num_lps = BFA_LPS_MAX_LPORTS;
1484 if (cfg->drvcfg.min_cfg)
1485 mod->num_lps = BFA_LPS_MIN_LPORTS;
1486 else
1487 mod->num_lps = BFA_LPS_MAX_LPORTS;
1488 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_meminfo_kva(meminfo);
1489
1490 bfa_meminfo_kva(meminfo) += mod->num_lps * sizeof(struct bfa_lps_s);
1491
1492 INIT_LIST_HEAD(&mod->lps_free_q);
1493 INIT_LIST_HEAD(&mod->lps_active_q);
1494
1495 for (i = 0; i < mod->num_lps; i++, lps++) {
1496 lps->bfa = bfa;
1497 lps->lp_tag = (u8) i;
1498 lps->reqq = BFA_REQQ_LPS;
1499 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1500 list_add_tail(&lps->qe, &mod->lps_free_q);
1501 }
1502}
1503
/* Module detach hook -- LPS holds no resources needing teardown. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1508
/* Module start hook -- nothing for LPS to do at start. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1513
/* Module stop hook -- nothing for LPS to do at stop. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1518
1519/**
1520 * IOC in disabled state -- consider all lps offline
1521 */
1522static void
1523bfa_lps_iocdisable(struct bfa_s *bfa)
1524{
1525 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1526 struct bfa_lps_s *lps;
1527 struct list_head *qe, *qen;
1528
1529 list_for_each_safe(qe, qen, &mod->lps_active_q) {
1530 lps = (struct bfa_lps_s *) qe;
1531 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1532 }
1533}
1534
1535/**
1536 * Firmware login response
1537 */
1538static void
1539bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1540{
1541 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1542 struct bfa_lps_s *lps;
1543
1544 bfa_assert(rsp->lp_tag < mod->num_lps);
1545 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1546
1547 lps->status = rsp->status;
1548 switch (rsp->status) {
1549 case BFA_STATUS_OK:
1550 lps->fport = rsp->f_port;
1551 lps->npiv_en = rsp->npiv_en;
1552 lps->lp_pid = rsp->lp_pid;
1553 lps->pr_bbcred = bfa_os_ntohs(rsp->bb_credit);
1554 lps->pr_pwwn = rsp->port_name;
1555 lps->pr_nwwn = rsp->node_name;
1556 lps->auth_req = rsp->auth_req;
1557 lps->lp_mac = rsp->lp_mac;
1558 lps->brcd_switch = rsp->brcd_switch;
1559 lps->fcf_mac = rsp->fcf_mac;
1560
1561 break;
1562
1563 case BFA_STATUS_FABRIC_RJT:
1564 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1565 lps->lsrjt_expl = rsp->lsrjt_expl;
1566
1567 break;
1568
1569 case BFA_STATUS_EPROTOCOL:
1570 lps->ext_status = rsp->ext_status;
1571
1572 break;
1573
1574 default:
1575 /* Nothing to do with other status */
1576 break;
1577 }
1578
1579 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1580}
1581
1582/**
1583 * Firmware logout response
1584 */
1585static void
1586bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1587{
1588 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1589 struct bfa_lps_s *lps;
1590
1591 bfa_assert(rsp->lp_tag < mod->num_lps);
1592 lps = BFA_LPS_FROM_TAG(mod, rsp->lp_tag);
1593
1594 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1595}
1596
1597/**
1598 * Firmware received a Clear virtual link request (for FCoE)
1599 */
1600static void
1601bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1602{
1603 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1604 struct bfa_lps_s *lps;
1605
1606 lps = BFA_LPS_FROM_TAG(mod, cvl->lp_tag);
1607
1608 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1609}
1610
1611/**
1612 * Space is available in request queue, resume queueing request to firmware.
1613 */
1614static void
1615bfa_lps_reqq_resume(void *lps_arg)
1616{
1617 struct bfa_lps_s *lps = lps_arg;
1618
1619 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1620}
1621
1622/**
1623 * lps is freed -- triggered by vport delete
1624 */
1625static void
1626bfa_lps_free(struct bfa_lps_s *lps)
1627{
1628 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1629
1630 lps->lp_pid = 0;
1631 list_del(&lps->qe);
1632 list_add_tail(&lps->qe, &mod->lps_free_q);
1633}
1634
1635/**
1636 * send login request to firmware
1637 */
1638static void
1639bfa_lps_send_login(struct bfa_lps_s *lps)
1640{
1641 struct bfi_lps_login_req_s *m;
1642
1643 m = bfa_reqq_next(lps->bfa, lps->reqq);
1644 bfa_assert(m);
1645
1646 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
1647 bfa_lpuid(lps->bfa));
1648
1649 m->lp_tag = lps->lp_tag;
1650 m->alpa = lps->alpa;
1651 m->pdu_size = bfa_os_htons(lps->pdusz);
1652 m->pwwn = lps->pwwn;
1653 m->nwwn = lps->nwwn;
1654 m->fdisc = lps->fdisc;
1655 m->auth_en = lps->auth_en;
1656
1657 bfa_reqq_produce(lps->bfa, lps->reqq);
1658}
1659
1660/**
1661 * send logout request to firmware
1662 */
1663static void
1664bfa_lps_send_logout(struct bfa_lps_s *lps)
1665{
1666 struct bfi_lps_logout_req_s *m;
1667
1668 m = bfa_reqq_next(lps->bfa, lps->reqq);
1669 bfa_assert(m);
1670
1671 bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
1672 bfa_lpuid(lps->bfa));
1673
1674 m->lp_tag = lps->lp_tag;
1675 m->port_name = lps->pwwn;
1676 bfa_reqq_produce(lps->bfa, lps->reqq);
1677}
1678
1679/**
1680 * Indirect login completion handler for non-fcs
1681 */
1682static void
1683bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1684{
1685 struct bfa_lps_s *lps = arg;
1686
1687 if (!complete)
1688 return;
1689
1690 if (lps->fdisc)
1691 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1692 else
1693 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1694}
1695
1696/**
1697 * Login completion handler -- direct call for fcs, queue for others
1698 */
1699static void
1700bfa_lps_login_comp(struct bfa_lps_s *lps)
1701{
1702 if (!lps->bfa->fcs) {
1703 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1704 lps);
1705 return;
1706 }
1707
1708 if (lps->fdisc)
1709 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1710 else
1711 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1712}
1713
1714/**
1715 * Indirect logout completion handler for non-fcs
1716 */
1717static void
1718bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1719{
1720 struct bfa_lps_s *lps = arg;
1721
1722 if (!complete)
1723 return;
1724
1725 if (lps->fdisc)
1726 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1727}
1728
1729/**
1730 * Logout completion handler -- direct call for fcs, queue for others
1731 */
1732static void
1733bfa_lps_logout_comp(struct bfa_lps_s *lps)
1734{
1735 if (!lps->bfa->fcs) {
1736 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1737 lps);
1738 return;
1739 }
1740 if (lps->fdisc)
1741 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1742}
1743
1744/**
1745 * Clear virtual link completion handler for non-fcs
1746 */
1747static void
1748bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1749{
1750 struct bfa_lps_s *lps = arg;
1751
1752 if (!complete)
1753 return;
1754
1755 /* Clear virtual link to base port will result in link down */
1756 if (lps->fdisc)
1757 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1758}
1759
1760/**
1761 * Received Clear virtual link event --direct call for fcs,
1762 * queue for others
1763 */
1764static void
1765bfa_lps_cvl_event(struct bfa_lps_s *lps)
1766{
1767 if (!lps->bfa->fcs) {
1768 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1769 lps);
1770 return;
1771 }
1772
1773 /* Clear virtual link to base port will result in link down */
1774 if (lps->fdisc)
1775 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1776}
1777
1778
1779
1780/**
1781 * lps_public BFA LPS public functions
1782 */
1783
1784u32
1785bfa_lps_get_max_vport(struct bfa_s *bfa)
1786{
1787 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1788 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1789 else
1790 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1791}
1792
1793/**
1794 * Allocate a lport srvice tag.
1795 */
1796struct bfa_lps_s *
1797bfa_lps_alloc(struct bfa_s *bfa)
1798{
1799 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1800 struct bfa_lps_s *lps = NULL;
1801
1802 bfa_q_deq(&mod->lps_free_q, &lps);
1803
1804 if (lps == NULL)
1805 return NULL;
1806
1807 list_add_tail(&lps->qe, &mod->lps_active_q);
1808
1809 bfa_sm_set_state(lps, bfa_lps_sm_init);
1810 return lps;
1811}
1812
1813/**
1814 * Free lport service tag. This can be called anytime after an alloc.
1815 * No need to wait for any pending login/logout completions.
1816 */
1817void
1818bfa_lps_delete(struct bfa_lps_s *lps)
1819{
1820 bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
1821}
1822
1823/**
1824 * Initiate a lport login.
1825 */
1826void
1827bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
1828 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en)
1829{
1830 lps->uarg = uarg;
1831 lps->alpa = alpa;
1832 lps->pdusz = pdusz;
1833 lps->pwwn = pwwn;
1834 lps->nwwn = nwwn;
1835 lps->fdisc = BFA_FALSE;
1836 lps->auth_en = auth_en;
1837 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1838}
1839
1840/**
1841 * Initiate a lport fdisc login.
1842 */
1843void
1844bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1845 wwn_t nwwn)
1846{
1847 lps->uarg = uarg;
1848 lps->alpa = 0;
1849 lps->pdusz = pdusz;
1850 lps->pwwn = pwwn;
1851 lps->nwwn = nwwn;
1852 lps->fdisc = BFA_TRUE;
1853 lps->auth_en = BFA_FALSE;
1854 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1855}
1856
1857/**
1858 * Initiate a lport logout (flogi).
1859 */
1860void
1861bfa_lps_flogo(struct bfa_lps_s *lps)
1862{
1863 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1864}
1865
1866/**
1867 * Initiate a lport FDSIC logout.
1868 */
1869void
1870bfa_lps_fdisclogo(struct bfa_lps_s *lps)
1871{
1872 bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
1873}
1874
1875/**
1876 * Discard a pending login request -- should be called only for
1877 * link down handling.
1878 */
1879void
1880bfa_lps_discard(struct bfa_lps_s *lps)
1881{
1882 bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
1883}
1884
1885/**
1886 * Return lport services tag
1887 */
1888u8
1889bfa_lps_get_tag(struct bfa_lps_s *lps)
1890{
1891 return lps->lp_tag;
1892}
1893
1894/**
1895 * Return lport services tag given the pid
1896 */
1897u8
1898bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1899{
1900 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1901 struct bfa_lps_s *lps;
1902 int i;
1903
1904 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1905 if (lps->lp_pid == pid)
1906 return lps->lp_tag;
1907 }
1908
1909 /* Return base port tag anyway */
1910 return 0;
1911}
1912
1913/**
1914 * return if fabric login indicates support for NPIV
1915 */
1916bfa_boolean_t
1917bfa_lps_is_npiv_en(struct bfa_lps_s *lps)
1918{
1919 return lps->npiv_en;
1920}
1921
1922/**
1923 * Return TRUE if attached to F-Port, else return FALSE
1924 */
1925bfa_boolean_t
1926bfa_lps_is_fport(struct bfa_lps_s *lps)
1927{
1928 return lps->fport;
1929}
1930
1931/**
1932 * Return TRUE if attached to a Brocade Fabric
1933 */
1934bfa_boolean_t
1935bfa_lps_is_brcd_fabric(struct bfa_lps_s *lps)
1936{
1937 return lps->brcd_switch;
1938}
1939/**
1940 * return TRUE if authentication is required
1941 */
1942bfa_boolean_t
1943bfa_lps_is_authreq(struct bfa_lps_s *lps)
1944{
1945 return lps->auth_req;
1946}
1947
/**
 * Return the extended status recorded when a login fails with
 * BFA_STATUS_EPROTOCOL (see bfa_lps_login_rsp()).
 */
bfa_eproto_status_t
bfa_lps_get_extstatus(struct bfa_lps_s *lps)
{
	return lps->ext_status;
}
1953
1954/**
1955 * return port id assigned to the lport
1956 */
1957u32
1958bfa_lps_get_pid(struct bfa_lps_s *lps)
1959{
1960 return lps->lp_pid;
1961}
1962
1963/**
1964 * return port id assigned to the base lport
1965 */
1966u32
1967bfa_lps_get_base_pid(struct bfa_s *bfa)
1968{
1969 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1970
1971 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1972}
1973
1974/**
1975 * Return bb_credit assigned in FLOGI response
1976 */
1977u16
1978bfa_lps_get_peer_bbcredit(struct bfa_lps_s *lps)
1979{
1980 return lps->pr_bbcred;
1981}
1982
1983/**
1984 * Return peer port name
1985 */
1986wwn_t
1987bfa_lps_get_peer_pwwn(struct bfa_lps_s *lps)
1988{
1989 return lps->pr_pwwn;
1990}
1991
1992/**
1993 * Return peer node name
1994 */
1995wwn_t
1996bfa_lps_get_peer_nwwn(struct bfa_lps_s *lps)
1997{
1998 return lps->pr_nwwn;
1999}
2000
2001/**
2002 * return reason code if login request is rejected
2003 */
2004u8
2005bfa_lps_get_lsrjt_rsn(struct bfa_lps_s *lps)
2006{
2007 return lps->lsrjt_rsn;
2008}
2009
2010/**
2011 * return explanation code if login request is rejected
2012 */
2013u8
2014bfa_lps_get_lsrjt_expl(struct bfa_lps_s *lps)
2015{
2016 return lps->lsrjt_expl;
2017}
2018
2019/**
2020 * Return fpma/spma MAC for lport
2021 */
2022mac_t
2023bfa_lps_get_lp_mac(struct bfa_lps_s *lps)
2024{
2025 return lps->lp_mac;
2026}
2027
2028/**
2029 * LPS firmware message class handler.
2030 */
2031void
2032bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
2033{
2034 union bfi_lps_i2h_msg_u msg;
2035
2036 bfa_trc(bfa, m->mhdr.msg_id);
2037 msg.msg = m;
2038
2039 switch (m->mhdr.msg_id) {
2040 case BFI_LPS_H2I_LOGIN_RSP:
2041 bfa_lps_login_rsp(bfa, msg.login_rsp);
2042 break;
2043
2044 case BFI_LPS_H2I_LOGOUT_RSP:
2045 bfa_lps_logout_rsp(bfa, msg.logout_rsp);
2046 break;
2047
2048 case BFI_LPS_H2I_CVL_EVENT:
2049 bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
2050 break;
2051
2052 default:
2053 bfa_trc(bfa, m->mhdr.msg_id);
2054 bfa_assert(0);
2055 }
2056}
2057
2058/**
2059 * FC PORT state machine functions
2060 */
/**
 * Port is uninitialized -- awaiting the configuration start event.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* no request-queue space -- wait before enabling */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2106
/**
 * Port enable is pending -- awaiting space in the request queue.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* queue space available -- issue the deferred enable */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2163
/**
 * Port enable request has been queued to firmware -- awaiting the
 * response or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		/* enable acknowledged; link still down */
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		bfa_assert(fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2218
/**
 * Port is enabled but the link is down -- awaiting linkup or a
 * disable/stop request.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		bfa_assert(fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* non-FC mode (FCoE): record FIP FCF discovery outcome */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2297
/**
 * Port is enabled and the link is up -- normal operating state.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
		enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* expected linkdown (port/IOC disabled) vs fabric loss */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2379
/**
 * Port disable is pending: waiting for request queue space so the disable
 * message can be sent to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space is available: send the pending disable now. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		/* Stopping: drop the queued wait element. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Enable arrived while the disable is still queue-blocked:
		 * turn this into a disable-then-enable toggle.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		/* IOC failed: cancel the wait and record the failure. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2424
/**
 * A disable followed by an enable (toggle) is pending, waiting for request
 * queue space. Entered from disabling_qwait when an enable is requested.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
				enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/*
		 * Queue space: send the disable, then the enable. The final
		 * state depends on whether the enable could be queued too.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable is already the pending half of the toggle. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the enable half; revert to a plain queued disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2471
/**
 * Disable request has been sent to firmware; waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		/* Firmware acknowledged the disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Re-enable requested before the disable completed. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2525
/**
 * Port is disabled (firmware confirmed). Only an explicit enable, stop or
 * IOC failure moves it out of this state.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Send the enable; wait for queue space if it is full. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2573
/**
 * Port has been stopped. A subsequent start re-enables it; everything else
 * is ignored.
 */
static void
bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* Restart: send enable, or wait for queue space. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all other events.
		 */
		;
	}
}
2596
/**
 * Port is enabled. IOC is down/failed. A start event re-attempts the
 * firmware enable; all other events are ignored until then.
 */
static void
bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/* IOC came back: re-send the enable request. */
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
				bfa_fcport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
2622
/**
 * Port is disabled. IOC is down/failed. Start returns to the disabled
 * state; enable arms the port so it comes up when the IOC recovers.
 */
static void
bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Port now enabled, but IOC is still down. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}
2648
2649/**
2650 * Link state is down
2651 */
2652static void
2653bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2654 enum bfa_fcport_ln_sm_event event)
2655{
2656 bfa_trc(ln->fcport->bfa, event);
2657
2658 switch (event) {
2659 case BFA_FCPORT_LN_SM_LINKUP:
2660 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2661 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2662 break;
2663
2664 default:
2665 bfa_sm_fault(ln->fcport->bfa, event);
2666 }
2667}
2668
/**
 * Link state is waiting for down notification (a link-down callback is in
 * flight to the upper layer).
 */
static void
bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Remember the link-up; deliver it once the down is acked. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down callback delivered; settle in the down state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2691
/**
 * Link state is waiting for down notification and there is a pending up.
 */
static void
bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Link dropped again; the pending up is discarded. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Down acked: now deliver the pending link-up. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2715
2716/**
2717 * Link state is up
2718 */
2719static void
2720bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2721 enum bfa_fcport_ln_sm_event event)
2722{
2723 bfa_trc(ln->fcport->bfa, event);
2724
2725 switch (event) {
2726 case BFA_FCPORT_LN_SM_LINKDOWN:
2727 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2728 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2729 break;
2730
2731 default:
2732 bfa_sm_fault(ln->fcport->bfa, event);
2733 }
2734}
2735
/**
 * Link state is waiting for up notification (a link-up callback is in
 * flight to the upper layer).
 */
static void
bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* Remember the link-down; deliver it once the up is acked. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up callback delivered; settle in the up state. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2758
/**
 * Link state is waiting for up notification and there is a pending down.
 */
static void
bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKUP:
		/* Link came back while up/down are both pending. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up acked: now deliver the pending link-down. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2782
/**
 * Link state is waiting for up notification and there are pending down and up.
 */
static void
bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
			enum bfa_fcport_ln_sm_event event)
{
	bfa_trc(ln->fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_LN_SM_LINKDOWN:
		/* The trailing pending up is discarded. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
		break;

	case BFA_FCPORT_LN_SM_NOTIFICATION:
		/* Up acked: deliver the pending down; an up still follows. */
		bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
		bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
		break;

	default:
		bfa_sm_fault(ln->fcport->bfa, event);
	}
}
2806
2807
2808
2809/**
2810 * hal_port_private
2811 */
2812
2813static void
2814__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2815{
2816 struct bfa_fcport_ln_s *ln = cbarg;
2817
2818 if (complete)
2819 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2820 else
2821 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2822}
2823
2824/**
2825 * Send SCN notification to upper layers.
2826 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2827 */
2828static void
2829bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2830 bfa_boolean_t trunk)
2831{
2832 if (fcport->cfg.trunked && !trunk)
2833 return;
2834
2835 switch (event) {
2836 case BFA_PORT_LINKUP:
2837 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2838 break;
2839 case BFA_PORT_LINKDOWN:
2840 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2841 break;
2842 default:
2843 bfa_assert(0);
2844 }
2845}
2846
2847static void
2848bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2849{
2850 struct bfa_fcport_s *fcport = ln->fcport;
2851
2852 if (fcport->bfa->fcs) {
2853 fcport->event_cbfn(fcport->event_cbarg, event);
2854 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2855 } else {
2856 ln->ln_event = event;
2857 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2858 __bfa_cb_fcport_event, ln);
2859 }
2860}
2861
/* DMA area reserved for the fcport statistics, cache-line rounded. */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))
2864
2865static void
2866bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
2867 u32 *dm_len)
2868{
2869 *dm_len += FCPORT_STATS_DMA_SZ;
2870}
2871
2872static void
2873bfa_fcport_qresume(void *cbarg)
2874{
2875 struct bfa_fcport_s *fcport = cbarg;
2876
2877 bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
2878}
2879
2880static void
2881bfa_fcport_mem_claim(struct bfa_fcport_s *fcport, struct bfa_meminfo_s *meminfo)
2882{
2883 u8 *dm_kva;
2884 u64 dm_pa;
2885
2886 dm_kva = bfa_meminfo_dma_virt(meminfo);
2887 dm_pa = bfa_meminfo_dma_phys(meminfo);
2888
2889 fcport->stats_kva = dm_kva;
2890 fcport->stats_pa = dm_pa;
2891 fcport->stats = (union bfa_fcport_stats_u *) dm_kva;
2892
2893 dm_kva += FCPORT_STATS_DMA_SZ;
2894 dm_pa += FCPORT_STATS_DMA_SZ;
2895
2896 bfa_meminfo_dma_virt(meminfo) = dm_kva;
2897 bfa_meminfo_dma_phys(meminfo) = dm_pa;
2898}
2899
/**
 * Memory initialization. Zeroes the module state, claims DMA memory,
 * places both state machines in their initial states and installs the
 * default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct bfa_timeval_s tv;

	/* Must zero first: everything below assumes a clean structure. */
	bfa_os_memset(fcport, 0, sizeof(struct bfa_fcport_s));
	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport, meminfo);

	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/**
	 * initialize time stamp for stats reset
	 */
	bfa_os_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* maxfrsize 0 means "take the IOC value" in bfa_fcport_init(). */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2939
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* Nothing to tear down; present to complete the module interface. */
}
2944
/**
 * Called when IOC is ready. Kicks the port state machine with a START
 * event so an enabled port is brought up.
 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2953
/**
 * Called before IOC is stopped. Stops the port state machine and resets
 * the trunk attributes.
 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	bfa_trunk_iocdisable(bfa);
}
2963
2964/**
2965 * Called when IOC failure is detected.
2966 */
2967static void
2968bfa_fcport_iocdisable(struct bfa_s *bfa)
2969{
2970 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
2971
2972 bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
2973 bfa_trunk_iocdisable(bfa);
2974}
2975
/**
 * Capture link attributes (speed, topology, QoS, FCoE VLAN) from the
 * firmware link event stored in fcport->event_arg.
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop topology resets myalpa to 0 here — presumably
	 * it is refreshed by a later event/query; confirm. */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	bfa_os_assign(fcport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(fcport->qos_vc_attr,
		pevent->link_state.vc_fcf.qos_vc_attr);

	/**
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = bfa_os_ntohs(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
3005
3006static void
3007bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
3008{
3009 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
3010 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
3011}
3012
/**
 * Send port enable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued; BFA_FALSE if the request
 * queue was full, in which case the port is parked on reqq_wait and
 * resumed later through bfa_fcport_qresume().
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	/* Fill in the enable request: identity, config and stats DMA addr. */
	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
		bfa_lpuid(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(fcport->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
3054
/**
 * Send port disable message to firmware.
 *
 * Returns BFA_TRUE if the request was queued; BFA_FALSE if the request
 * queue was full (caller is parked on reqq_wait, resumed via
 * bfa_fcport_qresume()).
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
		bfa_lpuid(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}
3090
3091static void
3092bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3093{
3094 fcport->pwwn = bfa_ioc_get_pwwn(&fcport->bfa->ioc);
3095 fcport->nwwn = bfa_ioc_get_nwwn(&fcport->bfa->ioc);
3096
3097 bfa_trc(fcport->bfa, fcport->pwwn);
3098 bfa_trc(fcport->bfa, fcport->nwwn);
3099}
3100
/**
 * Send the configured transmit BB-credit to firmware.
 *
 * Note: if the request queue is full the update is dropped (only traced);
 * there is no retry path here.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
		bfa_lpuid(fcport->bfa));
	m->tx_bbcredit = bfa_os_htons((u16)fcport->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
}
3126
3127static void
3128bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3129 struct bfa_qos_stats_s *s)
3130{
3131 u32 *dip = (u32 *) d;
3132 u32 *sip = (u32 *) s;
3133 int i;
3134
3135 /* Now swap the 32 bit fields */
3136 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
3137 dip[i] = bfa_os_ntohl(sip[i]);
3138}
3139
/**
 * Copy FCoE statistics from *s to *d, converting from firmware to host
 * byte order. Counters are handled as pairs of 32-bit words: on a
 * big-endian host each word is converted in place, while on a
 * little-endian host the two words of each pair are also exchanged.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
			struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		/* Swap the word order within the 64-bit counter as well. */
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}
}
3159
/**
 * Completion trampoline for a stats-get request: byte-swaps the firmware
 * stats into the caller's buffer and invokes the caller's callback. On
 * cancellation only the busy/status flags are reset.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct bfa_timeval_s tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* Seconds since the last stats reset. */
				bfa_os_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		/* NOTE(review): stats_busy is cleared only on the cancel
		 * path here — presumably the completion callback chain
		 * clears it; confirm against the callers. */
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3190
3191static void
3192bfa_fcport_stats_get_timeout(void *cbarg)
3193{
3194 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3195
3196 bfa_trc(fcport->bfa, fcport->stats_qfull);
3197
3198 if (fcport->stats_qfull) {
3199 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3200 fcport->stats_qfull = BFA_FALSE;
3201 }
3202
3203 fcport->stats_status = BFA_STATUS_ETIMER;
3204 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
3205 fcport);
3206}
3207
3208static void
3209bfa_fcport_send_stats_get(void *cbarg)
3210{
3211 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3212 struct bfi_fcport_req_s *msg;
3213
3214 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3215
3216 if (!msg) {
3217 fcport->stats_qfull = BFA_TRUE;
3218 bfa_reqq_winit(&fcport->stats_reqq_wait,
3219 bfa_fcport_send_stats_get, fcport);
3220 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3221 &fcport->stats_reqq_wait);
3222 return;
3223 }
3224 fcport->stats_qfull = BFA_FALSE;
3225
3226 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3227 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
3228 bfa_lpuid(fcport->bfa));
3229 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3230}
3231
3232static void
3233__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
3234{
3235 struct bfa_fcport_s *fcport = cbarg;
3236
3237 if (complete) {
3238 struct bfa_timeval_s tv;
3239
3240 /**
3241 * re-initialize time stamp for stats reset
3242 */
3243 bfa_os_gettimeofday(&tv);
3244 fcport->stats_reset_time = tv.tv_sec;
3245
3246 fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
3247 } else {
3248 fcport->stats_busy = BFA_FALSE;
3249 fcport->stats_status = BFA_STATUS_OK;
3250 }
3251}
3252
3253static void
3254bfa_fcport_stats_clr_timeout(void *cbarg)
3255{
3256 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3257
3258 bfa_trc(fcport->bfa, fcport->stats_qfull);
3259
3260 if (fcport->stats_qfull) {
3261 bfa_reqq_wcancel(&fcport->stats_reqq_wait);
3262 fcport->stats_qfull = BFA_FALSE;
3263 }
3264
3265 fcport->stats_status = BFA_STATUS_ETIMER;
3266 bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
3267 __bfa_cb_fcport_stats_clr, fcport);
3268}
3269
3270static void
3271bfa_fcport_send_stats_clear(void *cbarg)
3272{
3273 struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
3274 struct bfi_fcport_req_s *msg;
3275
3276 msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
3277
3278 if (!msg) {
3279 fcport->stats_qfull = BFA_TRUE;
3280 bfa_reqq_winit(&fcport->stats_reqq_wait,
3281 bfa_fcport_send_stats_clear, fcport);
3282 bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
3283 &fcport->stats_reqq_wait);
3284 return;
3285 }
3286 fcport->stats_qfull = BFA_FALSE;
3287
3288 bfa_os_memset(msg, 0, sizeof(struct bfi_fcport_req_s));
3289 bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
3290 bfa_lpuid(fcport->bfa));
3291 bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT);
3292}
3293
/**
 * Handle trunk SCN event from firmware: record the new trunk state and
 * per-link attributes, log which links came up, and notify upper layers
 * if the effective trunk state changed.
 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bitmap of links reported up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	bfa_assert(scn->trunk_state == BFA_TRUNK_ONLINE ||
		scn->trunk_state == BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/**
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		/* Copy per-link attributes from the firmware event. */
		lattr->link_state = tlink->state;
		lattr->trunk_wwn  = tlink->trunk_wwn;
		lattr->fctl	  = tlink->fctl;
		lattr->speed	  = tlink->speed;
		lattr->deskew	  = bfa_os_ntohl(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			/* An up link determines the port speed/topology. */
			fcport->speed	 = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* Record in the port log which of the two links are up. */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/**
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3372
3373static void
3374bfa_trunk_iocdisable(struct bfa_s *bfa)
3375{
3376 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3377 int i = 0;
3378
3379 /**
3380 * In trunked mode, notify upper layers that link is down
3381 */
3382 if (fcport->cfg.trunked) {
3383 if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
3384 bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);
3385
3386 fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
3387 fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
3388 for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
3389 fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
3390 fcport->trunk.attr.link_attr[i].fctl =
3391 BFA_TRUNK_LINK_FCTL_NORMAL;
3392 fcport->trunk.attr.link_attr[i].link_state =
3393 BFA_TRUNK_LINK_STATE_DN_LINKDN;
3394 fcport->trunk.attr.link_attr[i].speed =
3395 BFA_PORT_SPEED_UNKNOWN;
3396 fcport->trunk.attr.link_attr[i].deskew = 0;
3397 }
3398 }
3399}
3400
3401
3402
3403/**
3404 * hal_port_public
3405 */
3406
/**
 * Called to initialize port attributes from IOC hardware data: WWNs,
 * maximum frame size, receive BB-credit and supported speed.
 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/**
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* maxfrsize may have been configured already; keep it if so. */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	bfa_assert(fcport->cfg.maxfrsize);
	bfa_assert(fcport->cfg.rx_bbcredit);
	bfa_assert(fcport->speed_sup);
}
3428
/**
 * Firmware message handler. Dispatches fcport I2H messages to the port
 * state machine or to the stats completion paths.
 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	/* Stash the raw message so SM handlers can pull event details. */
	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* Stale responses (msgtag mismatch) are ignored. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		/* NOTE(review): reads the msgtag through the penable_rsp
		 * view of the union — presumably the disable response has
		 * the same msgtag layout; confirm against bfi definitions. */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		bfa_assert(0);
		break;
	}
}
3507
3508
3509
3510/**
3511 * hal_port_api
3512 */
3513
3514/**
3515 * Registered callback for port events.
3516 */
3517void
3518bfa_fcport_event_register(struct bfa_s *bfa,
3519 void (*cbfn) (void *cbarg,
3520 enum bfa_port_linkstate event),
3521 void *cbarg)
3522{
3523 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3524
3525 fcport->event_cbfn = cbfn;
3526 fcport->event_cbarg = cbarg;
3527}
3528
3529bfa_status_t
3530bfa_fcport_enable(struct bfa_s *bfa)
3531{
3532 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3533
3534 if (bfa_ioc_is_disabled(&bfa->ioc))
3535 return BFA_STATUS_IOC_DISABLED;
3536
3537 if (fcport->diag_busy)
3538 return BFA_STATUS_DIAG_BUSY;
3539
3540 bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
3541 return BFA_STATUS_OK;
3542}
3543
/**
 * Disable the physical port. Rejected only while the IOC is disabled.
 */
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3554
3555/**
3556 * Configure port speed.
3557 */
3558bfa_status_t
3559bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3560{
3561 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3562
3563 bfa_trc(bfa, speed);
3564
3565 if (fcport->cfg.trunked == BFA_TRUE)
3566 return BFA_STATUS_TRUNK_ENABLED;
3567 if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
3568 bfa_trc(bfa, fcport->speed_sup);
3569 return BFA_STATUS_UNSUPP_SPEED;
3570 }
3571
3572 fcport->cfg.speed = speed;
3573
3574 return BFA_STATUS_OK;
3575}
3576
3577/**
3578 * Get current speed.
3579 */
3580enum bfa_port_speed
3581bfa_fcport_get_speed(struct bfa_s *bfa)
3582{
3583 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3584
3585 return fcport->speed;
3586}
3587
3588/**
3589 * Configure port topology.
3590 */
3591bfa_status_t
3592bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3593{
3594 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3595
3596 bfa_trc(bfa, topology);
3597 bfa_trc(bfa, fcport->cfg.topology);
3598
3599 switch (topology) {
3600 case BFA_PORT_TOPOLOGY_P2P:
3601 case BFA_PORT_TOPOLOGY_LOOP:
3602 case BFA_PORT_TOPOLOGY_AUTO:
3603 break;
3604
3605 default:
3606 return BFA_STATUS_EINVAL;
3607 }
3608
3609 fcport->cfg.topology = topology;
3610 return BFA_STATUS_OK;
3611}
3612
3613/**
3614 * Get current topology.
3615 */
3616enum bfa_port_topology
3617bfa_fcport_get_topology(struct bfa_s *bfa)
3618{
3619 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3620
3621 return fcport->topology;
3622}
3623
3624bfa_status_t
3625bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3626{
3627 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3628
3629 bfa_trc(bfa, alpa);
3630 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3631 bfa_trc(bfa, fcport->cfg.hardalpa);
3632
3633 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3634 fcport->cfg.hardalpa = alpa;
3635
3636 return BFA_STATUS_OK;
3637}
3638
3639bfa_status_t
3640bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3641{
3642 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3643
3644 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3645 bfa_trc(bfa, fcport->cfg.hardalpa);
3646
3647 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3648 return BFA_STATUS_OK;
3649}
3650
3651bfa_boolean_t
3652bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3653{
3654 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3655
3656 *alpa = fcport->cfg.hardalpa;
3657 return fcport->cfg.cfg_hardalpa;
3658}
3659
3660u8
3661bfa_fcport_get_myalpa(struct bfa_s *bfa)
3662{
3663 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3664
3665 return fcport->myalpa;
3666}
3667
/**
 * Configure the maximum receive frame (PDU) size.
 */
bfa_status_t
bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, fcport->cfg.maxfrsize);

	/* must lie within the valid FC PDU size range */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/* power of 2, if not the max frame size of 2112 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	fcport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}
3687
3688u16
3689bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3690{
3691 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3692
3693 return fcport->cfg.maxfrsize;
3694}
3695
3696u8
3697bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3698{
3699 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3700
3701 return fcport->cfg.rx_bbcredit;
3702}
3703
3704void
3705bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
3706{
3707 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3708
3709 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
3710 bfa_fcport_send_txcredit(fcport);
3711}
3712
3713/**
3714 * Get port attributes.
3715 */
3716
3717wwn_t
3718bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3719{
3720 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3721 if (node)
3722 return fcport->nwwn;
3723 else
3724 return fcport->pwwn;
3725}
3726
/**
 * Populate *attr with a snapshot of the port's configuration and
 * current operational state. The structure is zeroed first.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory WWNs come from the IOC manufacturing block */
	attr->factorypwwn = bfa_ioc_get_mfg_pwwn(&bfa->ioc);
	attr->factorynwwn = bfa_ioc_get_mfg_nwwn(&bfa->ioc);

	bfa_os_memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(fcport->bfa->plog);
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	/* IOC-level conditions override the port state machine's view */
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
		attr->port_state = BFA_PORT_ST_FWMISMATCH;

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3770
3771#define BFA_FCPORT_STATS_TOV 1000
3772
/**
 * Fetch port statistics (FCQoS or FCoE).
 *
 * Asynchronous: the result is delivered through cbfn(cbarg, ...).
 * Only one stats operation may be in flight at a time; returns
 * BFA_STATUS_DEVBUSY if one already is.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* record the request before kicking off the firmware exchange */
	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3798
/**
 * Reset port statistics (FCQoS or FCoE).
 *
 * Asynchronous: completion is reported through cbfn(cbarg, ...).
 * Returns BFA_STATUS_DEVBUSY while another stats operation is active.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
		fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3822
3823/**
3824 * Fetch FCQoS port statistics
3825 */
3826bfa_status_t
3827bfa_fcport_get_qos_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
3828 bfa_cb_port_t cbfn, void *cbarg)
3829{
3830 /* Meaningful only for FC mode */
3831 bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));
3832
3833 return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
3834}
3835
/**
 * Reset FCQoS port statistics.
 */
bfa_status_t
bfa_fcport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	/* Meaningful only for FC mode */
	bfa_assert(bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
}
3847
/**
 * Fetch FCoE port statistics.
 */
bfa_status_t
bfa_fcport_get_fcoe_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
	bfa_cb_port_t cbfn, void *cbarg)
{
	/* Meaningful only for FCoE mode */
	bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));

	return bfa_fcport_get_stats(bfa, stats, cbfn, cbarg);
}
3860
3861/**
3862 * Reset FCoE port statistics
3863 */
3864bfa_status_t
3865bfa_fcport_clear_fcoe_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
3866{
3867 /* Meaningful only for FCoE mode */
3868 bfa_assert(!bfa_ioc_get_fcmode(&bfa->ioc));
3869
3870 return bfa_fcport_clear_stats(bfa, cbfn, cbarg);
3871}
3872
3873void
3874bfa_fcport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
3875{
3876 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3877
3878 qos_attr->state = fcport->qos_attr.state;
3879 qos_attr->total_bb_cr = bfa_os_ntohl(fcport->qos_attr.total_bb_cr);
3880}
3881
3882void
3883bfa_fcport_qos_get_vc_attr(struct bfa_s *bfa,
3884 struct bfa_qos_vc_attr_s *qos_vc_attr)
3885{
3886 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3887 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
3888 u32 i = 0;
3889
3890 qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
3891 qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
3892 qos_vc_attr->elp_opmode_flags =
3893 bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);
3894
3895 /* Individual VC info */
3896 while (i < qos_vc_attr->total_vc_count) {
3897 qos_vc_attr->vc_info[i].vc_credit =
3898 bfa_vc_attr->vc_info[i].vc_credit;
3899 qos_vc_attr->vc_info[i].borrow_credit =
3900 bfa_vc_attr->vc_info[i].borrow_credit;
3901 qos_vc_attr->vc_info[i].priority =
3902 bfa_vc_attr->vc_info[i].priority;
3903 ++i;
3904 }
3905}
3906
/**
 * Check whether the port state machine is in the DISABLED state.
 */
bfa_boolean_t
bfa_fcport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
		BFA_PORT_ST_DISABLED;

}
3919
3920bfa_boolean_t
3921bfa_fcport_is_ratelim(struct bfa_s *bfa)
3922{
3923 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3924
3925 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3926
3927}
3928
3929void
3930bfa_fcport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
3931{
3932 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3933 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
3934
3935 bfa_trc(bfa, on_off);
3936 bfa_trc(bfa, fcport->cfg.qos_enabled);
3937
3938 bfa_trc(bfa, ioc_type);
3939
3940 if (ioc_type == BFA_IOC_TYPE_FC) {
3941 fcport->cfg.qos_enabled = on_off;
3942 /**
3943 * Notify fcpim of the change in QoS state
3944 */
3945 bfa_fcpim_update_ioredirect(bfa);
3946 }
3947}
3948
3949void
3950bfa_fcport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
3951{
3952 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3953
3954 bfa_trc(bfa, on_off);
3955 bfa_trc(bfa, fcport->cfg.ratelimit);
3956
3957 fcport->cfg.ratelimit = on_off;
3958 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
3959 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
3960}
3961
3962/**
3963 * Configure default minimum ratelim speed
3964 */
3965bfa_status_t
3966bfa_fcport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
3967{
3968 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3969
3970 bfa_trc(bfa, speed);
3971
3972 /* Auto and speeds greater than the supported speed, are invalid */
3973 if ((speed == BFA_PORT_SPEED_AUTO) || (speed > fcport->speed_sup)) {
3974 bfa_trc(bfa, fcport->speed_sup);
3975 return BFA_STATUS_UNSUPP_SPEED;
3976 }
3977
3978 fcport->cfg.trl_def_speed = speed;
3979
3980 return BFA_STATUS_OK;
3981}
3982
3983/**
3984 * Get default minimum ratelim speed
3985 */
3986enum bfa_port_speed
3987bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3988{
3989 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3990
3991 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3992 return fcport->cfg.trl_def_speed;
3993
3994}
3995void
3996bfa_fcport_busy(struct bfa_s *bfa, bfa_boolean_t status)
3997{
3998 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3999
4000 bfa_trc(bfa, status);
4001 bfa_trc(bfa, fcport->diag_busy);
4002
4003 fcport->diag_busy = status;
4004}
4005
4006void
4007bfa_fcport_beacon(void *dev, bfa_boolean_t beacon,
4008 bfa_boolean_t link_e2e_beacon)
4009{
4010 struct bfa_s *bfa = dev;
4011 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4012
4013 bfa_trc(bfa, beacon);
4014 bfa_trc(bfa, link_e2e_beacon);
4015 bfa_trc(bfa, fcport->beacon);
4016 bfa_trc(bfa, fcport->link_e2e_beacon);
4017
4018 fcport->beacon = beacon;
4019 fcport->link_e2e_beacon = link_e2e_beacon;
4020}
4021
4022bfa_boolean_t
4023bfa_fcport_is_linkup(struct bfa_s *bfa)
4024{
4025 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4026
4027 return (!fcport->cfg.trunked &&
4028 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
4029 (fcport->cfg.trunked &&
4030 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
4031}
4032
4033bfa_boolean_t
4034bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
4035{
4036 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4037
4038 return fcport->cfg.qos_enabled;
4039}
4040
4041bfa_status_t
4042bfa_trunk_get_attr(struct bfa_s *bfa, struct bfa_trunk_attr_s *attr)
4043
4044{
4045 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4046 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4047
4048 bfa_trc(bfa, fcport->cfg.trunked);
4049 bfa_trc(bfa, trunk->attr.state);
4050 *attr = trunk->attr;
4051 attr->port_id = bfa_lps_get_base_pid(bfa);
4052
4053 return BFA_STATUS_OK;
4054}
4055
4056void
4057bfa_trunk_enable_cfg(struct bfa_s *bfa)
4058{
4059 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
4060 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
4061
4062 bfa_trc(bfa, 1);
4063 trunk->attr.state = BFA_TRUNK_OFFLINE;
4064 fcport->cfg.trunked = BFA_TRUE;
4065}
4066
/**
 * Enable trunking and bounce the port (disable, reconfigure, enable)
 * so the new setting takes effect.
 */
bfa_status_t
bfa_trunk_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	bfa_trc(bfa, 1);

	trunk->attr.state = BFA_TRUNK_OFFLINE;
	/* the config flip must happen while the port is down */
	bfa_fcport_disable(bfa);
	fcport->cfg.trunked = BFA_TRUE;
	bfa_fcport_enable(bfa);

	return BFA_STATUS_OK;
}
4082
/**
 * Disable trunking and bounce the port (disable, reconfigure, enable)
 * so the new setting takes effect.
 */
bfa_status_t
bfa_trunk_disable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	bfa_trc(bfa, 0);
	trunk->attr.state = BFA_TRUNK_DISABLED;
	/* the config flip must happen while the port is down */
	bfa_fcport_disable(bfa);
	fcport->cfg.trunked = BFA_FALSE;
	bfa_fcport_enable(bfa);
	return BFA_STATUS_OK;
}
4096
4097
4098/**
4099 * Rport State machine functions
4100 */
/**
 * Beginning state, only online event expected.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		/* any other event here is a driver logic error */
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4121
/**
 * Created state: the rport exists in the driver but not yet in
 * firmware. ONLINE pushes a create request to firmware.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* queue-full moves us to the _qfull variant to retry */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4153
/**
 * Waiting for rport create response from firmware.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* can't delete until f/w answers; park the request */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		/* likewise for offline */
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4190
/**
 * Request queue is full, awaiting queue resume to send create request.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* create never reached f/w, so free immediately */
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4231
/**
 * Online state - normal parking state.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		/* firmware QoS state-change notification */
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive in network byte order */
		qos_scn->old_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id  =
			bfa_os_ntohl(qos_scn->new_qos_attr.qos_flow_id);

		/* notify upper layers only about the fields that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
						    qos_scn->old_qos_attr,
						    qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
						  qos_scn->old_qos_attr,
						  qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4299
/**
 * Firmware rport is being deleted - awaiting f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade offline into a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4332
/**
 * Delete request could not be queued (reqq full); waiting for queue
 * space to (re)send the firmware delete.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade offline into a full delete */
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4362
/**
 * Offline state.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4397
/**
 * Rport is deleted, waiting for firmware response to delete.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* IOC is gone; f/w response will never come - free now */
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4424
/**
 * Rport delete requested while the request queue was full; waiting
 * for queue space to send the firmware delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4449
/**
 * Waiting for rport create response from firmware. A delete is pending.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now issue the deferred delete */
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4480
/**
 * Waiting for rport create response from firmware. Rport offline is pending.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
				 enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		/* create completed; now tear it down for the offline */
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		/* upgrade pending offline into a pending delete */
		bfa_stats(rp, sm_offp_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4515
/**
 * IOC h/w failed.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		/* stay in this state; just notify the upper layer */
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		/* IOC recovered; re-create the rport in firmware */
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already failed; nothing more to do */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4553
4554
4555
4556/**
4557 * bfa_rport_private BFA rport private functions
4558 */
4559
4560static void
4561__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4562{
4563 struct bfa_rport_s *rp = cbarg;
4564
4565 if (complete)
4566 bfa_cb_rport_online(rp->rport_drv);
4567}
4568
4569static void
4570__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4571{
4572 struct bfa_rport_s *rp = cbarg;
4573
4574 if (complete)
4575 bfa_cb_rport_offline(rp->rport_drv);
4576}
4577
4578static void
4579bfa_rport_qresume(void *cbarg)
4580{
4581 struct bfa_rport_s *rp = cbarg;
4582
4583 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4584}
4585
/**
 * Report per-module memory needs: one bfa_rport_s per configured
 * rport, enforcing the minimum count.
 *
 * NOTE(review): dm_len is deliberately untouched — this module needs
 * no DMA memory; confirm against the meminfo framework contract.
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	*km_len += cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s);
}
4595
/**
 * Module attach: carve the rport array out of the kva region reserved
 * by bfa_rport_meminfo() and initialize every rport.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);

	rp = (struct bfa_rport_s *) bfa_meminfo_kva(meminfo);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the count must be a non-zero power of two */
	bfa_assert(mod->num_rports &&
		   !(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		bfa_os_memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/**
		 *  - is unused
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/**
	 * consume memory
	 */
	bfa_meminfo_kva(meminfo) = (u8 *) rp;
}
4634
static void
bfa_rport_detach(struct bfa_s *bfa)
{
	/* intentionally empty: no per-module cleanup required */
}
4639
static void
bfa_rport_start(struct bfa_s *bfa)
{
	/* intentionally empty: nothing to do at module start */
}
4644
static void
bfa_rport_stop(struct bfa_s *bfa)
{
	/* intentionally empty: nothing to do at module stop */
}
4649
4650static void
4651bfa_rport_iocdisable(struct bfa_s *bfa)
4652{
4653 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4654 struct bfa_rport_s *rport;
4655 struct list_head *qe, *qen;
4656
4657 list_for_each_safe(qe, qen, &mod->rp_active_q) {
4658 rport = (struct bfa_rport_s *) qe;
4659 bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
4660 }
4661}
4662
4663static struct bfa_rport_s *
4664bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4665{
4666 struct bfa_rport_s *rport;
4667
4668 bfa_q_deq(&mod->rp_free_q, &rport);
4669 if (rport)
4670 list_add_tail(&rport->qe, &mod->rp_active_q);
4671
4672 return rport;
4673}
4674
4675static void
4676bfa_rport_free(struct bfa_rport_s *rport)
4677{
4678 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);
4679
4680 bfa_assert(bfa_q_is_on_q(&mod->rp_active_q, rport));
4681 list_del(&rport->qe);
4682 list_add_tail(&rport->qe, &mod->rp_free_q);
4683}
4684
/**
 * Build and queue a firmware rport-create request. Returns BFA_FALSE
 * (and registers a queue-resume wait) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_lpuid(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* frame size goes out in network byte order */
	m->max_frmsz = bfa_os_htons(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_tag = rp->rport_info.lp_tag;
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4717
/**
 * Build and queue a firmware rport-delete request. Returns BFA_FALSE
 * (and registers a queue-resume wait) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4742
/**
 * Build and queue a firmware set-speed request. Unlike create/delete
 * this is best-effort: on a full queue it is simply dropped (no
 * queue-resume wait), and BFA_FALSE is returned.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_lpuid(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT);
	return BFA_TRUE;
}
4768
4769
4770
4771/**
4772 * bfa_rport_public
4773 */
4774
/**
 * Rport interrupt processing: demultiplex firmware i2h messages to
 * the owning rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* capture the firmware-side handle for later requests */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		bfa_assert(msg.create_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		bfa_assert(msg.delete_rsp->status == BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* hand the raw event to the SM for decoding */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		bfa_assert(0);
	}
}
4814
4815
4816
4817/**
4818 * bfa_rport_api
4819 */
4820
/**
 * Allocate and initialize an rport for the given driver-level rport.
 * Returns NULL when the rport pool is exhausted.
 */
struct bfa_rport_s *
bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
{
	struct bfa_rport_s *rp;

	rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));

	if (rp == NULL)
		return NULL;

	rp->bfa = bfa;
	rp->rport_drv = rport_drv;
	bfa_rport_clear_stats(rp);

	/* a freshly allocated rport must be in the uninit state */
	bfa_assert(bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
	bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);

	return rp;
}
4840
/**
 * Request deletion of an rport; the actual teardown is driven by the
 * rport state machine.
 */
void
bfa_rport_delete(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_DELETE);
}
4846
4847void
4848bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
4849{
4850 bfa_assert(rport_info->max_frmsz != 0);
4851
4852 /**
4853 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
4854 * responses. Default to minimum size.
4855 */
4856 if (rport_info->max_frmsz == 0) {
4857 bfa_trc(rport->bfa, rport->rport_tag);
4858 rport_info->max_frmsz = FC_MIN_PDUSZ;
4859 }
4860
4861 bfa_os_assign(rport->rport_info, *rport_info);
4862 bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
4863}
4864
/**
 * Take an rport offline; teardown of the firmware session is handled
 * by the rport state machine.
 */
void
bfa_rport_offline(struct bfa_rport_s *rport)
{
	bfa_sm_send_event(rport, BFA_RPORT_SM_OFFLINE);
}
4870
/**
 * Set the operating speed for an rport and notify firmware via the
 * state machine.
 *
 * @param[in] rport	rport to update
 * @param[in] speed	negotiated speed; must be a concrete value
 *			(auto-negotiation is not valid at this level)
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	bfa_assert(speed != 0);
	bfa_assert(speed != BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4880
/**
 * Return a snapshot (struct copy) of the rport HAL statistics.
 *
 * @param[in]  rport	rport to query
 * @param[out] stats	caller-provided buffer receiving the copy
 */
void
bfa_rport_get_stats(struct bfa_rport_s *rport,
			struct bfa_rport_hal_stats_s *stats)
{
	*stats = rport->stats;
}
4887
4888void
4889bfa_rport_get_qos_attr(struct bfa_rport_s *rport,
4890 struct bfa_rport_qos_attr_s *qos_attr)
4891{
4892 qos_attr->qos_priority = rport->qos_attr.qos_priority;
4893 qos_attr->qos_flow_id = bfa_os_ntohl(rport->qos_attr.qos_flow_id);
4894
4895}
4896
/**
 * Zero the HAL statistics block for this rport.
 */
void
bfa_rport_clear_stats(struct bfa_rport_s *rport)
{
	bfa_os_memset(&rport->stats, 0, sizeof(rport->stats));
}
4902
4903
4904/**
4905 * SGPG related functions
4906 */
4907
4908/**
4909 * Compute and return memory needed by FCP(im) module.
4910 */
4911static void
4912bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
4913 u32 *dm_len)
4914{
4915 if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
4916 cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
4917
4918 *km_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfa_sgpg_s);
4919 *dm_len += (cfg->drvcfg.num_sgpgs + 1) * sizeof(struct bfi_sgpg_s);
4920}
4921
4922
/**
 * Carve the SGPG pools out of the pre-sized meminfo regions and build
 * the free list pairing each host-side SGPG with its DMA page.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *minfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	int i;
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;

	/* View the DMA address both as a raw u64 (for arithmetic) and as
	 * the bfi address union the firmware interface expects. */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->num_sgpgs = cfg->drvcfg.num_sgpgs;
	/* Round the DMA base up to the SGPG alignment boundary and advance
	 * the kva and dma-virt views by the same slack so that all three
	 * cursors (kva, dma virt, dma phys) stay in step. */
	mod->sgpg_arr_pa = bfa_meminfo_dma_phys(minfo);
	align_len = (BFA_SGPG_ROUNDUP(mod->sgpg_arr_pa) - mod->sgpg_arr_pa);
	mod->sgpg_arr_pa += align_len;
	mod->hsgpg_arr = (struct bfa_sgpg_s *) (bfa_meminfo_kva(minfo) +
						align_len);
	mod->sgpg_arr = (struct bfi_sgpg_s *) (bfa_meminfo_dma_virt(minfo) +
						align_len);

	hsgpg = mod->hsgpg_arr;
	sgpg = mod->sgpg_arr;
	sgpg_pa.pa = mod->sgpg_arr_pa;
	mod->free_sgpgs = mod->num_sgpgs;

	/* The aligned base must be a multiple of the SGPG size. */
	bfa_assert(!(sgpg_pa.pa & (sizeof(struct bfi_sgpg_s) - 1)));

	for (i = 0; i < mod->num_sgpgs; i++) {
		bfa_os_memset(hsgpg, 0, sizeof(*hsgpg));
		bfa_os_memset(sgpg, 0, sizeof(*sgpg));

		/* Pair host SGPG with its DMA page (address byte-swapped
		 * via bfa_sgaddr_le) and put it on the free list. */
		hsgpg->sgpg = sgpg;
		sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
		hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
		list_add_tail(&hsgpg->qe, &mod->sgpg_q);

		hsgpg++;
		sgpg++;
		sgpg_pa.pa += sizeof(struct bfi_sgpg_s);
	}

	/* Record how far this module consumed the meminfo regions. */
	bfa_meminfo_kva(minfo) = (u8 *) hsgpg;
	bfa_meminfo_dma_virt(minfo) = (u8 *) sgpg;
	bfa_meminfo_dma_phys(minfo) = sgpg_pa.pa;
}
4977
/* Intentionally empty: required by the BFA_MODULE(sgpg) hook table. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4982
/* Intentionally empty: required by the BFA_MODULE(sgpg) hook table. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4987
/* Intentionally empty: required by the BFA_MODULE(sgpg) hook table. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4992
/* Intentionally empty: required by the BFA_MODULE(sgpg) hook table. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4997
4998
4999
5000/**
5001 * hal_sgpg_public BFA SGPG public functions
5002 */
5003
5004bfa_status_t
5005bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
5006{
5007 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
5008 struct bfa_sgpg_s *hsgpg;
5009 int i;
5010
5011 bfa_trc_fp(bfa, nsgpgs);
5012
5013 if (mod->free_sgpgs < nsgpgs)
5014 return BFA_STATUS_ENOMEM;
5015
5016 for (i = 0; i < nsgpgs; i++) {
5017 bfa_q_deq(&mod->sgpg_q, &hsgpg);
5018 bfa_assert(hsgpg);
5019 list_add_tail(&hsgpg->qe, sgpg_q);
5020 }
5021
5022 mod->free_sgpgs -= nsgpgs;
5023 return BFA_STATUS_OK;
5024}
5025
/**
 * Return nsgpg SG pages from sgpg_q to the module free pool, then use
 * the replenished pool to satisfy queued waiters in FIFO order.
 */
void
bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_wqe_s *wqe;

	bfa_trc_fp(bfa, nsgpg);

	mod->free_sgpgs += nsgpg;
	bfa_assert(mod->free_sgpgs <= mod->num_sgpgs);

	list_splice_tail_init(sgpg_q, &mod->sgpg_q);

	if (list_empty(&mod->sgpg_wait_q))
		return;

	/**
	 * satisfy as many waiting requests as possible
	 */
	do {
		wqe = bfa_q_first(&mod->sgpg_wait_q);
		/* Grant the head waiter either all it still needs or
		 * whatever remains in the pool (a partial grant). */
		if (mod->free_sgpgs < wqe->nsgpg)
			nsgpg = mod->free_sgpgs;
		else
			nsgpg = wqe->nsgpg;
		bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
		wqe->nsgpg -= nsgpg;
		/* Only a fully satisfied waiter is dequeued and called back;
		 * partially granted waiters stay queued for the next free. */
		if (wqe->nsgpg == 0) {
			list_del(&wqe->qe);
			wqe->cbfn(wqe->cbarg);
		}
	} while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
}
5059
/**
 * Queue a wait element for nsgpg SG pages. Any pages currently free are
 * granted immediately (draining the pool); the element then waits on the
 * module wait queue for the remainder, delivered via bfa_sgpg_mfree().
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	/* Callers use the wait path only when the pool cannot satisfy
	 * the request outright. */
	bfa_assert(nsgpg > 0);
	bfa_assert(nsgpg > mod->free_sgpgs);

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/**
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/**
		 * no one else is waiting for SGPG
		 */
		bfa_assert(list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
5085
/**
 * Cancel a pending SGPG wait element, returning any partially granted
 * pages back to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	bfa_assert(bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* nsgpg_total != nsgpg means a partial grant was received. */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				wqe->nsgpg_total - wqe->nsgpg);
}
5098
5099void
5100bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
5101 void *cbarg)
5102{
5103 INIT_LIST_HEAD(&wqe->sgpg_q);
5104 wqe->cbfn = cbfn;
5105 wqe->cbarg = cbarg;
5106}
5107
5108/**
5109 * UF related functions
5110 */
5111/*
5112 *****************************************************************************
5113 * Internal functions
5114 *****************************************************************************
5115 */
/**
 * Completion callback delivering a received UF to the registered
 * handler. When complete is BFA_FALSE (callback canceled) the frame
 * is not delivered.
 */
static void
__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_uf_s *uf = cbarg;
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);

	if (complete)
		ufm->ufrecv(ufm->cbarg, uf);
}
5125
/**
 * Carve DMA-able memory for the array of UF posted buffers, zero it,
 * and advance the meminfo cursors past the claimed region.
 */
static void
claim_uf_pbs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u32 uf_pb_tot_sz;

	ufm->uf_pbs_kva = (struct bfa_uf_buf_s *) bfa_meminfo_dma_virt(mi);
	ufm->uf_pbs_pa = bfa_meminfo_dma_phys(mi);
	/* Round the total buffer size up to the DMA alignment boundary. */
	uf_pb_tot_sz = BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * ufm->num_ufs),
							BFA_DMA_ALIGN_SZ);

	bfa_meminfo_dma_virt(mi) += uf_pb_tot_sz;
	bfa_meminfo_dma_phys(mi) += uf_pb_tot_sz;

	bfa_os_memset((void *)ufm->uf_pbs_kva, 0, uf_pb_tot_sz);
}
5141
5142static void
5143claim_uf_post_msgs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
5144{
5145 struct bfi_uf_buf_post_s *uf_bp_msg;
5146 struct bfi_sge_s *sge;
5147 union bfi_addr_u sga_zero = { {0} };
5148 u16 i;
5149 u16 buf_len;
5150
5151 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_meminfo_kva(mi);
5152 uf_bp_msg = ufm->uf_buf_posts;
5153
5154 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
5155 i++, uf_bp_msg++) {
5156 bfa_os_memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
5157
5158 uf_bp_msg->buf_tag = i;
5159 buf_len = sizeof(struct bfa_uf_buf_s);
5160 uf_bp_msg->buf_len = bfa_os_htons(buf_len);
5161 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
5162 bfa_lpuid(ufm->bfa));
5163
5164 sge = uf_bp_msg->sge;
5165 sge[0].sg_len = buf_len;
5166 sge[0].flags = BFI_SGE_DATA_LAST;
5167 bfa_dma_addr_set(sge[0].sga, ufm_pbs_pa(ufm, i));
5168 bfa_sge_to_be(sge);
5169
5170 sge[1].sg_len = buf_len;
5171 sge[1].flags = BFI_SGE_PGDLEN;
5172 sge[1].sga = sga_zero;
5173 bfa_sge_to_be(&sge[1]);
5174 }
5175
5176 /**
5177 * advance pointer beyond consumed memory
5178 */
5179 bfa_meminfo_kva(mi) = (u8 *) uf_bp_msg;
5180}
5181
/**
 * Claim kernel-virtual memory for the UF descriptor array and link each
 * UF descriptor to its posted buffer (kva + DMA address) on the free
 * queue. Requires claim_uf_pbs() to have run first (uses uf_pbs_kva).
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_meminfo_kva(mi);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		bfa_os_memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = sizeof(struct bfa_uf_buf_s);
		/* Descriptor i maps to posted buffer i. */
		uf->buf_kva = (void *)&ufm->uf_pbs_kva[i];
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/**
	 * advance memory pointer
	 */
	bfa_meminfo_kva(mi) = (u8 *) uf;
}
5211
/**
 * Claim all UF module memory. Order matters: claim_ufs() and
 * claim_uf_post_msgs() both reference the posted-buffer addresses
 * established by claim_uf_pbs().
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm, struct bfa_meminfo_s *mi)
{
	claim_uf_pbs(ufm, mi);
	claim_ufs(ufm, mi);
	claim_uf_post_msgs(ufm, mi);
}
5219
/**
 * Compute memory needed by the UF module.
 *
 * @param[in]     cfg	  IOC configuration (num_uf_bufs)
 * @param[in,out] ndm_len accumulated non-DMA (kernel virtual) requirement
 * @param[in,out] dm_len  accumulated DMA-able requirement
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len, u32 *dm_len)
{
	u32 num_ufs = cfg->fwcfg.num_uf_bufs;

	/*
	 * dma-able memory for UF posted bufs
	 */
	*dm_len += BFA_ROUNDUP((sizeof(struct bfa_uf_buf_s) * num_ufs),
							BFA_DMA_ALIGN_SZ);

	/*
	 * kernel Virtual memory for UFs and UF buf post msg copies
	 */
	*ndm_len += sizeof(struct bfa_uf_s) * num_ufs;
	*ndm_len += sizeof(struct bfi_uf_buf_post_s) * num_ufs;
}
5237
/**
 * Initialize the UF module state and claim its memory from the
 * pre-sized meminfo regions.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	bfa_os_memset(ufm, 0, sizeof(struct bfa_uf_mod_s));
	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);

	uf_mem_claim(ufm, meminfo);
}
5252
/* Intentionally empty: required by the BFA_MODULE(uf) hook table. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5257
/**
 * Dequeue a UF from the free queue; returns NULL when none available.
 */
static struct bfa_uf_s *
bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
{
	struct bfa_uf_s *uf;

	bfa_q_deq(&uf_mod->uf_free_q, &uf);
	return uf;
}
5266
/**
 * Return a UF to the tail of the free queue.
 */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5272
/**
 * Post one UF buffer to firmware by copying the pre-built buf-post
 * message (from claim_uf_post_msgs()) onto the FCXP request queue and
 * moving the UF to the posted queue.
 *
 * @return BFA_STATUS_OK, or BFA_STATUS_FAILED if the request queue
 *	   has no free entry (caller may retry later).
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	bfa_os_memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		      sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5291
5292static void
5293bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5294{
5295 struct bfa_uf_s *uf;
5296
5297 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5298 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5299 break;
5300 }
5301}
5302
/**
 * Handle a frame-received notification from firmware: locate the UF by
 * buffer tag, fix up byte order, log the frame, and hand it to the
 * registered receive handler (directly if FCS context, else deferred
 * via callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_buf_s *uf_buf = &ufm->uf_pbs_kva[uf_tag];
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	u8 *buf = &uf_buf->d[0];
	struct fchs_s *fchs;

	/* Lengths arrive in wire (big-endian) byte order. */
	m->frm_len = bfa_os_ntohs(m->frm_len);
	m->xfr_len = bfa_os_ntohs(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	/* A frame must at least carry a full FC header. */
	bfa_assert(uf->data_len >= sizeof(struct fchs_s));

	/* Log just the header, or header plus the first payload word
	 * when there is payload beyond the FC header. */
	if (uf->data_len == sizeof(struct fchs_s)) {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	/* Deliver inline when FCS is present, otherwise defer. */
	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
5340
/* Intentionally empty: required by the BFA_MODULE(uf) hook table. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5345
/**
 * IOC disable: firmware will not return posted buffers, so reclaim
 * every UF on the posted queue back to the free queue.
 */
static void
bfa_uf_iocdisable(struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_uf_s *uf;
	struct list_head *qe, *qen;

	list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
		uf = (struct bfa_uf_s *) qe;
		list_del(&uf->qe);
		bfa_uf_put(ufm, uf);
	}
}
5359
/**
 * Module start: post all free UF buffers to firmware so unsolicited
 * frames can be received.
 */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5365
5366
5367
5368/**
5369 * hal_uf_api
5370 */
5371
5372/**
5373 * Register handler for all unsolicted recieve frames.
5374 *
5375 * @param[in] bfa BFA instance
5376 * @param[in] ufrecv receive handler function
5377 * @param[in] cbarg receive handler arg
5378 */
5379void
5380bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
5381{
5382 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5383
5384 ufm->ufrecv = ufrecv;
5385 ufm->cbarg = cbarg;
5386}
5387
5388/**
5389 * Free an unsolicited frame back to BFA.
5390 *
5391 * @param[in] uf unsolicited frame to be freed
5392 *
5393 * @return None
5394 */
5395void
5396bfa_uf_free(struct bfa_uf_s *uf)
5397{
5398 bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
5399 bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
5400}
5401
5402
5403
5404/**
5405 * uf_pub BFA uf module public functions
5406 */
/**
 * UF interrupt processing: dispatch UF-class firmware messages.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		/* Unknown message id from firmware - trace and assert. */
		bfa_trc(bfa, msg->mhdr.msg_id);
		bfa_assert(0);
	}
}
5422
5423