blob: 4dcf9b9a34d0656d068c937bb2cefa587a5c8988 [file] [log] [blame]
/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_plog.h"
20#include "bfa_cs.h"
21#include "bfa_modules.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022
23BFA_TRC_FILE(HAL, FCXP);
24BFA_MODULE(fcxp);
25BFA_MODULE(sgpg);
26BFA_MODULE(lps);
27BFA_MODULE(fcport);
28BFA_MODULE(rport);
29BFA_MODULE(uf);
30
Jing Huang5fbe25c2010-10-18 17:17:23 -070031/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070032 * LPS related definitions
33 */
34#define BFA_LPS_MIN_LPORTS (1)
35#define BFA_LPS_MAX_LPORTS (256)
36
37/*
38 * Maximum Vports supported per physical port or vf.
39 */
40#define BFA_LPS_MAX_VPORTS_SUPP_CB 255
41#define BFA_LPS_MAX_VPORTS_SUPP_CT 190
42
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070043
Jing Huang5fbe25c2010-10-18 17:17:23 -070044/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070045 * FC PORT related definitions
46 */
47/*
48 * The port is considered disabled if corresponding physical port or IOC are
49 * disabled explicitly
50 */
51#define BFA_PORT_IS_DISABLED(bfa) \
52 ((bfa_fcport_is_disabled(bfa) == BFA_TRUE) || \
53 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))
54
Jing Huang5fbe25c2010-10-18 17:17:23 -070055/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070056 * BFA port state machine events
57 */
/* Events fed into the FC port (fcport) state machine. */
enum bfa_fcport_sm_event {
	BFA_FCPORT_SM_START	= 1,	/* start port state machine */
	BFA_FCPORT_SM_STOP	= 2,	/* stop port state machine */
	BFA_FCPORT_SM_ENABLE	= 3,	/* enable port */
	BFA_FCPORT_SM_DISABLE	= 4,	/* disable port state machine */
	BFA_FCPORT_SM_FWRSP	= 5,	/* firmware enable/disable rsp */
	BFA_FCPORT_SM_LINKUP	= 6,	/* firmware linkup event */
	BFA_FCPORT_SM_LINKDOWN	= 7,	/* firmware linkup down */
	BFA_FCPORT_SM_QRESUME	= 8,	/* CQ space available */
	BFA_FCPORT_SM_HWFAIL	= 9,	/* IOC h/w failure */
};
69
Jing Huang5fbe25c2010-10-18 17:17:23 -070070/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070071 * BFA port link notification state machine events
72 */
73
/* Events fed into the FC port link-notification state machine. */
enum bfa_fcport_ln_sm_event {
	BFA_FCPORT_LN_SM_LINKUP		= 1,	/* linkup event */
	BFA_FCPORT_LN_SM_LINKDOWN	= 2,	/* linkdown event */
	BFA_FCPORT_LN_SM_NOTIFICATION	= 3	/* done notification */
};
79
Jing Huang5fbe25c2010-10-18 17:17:23 -070080/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070081 * RPORT related definitions
82 */
/*
 * Deliver an rport-offline notification: called directly when running in
 * FCS context (bfa->fcs set), otherwise deferred through the BFA
 * callback queue so it runs in the proper completion context.
 */
#define bfa_rport_offline_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_offline((__rp)->rport_drv);		\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_offline, (__rp));	\
	}								\
} while (0)
91
/*
 * Deliver an rport-online notification: direct call in FCS context,
 * otherwise deferred through the BFA callback queue.
 */
#define bfa_rport_online_cb(__rp) do {					\
	if ((__rp)->bfa->fcs)						\
		bfa_cb_rport_online((__rp)->rport_drv);			\
	else {								\
		bfa_cb_queue((__rp)->bfa, &(__rp)->hcb_qe,		\
				__bfa_cb_rport_online, (__rp));		\
	}								\
} while (0)
100
Jing Huang5fbe25c2010-10-18 17:17:23 -0700101/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700102 * forward declarations FCXP related functions
103 */
104static void __bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete);
105static void hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
106 struct bfi_fcxp_send_rsp_s *fcxp_rsp);
107static void hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen,
108 struct bfa_fcxp_s *fcxp, struct fchs_s *fchs);
109static void bfa_fcxp_qresume(void *cbarg);
110static void bfa_fcxp_queue(struct bfa_fcxp_s *fcxp,
111 struct bfi_fcxp_send_req_s *send_req);
112
Jing Huang5fbe25c2010-10-18 17:17:23 -0700113/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700114 * forward declarations for LPS functions
115 */
Krishna Gudipati45070252011-06-24 20:24:29 -0700116static void bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg,
117 struct bfa_meminfo_s *minfo, struct bfa_s *bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700118static void bfa_lps_attach(struct bfa_s *bfa, void *bfad,
119 struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700120 struct bfa_pcidev_s *pcidev);
121static void bfa_lps_detach(struct bfa_s *bfa);
122static void bfa_lps_start(struct bfa_s *bfa);
123static void bfa_lps_stop(struct bfa_s *bfa);
124static void bfa_lps_iocdisable(struct bfa_s *bfa);
125static void bfa_lps_login_rsp(struct bfa_s *bfa,
126 struct bfi_lps_login_rsp_s *rsp);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700127static void bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700128static void bfa_lps_logout_rsp(struct bfa_s *bfa,
129 struct bfi_lps_logout_rsp_s *rsp);
130static void bfa_lps_reqq_resume(void *lps_arg);
131static void bfa_lps_free(struct bfa_lps_s *lps);
132static void bfa_lps_send_login(struct bfa_lps_s *lps);
133static void bfa_lps_send_logout(struct bfa_lps_s *lps);
Krishna Gudipatib7044952010-12-13 16:17:42 -0800134static void bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700135static void bfa_lps_login_comp(struct bfa_lps_s *lps);
136static void bfa_lps_logout_comp(struct bfa_lps_s *lps);
137static void bfa_lps_cvl_event(struct bfa_lps_s *lps);
138
Jing Huang5fbe25c2010-10-18 17:17:23 -0700139/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700140 * forward declaration for LPS state machine
141 */
142static void bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event);
143static void bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event);
144static void bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event
145 event);
146static void bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event);
Krishna Gudipatib7044952010-12-13 16:17:42 -0800147static void bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps,
148 enum bfa_lps_event event);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700149static void bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event);
150static void bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event
151 event);
152
Jing Huang5fbe25c2010-10-18 17:17:23 -0700153/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700154 * forward declaration for FC Port functions
155 */
156static bfa_boolean_t bfa_fcport_send_enable(struct bfa_fcport_s *fcport);
157static bfa_boolean_t bfa_fcport_send_disable(struct bfa_fcport_s *fcport);
158static void bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport);
159static void bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport);
160static void bfa_fcport_set_wwns(struct bfa_fcport_s *fcport);
161static void __bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete);
162static void bfa_fcport_scn(struct bfa_fcport_s *fcport,
163 enum bfa_port_linkstate event, bfa_boolean_t trunk);
164static void bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln,
165 enum bfa_port_linkstate event);
166static void __bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete);
167static void bfa_fcport_stats_get_timeout(void *cbarg);
168static void bfa_fcport_stats_clr_timeout(void *cbarg);
169static void bfa_trunk_iocdisable(struct bfa_s *bfa);
170
Jing Huang5fbe25c2010-10-18 17:17:23 -0700171/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700172 * forward declaration for FC PORT state machine
173 */
174static void bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
175 enum bfa_fcport_sm_event event);
176static void bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
177 enum bfa_fcport_sm_event event);
178static void bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
179 enum bfa_fcport_sm_event event);
180static void bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
181 enum bfa_fcport_sm_event event);
182static void bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
183 enum bfa_fcport_sm_event event);
184static void bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
185 enum bfa_fcport_sm_event event);
186static void bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
187 enum bfa_fcport_sm_event event);
188static void bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
189 enum bfa_fcport_sm_event event);
190static void bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
191 enum bfa_fcport_sm_event event);
192static void bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
193 enum bfa_fcport_sm_event event);
194static void bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
195 enum bfa_fcport_sm_event event);
196static void bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
197 enum bfa_fcport_sm_event event);
198
199static void bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
200 enum bfa_fcport_ln_sm_event event);
201static void bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
202 enum bfa_fcport_ln_sm_event event);
203static void bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
204 enum bfa_fcport_ln_sm_event event);
205static void bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
206 enum bfa_fcport_ln_sm_event event);
207static void bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
208 enum bfa_fcport_ln_sm_event event);
209static void bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
210 enum bfa_fcport_ln_sm_event event);
211static void bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
212 enum bfa_fcport_ln_sm_event event);
213
/*
 * Map fcport state-machine handler functions to the externally visible
 * bfa_port_state values (consumed via bfa_sm_to_state()).
 */
static struct bfa_sm_table_s hal_port_sm_table[] = {
	{BFA_SM(bfa_fcport_sm_uninit), BFA_PORT_ST_UNINIT},
	{BFA_SM(bfa_fcport_sm_enabling_qwait), BFA_PORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_enabling), BFA_PORT_ST_ENABLING},
	{BFA_SM(bfa_fcport_sm_linkdown), BFA_PORT_ST_LINKDOWN},
	{BFA_SM(bfa_fcport_sm_linkup), BFA_PORT_ST_LINKUP},
	{BFA_SM(bfa_fcport_sm_disabling_qwait), BFA_PORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_toggling_qwait), BFA_PORT_ST_TOGGLING_QWAIT},
	{BFA_SM(bfa_fcport_sm_disabling), BFA_PORT_ST_DISABLING},
	{BFA_SM(bfa_fcport_sm_disabled), BFA_PORT_ST_DISABLED},
	{BFA_SM(bfa_fcport_sm_stopped), BFA_PORT_ST_STOPPED},
	{BFA_SM(bfa_fcport_sm_iocdown), BFA_PORT_ST_IOCDOWN},
	{BFA_SM(bfa_fcport_sm_iocfail), BFA_PORT_ST_IOCDOWN},
};
228
229
Jing Huang5fbe25c2010-10-18 17:17:23 -0700230/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700231 * forward declaration for RPORT related functions
232 */
233static struct bfa_rport_s *bfa_rport_alloc(struct bfa_rport_mod_s *rp_mod);
234static void bfa_rport_free(struct bfa_rport_s *rport);
235static bfa_boolean_t bfa_rport_send_fwcreate(struct bfa_rport_s *rp);
236static bfa_boolean_t bfa_rport_send_fwdelete(struct bfa_rport_s *rp);
237static bfa_boolean_t bfa_rport_send_fwspeed(struct bfa_rport_s *rp);
238static void __bfa_cb_rport_online(void *cbarg,
239 bfa_boolean_t complete);
240static void __bfa_cb_rport_offline(void *cbarg,
241 bfa_boolean_t complete);
242
Jing Huang5fbe25c2010-10-18 17:17:23 -0700243/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700244 * forward declaration for RPORT state machine
245 */
246static void bfa_rport_sm_uninit(struct bfa_rport_s *rp,
247 enum bfa_rport_event event);
248static void bfa_rport_sm_created(struct bfa_rport_s *rp,
249 enum bfa_rport_event event);
250static void bfa_rport_sm_fwcreate(struct bfa_rport_s *rp,
251 enum bfa_rport_event event);
252static void bfa_rport_sm_online(struct bfa_rport_s *rp,
253 enum bfa_rport_event event);
254static void bfa_rport_sm_fwdelete(struct bfa_rport_s *rp,
255 enum bfa_rport_event event);
256static void bfa_rport_sm_offline(struct bfa_rport_s *rp,
257 enum bfa_rport_event event);
258static void bfa_rport_sm_deleting(struct bfa_rport_s *rp,
259 enum bfa_rport_event event);
260static void bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
261 enum bfa_rport_event event);
262static void bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
263 enum bfa_rport_event event);
264static void bfa_rport_sm_iocdisable(struct bfa_rport_s *rp,
265 enum bfa_rport_event event);
266static void bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp,
267 enum bfa_rport_event event);
268static void bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp,
269 enum bfa_rport_event event);
270static void bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp,
271 enum bfa_rport_event event);
272
Jing Huang5fbe25c2010-10-18 17:17:23 -0700273/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700274 * PLOG related definitions
275 */
276static int
277plkd_validate_logrec(struct bfa_plog_rec_s *pl_rec)
278{
279 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
280 (pl_rec->log_type != BFA_PL_LOG_TYPE_STRING))
281 return 1;
282
283 if ((pl_rec->log_type != BFA_PL_LOG_TYPE_INT) &&
284 (pl_rec->log_num_ints > BFA_PL_INT_LOG_SZ))
285 return 1;
286
287 return 0;
288}
289
Maggie Zhangf16a1752010-12-09 19:12:32 -0800290static u64
291bfa_get_log_time(void)
292{
293 u64 system_time = 0;
294 struct timeval tv;
295 do_gettimeofday(&tv);
296
297 /* We are interested in seconds only. */
298 system_time = tv.tv_sec;
299 return system_time;
300}
301
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700302static void
303bfa_plog_add(struct bfa_plog_s *plog, struct bfa_plog_rec_s *pl_rec)
304{
305 u16 tail;
306 struct bfa_plog_rec_s *pl_recp;
307
308 if (plog->plog_enabled == 0)
309 return;
310
311 if (plkd_validate_logrec(pl_rec)) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800312 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700313 return;
314 }
315
316 tail = plog->tail;
317
318 pl_recp = &(plog->plog_recs[tail]);
319
Jing Huang6a18b162010-10-18 17:08:54 -0700320 memcpy(pl_recp, pl_rec, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700321
Maggie Zhangf16a1752010-12-09 19:12:32 -0800322 pl_recp->tv = bfa_get_log_time();
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700323 BFA_PL_LOG_REC_INCR(plog->tail);
324
325 if (plog->head == plog->tail)
326 BFA_PL_LOG_REC_INCR(plog->head);
327}
328
329void
330bfa_plog_init(struct bfa_plog_s *plog)
331{
Jing Huang6a18b162010-10-18 17:08:54 -0700332 memset((char *)plog, 0, sizeof(struct bfa_plog_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700333
Jing Huang6a18b162010-10-18 17:08:54 -0700334 memcpy(plog->plog_sig, BFA_PL_SIG_STR, BFA_PL_SIG_LEN);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700335 plog->head = plog->tail = 0;
336 plog->plog_enabled = 1;
337}
338
339void
340bfa_plog_str(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
341 enum bfa_plog_eid event,
342 u16 misc, char *log_str)
343{
344 struct bfa_plog_rec_s lp;
345
346 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700347 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700348 lp.mid = mid;
349 lp.eid = event;
350 lp.log_type = BFA_PL_LOG_TYPE_STRING;
351 lp.misc = misc;
352 strncpy(lp.log_entry.string_log, log_str,
353 BFA_PL_STRING_LOG_SZ - 1);
354 lp.log_entry.string_log[BFA_PL_STRING_LOG_SZ - 1] = '\0';
355 bfa_plog_add(plog, &lp);
356 }
357}
358
359void
360bfa_plog_intarr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
361 enum bfa_plog_eid event,
362 u16 misc, u32 *intarr, u32 num_ints)
363{
364 struct bfa_plog_rec_s lp;
365 u32 i;
366
367 if (num_ints > BFA_PL_INT_LOG_SZ)
368 num_ints = BFA_PL_INT_LOG_SZ;
369
370 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700371 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700372 lp.mid = mid;
373 lp.eid = event;
374 lp.log_type = BFA_PL_LOG_TYPE_INT;
375 lp.misc = misc;
376
377 for (i = 0; i < num_ints; i++)
Jing Huang6a18b162010-10-18 17:08:54 -0700378 lp.log_entry.int_log[i] = intarr[i];
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700379
380 lp.log_num_ints = (u8) num_ints;
381
382 bfa_plog_add(plog, &lp);
383 }
384}
385
386void
387bfa_plog_fchdr(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
388 enum bfa_plog_eid event,
389 u16 misc, struct fchs_s *fchdr)
390{
391 struct bfa_plog_rec_s lp;
392 u32 *tmp_int = (u32 *) fchdr;
393 u32 ints[BFA_PL_INT_LOG_SZ];
394
395 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700396 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700397
398 ints[0] = tmp_int[0];
399 ints[1] = tmp_int[1];
400 ints[2] = tmp_int[4];
401
402 bfa_plog_intarr(plog, mid, event, misc, ints, 3);
403 }
404}
405
406void
407bfa_plog_fchdr_and_pl(struct bfa_plog_s *plog, enum bfa_plog_mid mid,
408 enum bfa_plog_eid event, u16 misc, struct fchs_s *fchdr,
409 u32 pld_w0)
410{
411 struct bfa_plog_rec_s lp;
412 u32 *tmp_int = (u32 *) fchdr;
413 u32 ints[BFA_PL_INT_LOG_SZ];
414
415 if (plog->plog_enabled) {
Jing Huang6a18b162010-10-18 17:08:54 -0700416 memset(&lp, 0, sizeof(struct bfa_plog_rec_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700417
418 ints[0] = tmp_int[0];
419 ints[1] = tmp_int[1];
420 ints[2] = tmp_int[4];
421 ints[3] = pld_w0;
422
423 bfa_plog_intarr(plog, mid, event, misc, ints, 4);
424 }
425}
426
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700427
Jing Huang5fbe25c2010-10-18 17:17:23 -0700428/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700429 * fcxp_pvt BFA FCXP private functions
430 */
431
432static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700433claim_fcxps_mem(struct bfa_fcxp_mod_s *mod)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700434{
435 u16 i;
436 struct bfa_fcxp_s *fcxp;
437
Krishna Gudipati45070252011-06-24 20:24:29 -0700438 fcxp = (struct bfa_fcxp_s *) bfa_mem_kva_curp(mod);
Jing Huang6a18b162010-10-18 17:08:54 -0700439 memset(fcxp, 0, sizeof(struct bfa_fcxp_s) * mod->num_fcxps);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700440
441 INIT_LIST_HEAD(&mod->fcxp_free_q);
442 INIT_LIST_HEAD(&mod->fcxp_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700443 INIT_LIST_HEAD(&mod->fcxp_unused_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700444
445 mod->fcxp_list = fcxp;
446
447 for (i = 0; i < mod->num_fcxps; i++) {
448 fcxp->fcxp_mod = mod;
449 fcxp->fcxp_tag = i;
450
451 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
452 bfa_reqq_winit(&fcxp->reqq_wqe, bfa_fcxp_qresume, fcxp);
453 fcxp->reqq_waiting = BFA_FALSE;
454
455 fcxp = fcxp + 1;
456 }
457
Krishna Gudipati45070252011-06-24 20:24:29 -0700458 bfa_mem_kva_curp(mod) = (void *)fcxp;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700459}
460
/*
 * Compute the FCXP module's memory requirements and record them in
 * @minfo.  DMA memory holds per-FCXP request/response payload buffers,
 * split across BFI DMA segments; KVA memory holds the bfa_fcxp_s
 * bookkeeping array.  In min-cfg mode both payloads use the small
 * (IBUF) size, otherwise the response buffer uses the large (LBUF)
 * size.
 */
static void
bfa_fcxp_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *fcxp_mod = BFA_FCXP_MOD(bfa);
	struct bfa_mem_kva_s *fcxp_kva = BFA_MEM_FCXP_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_fcxp;
	u16	num_fcxps = cfg->fwcfg.num_fcxp_reqs;
	u32	per_fcxp_sz;

	if (num_fcxps == 0)
		return;

	if (cfg->drvcfg.min_cfg)
		per_fcxp_sz = 2 * BFA_FCXP_MAX_IBUF_SZ;
	else
		per_fcxp_sz = BFA_FCXP_MAX_IBUF_SZ + BFA_FCXP_MAX_LBUF_SZ;

	/* dma memory: distribute the FCXP payloads over DMA segments */
	nsegs = BFI_MEM_DMA_NSEGS(num_fcxps, per_fcxp_sz);
	per_seg_fcxp = BFI_MEM_NREQS_SEG(per_fcxp_sz);

	bfa_mem_dma_seg_iter(fcxp_mod, seg_ptr, nsegs, idx) {
		if (num_fcxps >= per_seg_fcxp) {
			/* full segment */
			num_fcxps -= per_seg_fcxp;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_fcxp * per_fcxp_sz);
		} else
			/* last, partially filled segment */
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_fcxps * per_fcxp_sz);
	}

	/* kva memory for the bfa_fcxp_s array */
	bfa_mem_kva_setup(minfo, fcxp_kva,
		cfg->fwcfg.num_fcxp_reqs * sizeof(struct bfa_fcxp_s));
}
498
499static void
500bfa_fcxp_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -0700501 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700502{
503 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
504
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700505 mod->bfa = bfa;
506 mod->num_fcxps = cfg->fwcfg.num_fcxp_reqs;
507
Jing Huang5fbe25c2010-10-18 17:17:23 -0700508 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700509 * Initialize FCXP request and response payload sizes.
510 */
511 mod->req_pld_sz = mod->rsp_pld_sz = BFA_FCXP_MAX_IBUF_SZ;
512 if (!cfg->drvcfg.min_cfg)
513 mod->rsp_pld_sz = BFA_FCXP_MAX_LBUF_SZ;
514
515 INIT_LIST_HEAD(&mod->wait_q);
516
Krishna Gudipati45070252011-06-24 20:24:29 -0700517 claim_fcxps_mem(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700518}
519
/* No detach-time work for the FCXP module; placeholder for BFA_MODULE. */
static void
bfa_fcxp_detach(struct bfa_s *bfa)
{
}
524
/* No start-time work for the FCXP module; placeholder for BFA_MODULE. */
static void
bfa_fcxp_start(struct bfa_s *bfa)
{
}
529
/* No stop-time work for the FCXP module; placeholder for BFA_MODULE. */
static void
bfa_fcxp_stop(struct bfa_s *bfa)
{
}
534
/*
 * IOC disable/failure handler: recycle unused FCXPs and complete every
 * active FCXP with BFA_STATUS_IOC_FAILURE.  FCXPs issued from FCS
 * context (caller == NULL) are completed synchronously and freed;
 * driver-issued ones are completed through the deferred callback queue.
 */
static void
bfa_fcxp_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s *fcxp;
	struct list_head *qe, *qen;

	/* Enqueue unused fcxp resources to free_q */
	list_splice_tail_init(&mod->fcxp_unused_q, &mod->fcxp_free_q);

	list_for_each_safe(qe, qen, &mod->fcxp_active_q) {
		fcxp = (struct bfa_fcxp_s *) qe;
		if (fcxp->caller == NULL) {
			/* synchronous completion; callback must not sleep */
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					BFA_STATUS_IOC_FAILURE, 0, 0, NULL);
			bfa_fcxp_free(fcxp);
		} else {
			/* defer completion to the hcb queue */
			fcxp->rsp_status = BFA_STATUS_IOC_FAILURE;
			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	}
}
558
559static struct bfa_fcxp_s *
560bfa_fcxp_get(struct bfa_fcxp_mod_s *fm)
561{
562 struct bfa_fcxp_s *fcxp;
563
564 bfa_q_deq(&fm->fcxp_free_q, &fcxp);
565
566 if (fcxp)
567 list_add_tail(&fcxp->qe, &fm->fcxp_active_q);
568
569 return fcxp;
570}
571
572static void
573bfa_fcxp_init_reqrsp(struct bfa_fcxp_s *fcxp,
574 struct bfa_s *bfa,
575 u8 *use_ibuf,
576 u32 *nr_sgles,
577 bfa_fcxp_get_sgaddr_t *r_sga_cbfn,
578 bfa_fcxp_get_sglen_t *r_sglen_cbfn,
579 struct list_head *r_sgpg_q,
580 int n_sgles,
581 bfa_fcxp_get_sgaddr_t sga_cbfn,
582 bfa_fcxp_get_sglen_t sglen_cbfn)
583{
584
Jing Huangd4b671c2010-12-26 21:46:35 -0800585 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700586
587 bfa_trc(bfa, fcxp->fcxp_tag);
588
589 if (n_sgles == 0) {
590 *use_ibuf = 1;
591 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800592 WARN_ON(*sga_cbfn == NULL);
593 WARN_ON(*sglen_cbfn == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700594
595 *use_ibuf = 0;
596 *r_sga_cbfn = sga_cbfn;
597 *r_sglen_cbfn = sglen_cbfn;
598
599 *nr_sgles = n_sgles;
600
601 /*
602 * alloc required sgpgs
603 */
604 if (n_sgles > BFI_SGE_INLINE)
Jing Huangd4b671c2010-12-26 21:46:35 -0800605 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700606 }
607
608}
609
/*
 * Initialize an allocated FCXP for one exchange: remember the caller
 * and configure the request and response scatter-gather setups via
 * bfa_fcxp_init_reqrsp().
 */
static void
bfa_fcxp_init(struct bfa_fcxp_s *fcxp,
	      void *caller, struct bfa_s *bfa, int nreq_sgles,
	      int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
	      bfa_fcxp_get_sglen_t req_sglen_cbfn,
	      bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
	      bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	WARN_ON(bfa == NULL);

	bfa_trc(bfa, fcxp->fcxp_tag);

	fcxp->caller = caller;

	/* request direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_ireqbuf, &fcxp->nreq_sgles, &fcxp->req_sga_cbfn,
		&fcxp->req_sglen_cbfn, &fcxp->req_sgpg_q,
		nreq_sgles, req_sga_cbfn, req_sglen_cbfn);

	/* response direction */
	bfa_fcxp_init_reqrsp(fcxp, bfa,
		&fcxp->use_irspbuf, &fcxp->nrsp_sgles, &fcxp->rsp_sga_cbfn,
		&fcxp->rsp_sglen_cbfn, &fcxp->rsp_sgpg_q,
		nrsp_sgles, rsp_sga_cbfn, rsp_sglen_cbfn);
}
636
637static void
638bfa_fcxp_put(struct bfa_fcxp_s *fcxp)
639{
640 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
641 struct bfa_fcxp_wqe_s *wqe;
642
643 bfa_q_deq(&mod->wait_q, &wqe);
644 if (wqe) {
645 bfa_trc(mod->bfa, fcxp->fcxp_tag);
646
647 bfa_fcxp_init(fcxp, wqe->caller, wqe->bfa, wqe->nreq_sgles,
648 wqe->nrsp_sgles, wqe->req_sga_cbfn,
649 wqe->req_sglen_cbfn, wqe->rsp_sga_cbfn,
650 wqe->rsp_sglen_cbfn);
651
652 wqe->alloc_cbfn(wqe->alloc_cbarg, fcxp);
653 return;
654 }
655
Jing Huangd4b671c2010-12-26 21:46:35 -0800656 WARN_ON(!bfa_q_is_on_q(&mod->fcxp_active_q, fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700657 list_del(&fcxp->qe);
658 list_add_tail(&fcxp->qe, &mod->fcxp_free_q);
659}
660
/*
 * No-op completion handler installed for FCXPs whose real completion
 * has been discarded (e.g. after bfa_fcxp_discard()).
 */
static void
bfa_fcxp_null_comp(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
		   bfa_status_t req_status, u32 rsp_len,
		   u32 resid_len, struct fchs_s *rsp_fchs)
{
	/* discarded fcxp completion */
}
668
669static void
670__bfa_fcxp_send_cbfn(void *cbarg, bfa_boolean_t complete)
671{
672 struct bfa_fcxp_s *fcxp = cbarg;
673
674 if (complete) {
675 fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
676 fcxp->rsp_status, fcxp->rsp_len,
677 fcxp->residue_len, &fcxp->rsp_fchs);
678 } else {
679 bfa_fcxp_free(fcxp);
680 }
681}
682
/*
 * Firmware send-response handler.  Byte-swaps the response fields,
 * locates the FCXP by tag, logs the RX, and completes it: FCS-issued
 * FCXPs (caller == NULL) complete synchronously and are freed;
 * driver-issued ones save the response state and complete through the
 * deferred callback queue.
 */
static void
hal_fcxp_send_comp(struct bfa_s *bfa, struct bfi_fcxp_send_rsp_s *fcxp_rsp)
{
	struct bfa_fcxp_mod_s	*mod = BFA_FCXP_MOD(bfa);
	struct bfa_fcxp_s	*fcxp;
	u16			fcxp_tag = be16_to_cpu(fcxp_rsp->fcxp_tag);

	bfa_trc(bfa, fcxp_tag);

	fcxp_rsp->rsp_len = be32_to_cpu(fcxp_rsp->rsp_len);

	/*
	 * @todo f/w should not set residue to non-0 when everything
	 * is received.
	 */
	if (fcxp_rsp->req_status == BFA_STATUS_OK)
		fcxp_rsp->residue_len = 0;
	else
		fcxp_rsp->residue_len = be32_to_cpu(fcxp_rsp->residue_len);

	fcxp = BFA_FCXP_FROM_TAG(mod, fcxp_tag);

	WARN_ON(fcxp->send_cbfn == NULL);

	hal_fcxp_rx_plog(mod->bfa, fcxp, fcxp_rsp);

	if (fcxp->send_cbfn != NULL) {
		bfa_trc(mod->bfa, (NULL == fcxp->caller));
		if (fcxp->caller == NULL) {
			fcxp->send_cbfn(fcxp->caller, fcxp, fcxp->send_cbarg,
					fcxp_rsp->req_status, fcxp_rsp->rsp_len,
					fcxp_rsp->residue_len, &fcxp_rsp->fchs);
			/*
			 * fcxp automatically freed on return from the callback
			 */
			bfa_fcxp_free(fcxp);
		} else {
			/* save response; completion runs from the cb queue */
			fcxp->rsp_status = fcxp_rsp->req_status;
			fcxp->rsp_len = fcxp_rsp->rsp_len;
			fcxp->residue_len = fcxp_rsp->residue_len;
			fcxp->rsp_fchs = fcxp_rsp->fchs;

			bfa_cb_queue(bfa, &fcxp->hcb_qe,
					__bfa_fcxp_send_cbfn, fcxp);
		}
	} else {
		bfa_trc(bfa, (NULL == fcxp->send_cbfn));
	}
}
732
733static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700734hal_fcxp_tx_plog(struct bfa_s *bfa, u32 reqlen, struct bfa_fcxp_s *fcxp,
735 struct fchs_s *fchs)
736{
737 /*
738 * TODO: TX ox_id
739 */
740 if (reqlen > 0) {
741 if (fcxp->use_ireqbuf) {
742 u32 pld_w0 =
743 *((u32 *) BFA_FCXP_REQ_PLD(fcxp));
744
745 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
746 BFA_PL_EID_TX,
747 reqlen + sizeof(struct fchs_s), fchs,
748 pld_w0);
749 } else {
750 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
751 BFA_PL_EID_TX,
752 reqlen + sizeof(struct fchs_s),
753 fchs);
754 }
755 } else {
756 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_TX,
757 reqlen + sizeof(struct fchs_s), fchs);
758 }
759}
760
761static void
762hal_fcxp_rx_plog(struct bfa_s *bfa, struct bfa_fcxp_s *fcxp,
763 struct bfi_fcxp_send_rsp_s *fcxp_rsp)
764{
765 if (fcxp_rsp->rsp_len > 0) {
766 if (fcxp->use_irspbuf) {
767 u32 pld_w0 =
768 *((u32 *) BFA_FCXP_RSP_PLD(fcxp));
769
770 bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_FCXP,
771 BFA_PL_EID_RX,
772 (u16) fcxp_rsp->rsp_len,
773 &fcxp_rsp->fchs, pld_w0);
774 } else {
775 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP,
776 BFA_PL_EID_RX,
777 (u16) fcxp_rsp->rsp_len,
778 &fcxp_rsp->fchs);
779 }
780 } else {
781 bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_FCXP, BFA_PL_EID_RX,
782 (u16) fcxp_rsp->rsp_len, &fcxp_rsp->fchs);
783 }
784}
785
Jing Huang5fbe25c2010-10-18 17:17:23 -0700786/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700787 * Handler to resume sending fcxp when space in available in cpe queue.
788 */
789static void
790bfa_fcxp_qresume(void *cbarg)
791{
792 struct bfa_fcxp_s *fcxp = cbarg;
793 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
794 struct bfi_fcxp_send_req_s *send_req;
795
796 fcxp->reqq_waiting = BFA_FALSE;
797 send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
798 bfa_fcxp_queue(fcxp, send_req);
799}
800
Jing Huang5fbe25c2010-10-18 17:17:23 -0700801/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700802 * Queue fcxp send request to foimrware.
803 */
804static void
805bfa_fcxp_queue(struct bfa_fcxp_s *fcxp, struct bfi_fcxp_send_req_s *send_req)
806{
807 struct bfa_s *bfa = fcxp->fcxp_mod->bfa;
808 struct bfa_fcxp_req_info_s *reqi = &fcxp->req_info;
809 struct bfa_fcxp_rsp_info_s *rspi = &fcxp->rsp_info;
810 struct bfa_rport_s *rport = reqi->bfa_rport;
811
812 bfi_h2i_set(send_req->mh, BFI_MC_FCXP, BFI_FCXP_H2I_SEND_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700813 bfa_fn_lpu(bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700814
Jing Huangba816ea2010-10-18 17:10:50 -0700815 send_req->fcxp_tag = cpu_to_be16(fcxp->fcxp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700816 if (rport) {
817 send_req->rport_fw_hndl = rport->fw_handle;
Jing Huangba816ea2010-10-18 17:10:50 -0700818 send_req->max_frmsz = cpu_to_be16(rport->rport_info.max_frmsz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700819 if (send_req->max_frmsz == 0)
Jing Huangba816ea2010-10-18 17:10:50 -0700820 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700821 } else {
822 send_req->rport_fw_hndl = 0;
Jing Huangba816ea2010-10-18 17:10:50 -0700823 send_req->max_frmsz = cpu_to_be16(FC_MAX_PDUSZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700824 }
825
Jing Huangba816ea2010-10-18 17:10:50 -0700826 send_req->vf_id = cpu_to_be16(reqi->vf_id);
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700827 send_req->lp_fwtag = bfa_lps_get_fwtag(bfa, reqi->lp_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700828 send_req->class = reqi->class;
829 send_req->rsp_timeout = rspi->rsp_timeout;
830 send_req->cts = reqi->cts;
831 send_req->fchs = reqi->fchs;
832
Jing Huangba816ea2010-10-18 17:10:50 -0700833 send_req->req_len = cpu_to_be32(reqi->req_tot_len);
834 send_req->rsp_maxlen = cpu_to_be32(rspi->rsp_maxlen);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700835
836 /*
837 * setup req sgles
838 */
839 if (fcxp->use_ireqbuf == 1) {
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700840 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700841 BFA_FCXP_REQ_PLD_PA(fcxp));
842 } else {
843 if (fcxp->nreq_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800844 WARN_ON(fcxp->nreq_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700845 bfa_alen_set(&send_req->req_alen, reqi->req_tot_len,
846 fcxp->req_sga_cbfn(fcxp->caller, 0));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700847 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800848 WARN_ON(reqi->req_tot_len != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700849 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700850 }
851 }
852
853 /*
854 * setup rsp sgles
855 */
856 if (fcxp->use_irspbuf == 1) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800857 WARN_ON(rspi->rsp_maxlen > BFA_FCXP_MAX_LBUF_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700858
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700859 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700860 BFA_FCXP_RSP_PLD_PA(fcxp));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700861 } else {
862 if (fcxp->nrsp_sgles > 0) {
Jing Huangd4b671c2010-12-26 21:46:35 -0800863 WARN_ON(fcxp->nrsp_sgles != 1);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700864 bfa_alen_set(&send_req->rsp_alen, rspi->rsp_maxlen,
865 fcxp->rsp_sga_cbfn(fcxp->caller, 0));
866
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700867 } else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800868 WARN_ON(rspi->rsp_maxlen != 0);
Krishna Gudipati85ce9282011-06-13 15:39:36 -0700869 bfa_alen_set(&send_req->rsp_alen, 0, 0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700870 }
871 }
872
873 hal_fcxp_tx_plog(bfa, reqi->req_tot_len, fcxp, &reqi->fchs);
874
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700875 bfa_reqq_produce(bfa, BFA_REQQ_FCXP, send_req->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700876
877 bfa_trc(bfa, bfa_reqq_pi(bfa, BFA_REQQ_FCXP));
878 bfa_trc(bfa, bfa_reqq_ci(bfa, BFA_REQQ_FCXP));
879}
880
Jing Huang5fbe25c2010-10-18 17:17:23 -0700881/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700882 * Allocate an FCXP instance to send a response or to send a request
883 * that has a response. Request/response buffers are allocated by caller.
884 *
885 * @param[in] bfa BFA bfa instance
886 * @param[in] nreq_sgles Number of SG elements required for request
887 * buffer. 0, if fcxp internal buffers are used.
888 * Use bfa_fcxp_get_reqbuf() to get the
889 * internal req buffer.
890 * @param[in] req_sgles SG elements describing request buffer. Will be
891 * copied in by BFA and hence can be freed on
892 * return from this function.
893 * @param[in] get_req_sga function ptr to be called to get a request SG
894 * Address (given the sge index).
895 * @param[in] get_req_sglen function ptr to be called to get a request SG
896 * len (given the sge index).
897 * @param[in] get_rsp_sga function ptr to be called to get a response SG
898 * Address (given the sge index).
899 * @param[in] get_rsp_sglen function ptr to be called to get a response SG
900 * len (given the sge index).
901 *
902 * @return FCXP instance. NULL on failure.
903 */
904struct bfa_fcxp_s *
905bfa_fcxp_alloc(void *caller, struct bfa_s *bfa, int nreq_sgles,
906 int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
907 bfa_fcxp_get_sglen_t req_sglen_cbfn,
908 bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
909 bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
910{
911 struct bfa_fcxp_s *fcxp = NULL;
912
Jing Huangd4b671c2010-12-26 21:46:35 -0800913 WARN_ON(bfa == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700914
915 fcxp = bfa_fcxp_get(BFA_FCXP_MOD(bfa));
916 if (fcxp == NULL)
917 return NULL;
918
919 bfa_trc(bfa, fcxp->fcxp_tag);
920
921 bfa_fcxp_init(fcxp, caller, bfa, nreq_sgles, nrsp_sgles, req_sga_cbfn,
922 req_sglen_cbfn, rsp_sga_cbfn, rsp_sglen_cbfn);
923
924 return fcxp;
925}
926
Jing Huang5fbe25c2010-10-18 17:17:23 -0700927/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700928 * Get the internal request buffer pointer
929 *
930 * @param[in] fcxp BFA fcxp pointer
931 *
932 * @return pointer to the internal request buffer
933 */
934void *
935bfa_fcxp_get_reqbuf(struct bfa_fcxp_s *fcxp)
936{
937 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
938 void *reqbuf;
939
Jing Huangd4b671c2010-12-26 21:46:35 -0800940 WARN_ON(fcxp->use_ireqbuf != 1);
Krishna Gudipati45070252011-06-24 20:24:29 -0700941 reqbuf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
942 mod->req_pld_sz + mod->rsp_pld_sz);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700943 return reqbuf;
944}
945
946u32
947bfa_fcxp_get_reqbufsz(struct bfa_fcxp_s *fcxp)
948{
949 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
950
951 return mod->req_pld_sz;
952}
953
Jing Huang5fbe25c2010-10-18 17:17:23 -0700954/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700955 * Get the internal response buffer pointer
956 *
957 * @param[in] fcxp BFA fcxp pointer
958 *
959 * @return pointer to the internal request buffer
960 */
961void *
962bfa_fcxp_get_rspbuf(struct bfa_fcxp_s *fcxp)
963{
964 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
Krishna Gudipati45070252011-06-24 20:24:29 -0700965 void *fcxp_buf;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700966
Jing Huangd4b671c2010-12-26 21:46:35 -0800967 WARN_ON(fcxp->use_irspbuf != 1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700968
Krishna Gudipati45070252011-06-24 20:24:29 -0700969 fcxp_buf = bfa_mem_get_dmabuf_kva(mod, fcxp->fcxp_tag,
970 mod->req_pld_sz + mod->rsp_pld_sz);
971
972 /* fcxp_buf = req_buf + rsp_buf :- add req_buf_sz to get to rsp_buf */
973 return ((u8 *) fcxp_buf) + mod->req_pld_sz;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700974}
975
Jing Huang5fbe25c2010-10-18 17:17:23 -0700976/*
Maggie Zhangda99dcc2010-12-09 19:13:20 -0800977 * Free the BFA FCXP
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700978 *
979 * @param[in] fcxp BFA fcxp pointer
980 *
981 * @return void
982 */
983void
984bfa_fcxp_free(struct bfa_fcxp_s *fcxp)
985{
986 struct bfa_fcxp_mod_s *mod = fcxp->fcxp_mod;
987
Jing Huangd4b671c2010-12-26 21:46:35 -0800988 WARN_ON(fcxp == NULL);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700989 bfa_trc(mod->bfa, fcxp->fcxp_tag);
990 bfa_fcxp_put(fcxp);
991}
992
/*
 * Send a FCXP request
 *
 * @param[in]	fcxp	BFA fcxp pointer
 * @param[in]	rport	BFA rport pointer. Could be left NULL for WKA rports
 * @param[in]	vf_id	virtual Fabric ID
 * @param[in]	lp_tag	lport tag
 * @param[in]	cts	use Continuous sequence
 * @param[in]	cos	fc Class of Service
 * @param[in]	reqlen	request length, does not include FCHS length
 * @param[in]	fchs	fc Header Pointer. The header content will be copied
 *			in by BFA.
 *
 * @param[in]	cbfn	call back function to be called on receiving
 *								the response
 * @param[in]	cbarg	arg for cbfn
 * @param[in]	rsp_maxlen	response timeout
 * @param[in]	rsp_timeout	response timeout
 *
 * @return		bfa_status_t
 */
void
bfa_fcxp_send(struct bfa_fcxp_s *fcxp, struct bfa_rport_s *rport,
	      u16 vf_id, u8 lp_tag, bfa_boolean_t cts, enum fc_cos cos,
	      u32 reqlen, struct fchs_s *fchs, bfa_cb_fcxp_send_t cbfn,
	      void *cbarg, u32 rsp_maxlen, u8 rsp_timeout)
{
	struct bfa_s			*bfa  = fcxp->fcxp_mod->bfa;
	struct bfa_fcxp_req_info_s	*reqi = &fcxp->req_info;
	struct bfa_fcxp_rsp_info_s	*rspi = &fcxp->rsp_info;
	struct bfi_fcxp_send_req_s	*send_req;

	bfa_trc(bfa, fcxp->fcxp_tag);

	/*
	 * setup request/response info: stash everything on the fcxp so the
	 * request can also be (re)built later from the reqq-wait path.
	 */
	reqi->bfa_rport = rport;
	reqi->vf_id = vf_id;
	reqi->lp_tag = lp_tag;
	reqi->class = cos;
	rspi->rsp_timeout = rsp_timeout;
	reqi->cts = cts;
	reqi->fchs = *fchs;
	reqi->req_tot_len = reqlen;
	rspi->rsp_maxlen = rsp_maxlen;
	/* a NULL cbfn is replaced by a no-op completion handler */
	fcxp->send_cbfn = cbfn ? cbfn : bfa_fcxp_null_comp;
	fcxp->send_cbarg = cbarg;

	/*
	 * If no room in CPE queue, wait for space in request queue
	 * (bfa_fcxp_queue() is then invoked on reqq-resume).
	 */
	send_req = bfa_reqq_next(bfa, BFA_REQQ_FCXP);
	if (!send_req) {
		bfa_trc(bfa, fcxp->fcxp_tag);
		fcxp->reqq_waiting = BFA_TRUE;
		bfa_reqq_wait(bfa, BFA_REQQ_FCXP, &fcxp->reqq_wqe);
		return;
	}

	bfa_fcxp_queue(fcxp, send_req);
}
1055
/*
 * Abort a BFA FCXP
 *
 * Not implemented: no caller is expected to issue an exchange abort,
 * so the WARN_ON flags any unexpected use.  Always reports success.
 *
 * @param[in]	fcxp	BFA fcxp pointer
 *
 * @return		BFA_STATUS_OK (unconditionally)
 */
bfa_status_t
bfa_fcxp_abort(struct bfa_fcxp_s *fcxp)
{
	bfa_trc(fcxp->fcxp_mod->bfa, fcxp->fcxp_tag);
	WARN_ON(1);
	return BFA_STATUS_OK;
}
1070
/*
 * Queue a wait element so the caller is called back (alloc_cbfn) when an
 * fcxp becomes free.  Only legal when the free pool is actually empty;
 * the alloc parameters are stashed on the wqe for the deferred alloc.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	wqe		caller-owned wait element, valid until
 *				callback or bfa_fcxp_walloc_cancel()
 * @param[in]	alloc_cbfn	called with a free fcxp
 * @param[in]	alloc_cbarg	arg for alloc_cbfn
 *				(remaining params as in bfa_fcxp_alloc())
 */
void
bfa_fcxp_alloc_wait(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe,
		    bfa_fcxp_alloc_cbfn_t alloc_cbfn, void *alloc_cbarg,
		    void *caller, int nreq_sgles,
		    int nrsp_sgles, bfa_fcxp_get_sgaddr_t req_sga_cbfn,
		    bfa_fcxp_get_sglen_t req_sglen_cbfn,
		    bfa_fcxp_get_sgaddr_t rsp_sga_cbfn,
		    bfa_fcxp_get_sglen_t rsp_sglen_cbfn)
{
	struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);

	/* waiting while fcxps are free would be a caller bug */
	WARN_ON(!list_empty(&mod->fcxp_free_q));

	wqe->alloc_cbfn = alloc_cbfn;
	wqe->alloc_cbarg = alloc_cbarg;
	wqe->caller = caller;
	wqe->bfa = bfa;
	wqe->nreq_sgles = nreq_sgles;
	wqe->nrsp_sgles = nrsp_sgles;
	wqe->req_sga_cbfn = req_sga_cbfn;
	wqe->req_sglen_cbfn = req_sglen_cbfn;
	wqe->rsp_sga_cbfn = rsp_sga_cbfn;
	wqe->rsp_sglen_cbfn = rsp_sglen_cbfn;

	list_add_tail(&wqe->qe, &mod->wait_q);
}
1097
1098void
1099bfa_fcxp_walloc_cancel(struct bfa_s *bfa, struct bfa_fcxp_wqe_s *wqe)
1100{
1101 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1102
Jing Huangd4b671c2010-12-26 21:46:35 -08001103 WARN_ON(!bfa_q_is_on_q(&mod->wait_q, wqe));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001104 list_del(&wqe->qe);
1105}
1106
1107void
1108bfa_fcxp_discard(struct bfa_fcxp_s *fcxp)
1109{
Jing Huang5fbe25c2010-10-18 17:17:23 -07001110 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001111 * If waiting for room in request queue, cancel reqq wait
1112 * and free fcxp.
1113 */
1114 if (fcxp->reqq_waiting) {
1115 fcxp->reqq_waiting = BFA_FALSE;
1116 bfa_reqq_wcancel(&fcxp->reqq_wqe);
1117 bfa_fcxp_free(fcxp);
1118 return;
1119 }
1120
1121 fcxp->send_cbfn = bfa_fcxp_null_comp;
1122}
1123
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001124void
1125bfa_fcxp_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
1126{
1127 switch (msg->mhdr.msg_id) {
1128 case BFI_FCXP_I2H_SEND_RSP:
1129 hal_fcxp_send_comp(bfa, (struct bfi_fcxp_send_rsp_s *) msg);
1130 break;
1131
1132 default:
1133 bfa_trc(bfa, msg->mhdr.msg_id);
Jing Huangd4b671c2010-12-26 21:46:35 -08001134 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001135 }
1136}
1137
1138u32
1139bfa_fcxp_get_maxrsp(struct bfa_s *bfa)
1140{
1141 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1142
1143 return mod->rsp_pld_sz;
1144}
1145
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001146void
1147bfa_fcxp_res_recfg(struct bfa_s *bfa, u16 num_fcxp_fw)
1148{
1149 struct bfa_fcxp_mod_s *mod = BFA_FCXP_MOD(bfa);
1150 struct list_head *qe;
1151 int i;
1152
1153 for (i = 0; i < (mod->num_fcxps - num_fcxp_fw); i++) {
1154 bfa_q_deq_tail(&mod->fcxp_free_q, &qe);
1155 list_add_tail(qe, &mod->fcxp_unused_q);
1156 }
1157}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001158
Jing Huang5fbe25c2010-10-18 17:17:23 -07001159/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001160 * BFA LPS state machine functions
1161 */
1162
/*
 * Init state -- no login outstanding.  LOGIN starts a FLOGI/FDISC
 * (possibly waiting for request-queue space); other events are either
 * completed immediately or ignored.
 */
static void
bfa_lps_sm_init(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGIN:
		/* reqq full: park on the wait queue and send on resume */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_loginwait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_login);
			bfa_lps_send_login(lps);
		}

		if (lps->fdisc)
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FDISC Request");
		else
			bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
				BFA_PL_EID_LOGIN, 0, "FLOGI Request");
		break;

	case BFA_LPS_SM_LOGOUT:
		/* nothing logged in - complete the logout right away */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_DELETE:
		bfa_lps_free(lps);
		break;

	case BFA_LPS_SM_RX_CVL:
	case BFA_LPS_SM_OFFLINE:
		break;

	case BFA_LPS_SM_FWRSP:
		/*
		 * Could happen when fabric detects loopback and discards
		 * the lps request. Fw will eventually sent out the timeout
		 * Just ignore
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1214
/*
 * login is in progress -- awaiting response from firmware
 */
static void
bfa_lps_sm_login(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		/* lps->status was filled in by bfa_lps_login_rsp() */
		if (lps->status == BFA_STATUS_OK) {
			bfa_sm_set_state(lps, bfa_lps_sm_online);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FDISC Accept");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0, "FLOGI Accept");
			/* If N2N, send the assigned PID to FW */
			bfa_trc(lps->bfa, lps->fport);
			bfa_trc(lps->bfa, lps->lp_pid);

			if (!lps->fport && lps->lp_pid)
				bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
		} else {
			/* login rejected or timed out - back to init */
			bfa_sm_set_state(lps, bfa_lps_sm_init);
			if (lps->fdisc)
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FDISC Fail (RJT or timeout)");
			else
				bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
					BFA_PL_EID_LOGIN, 0,
					"FLOGI Fail (RJT or timeout)");
		}
		/* notify the caller (fcs or deferred callback) */
		bfa_lps_login_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* can only be acted upon once online; just trace it */
		bfa_trc(lps->bfa, lps->fport);
		bfa_trc(lps->bfa, lps->lp_pid);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1268
/*
 * login pending - awaiting space in request queue
 */
static void
bfa_lps_sm_loginwait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space available; bfa_lps_send_login() follows via
		 * the login state's entry path in the resume handler */
		bfa_sm_set_state(lps, bfa_lps_sm_login);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		/* drop the queued wait element before leaving */
		bfa_reqq_wcancel(&lps->wqe);
		break;

	case BFA_LPS_SM_RX_CVL:
		/*
		 * Login was not even sent out; so when getting out
		 * of this state, it will appear like a login retry
		 * after Clear virtual link
		 */
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1301
/*
 * login complete -- port is online.  Handles logout, clear-virtual-link
 * and N2N PID assignment requests.
 */
static void
bfa_lps_sm_online(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_LOGOUT:
		/* send LOGO now, or wait for request-queue space */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_logowait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else {
			bfa_sm_set_state(lps, bfa_lps_sm_logout);
			bfa_lps_send_logout(lps);
		}
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_SET_N2N_PID:
		/* push the locally assigned N2N PID down to firmware */
		if (bfa_reqq_full(lps->bfa, lps->reqq)) {
			bfa_sm_set_state(lps, bfa_lps_sm_online_n2n_pid_wait);
			bfa_reqq_wait(lps->bfa, lps->reqq, &lps->wqe);
		} else
			bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1350
/*
 * online, but waiting for request-queue space to send the assigned
 * N2N PID to firmware.  (Header previously said "login complete" --
 * copy-paste from bfa_lps_sm_online.)
 */
static void
bfa_lps_sm_online_n2n_pid_wait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		bfa_sm_set_state(lps, bfa_lps_sm_online);
		bfa_lps_send_set_n2n_pid(lps);
		break;

	case BFA_LPS_SM_LOGOUT:
		/* wqe is already queued; logout is sent on resume */
		bfa_sm_set_state(lps, bfa_lps_sm_logowait);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_LOGO, 0, "Logout");
		break;

	case BFA_LPS_SM_RX_CVL:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);

		/* Let the vport module know about this event */
		bfa_lps_cvl_event(lps);
		bfa_plog_str(lps->bfa->plog, BFA_PL_MID_LPS,
			BFA_PL_EID_FIP_FCF_CVL, 0, "FCF Clear Virt. Link Rx");
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1392
/*
 * logout in progress - awaiting firmware response
 */
static void
bfa_lps_sm_logout(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_FWRSP:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		/* report logout completion to the caller */
		bfa_lps_logout_comp(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		/* silently drop the outstanding logout */
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1417
/*
 * logout pending -- awaiting space in request queue
 */
static void
bfa_lps_sm_logowait(struct bfa_lps_s *lps, enum bfa_lps_event event)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, event);

	switch (event) {
	case BFA_LPS_SM_RESUME:
		/* reqq space available - send the LOGO now */
		bfa_sm_set_state(lps, bfa_lps_sm_logout);
		bfa_lps_send_logout(lps);
		break;

	case BFA_LPS_SM_OFFLINE:
	case BFA_LPS_SM_DELETE:
		bfa_sm_set_state(lps, bfa_lps_sm_init);
		/* drop the queued wait element before leaving */
		bfa_reqq_wcancel(&lps->wqe);
		break;

	default:
		bfa_sm_fault(lps->bfa, event);
	}
}
1443
1444
1445
Jing Huang5fbe25c2010-10-18 17:17:23 -07001446/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001447 * lps_pvt BFA LPS private functions
1448 */
1449
Jing Huang5fbe25c2010-10-18 17:17:23 -07001450/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001451 * return memory requirement
1452 */
1453static void
Krishna Gudipati45070252011-06-24 20:24:29 -07001454bfa_lps_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
1455 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001456{
Krishna Gudipati45070252011-06-24 20:24:29 -07001457 struct bfa_mem_kva_s *lps_kva = BFA_MEM_LPS_KVA(bfa);
1458
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001459 if (cfg->drvcfg.min_cfg)
Krishna Gudipati45070252011-06-24 20:24:29 -07001460 bfa_mem_kva_setup(minfo, lps_kva,
1461 sizeof(struct bfa_lps_s) * BFA_LPS_MIN_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001462 else
Krishna Gudipati45070252011-06-24 20:24:29 -07001463 bfa_mem_kva_setup(minfo, lps_kva,
1464 sizeof(struct bfa_lps_s) * BFA_LPS_MAX_LPORTS);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001465}
1466
Jing Huang5fbe25c2010-10-18 17:17:23 -07001467/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001468 * bfa module attach at initialization time
1469 */
1470static void
1471bfa_lps_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
Krishna Gudipati45070252011-06-24 20:24:29 -07001472 struct bfa_pcidev_s *pcidev)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001473{
1474 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1475 struct bfa_lps_s *lps;
1476 int i;
1477
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001478 mod->num_lps = BFA_LPS_MAX_LPORTS;
1479 if (cfg->drvcfg.min_cfg)
1480 mod->num_lps = BFA_LPS_MIN_LPORTS;
1481 else
1482 mod->num_lps = BFA_LPS_MAX_LPORTS;
Krishna Gudipati45070252011-06-24 20:24:29 -07001483 mod->lps_arr = lps = (struct bfa_lps_s *) bfa_mem_kva_curp(mod);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001484
Krishna Gudipati45070252011-06-24 20:24:29 -07001485 bfa_mem_kva_curp(mod) += mod->num_lps * sizeof(struct bfa_lps_s);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001486
1487 INIT_LIST_HEAD(&mod->lps_free_q);
1488 INIT_LIST_HEAD(&mod->lps_active_q);
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001489 INIT_LIST_HEAD(&mod->lps_login_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001490
1491 for (i = 0; i < mod->num_lps; i++, lps++) {
1492 lps->bfa = bfa;
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001493 lps->bfa_tag = (u8) i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001494 lps->reqq = BFA_REQQ_LPS;
1495 bfa_reqq_winit(&lps->wqe, bfa_lps_reqq_resume, lps);
1496 list_add_tail(&lps->qe, &mod->lps_free_q);
1497 }
1498}
1499
/* BFA module hook: nothing to tear down for the LPS module. */
static void
bfa_lps_detach(struct bfa_s *bfa)
{
}
1504
/* BFA module hook: LPS needs no work at start. */
static void
bfa_lps_start(struct bfa_s *bfa)
{
}
1509
/* BFA module hook: LPS needs no work at stop. */
static void
bfa_lps_stop(struct bfa_s *bfa)
{
}
1514
/*
 * IOC in disabled state -- consider all lps offline
 */
static void
bfa_lps_iocdisable(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s	*lps;
	struct list_head	*qe, *qen;

	/* offline every active lport ... */
	list_for_each_safe(qe, qen, &mod->lps_active_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* ... and every lport with a login outstanding */
	list_for_each_safe(qe, qen, &mod->lps_login_q) {
		lps = (struct bfa_lps_s *) qe;
		bfa_sm_send_event(lps, BFA_LPS_SM_OFFLINE);
	}
	/* no firmware responses will arrive; merge login_q into active_q */
	list_splice_tail_init(&mod->lps_login_q, &mod->lps_active_q);
}
1535
Jing Huang5fbe25c2010-10-18 17:17:23 -07001536/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001537 * Firmware login response
1538 */
1539static void
1540bfa_lps_login_rsp(struct bfa_s *bfa, struct bfi_lps_login_rsp_s *rsp)
1541{
1542 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1543 struct bfa_lps_s *lps;
1544
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001545 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1546 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001547
1548 lps->status = rsp->status;
1549 switch (rsp->status) {
1550 case BFA_STATUS_OK:
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001551 lps->fw_tag = rsp->fw_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001552 lps->fport = rsp->f_port;
Krishna Gudipatib7044952010-12-13 16:17:42 -08001553 if (lps->fport)
1554 lps->lp_pid = rsp->lp_pid;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001555 lps->npiv_en = rsp->npiv_en;
Jing Huangba816ea2010-10-18 17:10:50 -07001556 lps->pr_bbcred = be16_to_cpu(rsp->bb_credit);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001557 lps->pr_pwwn = rsp->port_name;
1558 lps->pr_nwwn = rsp->node_name;
1559 lps->auth_req = rsp->auth_req;
1560 lps->lp_mac = rsp->lp_mac;
1561 lps->brcd_switch = rsp->brcd_switch;
1562 lps->fcf_mac = rsp->fcf_mac;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001563 lps->pr_bbscn = rsp->bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001564
1565 break;
1566
1567 case BFA_STATUS_FABRIC_RJT:
1568 lps->lsrjt_rsn = rsp->lsrjt_rsn;
1569 lps->lsrjt_expl = rsp->lsrjt_expl;
1570
1571 break;
1572
1573 case BFA_STATUS_EPROTOCOL:
1574 lps->ext_status = rsp->ext_status;
1575
1576 break;
1577
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001578 case BFA_STATUS_VPORT_MAX:
1579 if (!rsp->ext_status)
1580 bfa_lps_no_res(lps, rsp->ext_status);
1581 break;
1582
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001583 default:
1584 /* Nothing to do with other status */
1585 break;
1586 }
1587
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001588 list_del(&lps->qe);
1589 list_add_tail(&lps->qe, &mod->lps_active_q);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001590 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1591}
1592
/*
 * Fail the next 'count' lps entries queued after first_lps on the login
 * queue: propagate first_lps's status to each, move it to the active
 * queue and deliver FWRSP.  Next pointers are saved before each move
 * because the walk mutates the list.
 */
static void
bfa_lps_no_res(struct bfa_lps_s *first_lps, u8 count)
{
	struct bfa_s		*bfa = first_lps->bfa;
	struct bfa_lps_mod_s	*mod = BFA_LPS_MOD(bfa);
	struct list_head	*qe, *qe_next;
	struct bfa_lps_s	*lps;

	bfa_trc(bfa, count);

	qe = bfa_q_next(first_lps);

	while (count && qe) {
		/* save next before this entry is moved off the list */
		qe_next = bfa_q_next(qe);
		lps = (struct bfa_lps_s *)qe;
		bfa_trc(bfa, lps->bfa_tag);
		lps->status = first_lps->status;
		list_del(&lps->qe);
		list_add_tail(&lps->qe, &mod->lps_active_q);
		bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
		qe = qe_next;
		count--;
	}
}
1617
Jing Huang5fbe25c2010-10-18 17:17:23 -07001618/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001619 * Firmware logout response
1620 */
1621static void
1622bfa_lps_logout_rsp(struct bfa_s *bfa, struct bfi_lps_logout_rsp_s *rsp)
1623{
1624 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1625 struct bfa_lps_s *lps;
1626
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001627 WARN_ON(rsp->bfa_tag >= mod->num_lps);
1628 lps = BFA_LPS_FROM_TAG(mod, rsp->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001629
1630 bfa_sm_send_event(lps, BFA_LPS_SM_FWRSP);
1631}
1632
Jing Huang5fbe25c2010-10-18 17:17:23 -07001633/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001634 * Firmware received a Clear virtual link request (for FCoE)
1635 */
1636static void
1637bfa_lps_rx_cvl_event(struct bfa_s *bfa, struct bfi_lps_cvl_event_s *cvl)
1638{
1639 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1640 struct bfa_lps_s *lps;
1641
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001642 lps = BFA_LPS_FROM_TAG(mod, cvl->bfa_tag);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001643
1644 bfa_sm_send_event(lps, BFA_LPS_SM_RX_CVL);
1645}
1646
Jing Huang5fbe25c2010-10-18 17:17:23 -07001647/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001648 * Space is available in request queue, resume queueing request to firmware.
1649 */
1650static void
1651bfa_lps_reqq_resume(void *lps_arg)
1652{
1653 struct bfa_lps_s *lps = lps_arg;
1654
1655 bfa_sm_send_event(lps, BFA_LPS_SM_RESUME);
1656}
1657
Jing Huang5fbe25c2010-10-18 17:17:23 -07001658/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001659 * lps is freed -- triggered by vport delete
1660 */
1661static void
1662bfa_lps_free(struct bfa_lps_s *lps)
1663{
1664 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(lps->bfa);
1665
1666 lps->lp_pid = 0;
1667 list_del(&lps->qe);
1668 list_add_tail(&lps->qe, &mod->lps_free_q);
1669}
1670
/*
 * send login request to firmware: build the h2i login message, hand it
 * to the request queue and move the lps onto the login queue (where it
 * stays until bfa_lps_login_rsp()).  Caller must have verified reqq
 * space (WARN_ON otherwise).
 */
static void
bfa_lps_send_login(struct bfa_lps_s *lps)
{
	struct bfa_lps_mod_s		*mod = BFA_LPS_MOD(lps->bfa);
	struct bfi_lps_login_req_s	*m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGIN_REQ,
		bfa_fn_lpu(lps->bfa));

	m->bfa_tag	= lps->bfa_tag;
	m->alpa		= lps->alpa;
	/* firmware expects big-endian pdu size */
	m->pdu_size	= cpu_to_be16(lps->pdusz);
	m->pwwn		= lps->pwwn;
	m->nwwn		= lps->nwwn;
	m->fdisc	= lps->fdisc;
	m->auth_en	= lps->auth_en;
	m->bb_scn	= lps->bb_scn;

	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
	list_del(&lps->qe);
	list_add_tail(&lps->qe, &mod->lps_login_q);
}
1699
/*
 * send logout request to firmware, addressed by the firmware-assigned
 * tag obtained at login.  Caller must have verified reqq space.
 */
static void
bfa_lps_send_logout(struct bfa_lps_s *lps)
{
	struct bfi_lps_logout_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_LOGOUT_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->port_name = lps->pwwn;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1718
/*
 * send n2n pid set request to firmware: informs firmware of the locally
 * assigned PID for a point-to-point (N2N) login.  Caller must have
 * verified reqq space.
 */
static void
bfa_lps_send_set_n2n_pid(struct bfa_lps_s *lps)
{
	struct bfi_lps_n2n_pid_req_s *m;

	m = bfa_reqq_next(lps->bfa, lps->reqq);
	WARN_ON(!m);

	bfi_h2i_set(m->mh, BFI_MC_LPS, BFI_LPS_H2I_N2N_PID_REQ,
		bfa_fn_lpu(lps->bfa));

	m->fw_tag = lps->fw_tag;
	m->lp_pid = lps->lp_pid;
	bfa_reqq_produce(lps->bfa, lps->reqq, m->mh);
}
1737
Jing Huang5fbe25c2010-10-18 17:17:23 -07001738/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001739 * Indirect login completion handler for non-fcs
1740 */
1741static void
1742bfa_lps_login_comp_cb(void *arg, bfa_boolean_t complete)
1743{
1744 struct bfa_lps_s *lps = arg;
1745
1746 if (!complete)
1747 return;
1748
1749 if (lps->fdisc)
1750 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1751 else
1752 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1753}
1754
Jing Huang5fbe25c2010-10-18 17:17:23 -07001755/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001756 * Login completion handler -- direct call for fcs, queue for others
1757 */
1758static void
1759bfa_lps_login_comp(struct bfa_lps_s *lps)
1760{
1761 if (!lps->bfa->fcs) {
1762 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_login_comp_cb,
1763 lps);
1764 return;
1765 }
1766
1767 if (lps->fdisc)
1768 bfa_cb_lps_fdisc_comp(lps->bfa->bfad, lps->uarg, lps->status);
1769 else
1770 bfa_cb_lps_flogi_comp(lps->bfa->bfad, lps->uarg, lps->status);
1771}
1772
Jing Huang5fbe25c2010-10-18 17:17:23 -07001773/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001774 * Indirect logout completion handler for non-fcs
1775 */
1776static void
1777bfa_lps_logout_comp_cb(void *arg, bfa_boolean_t complete)
1778{
1779 struct bfa_lps_s *lps = arg;
1780
1781 if (!complete)
1782 return;
1783
1784 if (lps->fdisc)
1785 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1786}
1787
Jing Huang5fbe25c2010-10-18 17:17:23 -07001788/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001789 * Logout completion handler -- direct call for fcs, queue for others
1790 */
1791static void
1792bfa_lps_logout_comp(struct bfa_lps_s *lps)
1793{
1794 if (!lps->bfa->fcs) {
1795 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_logout_comp_cb,
1796 lps);
1797 return;
1798 }
1799 if (lps->fdisc)
1800 bfa_cb_lps_fdisclogo_comp(lps->bfa->bfad, lps->uarg);
1801}
1802
Jing Huang5fbe25c2010-10-18 17:17:23 -07001803/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001804 * Clear virtual link completion handler for non-fcs
1805 */
1806static void
1807bfa_lps_cvl_event_cb(void *arg, bfa_boolean_t complete)
1808{
1809 struct bfa_lps_s *lps = arg;
1810
1811 if (!complete)
1812 return;
1813
1814 /* Clear virtual link to base port will result in link down */
1815 if (lps->fdisc)
1816 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1817}
1818
Jing Huang5fbe25c2010-10-18 17:17:23 -07001819/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001820 * Received Clear virtual link event --direct call for fcs,
1821 * queue for others
1822 */
1823static void
1824bfa_lps_cvl_event(struct bfa_lps_s *lps)
1825{
1826 if (!lps->bfa->fcs) {
1827 bfa_cb_queue(lps->bfa, &lps->hcb_qe, bfa_lps_cvl_event_cb,
1828 lps);
1829 return;
1830 }
1831
1832 /* Clear virtual link to base port will result in link down */
1833 if (lps->fdisc)
1834 bfa_cb_lps_cvl_event(lps->bfa->bfad, lps->uarg);
1835}
1836
1837
1838
Jing Huang5fbe25c2010-10-18 17:17:23 -07001839/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001840 * lps_public BFA LPS public functions
1841 */
1842
1843u32
1844bfa_lps_get_max_vport(struct bfa_s *bfa)
1845{
1846 if (bfa_ioc_devid(&bfa->ioc) == BFA_PCI_DEVICE_ID_CT)
1847 return BFA_LPS_MAX_VPORTS_SUPP_CT;
1848 else
1849 return BFA_LPS_MAX_VPORTS_SUPP_CB;
1850}
1851
Jing Huang5fbe25c2010-10-18 17:17:23 -07001852/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001853 * Allocate a lport srvice tag.
1854 */
struct bfa_lps_s *
bfa_lps_alloc(struct bfa_s *bfa)
{
	struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
	struct bfa_lps_s *lps = NULL;

	/* Take an lps from the free list; lps stays NULL if all tags
	 * are in use.
	 */
	bfa_q_deq(&mod->lps_free_q, &lps);

	if (lps == NULL)
		return NULL;

	/* Track the tag on the active list and start it in init state. */
	list_add_tail(&lps->qe, &mod->lps_active_q);

	bfa_sm_set_state(lps, bfa_lps_sm_init);
	return lps;
}
1871
Jing Huang5fbe25c2010-10-18 17:17:23 -07001872/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001873 * Free lport service tag. This can be called anytime after an alloc.
1874 * No need to wait for any pending login/logout completions.
1875 */
void
bfa_lps_delete(struct bfa_lps_s *lps)
{
	/* The state machine performs the cleanup from whatever state
	 * the lps is currently in.
	 */
	bfa_sm_send_event(lps, BFA_LPS_SM_DELETE);
}
1881
Jing Huang5fbe25c2010-10-18 17:17:23 -07001882/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001883 * Initiate a lport login.
1884 */
1885void
1886bfa_lps_flogi(struct bfa_lps_s *lps, void *uarg, u8 alpa, u16 pdusz,
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001887 wwn_t pwwn, wwn_t nwwn, bfa_boolean_t auth_en, uint8_t bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001888{
1889 lps->uarg = uarg;
1890 lps->alpa = alpa;
1891 lps->pdusz = pdusz;
1892 lps->pwwn = pwwn;
1893 lps->nwwn = nwwn;
1894 lps->fdisc = BFA_FALSE;
1895 lps->auth_en = auth_en;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07001896 lps->bb_scn = bb_scn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001897 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1898}
1899
Jing Huang5fbe25c2010-10-18 17:17:23 -07001900/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001901 * Initiate a lport fdisc login.
1902 */
1903void
1904bfa_lps_fdisc(struct bfa_lps_s *lps, void *uarg, u16 pdusz, wwn_t pwwn,
1905 wwn_t nwwn)
1906{
1907 lps->uarg = uarg;
1908 lps->alpa = 0;
1909 lps->pdusz = pdusz;
1910 lps->pwwn = pwwn;
1911 lps->nwwn = nwwn;
1912 lps->fdisc = BFA_TRUE;
1913 lps->auth_en = BFA_FALSE;
1914 bfa_sm_send_event(lps, BFA_LPS_SM_LOGIN);
1915}
1916
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001917
Jing Huang5fbe25c2010-10-18 17:17:23 -07001918/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001919 * Initiate a lport FDSIC logout.
1920 */
void
bfa_lps_fdisclogo(struct bfa_lps_s *lps)
{
	/* The state machine sends the LOGO request to firmware. */
	bfa_sm_send_event(lps, BFA_LPS_SM_LOGOUT);
}
1926
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001927u8
1928bfa_lps_get_fwtag(struct bfa_s *bfa, u8 lp_tag)
1929{
1930 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1931
1932 return BFA_LPS_FROM_TAG(mod, lp_tag)->fw_tag;
1933}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001934
Jing Huang5fbe25c2010-10-18 17:17:23 -07001935/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001936 * Return lport services tag given the pid
1937 */
1938u8
1939bfa_lps_get_tag_from_pid(struct bfa_s *bfa, u32 pid)
1940{
1941 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1942 struct bfa_lps_s *lps;
1943 int i;
1944
1945 for (i = 0, lps = mod->lps_arr; i < mod->num_lps; i++, lps++) {
1946 if (lps->lp_pid == pid)
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001947 return lps->bfa_tag;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001948 }
1949
1950 /* Return base port tag anyway */
1951 return 0;
1952}
1953
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001954
Jing Huang5fbe25c2010-10-18 17:17:23 -07001955/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001956 * return port id assigned to the base lport
1957 */
1958u32
1959bfa_lps_get_base_pid(struct bfa_s *bfa)
1960{
1961 struct bfa_lps_mod_s *mod = BFA_LPS_MOD(bfa);
1962
1963 return BFA_LPS_FROM_TAG(mod, 0)->lp_pid;
1964}
1965
Jing Huang8f4bfad2010-12-26 21:50:10 -08001966/*
Krishna Gudipatib7044952010-12-13 16:17:42 -08001967 * Set PID in case of n2n (which is assigned during PLOGI)
1968 */
void
bfa_lps_set_n2n_pid(struct bfa_lps_s *lps, uint32_t n2n_pid)
{
	bfa_trc(lps->bfa, lps->bfa_tag);
	bfa_trc(lps->bfa, n2n_pid);

	/* Remember the PID assigned during PLOGI; the state machine
	 * pushes it to firmware via the N2N PID request.
	 */
	lps->lp_pid = n2n_pid;
	bfa_sm_send_event(lps, BFA_LPS_SM_SET_N2N_PID);
}
1978
Jing Huang5fbe25c2010-10-18 17:17:23 -07001979/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001980 * LPS firmware message class handler.
1981 */
void
bfa_lps_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_lps_i2h_msg_u msg;

	bfa_trc(bfa, m->mhdr.msg_id);
	msg.msg = m;

	/* Dispatch on the firmware-to-host (i2h) message id. */
	switch (m->mhdr.msg_id) {
	case BFI_LPS_I2H_LOGIN_RSP:
		bfa_lps_login_rsp(bfa, msg.login_rsp);
		break;

	case BFI_LPS_I2H_LOGOUT_RSP:
		bfa_lps_logout_rsp(bfa, msg.logout_rsp);
		break;

	case BFI_LPS_I2H_CVL_EVENT:
		bfa_lps_rx_cvl_event(bfa, msg.cvl_event);
		break;

	default:
		/* Unknown message id: likely driver/firmware mismatch. */
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
2008
Jing Huang5fbe25c2010-10-18 17:17:23 -07002009/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002010 * FC PORT state machine functions
2011 */
/*
 * Port is uninitialized: waiting for the start event after IOC init.
 */
static void
bfa_fcport_sm_uninit(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Start event after IOC is configured and BFA is started.
		 */
		fcport->use_flash_cfg = BFA_TRUE;

		if (bfa_fcport_send_enable(fcport)) {
			bfa_trc(fcport->bfa, BFA_TRUE);
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		} else {
			/* No request queue space: wait for it. */
			bfa_trc(fcport->bfa, BFA_FALSE);
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		}
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2059
/*
 * Port enable is pending: waiting for request queue space to send the
 * enable request to firmware.
 */
static void
bfa_fcport_sm_enabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred enable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		bfa_fcport_send_enable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enable is in progress.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&fcport->reqq_wait);
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2116
/*
 * Enable request has been sent to firmware: waiting for the response
 * or a link event.
 */
static void
bfa_fcport_sm_enabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		break;

	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);

		/* An event callback must have been registered by now. */
		WARN_ON(!fcport->event_cbfn);
		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already being enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2171
/*
 * Port is enabled and the link is down: waiting for link up.
 */
static void
bfa_fcport_sm_linkdown(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_LINKUP:
		bfa_fcport_update_linkinfo(fcport);
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkup);
		WARN_ON(!fcport->event_cbfn);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		/* Non-FC mode: record the FIP FCF discovery outcome. */
		if (!bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {

			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipenabled);
			bfa_trc(fcport->bfa,
				pevent->link_state.vc_fcf.fcf.fipfailed);

			if (pevent->link_state.vc_fcf.fcf.fipfailed)
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovery Failed");
			else
				bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
					BFA_PL_EID_FIP_FCF_DISC, 0,
					"FIP FCF Discovered");
		}

		bfa_fcport_scn(fcport, BFA_PORT_LINKUP, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port online: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link down event.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2250
/*
 * Port is enabled and the link is up.
 */
static void
bfa_fcport_sm_linkup(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;

	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_ENABLE:
		/*
		 * Already enabled.
		 */
		break;

	case BFA_FCPORT_SM_DISABLE:
		if (bfa_fcport_send_disable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_disabling_qwait);

		/* Disable implies link down: notify before logging. */
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port offline: WWN = %s\n", pwwn_buf);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port disabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_LINKDOWN:
		bfa_sm_set_state(fcport, bfa_fcport_sm_linkdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		wwn2str(pwwn_buf, fcport->pwwn);
		/* Unexpected link loss is an error; admin-initiated is not. */
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_fcport_reset_linkinfo(fcport);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
		bfa_fcport_reset_linkinfo(fcport);
		bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_FALSE);
		wwn2str(pwwn_buf, fcport->pwwn);
		if (BFA_PORT_IS_DISABLED(fcport->bfa))
			BFA_LOG(KERN_INFO, bfad, bfa_log_level,
				"Base port offline: WWN = %s\n", pwwn_buf);
		else
			BFA_LOG(KERN_ERR, bfad, bfa_log_level,
				"Base port (WWN = %s) "
				"lost fabric connectivity\n", pwwn_buf);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2332
/*
 * Port disable is pending: waiting for request queue space to send the
 * disable request to firmware.
 */
static void
bfa_fcport_sm_disabling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Queue space available: send the deferred disable. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable arrived while disable is queued: toggle. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_toggling_qwait);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2377
/*
 * A disable followed by an enable is pending: waiting for request queue
 * space to send both requests back-to-back.
 */
static void
bfa_fcport_sm_toggling_qwait(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_QRESUME:
		/* Send disable first, then the enable that follows it. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling);
		bfa_fcport_send_disable(fcport);
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	case BFA_FCPORT_SM_ENABLE:
		/* Enable already pending; nothing to do. */
		break;

	case BFA_FCPORT_SM_DISABLE:
		/* Drop the pending enable; only disable remains queued. */
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabling_qwait);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		bfa_reqq_wcancel(&fcport->reqq_wait);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2424
/*
 * Disable request has been sent to firmware: waiting for the response.
 */
static void
bfa_fcport_sm_disabling(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_FWRSP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already being disabled.
		 */
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_LINKUP:
	case BFA_FCPORT_SM_LINKDOWN:
		/*
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2478
/*
 * Port is administratively disabled.
 */
static void
bfa_fcport_sm_disabled(struct bfa_fcport_s *fcport,
			enum bfa_fcport_sm_event event)
{
	char pwwn_buf[BFA_STRING_32];
	struct bfad_s *bfad = (struct bfad_s *)fcport->bfa->bfad;
	bfa_trc(fcport->bfa, event);

	switch (event) {
	case BFA_FCPORT_SM_START:
		/*
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_FCPORT_SM_STOP:
		bfa_sm_set_state(fcport, bfa_fcport_sm_stopped);
		break;

	case BFA_FCPORT_SM_ENABLE:
		if (bfa_fcport_send_enable(fcport))
			bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
		else
			bfa_sm_set_state(fcport,
					bfa_fcport_sm_enabling_qwait);

		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
				BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		wwn2str(pwwn_buf, fcport->pwwn);
		BFA_LOG(KERN_INFO, bfad, bfa_log_level,
			"Base port enabled: WWN = %s\n", pwwn_buf);
		break;

	case BFA_FCPORT_SM_DISABLE:
		/*
		 * Already disabled.
		 */
		break;

	case BFA_FCPORT_SM_HWFAIL:
		bfa_sm_set_state(fcport, bfa_fcport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(fcport->bfa, event);
	}
}
2526
2527static void
2528bfa_fcport_sm_stopped(struct bfa_fcport_s *fcport,
2529 enum bfa_fcport_sm_event event)
2530{
2531 bfa_trc(fcport->bfa, event);
2532
2533 switch (event) {
2534 case BFA_FCPORT_SM_START:
2535 if (bfa_fcport_send_enable(fcport))
2536 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2537 else
2538 bfa_sm_set_state(fcport,
2539 bfa_fcport_sm_enabling_qwait);
2540 break;
2541
2542 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002543 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002544 * Ignore all other events.
2545 */
2546 ;
2547 }
2548}
2549
Jing Huang5fbe25c2010-10-18 17:17:23 -07002550/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002551 * Port is enabled. IOC is down/failed.
2552 */
2553static void
2554bfa_fcport_sm_iocdown(struct bfa_fcport_s *fcport,
2555 enum bfa_fcport_sm_event event)
2556{
2557 bfa_trc(fcport->bfa, event);
2558
2559 switch (event) {
2560 case BFA_FCPORT_SM_START:
2561 if (bfa_fcport_send_enable(fcport))
2562 bfa_sm_set_state(fcport, bfa_fcport_sm_enabling);
2563 else
2564 bfa_sm_set_state(fcport,
2565 bfa_fcport_sm_enabling_qwait);
2566 break;
2567
2568 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002569 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002570 * Ignore all events.
2571 */
2572 ;
2573 }
2574}
2575
Jing Huang5fbe25c2010-10-18 17:17:23 -07002576/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002577 * Port is disabled. IOC is down/failed.
2578 */
2579static void
2580bfa_fcport_sm_iocfail(struct bfa_fcport_s *fcport,
2581 enum bfa_fcport_sm_event event)
2582{
2583 bfa_trc(fcport->bfa, event);
2584
2585 switch (event) {
2586 case BFA_FCPORT_SM_START:
2587 bfa_sm_set_state(fcport, bfa_fcport_sm_disabled);
2588 break;
2589
2590 case BFA_FCPORT_SM_ENABLE:
2591 bfa_sm_set_state(fcport, bfa_fcport_sm_iocdown);
2592 break;
2593
2594 default:
Jing Huang5fbe25c2010-10-18 17:17:23 -07002595 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002596 * Ignore all events.
2597 */
2598 ;
2599 }
2600}
2601
Jing Huang5fbe25c2010-10-18 17:17:23 -07002602/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002603 * Link state is down
2604 */
2605static void
2606bfa_fcport_ln_sm_dn(struct bfa_fcport_ln_s *ln,
2607 enum bfa_fcport_ln_sm_event event)
2608{
2609 bfa_trc(ln->fcport->bfa, event);
2610
2611 switch (event) {
2612 case BFA_FCPORT_LN_SM_LINKUP:
2613 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2614 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2615 break;
2616
2617 default:
2618 bfa_sm_fault(ln->fcport->bfa, event);
2619 }
2620}
2621
Jing Huang5fbe25c2010-10-18 17:17:23 -07002622/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002623 * Link state is waiting for down notification
2624 */
2625static void
2626bfa_fcport_ln_sm_dn_nf(struct bfa_fcport_ln_s *ln,
2627 enum bfa_fcport_ln_sm_event event)
2628{
2629 bfa_trc(ln->fcport->bfa, event);
2630
2631 switch (event) {
2632 case BFA_FCPORT_LN_SM_LINKUP:
2633 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2634 break;
2635
2636 case BFA_FCPORT_LN_SM_NOTIFICATION:
2637 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);
2638 break;
2639
2640 default:
2641 bfa_sm_fault(ln->fcport->bfa, event);
2642 }
2643}
2644
Jing Huang5fbe25c2010-10-18 17:17:23 -07002645/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002646 * Link state is waiting for down notification and there is a pending up
2647 */
2648static void
2649bfa_fcport_ln_sm_dn_up_nf(struct bfa_fcport_ln_s *ln,
2650 enum bfa_fcport_ln_sm_event event)
2651{
2652 bfa_trc(ln->fcport->bfa, event);
2653
2654 switch (event) {
2655 case BFA_FCPORT_LN_SM_LINKDOWN:
2656 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2657 break;
2658
2659 case BFA_FCPORT_LN_SM_NOTIFICATION:
2660 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_nf);
2661 bfa_fcport_queue_cb(ln, BFA_PORT_LINKUP);
2662 break;
2663
2664 default:
2665 bfa_sm_fault(ln->fcport->bfa, event);
2666 }
2667}
2668
Jing Huang5fbe25c2010-10-18 17:17:23 -07002669/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002670 * Link state is up
2671 */
2672static void
2673bfa_fcport_ln_sm_up(struct bfa_fcport_ln_s *ln,
2674 enum bfa_fcport_ln_sm_event event)
2675{
2676 bfa_trc(ln->fcport->bfa, event);
2677
2678 switch (event) {
2679 case BFA_FCPORT_LN_SM_LINKDOWN:
2680 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2681 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2682 break;
2683
2684 default:
2685 bfa_sm_fault(ln->fcport->bfa, event);
2686 }
2687}
2688
Jing Huang5fbe25c2010-10-18 17:17:23 -07002689/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002690 * Link state is waiting for up notification
2691 */
2692static void
2693bfa_fcport_ln_sm_up_nf(struct bfa_fcport_ln_s *ln,
2694 enum bfa_fcport_ln_sm_event event)
2695{
2696 bfa_trc(ln->fcport->bfa, event);
2697
2698 switch (event) {
2699 case BFA_FCPORT_LN_SM_LINKDOWN:
2700 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2701 break;
2702
2703 case BFA_FCPORT_LN_SM_NOTIFICATION:
2704 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up);
2705 break;
2706
2707 default:
2708 bfa_sm_fault(ln->fcport->bfa, event);
2709 }
2710}
2711
Jing Huang5fbe25c2010-10-18 17:17:23 -07002712/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002713 * Link state is waiting for up notification and there is a pending down
2714 */
2715static void
2716bfa_fcport_ln_sm_up_dn_nf(struct bfa_fcport_ln_s *ln,
2717 enum bfa_fcport_ln_sm_event event)
2718{
2719 bfa_trc(ln->fcport->bfa, event);
2720
2721 switch (event) {
2722 case BFA_FCPORT_LN_SM_LINKUP:
2723 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_up_nf);
2724 break;
2725
2726 case BFA_FCPORT_LN_SM_NOTIFICATION:
2727 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_nf);
2728 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2729 break;
2730
2731 default:
2732 bfa_sm_fault(ln->fcport->bfa, event);
2733 }
2734}
2735
Jing Huang5fbe25c2010-10-18 17:17:23 -07002736/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002737 * Link state is waiting for up notification and there are pending down and up
2738 */
2739static void
2740bfa_fcport_ln_sm_up_dn_up_nf(struct bfa_fcport_ln_s *ln,
2741 enum bfa_fcport_ln_sm_event event)
2742{
2743 bfa_trc(ln->fcport->bfa, event);
2744
2745 switch (event) {
2746 case BFA_FCPORT_LN_SM_LINKDOWN:
2747 bfa_sm_set_state(ln, bfa_fcport_ln_sm_up_dn_nf);
2748 break;
2749
2750 case BFA_FCPORT_LN_SM_NOTIFICATION:
2751 bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn_up_nf);
2752 bfa_fcport_queue_cb(ln, BFA_PORT_LINKDOWN);
2753 break;
2754
2755 default:
2756 bfa_sm_fault(ln->fcport->bfa, event);
2757 }
2758}
2759
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002760static void
2761__bfa_cb_fcport_event(void *cbarg, bfa_boolean_t complete)
2762{
2763 struct bfa_fcport_ln_s *ln = cbarg;
2764
2765 if (complete)
2766 ln->fcport->event_cbfn(ln->fcport->event_cbarg, ln->ln_event);
2767 else
2768 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2769}
2770
Jing Huang5fbe25c2010-10-18 17:17:23 -07002771/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002772 * Send SCN notification to upper layers.
2773 * trunk - false if caller is fcport to ignore fcport event in trunked mode
2774 */
2775static void
2776bfa_fcport_scn(struct bfa_fcport_s *fcport, enum bfa_port_linkstate event,
2777 bfa_boolean_t trunk)
2778{
2779 if (fcport->cfg.trunked && !trunk)
2780 return;
2781
2782 switch (event) {
2783 case BFA_PORT_LINKUP:
2784 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKUP);
2785 break;
2786 case BFA_PORT_LINKDOWN:
2787 bfa_sm_send_event(&fcport->ln, BFA_FCPORT_LN_SM_LINKDOWN);
2788 break;
2789 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08002790 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002791 }
2792}
2793
2794static void
2795bfa_fcport_queue_cb(struct bfa_fcport_ln_s *ln, enum bfa_port_linkstate event)
2796{
2797 struct bfa_fcport_s *fcport = ln->fcport;
2798
2799 if (fcport->bfa->fcs) {
2800 fcport->event_cbfn(fcport->event_cbarg, event);
2801 bfa_sm_send_event(ln, BFA_FCPORT_LN_SM_NOTIFICATION);
2802 } else {
2803 ln->ln_event = event;
2804 bfa_cb_queue(fcport->bfa, &ln->ln_qe,
2805 __bfa_cb_fcport_event, ln);
2806 }
2807}
2808
/* DMA bytes reserved for the firmware port-stats block, cacheline rounded */
#define FCPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_fcport_stats_u), \
			BFA_CACHELINE_SZ))
2811
2812static void
Krishna Gudipati45070252011-06-24 20:24:29 -07002813bfa_fcport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
2814 struct bfa_s *bfa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002815{
Krishna Gudipati45070252011-06-24 20:24:29 -07002816 struct bfa_mem_dma_s *fcport_dma = BFA_MEM_FCPORT_DMA(bfa);
2817
2818 bfa_mem_dma_setup(minfo, fcport_dma, FCPORT_STATS_DMA_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002819}
2820
static void
bfa_fcport_qresume(void *cbarg)
{
	struct bfa_fcport_s *fcport = cbarg;

	/* request queue has space again - resume the port state machine */
	bfa_sm_send_event(fcport, BFA_FCPORT_SM_QRESUME);
}
2828
2829static void
Krishna Gudipati45070252011-06-24 20:24:29 -07002830bfa_fcport_mem_claim(struct bfa_fcport_s *fcport)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002831{
Krishna Gudipati45070252011-06-24 20:24:29 -07002832 struct bfa_mem_dma_s *fcport_dma = &fcport->fcport_dma;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002833
Krishna Gudipati45070252011-06-24 20:24:29 -07002834 fcport->stats_kva = bfa_mem_dma_virt(fcport_dma);
2835 fcport->stats_pa = bfa_mem_dma_phys(fcport_dma);
2836 fcport->stats = (union bfa_fcport_stats_u *)
2837 bfa_mem_dma_virt(fcport_dma);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002838}
2839
/*
 * Memory initialization: claim DMA memory, put both state machines in
 * their initial states and install the default port configuration.
 */
static void
bfa_fcport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	struct bfa_port_cfg_s *port_cfg = &fcport->cfg;
	struct bfa_fcport_ln_s *ln = &fcport->ln;
	struct timeval tv;

	fcport->bfa = bfa;
	ln->fcport = fcport;

	bfa_fcport_mem_claim(fcport);

	/* port SM starts uninitialized; link-notify SM starts "down" */
	bfa_sm_set_state(fcport, bfa_fcport_sm_uninit);
	bfa_sm_set_state(ln, bfa_fcport_ln_sm_dn);

	/*
	 * initialize time stamp for stats reset
	 */
	do_gettimeofday(&tv);
	fcport->stats_reset_time = tv.tv_sec;

	/*
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	/* 0 means "take the IOC default" - see bfa_fcport_init() */
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PORT_SPEED_1GBPS;

	bfa_reqq_winit(&fcport->reqq_wait, bfa_fcport_qresume, fcport);
}
2878
static void
bfa_fcport_detach(struct bfa_s *bfa)
{
	/* nothing to release: fcport state is embedded in the bfa instance */
}
2883
Jing Huang5fbe25c2010-10-18 17:17:23 -07002884/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002885 * Called when IOC is ready.
2886 */
static void
bfa_fcport_start(struct bfa_s *bfa)
{
	/* IOC is operational - kick the port state machine */
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_START);
}
2892
Jing Huang5fbe25c2010-10-18 17:17:23 -07002893/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002894 * Called before IOC is stopped.
2895 */
static void
bfa_fcport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_STOP);
	/* trunk links are unusable once the port is stopping */
	bfa_trunk_iocdisable(bfa);
}
2902
Jing Huang5fbe25c2010-10-18 17:17:23 -07002903/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002904 * Called when IOC failure is detected.
2905 */
static void
bfa_fcport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* IOC failure: fail the port SM and mark all trunk links down */
	bfa_sm_send_event(fcport, BFA_FCPORT_SM_HWFAIL);
	bfa_trunk_iocdisable(bfa);
}
2914
/*
 * Cache link parameters from the firmware link-state event last saved in
 * fcport->event_arg (speed, topology, QoS, FCoE VLAN, trunk state).
 */
static void
bfa_fcport_update_linkinfo(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_event_s *pevent = fcport->event_arg.i2hmsg.event;
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;

	fcport->speed = pevent->link_state.speed;
	fcport->topology = pevent->link_state.topology;

	/* NOTE(review): loop ALPA appears to be reset here and learned
	 * elsewhere - confirm against the loop login path */
	if (fcport->topology == BFA_PORT_TOPOLOGY_LOOP)
		fcport->myalpa = 0;

	/* QoS Details */
	fcport->qos_attr = pevent->link_state.qos_attr;
	fcport->qos_vc_attr = pevent->link_state.vc_fcf.qos_vc_attr;

	/*
	 * update trunk state if applicable
	 */
	if (!fcport->cfg.trunked)
		trunk->attr.state = BFA_TRUNK_DISABLED;

	/* update FCoE specific */
	fcport->fcoe_vlan = be16_to_cpu(pevent->link_state.vc_fcf.fcf.vlan);

	bfa_trc(fcport->bfa, fcport->speed);
	bfa_trc(fcport->bfa, fcport->topology);
}
2943
2944static void
2945bfa_fcport_reset_linkinfo(struct bfa_fcport_s *fcport)
2946{
2947 fcport->speed = BFA_PORT_SPEED_UNKNOWN;
2948 fcport->topology = BFA_PORT_TOPOLOGY_NONE;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07002949 fcport->bbsc_op_state = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002950}
2951
Jing Huang5fbe25c2010-10-18 17:17:23 -07002952/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002953 * Send port enable message to firmware.
2954 */
/*
 * Build and post a port-enable request to firmware.
 * Returns BFA_FALSE (and arms a queue wait) if no request slot is free;
 * the SM is resumed via bfa_fcport_qresume in that case.
 */
static bfa_boolean_t
bfa_fcport_send_enable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_enable_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
							&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_ENABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->nwwn = fcport->nwwn;
	m->pwwn = fcport->pwwn;
	m->port_cfg = fcport->cfg;
	m->msgtag = fcport->msgtag;
	/* maxfrsize goes to firmware in big-endian byte order */
	m->port_cfg.maxfrsize = cpu_to_be16(fcport->cfg.maxfrsize);
	m->use_flash_cfg = fcport->use_flash_cfg;
	bfa_dma_be_addr_set(m->stats_dma_addr, fcport->stats_pa);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(fcport->bfa, m->stats_dma_addr.a32.addr_hi);

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
	return BFA_TRUE;
}
2994
Jing Huang5fbe25c2010-10-18 17:17:23 -07002995/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002996 * Send port disable message to firmware.
2997 */
/*
 * Build and post a port-disable request to firmware.
 * Returns BFA_FALSE (and arms a queue wait) if no request slot is free.
 */
static bfa_boolean_t
bfa_fcport_send_disable(struct bfa_fcport_s *fcport)
{
	struct bfi_fcport_req_s *m;

	/*
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	fcport->msgtag++;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
			&fcport->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_DISABLE_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->msgtag = fcport->msgtag;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);

	return BFA_TRUE;
}
3030
3031static void
3032bfa_fcport_set_wwns(struct bfa_fcport_s *fcport)
3033{
Maggie Zhangf7f73812010-12-09 19:08:43 -08003034 fcport->pwwn = fcport->bfa->ioc.attr->pwwn;
3035 fcport->nwwn = fcport->bfa->ioc.attr->nwwn;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003036
3037 bfa_trc(fcport->bfa, fcport->pwwn);
3038 bfa_trc(fcport->bfa, fcport->nwwn);
3039}
3040
/*
 * Send the configured TX BB-credit / BB_SCN service parameters to
 * firmware.
 */
static void
bfa_fcport_send_txcredit(void *port_cbarg)
{

	struct bfa_fcport_s *fcport = port_cbarg;
	struct bfi_fcport_set_svc_params_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);
	if (!m) {
		/* NOTE(review): dropped silently on queue-full; there is no
		 * wait/retry here, unlike the enable/disable paths */
		bfa_trc(fcport->bfa, fcport->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_SET_SVC_PARAMS_REQ,
			bfa_fn_lpu(fcport->bfa));
	m->tx_bbcredit = cpu_to_be16((u16)fcport->cfg.tx_bbcredit);
	m->bb_scn = fcport->cfg.bb_scn;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, m->mh);
}
3067
3068static void
3069bfa_fcport_qos_stats_swap(struct bfa_qos_stats_s *d,
3070 struct bfa_qos_stats_s *s)
3071{
3072 u32 *dip = (u32 *) d;
Maggie50444a32010-11-29 18:26:32 -08003073 __be32 *sip = (__be32 *) s;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003074 int i;
3075
3076 /* Now swap the 32 bit fields */
3077 for (i = 0; i < (sizeof(struct bfa_qos_stats_s)/sizeof(u32)); ++i)
Jing Huangba816ea2010-10-18 17:10:50 -07003078 dip[i] = be32_to_cpu(sip[i]);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003079}
3080
/*
 * Copy FCoE stats from the firmware DMA buffer into host byte order.
 * Words are processed in pairs: on little-endian hosts the two 32-bit
 * halves of each 64-bit counter are also exchanged.
 */
static void
bfa_fcport_fcoe_stats_swap(struct bfa_fcoe_stats_s *d,
	struct bfa_fcoe_stats_s *s)
{
	u32 *dip = (u32 *) d;
	__be32 *sip = (__be32 *) s;
	int i;

	for (i = 0; i < ((sizeof(struct bfa_fcoe_stats_s))/sizeof(u32));
		i = i + 2) {
#ifdef __BIG_ENDIAN
		dip[i] = be32_to_cpu(sip[i]);
		dip[i + 1] = be32_to_cpu(sip[i + 1]);
#else
		/* swap word order within each 64-bit counter as well */
		dip[i] = be32_to_cpu(sip[i + 1]);
		dip[i + 1] = be32_to_cpu(sip[i]);
#endif
	}
}
3100
/*
 * Completion callback for a stats-get request: byte-swap the firmware
 * stats into the caller's buffer and invoke the caller's callback.
 */
static void
__bfa_cb_fcport_stats_get(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		if (fcport->stats_status == BFA_STATUS_OK) {
			struct timeval tv;

			/* Swap FC QoS or FCoE stats */
			if (bfa_ioc_get_fcmode(&fcport->bfa->ioc)) {
				bfa_fcport_qos_stats_swap(
					&fcport->stats_ret->fcqos,
					&fcport->stats->fcqos);
			} else {
				bfa_fcport_fcoe_stats_swap(
					&fcport->stats_ret->fcoe,
					&fcport->stats->fcoe);

				/* seconds since the last stats reset */
				do_gettimeofday(&tv);
				fcport->stats_ret->fcoe.secs_reset =
					tv.tv_sec - fcport->stats_reset_time;
			}
		}
		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* callback cancelled: release busy state for next request */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3131
/*
 * Stats-get request timed out: cancel any pending queue wait and
 * complete the caller's request with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_get_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe, __bfa_cb_fcport_stats_get,
		fcport);
}
3148
/*
 * Post a stats-get request to firmware; if the request queue is full,
 * arm a wait that re-enters this function when space frees up.
 */
static void
bfa_fcport_send_stats_get(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_get, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_GET_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3172
/*
 * Completion callback for a stats-clear request: restart the reset
 * timestamp and invoke the caller's callback.
 */
static void
__bfa_cb_fcport_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_fcport_s *fcport = cbarg;

	if (complete) {
		struct timeval tv;

		/*
		 * re-initialize time stamp for stats reset
		 */
		do_gettimeofday(&tv);
		fcport->stats_reset_time = tv.tv_sec;

		fcport->stats_cbfn(fcport->stats_cbarg, fcport->stats_status);
	} else {
		/* callback cancelled: release busy state for next request */
		fcport->stats_busy = BFA_FALSE;
		fcport->stats_status = BFA_STATUS_OK;
	}
}
3193
/*
 * Stats-clear request timed out: cancel any pending queue wait and
 * complete the caller's request with BFA_STATUS_ETIMER.
 */
static void
bfa_fcport_stats_clr_timeout(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;

	bfa_trc(fcport->bfa, fcport->stats_qfull);

	if (fcport->stats_qfull) {
		bfa_reqq_wcancel(&fcport->stats_reqq_wait);
		fcport->stats_qfull = BFA_FALSE;
	}

	fcport->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
			__bfa_cb_fcport_stats_clr, fcport);
}
3210
/*
 * Post a stats-clear request to firmware; if the request queue is full,
 * arm a wait that re-enters this function when space frees up.
 */
static void
bfa_fcport_send_stats_clear(void *cbarg)
{
	struct bfa_fcport_s *fcport = (struct bfa_fcport_s *) cbarg;
	struct bfi_fcport_req_s *msg;

	msg = bfa_reqq_next(fcport->bfa, BFA_REQQ_PORT);

	if (!msg) {
		fcport->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&fcport->stats_reqq_wait,
				bfa_fcport_send_stats_clear, fcport);
		bfa_reqq_wait(fcport->bfa, BFA_REQQ_PORT,
				&fcport->stats_reqq_wait);
		return;
	}
	fcport->stats_qfull = BFA_FALSE;

	memset(msg, 0, sizeof(struct bfi_fcport_req_s));
	bfi_h2i_set(msg->mh, BFI_MC_FCPORT, BFI_FCPORT_H2I_STATS_CLEAR_REQ,
			bfa_fn_lpu(fcport->bfa));
	bfa_reqq_produce(fcport->bfa, BFA_REQQ_PORT, msg->mh);
}
3234
Jing Huang5fbe25c2010-10-18 17:17:23 -07003235/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003236 * Handle trunk SCN event from firmware.
3237 */
static void
bfa_trunk_scn(struct bfa_fcport_s *fcport, struct bfi_fcport_trunk_scn_s *scn)
{
	struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
	struct bfi_fcport_trunk_link_s *tlink;
	struct bfa_trunk_link_attr_s *lattr;
	enum bfa_trunk_state state_prev;
	int i;
	int link_bm = 0;	/* bit i set => trunk link i is up */

	bfa_trc(fcport->bfa, fcport->cfg.trunked);
	WARN_ON(scn->trunk_state != BFA_TRUNK_ONLINE &&
		scn->trunk_state != BFA_TRUNK_OFFLINE);

	bfa_trc(fcport->bfa, trunk->attr.state);
	bfa_trc(fcport->bfa, scn->trunk_state);
	bfa_trc(fcport->bfa, scn->trunk_speed);

	/*
	 * Save off new state for trunk attribute query
	 */
	state_prev = trunk->attr.state;
	if (fcport->cfg.trunked && (trunk->attr.state != BFA_TRUNK_DISABLED))
		trunk->attr.state = scn->trunk_state;
	trunk->attr.speed = scn->trunk_speed;
	for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
		lattr = &trunk->attr.link_attr[i];
		tlink = &scn->tlink[i];

		lattr->link_state = tlink->state;
		lattr->trunk_wwn = tlink->trunk_wwn;
		lattr->fctl = tlink->fctl;
		lattr->speed = tlink->speed;
		/* deskew arrives big-endian from firmware */
		lattr->deskew = be32_to_cpu(tlink->deskew);

		if (tlink->state == BFA_TRUNK_LINK_STATE_UP) {
			fcport->speed = tlink->speed;
			fcport->topology = BFA_PORT_TOPOLOGY_P2P;
			link_bm |= 1 << i;
		}

		bfa_trc(fcport->bfa, lattr->link_state);
		bfa_trc(fcport->bfa, lattr->trunk_wwn);
		bfa_trc(fcport->bfa, lattr->fctl);
		bfa_trc(fcport->bfa, lattr->speed);
		bfa_trc(fcport->bfa, lattr->deskew);
	}

	/* record which of the (up to two) trunk links are up */
	switch (link_bm) {
	case 3:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,1)");
		break;
	case 2:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(-,1)");
		break;
	case 1:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk up(0,-)");
		break;
	default:
		bfa_plog_str(fcport->bfa->plog, BFA_PL_MID_HAL,
			BFA_PL_EID_TRUNK_SCN, 0, "Trunk down");
	}

	/*
	 * Notify upper layers if trunk state changed.
	 */
	if ((state_prev != trunk->attr.state) ||
		(scn->trunk_state == BFA_TRUNK_OFFLINE)) {
		bfa_fcport_scn(fcport, (scn->trunk_state == BFA_TRUNK_ONLINE) ?
			BFA_PORT_LINKUP : BFA_PORT_LINKDOWN, BFA_TRUE);
	}
}
3313
/*
 * IOC going away: take the trunk offline and reset every per-link
 * attribute to its "link down" default.
 */
static void
bfa_trunk_iocdisable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	int i = 0;

	/*
	 * In trunked mode, notify upper layers that link is down
	 */
	if (fcport->cfg.trunked) {
		if (fcport->trunk.attr.state == BFA_TRUNK_ONLINE)
			bfa_fcport_scn(fcport, BFA_PORT_LINKDOWN, BFA_TRUE);

		fcport->trunk.attr.state = BFA_TRUNK_OFFLINE;
		fcport->trunk.attr.speed = BFA_PORT_SPEED_UNKNOWN;
		for (i = 0; i < BFA_TRUNK_MAX_PORTS; i++) {
			fcport->trunk.attr.link_attr[i].trunk_wwn = 0;
			fcport->trunk.attr.link_attr[i].fctl =
				BFA_TRUNK_LINK_FCTL_NORMAL;
			fcport->trunk.attr.link_attr[i].link_state =
				BFA_TRUNK_LINK_STATE_DN_LINKDN;
			fcport->trunk.attr.link_attr[i].speed =
				BFA_PORT_SPEED_UNKNOWN;
			fcport->trunk.attr.link_attr[i].deskew = 0;
		}
	}
}
3341
Jing Huang5fbe25c2010-10-18 17:17:23 -07003342/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003343 * Called to initialize port attributes
3344 */
void
bfa_fcport_init(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/*
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_fcport_set_wwns(fcport);
	/* cfg.maxfrsize == 0 means "not configured": use the IOC default */
	if (fcport->cfg.maxfrsize == 0)
		fcport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	fcport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	fcport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	/* remember when pre-boot code owns the port enable/disable */
	if (bfa_fcport_is_pbcdisabled(bfa))
		bfa->modules.port.pbc_disabled = BFA_TRUE;

	WARN_ON(!fcport->cfg.maxfrsize);
	WARN_ON(!fcport->cfg.rx_bbcredit);
	WARN_ON(!fcport->speed_sup);
}
3366
Jing Huang5fbe25c2010-10-18 17:17:23 -07003367/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003368 * Firmware message handler.
3369 */
void
bfa_fcport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
	union bfi_fcport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	fcport->event_arg.i2hmsg = i2hmsg;

	bfa_trc(bfa, msg->mhdr.msg_id);
	bfa_trc(bfa, bfa_sm_to_state(hal_port_sm_table, fcport->sm));

	switch (msg->mhdr.msg_id) {
	case BFI_FCPORT_I2H_ENABLE_RSP:
		/* a msgtag mismatch means this answers a stale request */
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag) {

			if (fcport->use_flash_cfg) {
				/*
				 * First enable: adopt the flash-saved config
				 * and convert its 16-bit firmware (big-
				 * endian) fields to host order.
				 * NOTE(review): be16_to_cpu would be the
				 * conventional macro for device-to-host;
				 * the byte swap is identical either way.
				 */
				fcport->cfg = i2hmsg.penable_rsp->port_cfg;
				fcport->cfg.maxfrsize =
					cpu_to_be16(fcport->cfg.maxfrsize);
				fcport->cfg.path_tov =
					cpu_to_be16(fcport->cfg.path_tov);
				fcport->cfg.q_depth =
					cpu_to_be16(fcport->cfg.q_depth);

				if (fcport->cfg.trunked)
					fcport->trunk.attr.state =
						BFA_TRUNK_OFFLINE;
				else
					fcport->trunk.attr.state =
						BFA_TRUNK_DISABLED;
				fcport->use_flash_cfg = BFA_FALSE;
			}

			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		}
		break;

	case BFI_FCPORT_I2H_DISABLE_RSP:
		if (fcport->msgtag == i2hmsg.penable_rsp->msgtag)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_FWRSP);
		break;

	case BFI_FCPORT_I2H_EVENT:
		if (i2hmsg.event->link_state.linkstate == BFA_PORT_LINKUP)
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKUP);
		else
			bfa_sm_send_event(fcport, BFA_FCPORT_SM_LINKDOWN);
		break;

	case BFI_FCPORT_I2H_TRUNK_SCN:
		bfa_trunk_scn(fcport, i2hmsg.trunk_scn);
		break;

	case BFI_FCPORT_I2H_STATS_GET_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = i2hmsg.pstatsget_rsp->status;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_get, fcport);
		break;

	case BFI_FCPORT_I2H_STATS_CLEAR_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (fcport->stats_busy == BFA_FALSE ||
			fcport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&fcport->timer);
		fcport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(fcport->bfa, &fcport->hcb_qe,
				__bfa_cb_fcport_stats_clr, fcport);
		break;

	case BFI_FCPORT_I2H_ENABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_ENABLE);
		break;

	case BFI_FCPORT_I2H_DISABLE_AEN:
		bfa_sm_send_event(fcport, BFA_FCPORT_SM_DISABLE);
		break;

	default:
		WARN_ON(1);
		break;
	}
}
3465
Jing Huang5fbe25c2010-10-18 17:17:23 -07003466/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003467 * Registered callback for port events.
3468 */
void
bfa_fcport_event_register(struct bfa_s *bfa,
				void (*cbfn) (void *cbarg,
				enum bfa_port_linkstate event),
				void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* single registrant: a new registration replaces the previous one */
	fcport->event_cbfn = cbfn;
	fcport->event_cbarg = cbarg;
}
3480
bfa_status_t
bfa_fcport_enable(struct bfa_s *bfa)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	/* port enable/disable may be owned by pre-boot code (PBC) */
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	/* diagnostics (e.g. loopback) hold the port exclusively */
	if (fcport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
3498
bfa_status_t
bfa_fcport_disable(struct bfa_s *bfa)
{
	/* port enable/disable may be owned by pre-boot code (PBC) */
	if (bfa_fcport_is_pbcdisabled(bfa))
		return BFA_STATUS_PBC;

	if (bfa_ioc_is_disabled(&bfa->ioc))
		return BFA_STATUS_IOC_DISABLED;

	bfa_sm_send_event(BFA_FCPORT_MOD(bfa), BFA_FCPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}
3511
Krishna Gudipati43ffdf42011-06-13 15:46:21 -07003512/* If PBC is disabled on port, return error */
3513bfa_status_t
3514bfa_fcport_is_pbcdisabled(struct bfa_s *bfa)
3515{
3516 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3517 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
3518 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
3519
3520 if (cfgrsp->pbc_cfg.port_enabled == BFI_PBC_PORT_DISABLED) {
3521 bfa_trc(bfa, fcport->pwwn);
3522 return BFA_STATUS_PBC;
3523 }
3524 return BFA_STATUS_OK;
3525}
3526
Jing Huang5fbe25c2010-10-18 17:17:23 -07003527/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003528 * Configure port speed.
3529 */
bfa_status_t
bfa_fcport_cfg_speed(struct bfa_s *bfa, enum bfa_port_speed speed)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	bfa_trc(bfa, speed);

	/* speed is firmware-managed while trunking is enabled */
	if (fcport->cfg.trunked == BFA_TRUE)
		return BFA_STATUS_TRUNK_ENABLED;
	if ((speed != BFA_PORT_SPEED_AUTO) && (speed > fcport->speed_sup)) {
		bfa_trc(bfa, fcport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	/* For Mezz card, port speed entered needs to be checked */
	if (bfa_mfg_is_mezz(fcport->bfa->ioc.attr->card_type)) {
		if (bfa_ioc_get_type(&fcport->bfa->ioc) == BFA_IOC_TYPE_FC) {
			/* For CT2, 1G is not supported */
			if ((speed == BFA_PORT_SPEED_1GBPS) &&
			    (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)))
				return BFA_STATUS_UNSUPP_SPEED;

			/* Already checked for Auto Speed and Max Speed supp */
			if (!(speed == BFA_PORT_SPEED_1GBPS ||
			      speed == BFA_PORT_SPEED_2GBPS ||
			      speed == BFA_PORT_SPEED_4GBPS ||
			      speed == BFA_PORT_SPEED_8GBPS ||
			      speed == BFA_PORT_SPEED_16GBPS ||
			      speed == BFA_PORT_SPEED_AUTO))
				return BFA_STATUS_UNSUPP_SPEED;
		} else {
			/* non-FC IOC: only 10G is accepted */
			if (speed != BFA_PORT_SPEED_10GBPS)
				return BFA_STATUS_UNSUPP_SPEED;
		}
	}

	fcport->cfg.speed = speed;

	return BFA_STATUS_OK;
}
3570
Jing Huang5fbe25c2010-10-18 17:17:23 -07003571/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003572 * Get current speed.
3573 */
3574enum bfa_port_speed
3575bfa_fcport_get_speed(struct bfa_s *bfa)
3576{
3577 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3578
3579 return fcport->speed;
3580}
3581
Jing Huang5fbe25c2010-10-18 17:17:23 -07003582/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003583 * Configure port topology.
3584 */
3585bfa_status_t
3586bfa_fcport_cfg_topology(struct bfa_s *bfa, enum bfa_port_topology topology)
3587{
3588 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3589
3590 bfa_trc(bfa, topology);
3591 bfa_trc(bfa, fcport->cfg.topology);
3592
3593 switch (topology) {
3594 case BFA_PORT_TOPOLOGY_P2P:
3595 case BFA_PORT_TOPOLOGY_LOOP:
3596 case BFA_PORT_TOPOLOGY_AUTO:
3597 break;
3598
3599 default:
3600 return BFA_STATUS_EINVAL;
3601 }
3602
3603 fcport->cfg.topology = topology;
3604 return BFA_STATUS_OK;
3605}
3606
Jing Huang5fbe25c2010-10-18 17:17:23 -07003607/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003608 * Get current topology.
3609 */
3610enum bfa_port_topology
3611bfa_fcport_get_topology(struct bfa_s *bfa)
3612{
3613 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3614
3615 return fcport->topology;
3616}
3617
3618bfa_status_t
3619bfa_fcport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
3620{
3621 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3622
3623 bfa_trc(bfa, alpa);
3624 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3625 bfa_trc(bfa, fcport->cfg.hardalpa);
3626
3627 fcport->cfg.cfg_hardalpa = BFA_TRUE;
3628 fcport->cfg.hardalpa = alpa;
3629
3630 return BFA_STATUS_OK;
3631}
3632
3633bfa_status_t
3634bfa_fcport_clr_hardalpa(struct bfa_s *bfa)
3635{
3636 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3637
3638 bfa_trc(bfa, fcport->cfg.cfg_hardalpa);
3639 bfa_trc(bfa, fcport->cfg.hardalpa);
3640
3641 fcport->cfg.cfg_hardalpa = BFA_FALSE;
3642 return BFA_STATUS_OK;
3643}
3644
3645bfa_boolean_t
3646bfa_fcport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
3647{
3648 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3649
3650 *alpa = fcport->cfg.hardalpa;
3651 return fcport->cfg.cfg_hardalpa;
3652}
3653
3654u8
3655bfa_fcport_get_myalpa(struct bfa_s *bfa)
3656{
3657 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3658
3659 return fcport->myalpa;
3660}
3661
3662bfa_status_t
3663bfa_fcport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
3664{
3665 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3666
3667 bfa_trc(bfa, maxfrsize);
3668 bfa_trc(bfa, fcport->cfg.maxfrsize);
3669
3670 /* with in range */
3671 if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
3672 return BFA_STATUS_INVLD_DFSZ;
3673
3674 /* power of 2, if not the max frame size of 2112 */
3675 if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
3676 return BFA_STATUS_INVLD_DFSZ;
3677
3678 fcport->cfg.maxfrsize = maxfrsize;
3679 return BFA_STATUS_OK;
3680}
3681
3682u16
3683bfa_fcport_get_maxfrsize(struct bfa_s *bfa)
3684{
3685 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3686
3687 return fcport->cfg.maxfrsize;
3688}
3689
3690u8
3691bfa_fcport_get_rx_bbcredit(struct bfa_s *bfa)
3692{
3693 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3694
3695 return fcport->cfg.rx_bbcredit;
3696}
3697
3698void
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003699bfa_fcport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit, u8 bb_scn)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003700{
3701 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3702
3703 fcport->cfg.tx_bbcredit = (u8)tx_bbcredit;
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003704 fcport->cfg.bb_scn = bb_scn;
3705 if (bb_scn)
3706 fcport->bbsc_op_state = BFA_TRUE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003707 bfa_fcport_send_txcredit(fcport);
3708}
3709
Jing Huang5fbe25c2010-10-18 17:17:23 -07003710/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003711 * Get port attributes.
3712 */
3713
3714wwn_t
3715bfa_fcport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
3716{
3717 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3718 if (node)
3719 return fcport->nwwn;
3720 else
3721 return fcport->pwwn;
3722}
3723
/*
 * Fill *attr with a snapshot of the port's attributes: WWNs, the full
 * port configuration, operating speed/topology, beacon state and the
 * effective port state.  The caller owns 'attr'; it is zeroed first.
 */
void
bfa_fcport_get_attr(struct bfa_s *bfa, struct bfa_port_attr_s *attr)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	memset(attr, 0, sizeof(struct bfa_port_attr_s));

	attr->nwwn = fcport->nwwn;
	attr->pwwn = fcport->pwwn;

	/* factory-programmed WWNs come from the IOC attributes */
	attr->factorypwwn = bfa->ioc.attr->mfg_pwwn;
	attr->factorynwwn = bfa->ioc.attr->mfg_nwwn;

	memcpy(&attr->pport_cfg, &fcport->cfg,
		sizeof(struct bfa_port_cfg_s));
	/* speed attributes */
	attr->pport_cfg.speed = fcport->cfg.speed;
	attr->speed_supported = fcport->speed_sup;
	attr->speed = fcport->speed;
	attr->cos_supported = FC_CLASS_3;

	/* topology attributes */
	attr->pport_cfg.topology = fcport->cfg.topology;
	attr->topology = fcport->topology;
	attr->pport_cfg.trunked = fcport->cfg.trunked;

	/* beacon attributes */
	attr->beacon = fcport->beacon;
	attr->link_e2e_beacon = fcport->link_e2e_beacon;
	attr->plog_enabled = (bfa_boolean_t)fcport->bfa->plog->plog_enabled;
	attr->io_profile = bfa_fcpim_get_io_profile(fcport->bfa);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_port_sm_table, fcport->sm);
	attr->bbsc_op_status = fcport->bbsc_op_state;

	/* PBC Disabled State overrides the state machine's view; otherwise
	 * report IOC-level conditions (disabled / f/w mismatch / acquiring
	 * FAA address) in place of the port state. */
	if (bfa_fcport_is_pbcdisabled(bfa))
		attr->port_state = BFA_PORT_ST_PREBOOT_DISABLED;
	else {
		if (bfa_ioc_is_disabled(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_IOCDIS;
		else if (bfa_ioc_fw_mismatch(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_FWMISMATCH;
		else if (bfa_ioc_is_acq_addr(&fcport->bfa->ioc))
			attr->port_state = BFA_PORT_ST_ACQ_ADDR;
	}

	/* FCoE vlan */
	attr->fcoe_vlan = fcport->fcoe_vlan;
}
3776
3777#define BFA_FCPORT_STATS_TOV 1000
3778
/*
 * Fetch port statistics (FCQoS or FCoE) asynchronously.
 *
 * Only one statistics operation may be outstanding at a time; a second
 * request while one is in flight fails with BFA_STATUS_DEVBUSY.  On
 * success the result is written to *stats and cbfn(cbarg) is invoked
 * on completion or on the BFA_FCPORT_STATS_TOV timeout.
 */
bfa_status_t
bfa_fcport_get_stats(struct bfa_s *bfa, union bfa_fcport_stats_u *stats,
		bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	/* mark busy and remember where to deliver the result */
	fcport->stats_busy = BFA_TRUE;
	fcport->stats_ret = stats;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_get(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_get_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3804
/*
 * Reset port statistics (FCQoS or FCoE) asynchronously.
 *
 * Shares the single stats_busy slot with bfa_fcport_get_stats(); returns
 * BFA_STATUS_DEVBUSY when another stats operation is in flight.  cbfn is
 * called back with cbarg on completion or on timeout.
 */
bfa_status_t
bfa_fcport_clear_stats(struct bfa_s *bfa, bfa_cb_port_t cbfn, void *cbarg)
{
	struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);

	if (fcport->stats_busy) {
		bfa_trc(bfa, fcport->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	fcport->stats_busy = BFA_TRUE;
	fcport->stats_cbfn = cbfn;
	fcport->stats_cbarg = cbarg;

	bfa_fcport_send_stats_clear(fcport);

	/* guard against a lost firmware response */
	bfa_timer_start(bfa, &fcport->timer, bfa_fcport_stats_clr_timeout,
			fcport, BFA_FCPORT_STATS_TOV);
	return BFA_STATUS_OK;
}
3828
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003829
Jing Huang5fbe25c2010-10-18 17:17:23 -07003830/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003831 * Fetch port attributes.
3832 */
3833bfa_boolean_t
3834bfa_fcport_is_disabled(struct bfa_s *bfa)
3835{
3836 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3837
3838 return bfa_sm_to_state(hal_port_sm_table, fcport->sm) ==
3839 BFA_PORT_ST_DISABLED;
3840
3841}
3842
3843bfa_boolean_t
3844bfa_fcport_is_ratelim(struct bfa_s *bfa)
3845{
3846 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3847
3848 return fcport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
3849
3850}
3851
Jing Huang5fbe25c2010-10-18 17:17:23 -07003852/*
Krishna Gudipatia7141342011-06-24 20:23:19 -07003853 * Enable/Disable FAA feature in port config
3854 */
3855void
3856bfa_fcport_cfg_faa(struct bfa_s *bfa, u8 state)
3857{
3858 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3859
3860 bfa_trc(bfa, state);
3861 fcport->cfg.faa_state = state;
3862}
3863
3864/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003865 * Get default minimum ratelim speed
3866 */
3867enum bfa_port_speed
3868bfa_fcport_get_ratelim_speed(struct bfa_s *bfa)
3869{
3870 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3871
3872 bfa_trc(bfa, fcport->cfg.trl_def_speed);
3873 return fcport->cfg.trl_def_speed;
3874
3875}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003876
3877bfa_boolean_t
3878bfa_fcport_is_linkup(struct bfa_s *bfa)
3879{
3880 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3881
3882 return (!fcport->cfg.trunked &&
3883 bfa_sm_cmp_state(fcport, bfa_fcport_sm_linkup)) ||
3884 (fcport->cfg.trunked &&
3885 fcport->trunk.attr.state == BFA_TRUNK_ONLINE);
3886}
3887
3888bfa_boolean_t
3889bfa_fcport_is_qos_enabled(struct bfa_s *bfa)
3890{
3891 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3892
3893 return fcport->cfg.qos_enabled;
3894}
3895
Krishna Gudipatibe540a92011-06-13 15:53:04 -07003896bfa_boolean_t
3897bfa_fcport_is_trunk_enabled(struct bfa_s *bfa)
3898{
3899 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(bfa);
3900
3901 return fcport->cfg.trunked;
3902}
3903
Jing Huang5fbe25c2010-10-18 17:17:23 -07003904/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07003905 * Rport State machine functions
3906 */
/*
 * Beginning state, only the CREATE event is expected; everything else
 * is a state-machine fault.
 */
static void
bfa_rport_sm_uninit(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_CREATE:
		bfa_stats(rp, sm_un_cr);
		bfa_sm_set_state(rp, bfa_rport_sm_created);
		break;

	default:
		bfa_stats(rp, sm_un_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3927
/*
 * Rport has been created by the driver but has no firmware instance yet.
 */
static void
bfa_rport_sm_created(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_cr_on);
		/* try to issue the f/w create; if the request queue is full,
		 * wait for queue space in the qfull state */
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_cr_del);
		/* no f/w instance exists, so the rport can be freed at once */
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_cr_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_cr_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3959
/*
 * Waiting for the rport create response from firmware.  DELETE/OFFLINE
 * arriving here cannot be acted on until the f/w responds, so they are
 * remembered via the *_pending states.
 */
static void
bfa_rport_sm_fwcreate(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwc_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_online);
		bfa_rport_online_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
3996
/*
 * Request queue is full, awaiting queue resume to send the f/w create
 * request.  Any terminal event must cancel the queue-wait element.
 */
static void
bfa_rport_sm_fwcreate_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		/* queue space available - issue the deferred create */
		bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		bfa_rport_send_fwcreate(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwc_del);
		/* create never reached f/w; safe to free immediately */
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_fwc_off);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwc_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		break;

	default:
		bfa_stats(rp, sm_fwc_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4037
/*
 * Online state - normal parking state.  Handles offline/delete requests,
 * speed changes and QoS state-change notifications from firmware.
 */
static void
bfa_rport_sm_online(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	struct bfi_rport_qos_scn_s *qos_scn;

	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_on_off);
		/* tear down the f/w instance; qfull variant waits for space */
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_on_del);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_on_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	case BFA_RPORT_SM_SET_SPEED:
		bfa_rport_send_fwspeed(rp);
		break;

	case BFA_RPORT_SM_QOS_SCN:
		qos_scn = (struct bfi_rport_qos_scn_s *) rp->event_arg.fw_msg;
		rp->qos_attr = qos_scn->new_qos_attr;
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_flow_id);
		bfa_trc(rp->bfa, qos_scn->old_qos_attr.qos_priority);
		bfa_trc(rp->bfa, qos_scn->new_qos_attr.qos_priority);

		/* flow ids arrive in wire (big-endian) order */
		qos_scn->old_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->old_qos_attr.qos_flow_id);
		qos_scn->new_qos_attr.qos_flow_id =
			be32_to_cpu(qos_scn->new_qos_attr.qos_flow_id);

		/* notify the driver only about attributes that changed */
		if (qos_scn->old_qos_attr.qos_flow_id !=
			qos_scn->new_qos_attr.qos_flow_id)
			bfa_cb_rport_qos_scn_flowid(rp->rport_drv,
				qos_scn->old_qos_attr,
				qos_scn->new_qos_attr);
		if (qos_scn->old_qos_attr.qos_priority !=
			qos_scn->new_qos_attr.qos_priority)
			bfa_cb_rport_qos_scn_prio(rp->rport_drv,
				qos_scn->old_qos_attr,
				qos_scn->new_qos_attr);
		break;

	default:
		bfa_stats(rp, sm_on_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4105
/*
 * Firmware rport is being deleted (as part of going offline) - awaiting
 * the f/w response.
 */
static void
bfa_rport_sm_fwdelete(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_fwd_rsp);
		bfa_sm_set_state(rp, bfa_rport_sm_offline);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		/* upgrade offline to a full delete once f/w responds */
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4138
/*
 * Request queue was full when the f/w delete was attempted - waiting for
 * queue resume to send it.
 */
static void
bfa_rport_sm_fwdelete_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_fwd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_fwd_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_offline_cb(rp);
		break;

	default:
		bfa_stats(rp, sm_fwd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4168
/*
 * Offline state - rport exists in the driver but has no firmware
 * instance; can be brought back online or deleted directly.
 */
static void
bfa_rport_sm_offline(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_off_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_off_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_off_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_off_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4203
/*
 * Rport is deleted, waiting for the firmware response to the delete
 * before freeing the driver-side resources.
 */
static void
bfa_rport_sm_deleting(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		/* IOC is gone; no response will come - free immediately */
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4230
/*
 * Delete requested but the request queue was full - waiting for queue
 * resume to send the f/w delete.
 */
static void
bfa_rport_sm_deleting_qfull(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_QRESUME:
		bfa_stats(rp, sm_del_fwrsp);
		bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		bfa_rport_send_fwdelete(rp);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_del_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_reqq_wcancel(&rp->reqq_wait);
		bfa_rport_free(rp);
		break;

	default:
		bfa_sm_fault(rp->bfa, event);
	}
}
4255
/*
 * Waiting for the rport create response from firmware while a delete is
 * pending; the delete is issued as soon as the create completes.
 */
static void
bfa_rport_sm_delete_pending(struct bfa_rport_s *rp,
				enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_delp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_deleting);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_deleting_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_delp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	default:
		bfa_stats(rp, sm_delp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4286
/*
 * Waiting for the rport create response from firmware while an offline
 * is pending; the f/w delete is issued once the create completes.
 */
static void
bfa_rport_sm_offline_pending(struct bfa_rport_s *rp,
		enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_FWRSP:
		bfa_stats(rp, sm_offp_fwrsp);
		if (bfa_rport_send_fwdelete(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwdelete_qfull);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_offp_del);
		/* delete supersedes the pending offline */
		bfa_sm_set_state(rp, bfa_rport_sm_delete_pending);
		break;

	case BFA_RPORT_SM_HWFAIL:
		bfa_stats(rp, sm_offp_hwf);
		bfa_sm_set_state(rp, bfa_rport_sm_iocdisable);
		break;

	default:
		bfa_stats(rp, sm_offp_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4321
/*
 * IOC h/w failed.  Firmware is unreachable, so all transitions happen
 * locally; a later ONLINE retries the f/w create.
 */
static void
bfa_rport_sm_iocdisable(struct bfa_rport_s *rp, enum bfa_rport_event event)
{
	bfa_trc(rp->bfa, rp->rport_tag);
	bfa_trc(rp->bfa, event);

	switch (event) {
	case BFA_RPORT_SM_OFFLINE:
		bfa_stats(rp, sm_iocd_off);
		bfa_rport_offline_cb(rp);
		break;

	case BFA_RPORT_SM_DELETE:
		bfa_stats(rp, sm_iocd_del);
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);
		bfa_rport_free(rp);
		break;

	case BFA_RPORT_SM_ONLINE:
		bfa_stats(rp, sm_iocd_on);
		if (bfa_rport_send_fwcreate(rp))
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate);
		else
			bfa_sm_set_state(rp, bfa_rport_sm_fwcreate_qfull);
		break;

	case BFA_RPORT_SM_HWFAIL:
		/* already in the failed state - ignore repeats */
		break;

	default:
		bfa_stats(rp, sm_iocd_unexp);
		bfa_sm_fault(rp->bfa, event);
	}
}
4359
4360
4361
Jing Huang5fbe25c2010-10-18 17:17:23 -07004362/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004363 * bfa_rport_private BFA rport private functions
4364 */
4365
4366static void
4367__bfa_cb_rport_online(void *cbarg, bfa_boolean_t complete)
4368{
4369 struct bfa_rport_s *rp = cbarg;
4370
4371 if (complete)
4372 bfa_cb_rport_online(rp->rport_drv);
4373}
4374
4375static void
4376__bfa_cb_rport_offline(void *cbarg, bfa_boolean_t complete)
4377{
4378 struct bfa_rport_s *rp = cbarg;
4379
4380 if (complete)
4381 bfa_cb_rport_offline(rp->rport_drv);
4382}
4383
4384static void
4385bfa_rport_qresume(void *cbarg)
4386{
4387 struct bfa_rport_s *rp = cbarg;
4388
4389 bfa_sm_send_event(rp, BFA_RPORT_SM_QRESUME);
4390}
4391
/*
 * Report the KVA memory the rport module needs: one bfa_rport_s per
 * configured rport.  Enforces the BFA_RPORT_MIN floor on the count
 * (the adjusted value is written back into 'cfg').
 */
static void
bfa_rport_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_mem_kva_s *rport_kva = BFA_MEM_RPORT_KVA(bfa);

	if (cfg->fwcfg.num_rports < BFA_RPORT_MIN)
		cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	/* kva memory */
	bfa_mem_kva_setup(minfo, rport_kva,
		cfg->fwcfg.num_rports * sizeof(struct bfa_rport_s));
}
4405
/*
 * Module attach: carve the rport array out of the pre-allocated KVA
 * block, initialize every rport and build the free list.
 */
static void
bfa_rport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rp;
	u16 i;

	INIT_LIST_HEAD(&mod->rp_free_q);
	INIT_LIST_HEAD(&mod->rp_active_q);
	INIT_LIST_HEAD(&mod->rp_unused_q);

	rp = (struct bfa_rport_s *) bfa_mem_kva_curp(mod);
	mod->rps_list = rp;
	mod->num_rports = cfg->fwcfg.num_rports;

	/* the count must be a non-zero power of two */
	WARN_ON(!mod->num_rports ||
		(mod->num_rports & (mod->num_rports - 1)));

	for (i = 0; i < mod->num_rports; i++, rp++) {
		memset(rp, 0, sizeof(struct bfa_rport_s));
		rp->bfa = bfa;
		rp->rport_tag = i;
		bfa_sm_set_state(rp, bfa_rport_sm_uninit);

		/*
		 * rport tag 0 is reserved (kept off the free list)
		 */
		if (i)
			list_add_tail(&rp->qe, &mod->rp_free_q);

		bfa_reqq_winit(&rp->reqq_wait, bfa_rport_qresume, rp);
	}

	/*
	 * consume memory - advance the KVA cursor past the rport array
	 */
	bfa_mem_kva_curp(mod) = (u8 *) rp;
}
4445
/* Module detach hook - nothing to release for the rport module. */
static void
bfa_rport_detach(struct bfa_s *bfa)
{
}
4450
/* Module start hook - intentionally a no-op. */
static void
bfa_rport_start(struct bfa_s *bfa)
{
}
4455
/* Module stop hook - intentionally a no-op. */
static void
bfa_rport_stop(struct bfa_s *bfa)
{
}
4460
/*
 * IOC-disable hook: recycle the parked unused resources and fail every
 * active rport with a HWFAIL event.
 */
static void
bfa_rport_iocdisable(struct bfa_s *bfa)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
	struct bfa_rport_s *rport;
	struct list_head *qe, *qen;

	/* Enqueue unused rport resources to free_q */
	list_splice_tail_init(&mod->rp_unused_q, &mod->rp_free_q);

	/* safe iteration: HWFAIL may free the rport and unlink it */
	list_for_each_safe(qe, qen, &mod->rp_active_q) {
		rport = (struct bfa_rport_s *) qe;
		bfa_sm_send_event(rport, BFA_RPORT_SM_HWFAIL);
	}
}
4476
4477static struct bfa_rport_s *
4478bfa_rport_alloc(struct bfa_rport_mod_s *mod)
4479{
4480 struct bfa_rport_s *rport;
4481
4482 bfa_q_deq(&mod->rp_free_q, &rport);
4483 if (rport)
4484 list_add_tail(&rport->qe, &mod->rp_active_q);
4485
4486 return rport;
4487}
4488
/*
 * Return an rport to the free list.  The rport must currently be on
 * the active list (asserted via WARN_ON).
 */
static void
bfa_rport_free(struct bfa_rport_s *rport)
{
	struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(rport->bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->rp_active_q, rport));
	list_del(&rport->qe);
	list_add_tail(&rport->qe, &mod->rp_free_q);
}
4498
/*
 * Build and queue a RPORT CREATE request to firmware.  Returns BFA_FALSE
 * (and arms a queue-wait callback) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwcreate(struct bfa_rport_s *rp)
{
	struct bfi_rport_create_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_CREATE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->bfa_handle = rp->rport_tag;
	/* max_frmsz goes out in wire (big-endian) order */
	m->max_frmsz = cpu_to_be16(rp->rport_info.max_frmsz);
	m->pid = rp->rport_info.pid;
	m->lp_fwtag = bfa_lps_get_fwtag(rp->bfa, (u8)rp->rport_info.lp_tag);
	m->local_pid = rp->rport_info.local_pid;
	m->fc_class = rp->rport_info.fc_class;
	m->vf_en = rp->rport_info.vf_en;
	m->vf_id = rp->rport_info.vf_id;
	m->cisc = rp->rport_info.cisc;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4531
/*
 * Build and queue a RPORT DELETE request to firmware.  Returns BFA_FALSE
 * (and arms a queue-wait callback) when the request queue is full.
 */
static bfa_boolean_t
bfa_rport_send_fwdelete(struct bfa_rport_s *rp)
{
	struct bfi_rport_delete_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_reqq_wait(rp->bfa, BFA_REQQ_RPORT, &rp->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_DELETE_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4556
/*
 * Build and queue a SET SPEED request to firmware.  Unlike the create
 * and delete paths, a full queue here is only traced and the request
 * dropped - no queue-wait is armed.
 */
static bfa_boolean_t
bfa_rport_send_fwspeed(struct bfa_rport_s *rp)
{
	struct bfa_rport_speed_req_s *m;

	/*
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(rp->bfa, BFA_REQQ_RPORT);
	if (!m) {
		bfa_trc(rp->bfa, rp->rport_info.speed);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_RPORT, BFI_RPORT_H2I_SET_SPEED_REQ,
			bfa_fn_lpu(rp->bfa));
	m->fw_handle = rp->fw_handle;
	m->speed = (u8)rp->rport_info.speed;

	/*
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(rp->bfa, BFA_REQQ_RPORT, m->mh);
	return BFA_TRUE;
}
4582
4583
4584
Jing Huang5fbe25c2010-10-18 17:17:23 -07004585/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004586 * bfa_rport_public
4587 */
4588
/*
 * Rport interrupt processing: dispatch firmware-to-host rport messages
 * (create/delete responses and QoS state-change notifications) to the
 * corresponding rport's state machine.
 */
void
bfa_rport_isr(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	union bfi_rport_i2h_msg_u msg;
	struct bfa_rport_s *rp;

	bfa_trc(bfa, m->mhdr.msg_id);

	msg.msg = m;

	switch (m->mhdr.msg_id) {
	case BFI_RPORT_I2H_CREATE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.create_rsp->bfa_handle);
		/* remember the f/w handle for subsequent requests */
		rp->fw_handle = msg.create_rsp->fw_handle;
		rp->qos_attr = msg.create_rsp->qos_attr;
		WARN_ON(msg.create_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_DELETE_RSP:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.delete_rsp->bfa_handle);
		WARN_ON(msg.delete_rsp->status != BFA_STATUS_OK);
		bfa_sm_send_event(rp, BFA_RPORT_SM_FWRSP);
		break;

	case BFI_RPORT_I2H_QOS_SCN:
		rp = BFA_RPORT_FROM_TAG(bfa, msg.qos_scn_evt->bfa_handle);
		/* stash the raw event for the state machine to decode */
		rp->event_arg.fw_msg = msg.qos_scn_evt;
		bfa_sm_send_event(rp, BFA_RPORT_SM_QOS_SCN);
		break;

	default:
		bfa_trc(bfa, m->mhdr.msg_id);
		WARN_ON(1);
	}
}
4628
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004629void
4630bfa_rport_res_recfg(struct bfa_s *bfa, u16 num_rport_fw)
4631{
4632 struct bfa_rport_mod_s *mod = BFA_RPORT_MOD(bfa);
4633 struct list_head *qe;
4634 int i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004635
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004636 for (i = 0; i < (mod->num_rports - num_rport_fw); i++) {
4637 bfa_q_deq_tail(&mod->rp_free_q, &qe);
4638 list_add_tail(qe, &mod->rp_unused_q);
4639 }
4640}
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004641
Jing Huang5fbe25c2010-10-18 17:17:23 -07004642/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004643 * bfa_rport_api
4644 */
4645
4646struct bfa_rport_s *
4647bfa_rport_create(struct bfa_s *bfa, void *rport_drv)
4648{
4649 struct bfa_rport_s *rp;
4650
4651 rp = bfa_rport_alloc(BFA_RPORT_MOD(bfa));
4652
4653 if (rp == NULL)
4654 return NULL;
4655
4656 rp->bfa = bfa;
4657 rp->rport_drv = rport_drv;
Maggie Zhangf7f73812010-12-09 19:08:43 -08004658 memset(&rp->stats, 0, sizeof(rp->stats));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004659
Jing Huangd4b671c2010-12-26 21:46:35 -08004660 WARN_ON(!bfa_sm_cmp_state(rp, bfa_rport_sm_uninit));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004661 bfa_sm_send_event(rp, BFA_RPORT_SM_CREATE);
4662
4663 return rp;
4664}
4665
/*
 * Bring a rport online after login completes.
 *
 * @param[in] rport      rport to bring online
 * @param[in] rport_info login parameters; copied into the rport.
 *                       A zero max_frmsz is warned about and then
 *                       patched to FC_MIN_PDUSZ (see below).
 */
void
bfa_rport_online(struct bfa_rport_s *rport, struct bfa_rport_info_s *rport_info)
{
	WARN_ON(rport_info->max_frmsz == 0);

	/*
	 * Some JBODs are seen to be not setting PDU size correctly in PLOGI
	 * responses. Default to minimum size.
	 */
	if (rport_info->max_frmsz == 0) {
		bfa_trc(rport->bfa, rport->rport_tag);
		rport_info->max_frmsz = FC_MIN_PDUSZ;
	}

	rport->rport_info = *rport_info;
	bfa_sm_send_event(rport, BFA_RPORT_SM_ONLINE);
}
4683
/*
 * Set the operational speed of a rport and notify its state machine.
 * A concrete speed is required; zero and AFA_PORT_SPEED_AUTO are
 * caller errors (warned, not rejected).
 */
void
bfa_rport_speed(struct bfa_rport_s *rport, enum bfa_port_speed speed)
{
	WARN_ON(speed == 0);
	WARN_ON(speed == BFA_PORT_SPEED_AUTO);

	rport->rport_info.speed = speed;
	bfa_sm_send_event(rport, BFA_RPORT_SM_SET_SPEED);
}
4693
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004694
Jing Huang5fbe25c2010-10-18 17:17:23 -07004695/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004696 * SGPG related functions
4697 */
4698
/*
 * Compute DMA and KVA memory needed by the SGPG module.
 *
 * Clamps the configured SG page count to [BFA_SGPG_MIN, BFA_SGPG_MAX],
 * spreads the hardware SG pages over the module's DMA segments, and
 * reserves KVA for the bfa_sgpg_s bookkeeping structures.
 */
static void
bfa_sgpg_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_sgpg_mod_s *sgpg_mod = BFA_SGPG_MOD(bfa);
	struct bfa_mem_kva_s *sgpg_kva = BFA_MEM_SGPG_KVA(bfa);
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_sgpg, num_sgpg;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);

	/* clamp the requested SG page count into the supported range */
	if (cfg->drvcfg.num_sgpgs < BFA_SGPG_MIN)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	else if (cfg->drvcfg.num_sgpgs > BFA_SGPG_MAX)
		cfg->drvcfg.num_sgpgs = BFA_SGPG_MAX;

	num_sgpg = cfg->drvcfg.num_sgpgs;

	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);
	per_seg_sgpg = BFI_MEM_NREQS_SEG(sgpg_sz);

	/* full segments first; the last segment takes the remainder */
	bfa_mem_dma_seg_iter(sgpg_mod, seg_ptr, nsegs, idx) {
		if (num_sgpg >= per_seg_sgpg) {
			num_sgpg -= per_seg_sgpg;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_sgpg * sgpg_sz);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_sgpg * sgpg_sz);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, sgpg_kva,
		cfg->drvcfg.num_sgpgs * sizeof(struct bfa_sgpg_s));
}
4736
/*
 * Attach-time initialization of the SGPG module: carve hardware SG
 * pages (bfi_sgpg_s) out of the claimed DMA segments, pair each with a
 * driver-side bfa_sgpg_s from the KVA area, and queue the pairs on the
 * module free list.
 */
static void
bfa_sgpg_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
	struct bfa_sgpg_s *hsgpg;
	struct bfi_sgpg_s *sgpg;
	u64 align_len;
	struct bfa_mem_dma_s *seg_ptr;
	u32	sgpg_sz = sizeof(struct bfi_sgpg_s);
	u16	i, idx, nsegs, per_seg_sgpg, num_sgpg;

	/* one physical address viewed either as u64 or as a bfi address */
	union {
		u64 pa;
		union bfi_addr_u addr;
	} sgpg_pa, sgpg_pa_tmp;

	INIT_LIST_HEAD(&mod->sgpg_q);
	INIT_LIST_HEAD(&mod->sgpg_wait_q);

	bfa_trc(bfa, cfg->drvcfg.num_sgpgs);

	mod->free_sgpgs = mod->num_sgpgs = cfg->drvcfg.num_sgpgs;

	num_sgpg = cfg->drvcfg.num_sgpgs;
	nsegs = BFI_MEM_DMA_NSEGS(num_sgpg, sgpg_sz);

	/* dma/kva mem claim */
	hsgpg = (struct bfa_sgpg_s *) bfa_mem_kva_curp(mod);

	bfa_mem_dma_seg_iter(mod, seg_ptr, nsegs, idx) {

		if (!bfa_mem_dma_virt(seg_ptr))
			break;

		/* skip ahead to the first SG-page-aligned address */
		align_len = BFA_SGPG_ROUNDUP(bfa_mem_dma_phys(seg_ptr)) -
			bfa_mem_dma_phys(seg_ptr);

		sgpg = (struct bfi_sgpg_s *)
			(((u8 *) bfa_mem_dma_virt(seg_ptr)) + align_len);
		sgpg_pa.pa = bfa_mem_dma_phys(seg_ptr) + align_len;
		WARN_ON(sgpg_pa.pa & (sgpg_sz - 1));

		/* usable SG pages in this segment after alignment loss */
		per_seg_sgpg = (seg_ptr->mem_len - (u32)align_len) / sgpg_sz;

		for (i = 0; num_sgpg > 0 && i < per_seg_sgpg; i++, num_sgpg--) {
			memset(hsgpg, 0, sizeof(*hsgpg));
			memset(sgpg, 0, sizeof(*sgpg));

			hsgpg->sgpg = sgpg;
			/* hardware expects the address byte-swapped */
			sgpg_pa_tmp.pa = bfa_sgaddr_le(sgpg_pa.pa);
			hsgpg->sgpg_pa = sgpg_pa_tmp.addr;
			list_add_tail(&hsgpg->qe, &mod->sgpg_q);

			sgpg++;
			hsgpg++;
			sgpg_pa.pa += sgpg_sz;
		}
	}

	/* advance the KVA cursor beyond the consumed bfa_sgpg_s array */
	bfa_mem_kva_curp(mod) = (u8 *) hsgpg;
}
4799
/* Empty detach hook required by the BFA module interface. */
static void
bfa_sgpg_detach(struct bfa_s *bfa)
{
}
4804
/* Empty start hook required by the BFA module interface. */
static void
bfa_sgpg_start(struct bfa_s *bfa)
{
}
4809
/* Empty stop hook required by the BFA module interface. */
static void
bfa_sgpg_stop(struct bfa_s *bfa)
{
}
4814
/* Empty IOC-disable hook required by the BFA module interface. */
static void
bfa_sgpg_iocdisable(struct bfa_s *bfa)
{
}
4819
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004820bfa_status_t
4821bfa_sgpg_malloc(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpgs)
4822{
4823 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4824 struct bfa_sgpg_s *hsgpg;
4825 int i;
4826
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004827 if (mod->free_sgpgs < nsgpgs)
4828 return BFA_STATUS_ENOMEM;
4829
4830 for (i = 0; i < nsgpgs; i++) {
4831 bfa_q_deq(&mod->sgpg_q, &hsgpg);
Jing Huangd4b671c2010-12-26 21:46:35 -08004832 WARN_ON(!hsgpg);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004833 list_add_tail(&hsgpg->qe, sgpg_q);
4834 }
4835
4836 mod->free_sgpgs -= nsgpgs;
4837 return BFA_STATUS_OK;
4838}
4839
4840void
4841bfa_sgpg_mfree(struct bfa_s *bfa, struct list_head *sgpg_q, int nsgpg)
4842{
4843 struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);
4844 struct bfa_sgpg_wqe_s *wqe;
4845
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004846 mod->free_sgpgs += nsgpg;
Jing Huangd4b671c2010-12-26 21:46:35 -08004847 WARN_ON(mod->free_sgpgs > mod->num_sgpgs);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004848
4849 list_splice_tail_init(sgpg_q, &mod->sgpg_q);
4850
4851 if (list_empty(&mod->sgpg_wait_q))
4852 return;
4853
Jing Huang5fbe25c2010-10-18 17:17:23 -07004854 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004855 * satisfy as many waiting requests as possible
4856 */
4857 do {
4858 wqe = bfa_q_first(&mod->sgpg_wait_q);
4859 if (mod->free_sgpgs < wqe->nsgpg)
4860 nsgpg = mod->free_sgpgs;
4861 else
4862 nsgpg = wqe->nsgpg;
4863 bfa_sgpg_malloc(bfa, &wqe->sgpg_q, nsgpg);
4864 wqe->nsgpg -= nsgpg;
4865 if (wqe->nsgpg == 0) {
4866 list_del(&wqe->qe);
4867 wqe->cbfn(wqe->cbarg);
4868 }
4869 } while (mod->free_sgpgs && !list_empty(&mod->sgpg_wait_q));
4870}
4871
/*
 * Queue a request for 'nsgpg' SG pages; the wqe's callback fires once
 * the full request has been satisfied by future frees.
 *
 * Must only be called when the free pool cannot satisfy the request
 * outright (enforced by the WARN_ONs below).
 */
void
bfa_sgpg_wait(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe, int nsgpg)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(nsgpg <= 0);
	WARN_ON(nsgpg <= mod->free_sgpgs);	/* caller should have alloc'd */

	wqe->nsgpg_total = wqe->nsgpg = nsgpg;

	/*
	 * allocate any left to this one first
	 */
	if (mod->free_sgpgs) {
		/*
		 * no one else is waiting for SGPG
		 */
		WARN_ON(!list_empty(&mod->sgpg_wait_q));
		list_splice_tail_init(&mod->sgpg_q, &wqe->sgpg_q);
		wqe->nsgpg -= mod->free_sgpgs;
		mod->free_sgpgs = 0;
	}

	list_add_tail(&wqe->qe, &mod->sgpg_wait_q);
}
4897
/*
 * Cancel a pending SG page wait request and return any partially
 * granted pages to the free pool.
 */
void
bfa_sgpg_wcancel(struct bfa_s *bfa, struct bfa_sgpg_wqe_s *wqe)
{
	struct bfa_sgpg_mod_s *mod = BFA_SGPG_MOD(bfa);

	WARN_ON(!bfa_q_is_on_q(&mod->sgpg_wait_q, wqe));
	list_del(&wqe->qe);

	/* give back whatever was already handed to this waiter */
	if (wqe->nsgpg_total != wqe->nsgpg)
		bfa_sgpg_mfree(bfa, &wqe->sgpg_q,
				   wqe->nsgpg_total - wqe->nsgpg);
}
4910
4911void
4912bfa_sgpg_winit(struct bfa_sgpg_wqe_s *wqe, void (*cbfn) (void *cbarg),
4913 void *cbarg)
4914{
4915 INIT_LIST_HEAD(&wqe->sgpg_q);
4916 wqe->cbfn = cbfn;
4917 wqe->cbarg = cbarg;
4918}
4919
Jing Huang5fbe25c2010-10-18 17:17:23 -07004920/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004921 * UF related functions
4922 */
4923/*
4924 *****************************************************************************
4925 * Internal functions
4926 *****************************************************************************
4927 */
4928static void
4929__bfa_cb_uf_recv(void *cbarg, bfa_boolean_t complete)
4930{
4931 struct bfa_uf_s *uf = cbarg;
4932 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(uf->bfa);
4933
4934 if (complete)
4935 ufm->ufrecv(ufm->cbarg, uf);
4936}
4937
4938static void
Krishna Gudipati45070252011-06-24 20:24:29 -07004939claim_uf_post_msgs(struct bfa_uf_mod_s *ufm)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004940{
4941 struct bfi_uf_buf_post_s *uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004942 u16 i;
4943 u16 buf_len;
4944
Krishna Gudipati45070252011-06-24 20:24:29 -07004945 ufm->uf_buf_posts = (struct bfi_uf_buf_post_s *) bfa_mem_kva_curp(ufm);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004946 uf_bp_msg = ufm->uf_buf_posts;
4947
4948 for (i = 0, uf_bp_msg = ufm->uf_buf_posts; i < ufm->num_ufs;
4949 i++, uf_bp_msg++) {
Jing Huang6a18b162010-10-18 17:08:54 -07004950 memset(uf_bp_msg, 0, sizeof(struct bfi_uf_buf_post_s));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004951
4952 uf_bp_msg->buf_tag = i;
4953 buf_len = sizeof(struct bfa_uf_buf_s);
Jing Huangba816ea2010-10-18 17:10:50 -07004954 uf_bp_msg->buf_len = cpu_to_be16(buf_len);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004955 bfi_h2i_set(uf_bp_msg->mh, BFI_MC_UF, BFI_UF_H2I_BUF_POST,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07004956 bfa_fn_lpu(ufm->bfa));
Krishna Gudipati85ce9282011-06-13 15:39:36 -07004957 bfa_alen_set(&uf_bp_msg->alen, buf_len, ufm_pbs_pa(ufm, i));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004958 }
4959
Jing Huang5fbe25c2010-10-18 17:17:23 -07004960 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004961 * advance pointer beyond consumed memory
4962 */
Krishna Gudipati45070252011-06-24 20:24:29 -07004963 bfa_mem_kva_curp(ufm) = (u8 *) uf_bp_msg;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07004964}
4965
/*
 * Carve the bfa_uf_s array out of the module KVA area, point each UF
 * at its per-UF DMA buffer, and queue every UF on the free list.
 */
static void
claim_ufs(struct bfa_uf_mod_s *ufm)
{
	u16 i;
	struct bfa_uf_s *uf;

	/*
	 * Claim block of memory for UF list
	 */
	ufm->uf_list = (struct bfa_uf_s *) bfa_mem_kva_curp(ufm);

	/*
	 * Initialize UFs and queue it in UF free queue
	 */
	for (i = 0, uf = ufm->uf_list; i < ufm->num_ufs; i++, uf++) {
		memset(uf, 0, sizeof(struct bfa_uf_s));
		uf->bfa = ufm->bfa;
		uf->uf_tag = i;
		uf->pb_len = BFA_PER_UF_DMA_SZ;
		/* kernel virtual and physical address of the DMA buffer */
		uf->buf_kva = bfa_mem_get_dmabuf_kva(ufm, i, BFA_PER_UF_DMA_SZ);
		uf->buf_pa = ufm_pbs_pa(ufm, i);
		list_add_tail(&uf->qe, &ufm->uf_free_q);
	}

	/*
	 * advance memory pointer
	 */
	bfa_mem_kva_curp(ufm) = (u8 *) uf;
}
4995
/*
 * Claim all KVA memory needed by the UF module: the UF descriptors
 * first, then the pre-built buffer-post messages.
 */
static void
uf_mem_claim(struct bfa_uf_mod_s *ufm)
{
	claim_ufs(ufm);
	claim_uf_post_msgs(ufm);
}
5002
/*
 * Compute DMA and KVA memory needed by the UF (unsolicited frame)
 * module: one DMA receive buffer per UF spread over the segments,
 * plus KVA for a bfa_uf_s and a prebuilt bfi_uf_buf_post_s per UF.
 */
static void
bfa_uf_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *minfo,
		struct bfa_s *bfa)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	struct bfa_mem_kva_s *uf_kva = BFA_MEM_UF_KVA(bfa);
	u32	num_ufs = cfg->fwcfg.num_uf_bufs;
	struct bfa_mem_dma_s *seg_ptr;
	u16	nsegs, idx, per_seg_uf = 0;

	nsegs = BFI_MEM_DMA_NSEGS(num_ufs, BFA_PER_UF_DMA_SZ);
	per_seg_uf = BFI_MEM_NREQS_SEG(BFA_PER_UF_DMA_SZ);

	/* full segments first; the last segment takes the remainder */
	bfa_mem_dma_seg_iter(ufm, seg_ptr, nsegs, idx) {
		if (num_ufs >= per_seg_uf) {
			num_ufs -= per_seg_uf;
			bfa_mem_dma_setup(minfo, seg_ptr,
				per_seg_uf * BFA_PER_UF_DMA_SZ);
		} else
			bfa_mem_dma_setup(minfo, seg_ptr,
				num_ufs * BFA_PER_UF_DMA_SZ);
	}

	/* kva memory */
	bfa_mem_kva_setup(minfo, uf_kva, cfg->fwcfg.num_uf_bufs *
		(sizeof(struct bfa_uf_s) + sizeof(struct bfi_uf_buf_post_s)));
}
5030
/*
 * Attach-time initialization of the UF module: set up the free,
 * posted and unused queues and claim the module's KVA memory.
 */
static void
bfa_uf_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->bfa = bfa;
	ufm->num_ufs = cfg->fwcfg.num_uf_bufs;
	INIT_LIST_HEAD(&ufm->uf_free_q);
	INIT_LIST_HEAD(&ufm->uf_posted_q);
	INIT_LIST_HEAD(&ufm->uf_unused_q);

	uf_mem_claim(ufm);
}
5045
/* Empty detach hook required by the BFA module interface. */
static void
bfa_uf_detach(struct bfa_s *bfa)
{
}
5050
5051static struct bfa_uf_s *
5052bfa_uf_get(struct bfa_uf_mod_s *uf_mod)
5053{
5054 struct bfa_uf_s *uf;
5055
5056 bfa_q_deq(&uf_mod->uf_free_q, &uf);
5057 return uf;
5058}
5059
/* Return a UF to the tail of the free queue. */
static void
bfa_uf_put(struct bfa_uf_mod_s *uf_mod, struct bfa_uf_s *uf)
{
	list_add_tail(&uf->qe, &uf_mod->uf_free_q);
}
5065
/*
 * Post a single UF receive buffer to the firmware by copying its
 * prebuilt BUF_POST message into the request queue, then move the UF
 * to the posted queue.
 *
 * Returns BFA_STATUS_FAILED when no request queue element is
 * available; the UF is then not queued anywhere by this function.
 */
static bfa_status_t
bfa_uf_post(struct bfa_uf_mod_s *ufm, struct bfa_uf_s *uf)
{
	struct bfi_uf_buf_post_s *uf_post_msg;

	uf_post_msg = bfa_reqq_next(ufm->bfa, BFA_REQQ_FCXP);
	if (!uf_post_msg)
		return BFA_STATUS_FAILED;

	memcpy(uf_post_msg, &ufm->uf_buf_posts[uf->uf_tag],
		   sizeof(struct bfi_uf_buf_post_s));
	bfa_reqq_produce(ufm->bfa, BFA_REQQ_FCXP, uf_post_msg->mh);

	bfa_trc(ufm->bfa, uf->uf_tag);

	list_add_tail(&uf->qe, &ufm->uf_posted_q);
	return BFA_STATUS_OK;
}
5084
5085static void
5086bfa_uf_post_all(struct bfa_uf_mod_s *uf_mod)
5087{
5088 struct bfa_uf_s *uf;
5089
5090 while ((uf = bfa_uf_get(uf_mod)) != NULL) {
5091 if (bfa_uf_post(uf_mod, uf) != BFA_STATUS_OK)
5092 break;
5093 }
5094}
5095
/*
 * Handle a FRM_RCVD message from firmware: locate the UF by its
 * buffer tag, fix up byte order, log the frame header, and deliver
 * the UF to the registered receive handler (directly when running in
 * FCS context, otherwise via the callback queue).
 */
static void
uf_recv(struct bfa_s *bfa, struct bfi_uf_frm_rcvd_s *m)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
	u16 uf_tag = m->buf_tag;
	struct bfa_uf_s *uf = &ufm->uf_list[uf_tag];
	struct bfa_uf_buf_s *uf_buf;
	uint8_t *buf;
	struct fchs_s *fchs;

	uf_buf = (struct bfa_uf_buf_s *)
			bfa_mem_get_dmabuf_kva(ufm, uf_tag, uf->pb_len);
	buf = &uf_buf->d[0];

	/* firmware reports lengths in big-endian byte order */
	m->frm_len = be16_to_cpu(m->frm_len);
	m->xfr_len = be16_to_cpu(m->xfr_len);

	fchs = (struct fchs_s *)uf_buf;

	list_del(&uf->qe);	/* dequeue from posted queue */

	uf->data_ptr = buf;
	uf->data_len = m->xfr_len;

	WARN_ON(uf->data_len < sizeof(struct fchs_s));

	/* log header only, or header plus first payload word */
	if (uf->data_len == sizeof(struct fchs_s)) {
		bfa_plog_fchdr(bfa->plog, BFA_PL_MID_HAL_UF, BFA_PL_EID_RX,
			       uf->data_len, (struct fchs_s *)buf);
	} else {
		u32 pld_w0 = *((u32 *) (buf + sizeof(struct fchs_s)));
		bfa_plog_fchdr_and_pl(bfa->plog, BFA_PL_MID_HAL_UF,
				      BFA_PL_EID_RX, uf->data_len,
				      (struct fchs_s *)buf, pld_w0);
	}

	if (bfa->fcs)
		__bfa_cb_uf_recv(uf, BFA_TRUE);
	else
		bfa_cb_queue(bfa, &uf->hcb_qe, __bfa_cb_uf_recv, uf);
}
5137
/* Empty stop hook required by the BFA module interface. */
static void
bfa_uf_stop(struct bfa_s *bfa)
{
}
5142
5143static void
5144bfa_uf_iocdisable(struct bfa_s *bfa)
5145{
5146 struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);
5147 struct bfa_uf_s *uf;
5148 struct list_head *qe, *qen;
5149
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005150 /* Enqueue unused uf resources to free_q */
5151 list_splice_tail_init(&ufm->uf_unused_q, &ufm->uf_free_q);
5152
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005153 list_for_each_safe(qe, qen, &ufm->uf_posted_q) {
5154 uf = (struct bfa_uf_s *) qe;
5155 list_del(&uf->qe);
5156 bfa_uf_put(ufm, uf);
5157 }
5158}
5159
/* Start hook: hand all free UF receive buffers to the firmware. */
static void
bfa_uf_start(struct bfa_s *bfa)
{
	bfa_uf_post_all(BFA_UF_MOD(bfa));
}
5165
/*
 * Register handler for all unsolicited receive frames.
 *
 * @param[in]	bfa		BFA instance
 * @param[in]	ufrecv	receive handler function
 * @param[in]	cbarg	receive handler arg
 */
void
bfa_uf_recv_register(struct bfa_s *bfa, bfa_cb_uf_recv_t ufrecv, void *cbarg)
{
	struct bfa_uf_mod_s *ufm = BFA_UF_MOD(bfa);

	ufm->ufrecv = ufrecv;
	ufm->cbarg = cbarg;
}
5181
/*
 * Free an unsolicited frame back to BFA and immediately try to
 * repost free buffers to the firmware.
 *
 * @param[in]	uf	unsolicited frame to be freed
 *
 * @return None
 */
void
bfa_uf_free(struct bfa_uf_s *uf)
{
	bfa_uf_put(BFA_UF_MOD(uf->bfa), uf);
	bfa_uf_post_all(BFA_UF_MOD(uf->bfa));
}
5195
5196
5197
/*
 * uf_pub BFA uf module public functions
 *
 * UF interrupt handler; FRM_RCVD is the only expected i2h message.
 */
void
bfa_uf_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	bfa_trc(bfa, msg->mhdr.msg_id);

	switch (msg->mhdr.msg_id) {
	case BFI_UF_I2H_FRM_RCVD:
		uf_recv(bfa, (struct bfi_uf_frm_rcvd_s *) msg);
		break;

	default:
		bfa_trc(bfa, msg->mhdr.msg_id);
		WARN_ON(1);	/* unexpected message id from firmware */
	}
}
5216
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005217void
5218bfa_uf_res_recfg(struct bfa_s *bfa, u16 num_uf_fw)
5219{
5220 struct bfa_uf_mod_s *mod = BFA_UF_MOD(bfa);
5221 struct list_head *qe;
5222 int i;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07005223
Krishna Gudipati3fd45982011-06-24 20:24:08 -07005224 for (i = 0; i < (mod->num_ufs - num_uf_fw); i++) {
5225 bfa_q_deq_tail(&mod->uf_free_q, &qe);
5226 list_add_tail(qe, &mod->uf_unused_q);
5227 }
5228}