/*
2 * Linux network driver for Brocade Converged Network Adapter.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License (GPL) Version 2 as
6 * published by the Free Software Foundation
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 */
13/*
14 * Copyright (c) 2005-2011 Brocade Communications Systems, Inc.
15 * All rights reserved
16 * www.brocade.com
17 */
18#include "bna.h"
19
20static inline int
21ethport_can_be_up(struct bna_ethport *ethport)
22{
23 int ready = 0;
24 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
25 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
26 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
27 (ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
28 else
29 ready = ((ethport->flags & BNA_ETHPORT_F_ADMIN_UP) &&
30 (ethport->flags & BNA_ETHPORT_F_RX_STARTED) &&
31 !(ethport->flags & BNA_ETHPORT_F_PORT_ENABLED));
32 return ready;
33}
34
/* An ethport that could be brought up right now is, by definition, up. */
#define ethport_is_up ethport_can_be_up

/* Events fed into the ethport state machine. */
enum bna_ethport_event {
	ETHPORT_E_START			= 1,
	ETHPORT_E_STOP			= 2,
	ETHPORT_E_FAIL			= 3,
	ETHPORT_E_UP			= 4,
	ETHPORT_E_DOWN			= 5,
	ETHPORT_E_FWRESP_UP_OK		= 6,
	ETHPORT_E_FWRESP_DOWN		= 7,
	ETHPORT_E_FWRESP_UP_FAIL	= 8,
};

/* Events fed into the enet state machine. */
enum bna_enet_event {
	ENET_E_START		= 1,
	ENET_E_STOP		= 2,
	ENET_E_FAIL		= 3,
	ENET_E_PAUSE_CFG	= 4,
	ENET_E_MTU_CFG		= 5,
	ENET_E_FWRESP_PAUSE	= 6,
	ENET_E_CHLD_STOPPED	= 7,
};

/* Events fed into the ioceth state machine. */
enum bna_ioceth_event {
	IOCETH_E_ENABLE		= 1,
	IOCETH_E_DISABLE	= 2,
	IOCETH_E_IOC_RESET	= 3,
	IOCETH_E_IOC_FAILED	= 4,
	IOCETH_E_IOC_READY	= 5,
	IOCETH_E_ENET_ATTR_RESP	= 6,
	IOCETH_E_ENET_STOPPED	= 7,
	IOCETH_E_IOC_DISABLED	= 8,
};
68
/*
 * Copy one hardware stats block (_name, of layout struct
 * bfi_enet_stats_<_type>) from the DMA-able area into the driver's
 * software copy, byte-swapping each 64-bit counter from big endian.
 * Relies on caller-scope locals: bna, count, i, stats_src, stats_dst.
 *
 * Fix: removed the stray trailing '\' after "} while (0)", which
 * extended the macro definition onto the following line.
 */
#define bna_stats_copy(_name, _type)					\
do {									\
	count = sizeof(struct bfi_enet_stats_ ## _type) / sizeof(u64);	\
	stats_src = (u64 *)&bna->stats.hw_stats_kva->_name ## _stats;	\
	stats_dst = (u64 *)&bna->stats.hw_stats._name ## _stats;	\
	for (i = 0; i < count; i++)					\
		stats_dst[i] = be64_to_cpu(stats_src[i]);		\
} while (0)
77
78/*
79 * FW response handlers
80 */
81
82static void
83bna_bfi_ethport_enable_aen(struct bna_ethport *ethport,
84 struct bfi_msgq_mhdr *msghdr)
85{
86 ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;
87
88 if (ethport_can_be_up(ethport))
89 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
90}
91
92static void
93bna_bfi_ethport_disable_aen(struct bna_ethport *ethport,
94 struct bfi_msgq_mhdr *msghdr)
95{
96 int ethport_up = ethport_is_up(ethport);
97
98 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
99
100 if (ethport_up)
101 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
102}
103
/*
 * Firmware response to a port admin up/down request. The request that
 * this answers is still cached in bfi_enet_cmd.admin_req, so its enable
 * field tells us whether we asked for up or down.
 */
static void
bna_bfi_ethport_admin_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_enable_req *admin_req =
		&ethport->bfi_enet_cmd.admin_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (admin_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Firmware rejected the bring-up; treat the port
			 * as disabled so we don't retry until re-enabled.
			 */
			ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		/* Port is down now; report link down to the driver. */
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
		break;
	}
}
129
/*
 * Firmware response to a diagnostic loopback up/down request.
 * Mirrors bna_bfi_ethport_admin_rsp() for the loopback flavor of the
 * port, except no link notification is raised on the down path.
 */
static void
bna_bfi_ethport_lpbk_rsp(struct bna_ethport *ethport,
				struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_diag_lb_req *diag_lb_req =
		&ethport->bfi_enet_cmd.lpbk_req;
	struct bfi_enet_rsp *rsp = (struct bfi_enet_rsp *)msghdr;

	switch (diag_lb_req->enable) {
	case BNA_STATUS_T_ENABLED:
		if (rsp->error == BFI_ENET_CMD_OK)
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_OK);
		else {
			/* Loopback bring-up failed: clear ADMIN_UP (not
			 * PORT_ENABLED as in the admin path).
			 */
			ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
			bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_UP_FAIL);
		}
		break;

	case BNA_STATUS_T_DISABLED:
		bfa_fsm_send_event(ethport, ETHPORT_E_FWRESP_DOWN);
		break;
	}
}
153
/* Firmware acknowledged a pause configuration; advance the enet FSM. */
static void
bna_bfi_pause_set_rsp(struct bna_enet *enet, struct bfi_msgq_mhdr *msghdr)
{
	bfa_fsm_send_event(enet, ENET_E_FWRESP_PAUSE);
}
159
/*
 * Firmware response carrying enet hardware attributes. Values arrive in
 * network byte order. The same max_cfg value seeds both num_txq and
 * num_rxp.
 */
static void
bna_bfi_attr_get_rsp(struct bna_ioceth *ioceth,
			struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_attr_rsp *rsp = (struct bfi_enet_attr_rsp *)msghdr;

	/**
	 * Store only if not set earlier, since BNAD can override the HW
	 * attributes
	 */
	if (!ioceth->attr.num_txq)
		ioceth->attr.num_txq = ntohl(rsp->max_cfg);
	if (!ioceth->attr.num_rxp)
		ioceth->attr.num_rxp = ntohl(rsp->max_cfg);
	ioceth->attr.num_ucmac = ntohl(rsp->max_ucmac);
	ioceth->attr.num_mcmac = BFI_ENET_MAX_MCAM;
	ioceth->attr.max_rit_size = ntohl(rsp->rit_size);

	bfa_fsm_send_event(ioceth, IOCETH_E_ENET_ATTR_RESP);
}
180
/*
 * Firmware response to a stats-get request: copy all hardware counters
 * from the DMA area into the software stats structure, converting each
 * 64-bit counter from big endian.
 *
 * The locals bna/count/i/stats_src/stats_dst are read and written by the
 * bna_stats_copy() macro below — do not rename or remove them.
 */
static void
bna_bfi_stats_get_rsp(struct bna *bna, struct bfi_msgq_mhdr *msghdr)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;
	u64 *stats_src;
	u64 *stats_dst;
	u32 tx_enet_mask = ntohl(stats_req->tx_enet_mask);
	u32 rx_enet_mask = ntohl(stats_req->rx_enet_mask);
	int count;
	int i;

	/* Fixed-layout blocks first; rlb shares the rad layout. */
	bna_stats_copy(mac, mac);
	bna_stats_copy(bpc, bpc);
	bna_stats_copy(rad, rad);
	bna_stats_copy(rlb, rad);
	bna_stats_copy(fc_rx, fc_rx);
	bna_stats_copy(fc_tx, fc_tx);

	/* The hardware packs Rxf/Txf blocks densely for only the
	 * functions named in the request masks; stats_src advances
	 * contiguously while the destination index i scatters.
	 */
	stats_src = (u64 *)&(bna->stats.hw_stats_kva->rxf_stats[0]);

	/* Copy Rxf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.rxf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_rxf));
		if (rx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_rxf) /
					sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Copy Txf stats to SW area, scatter them while copying */
	for (i = 0; i < BFI_ENET_CFG_MAX; i++) {
		stats_dst = (u64 *)&(bna->stats.hw_stats.txf_stats[i]);
		memset(stats_dst, 0, sizeof(struct bfi_enet_stats_txf));
		if (tx_enet_mask & ((u32)(1 << i))) {
			int k;
			count = sizeof(struct bfi_enet_stats_txf) /
					sizeof(u64);
			for (k = 0; k < count; k++) {
				stats_dst[k] = be64_to_cpu(*stats_src);
				stats_src++;
			}
		}
	}

	/* Release the in-flight marker, then hand the result up. */
	bna->stats_mod.stats_get_busy = false;
	bnad_cb_stats_get(bna->bnad, BNA_CB_SUCCESS, &bna->stats);
}
234
235static void
236bna_bfi_ethport_linkup_aen(struct bna_ethport *ethport,
237 struct bfi_msgq_mhdr *msghdr)
238{
239 ethport->link_status = BNA_LINK_UP;
240
241 /* Dispatch events */
242 ethport->link_cbfn(ethport->bna->bnad, ethport->link_status);
243}
244
245static void
246bna_bfi_ethport_linkdown_aen(struct bna_ethport *ethport,
247 struct bfi_msgq_mhdr *msghdr)
248{
249 ethport->link_status = BNA_LINK_DOWN;
250
251 /* Dispatch events */
252 ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
253}
254
/*
 * Handle an error interrupt: clear any pending halt condition first,
 * then let the IOC error ISR drive recovery.
 */
static void
bna_err_handler(struct bna *bna, u32 intr_status)
{
	if (BNA_IS_HALT_INTR(bna, intr_status))
		bna_halt_clear(bna);

	bfa_nw_ioc_error_isr(&bna->ioceth.ioc);
}
263
264void
265bna_mbox_handler(struct bna *bna, u32 intr_status)
266{
267 if (BNA_IS_ERR_INTR(bna, intr_status)) {
268 bna_err_handler(bna, intr_status);
269 return;
270 }
271 if (BNA_IS_MBOX_INTR(bna, intr_status))
272 bfa_nw_ioc_mbox_isr(&bna->ioceth.ioc);
273}
274
/*
 * Demultiplex firmware message-queue responses and async events to the
 * owning object (Rx, Tx, ethport, enet, ioceth or stats module) based
 * on the message id. Rx/Tx lookups may fail (object already torn
 * down), in which case the response is silently dropped.
 */
static void
bna_msgq_rsp_handler(void *arg, struct bfi_msgq_mhdr *msghdr)
{
	struct bna *bna = (struct bna *)arg;
	struct bna_tx *tx;
	struct bna_rx *rx;

	switch (msghdr->msg_id) {
	case BFI_ENET_I2H_RX_CFG_SET_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_start_rsp(rx, msghdr);
		break;

	case BFI_ENET_I2H_RX_CFG_CLR_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rx_enet_stop_rsp(rx, msghdr);
		break;

	/* All remaining rxf configuration responses share one handler. */
	case BFI_ENET_I2H_RIT_CFG_RSP:
	case BFI_ENET_I2H_RSS_CFG_RSP:
	case BFI_ENET_I2H_RSS_ENABLE_RSP:
	case BFI_ENET_I2H_RX_PROMISCUOUS_RSP:
	case BFI_ENET_I2H_RX_DEFAULT_RSP:
	case BFI_ENET_I2H_MAC_UCAST_SET_RSP:
	case BFI_ENET_I2H_MAC_UCAST_CLR_RSP:
	case BFI_ENET_I2H_MAC_UCAST_ADD_RSP:
	case BFI_ENET_I2H_MAC_UCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_DEL_RSP:
	case BFI_ENET_I2H_MAC_MCAST_FILTER_RSP:
	case BFI_ENET_I2H_RX_VLAN_SET_RSP:
	case BFI_ENET_I2H_RX_VLAN_STRIP_ENABLE_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_cfg_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_MAC_MCAST_ADD_RSP:
		bna_rx_from_rid(bna, msghdr->enet_id, rx);
		if (rx)
			bna_bfi_rxf_mcast_add_rsp(&rx->rxf, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_SET_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_start_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_TX_CFG_CLR_RSP:
		bna_tx_from_rid(bna, msghdr->enet_id, tx);
		if (tx)
			bna_bfi_tx_enet_stop_rsp(tx, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ADMIN_RSP:
		bna_bfi_ethport_admin_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_DIAG_LOOPBACK_RSP:
		bna_bfi_ethport_lpbk_rsp(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_SET_PAUSE_RSP:
		bna_bfi_pause_set_rsp(&bna->enet, msghdr);
		break;

	case BFI_ENET_I2H_GET_ATTR_RSP:
		bna_bfi_attr_get_rsp(&bna->ioceth, msghdr);
		break;

	case BFI_ENET_I2H_STATS_GET_RSP:
		bna_bfi_stats_get_rsp(bna, msghdr);
		break;

	case BFI_ENET_I2H_STATS_CLR_RSP:
		/* No-op */
		break;

	case BFI_ENET_I2H_LINK_UP_AEN:
		bna_bfi_ethport_linkup_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_LINK_DOWN_AEN:
		bna_bfi_ethport_linkdown_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_ENABLE_AEN:
		bna_bfi_ethport_enable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_PORT_DISABLE_AEN:
		bna_bfi_ethport_disable_aen(&bna->ethport, msghdr);
		break;

	case BFI_ENET_I2H_BW_UPDATE_AEN:
		bna_bfi_bw_update_aen(&bna->tx_mod);
		break;

	default:
		break;
	}
}
379
380/**
381 * ETHPORT
382 */
/* Invoke and clear the one-shot ethport stop callback, if armed. */
#define call_ethport_stop_cbfn(_ethport)				\
do {									\
	if ((_ethport)->stop_cbfn) {					\
		void (*cbfn)(struct bna_enet *);			\
		cbfn = (_ethport)->stop_cbfn;				\
		(_ethport)->stop_cbfn = NULL;				\
		cbfn(&(_ethport)->bna->enet);				\
	}								\
} while (0)

/* Invoke and clear the one-shot admin-up callback with a status code. */
#define call_ethport_adminup_cbfn(ethport, status)			\
do {									\
	if ((ethport)->adminup_cbfn) {					\
		void (*cbfn)(struct bnad *, enum bna_cb_status);	\
		cbfn = (ethport)->adminup_cbfn;				\
		(ethport)->adminup_cbfn = NULL;				\
		cbfn((ethport)->bna->bnad, status);			\
	}								\
} while (0)
402
/* Post a PORT_ADMIN_UP request (enable = ENABLED) to firmware. */
static void
bna_bfi_ethport_admin_up(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_up_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
419
/*
 * Post a port admin down request to firmware.
 * NOTE(review): this deliberately reuses BFI_ENET_H2I_PORT_ADMIN_UP_REQ;
 * the direction is presumably conveyed by enable = DISABLED — confirm
 * against the BFI message spec before changing.
 */
static void
bna_bfi_ethport_admin_down(struct bna_ethport *ethport)
{
	struct bfi_enet_enable_req *admin_down_req =
		&ethport->bfi_enet_cmd.admin_req;

	bfi_msgq_mhdr_set(admin_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_PORT_ADMIN_UP_REQ, 0, 0);
	admin_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_enable_req)));
	admin_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_enable_req), &admin_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
436
/*
 * Post a diagnostic loopback enable request to firmware.
 * NOTE(review): the internal-loopback enet type selects OPMODE_EXT and
 * everything else selects OPMODE_CBL, which reads inverted — verify
 * against the firmware opmode definitions before touching.
 */
static void
bna_bfi_ethport_lpbk_up(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_up_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_up_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_up_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_up_req->mode = (ethport->bna->enet.type ==
				BNA_ENET_T_LOOPBACK_INTERNAL) ?
				BFI_ENET_DIAG_LB_OPMODE_EXT :
				BFI_ENET_DIAG_LB_OPMODE_CBL;
	lpbk_up_req->enable = BNA_STATUS_T_ENABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_up_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
457
/* Post a diagnostic loopback disable request to firmware. */
static void
bna_bfi_ethport_lpbk_down(struct bna_ethport *ethport)
{
	struct bfi_enet_diag_lb_req *lpbk_down_req =
		&ethport->bfi_enet_cmd.lpbk_req;

	bfi_msgq_mhdr_set(lpbk_down_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_DIAG_LOOPBACK_REQ, 0, 0);
	lpbk_down_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_diag_lb_req)));
	lpbk_down_req->enable = BNA_STATUS_T_DISABLED;

	bfa_msgq_cmd_set(&ethport->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_diag_lb_req), &lpbk_down_req->mh);
	bfa_msgq_cmd_post(&ethport->bna->msgq, &ethport->msgq_cmd);
}
474
475static void
476bna_bfi_ethport_up(struct bna_ethport *ethport)
477{
478 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
479 bna_bfi_ethport_admin_up(ethport);
480 else
481 bna_bfi_ethport_lpbk_up(ethport);
482}
483
484static void
485bna_bfi_ethport_down(struct bna_ethport *ethport)
486{
487 if (ethport->bna->enet.type == BNA_ENET_T_REGULAR)
488 bna_bfi_ethport_admin_down(ethport);
489 else
490 bna_bfi_ethport_lpbk_down(ethport);
491}
492
/* Ethport state machine states. */
bfa_fsm_state_decl(bna_ethport, stopped, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, down_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, up, struct bna_ethport,
			enum bna_ethport_event);
bfa_fsm_state_decl(bna_ethport, last_resp_wait, struct bna_ethport,
			enum bna_ethport_event);
505
/* stopped: ethport not started; completes any pending stop callback. */
static void
bna_ethport_sm_stopped_entry(struct bna_ethport *ethport)
{
	call_ethport_stop_cbfn(ethport);
}

static void
bna_ethport_sm_stopped(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_START:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_STOP:
		call_ethport_stop_cbfn(ethport);
		break;

	case ETHPORT_E_FAIL:
		/* No-op */
		break;

	case ETHPORT_E_DOWN:
		/* This event is received due to Rx objects failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
538
/* down: started but administratively/link down; waits for E_UP. */
static void
bna_ethport_sm_down_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_down(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		/* Issue the firmware bring-up and wait for its response. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
566
/* up_resp_wait: firmware bring-up request outstanding. */
static void
bna_ethport_sm_up_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		call_ethport_adminup_cbfn(ethport, BNA_CB_INTERRUPT);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		call_ethport_adminup_cbfn(ethport, BNA_CB_SUCCESS);
		bfa_fsm_set_state(ethport, bna_ethport_sm_up);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
		call_ethport_adminup_cbfn(ethport, BNA_CB_FAIL);
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	case ETHPORT_E_FWRESP_DOWN:
		/* down_resp_wait -> up_resp_wait transition on ETHPORT_E_UP */
		/* The pending down completed; re-issue the bring-up. */
		bna_bfi_ethport_up(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
610
/* down_resp_wait: a firmware response is outstanding and the next step
 * is to take the port down.
 */
static void
bna_ethport_sm_down_resp_wait_entry(struct bna_ethport *ethport)
{
	/**
	 * NOTE: Do not call bna_bfi_ethport_down() here. That will over step
	 * mbox due to up_resp_wait -> down_resp_wait transition on event
	 * ETHPORT_E_DOWN
	 */
}

static void
bna_ethport_sm_down_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_UP:
		bfa_fsm_set_state(ethport, bna_ethport_sm_up_resp_wait);
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->down_resp_wait transition on ETHPORT_E_DOWN */
		/* The stale bring-up completed; now issue the down. */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down);
		break;

	default:
		bfa_sm_fault(event);
	}
}
652
/* up: port is up in firmware and carrying traffic. */
static void
bna_ethport_sm_up_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_up(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_STOP:
		/* Take the port down in firmware before finishing the stop. */
		bfa_fsm_set_state(ethport, bna_ethport_sm_last_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_down_resp_wait);
		bna_bfi_ethport_down(ethport);
		break;

	default:
		bfa_sm_fault(event);
	}
}
681
/* last_resp_wait: stopping; waiting for the final firmware response
 * before entering stopped.
 */
static void
bna_ethport_sm_last_resp_wait_entry(struct bna_ethport *ethport)
{
}

static void
bna_ethport_sm_last_resp_wait(struct bna_ethport *ethport,
			enum bna_ethport_event event)
{
	switch (event) {
	case ETHPORT_E_FAIL:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	case ETHPORT_E_DOWN:
		/**
		 * This event is received due to Rx objects stopping in
		 * parallel to ethport
		 */
		/* No-op */
		break;

	case ETHPORT_E_FWRESP_UP_OK:
		/* up_resp_wait->last_resp_wait transition on ETHPORT_T_STOP */
		/* A stale bring-up completed; still need the down. */
		bna_bfi_ethport_down(ethport);
		break;

	case ETHPORT_E_FWRESP_UP_FAIL:
	case ETHPORT_E_FWRESP_DOWN:
		bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
718
719static void
720bna_ethport_init(struct bna_ethport *ethport, struct bna *bna)
721{
722 ethport->flags |= (BNA_ETHPORT_F_ADMIN_UP | BNA_ETHPORT_F_PORT_ENABLED);
723 ethport->bna = bna;
724
725 ethport->link_status = BNA_LINK_DOWN;
726 ethport->link_cbfn = bnad_cb_ethport_link_status;
727
728 ethport->rx_started_count = 0;
729
730 ethport->stop_cbfn = NULL;
731 ethport->adminup_cbfn = NULL;
732
733 bfa_fsm_set_state(ethport, bna_ethport_sm_stopped);
734}
735
736static void
737bna_ethport_uninit(struct bna_ethport *ethport)
738{
739 ethport->flags &= ~BNA_ETHPORT_F_ADMIN_UP;
740 ethport->flags &= ~BNA_ETHPORT_F_PORT_ENABLED;
741
742 ethport->bna = NULL;
743}
744
/* Kick the ethport FSM out of stopped. */
static void
bna_ethport_start(struct bna_ethport *ethport)
{
	bfa_fsm_send_event(ethport, ETHPORT_E_START);
}

/* Ethport stop completion: release one count on the enet's child-stop
 * wait counter.
 */
static void
bna_enet_cb_ethport_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}
756
/* Begin stopping the ethport; the callback must be armed before the
 * event is sent, since stopped-entry fires it synchronously.
 */
static void
bna_ethport_stop(struct bna_ethport *ethport)
{
	ethport->stop_cbfn = bna_enet_cb_ethport_stopped;
	bfa_fsm_send_event(ethport, ETHPORT_E_STOP);
}
763
/* IOC failure path: reset port-enabled state, force link down
 * notification if needed, and fail the FSM.
 */
static void
bna_ethport_fail(struct bna_ethport *ethport)
{
	/* Reset the physical port status to enabled */
	ethport->flags |= BNA_ETHPORT_F_PORT_ENABLED;

	if (ethport->link_status != BNA_LINK_DOWN) {
		ethport->link_status = BNA_LINK_DOWN;
		ethport->link_cbfn(ethport->bna->bnad, BNA_LINK_DOWN);
	}
	bfa_fsm_send_event(ethport, ETHPORT_E_FAIL);
}
776
777/* Should be called only when ethport is disabled */
778void
779bna_ethport_cb_rx_started(struct bna_ethport *ethport)
780{
781 ethport->rx_started_count++;
782
783 if (ethport->rx_started_count == 1) {
784 ethport->flags |= BNA_ETHPORT_F_RX_STARTED;
785
786 if (ethport_can_be_up(ethport))
787 bfa_fsm_send_event(ethport, ETHPORT_E_UP);
788 }
789}
790
791void
792bna_ethport_cb_rx_stopped(struct bna_ethport *ethport)
793{
794 int ethport_up = ethport_is_up(ethport);
795
796 ethport->rx_started_count--;
797
798 if (ethport->rx_started_count == 0) {
799 ethport->flags &= ~BNA_ETHPORT_F_RX_STARTED;
800
801 if (ethport_up)
802 bfa_fsm_send_event(ethport, ETHPORT_E_DOWN);
803 }
804}
805
806/**
807 * ENET
808 */
/* Start all enet children (ethport, Tx mod, Rx mod) with the Tx/Rx
 * type matching the enet type.
 */
#define bna_enet_chld_start(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_ethport_start(&(enet)->bna->ethport);			\
	bna_tx_mod_start(&(enet)->bna->tx_mod, tx_type);		\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop all children, tracking the three completions with a wait
 * counter; bna_enet_cb_chld_stopped fires when all have stopped.
 */
#define bna_enet_chld_stop(enet)					\
do {									\
	enum bna_tx_type tx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_TX_T_REGULAR : BNA_TX_T_LOOPBACK;			\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_ethport_stop(&(enet)->bna->ethport);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_tx_mod_stop(&(enet)->bna->tx_mod, tx_type);			\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)

/* Propagate an IOC failure to all children immediately. */
#define bna_enet_chld_fail(enet)					\
do {									\
	bna_ethport_fail(&(enet)->bna->ethport);			\
	bna_tx_mod_fail(&(enet)->bna->tx_mod);				\
	bna_rx_mod_fail(&(enet)->bna->rx_mod);				\
} while (0)
846
/* Start only the Rx module (used after an MTU reconfiguration). */
#define bna_enet_rx_start(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bna_rx_mod_start(&(enet)->bna->rx_mod, rx_type);		\
} while (0)

/* Stop only the Rx module, waiting on the child-stop counter. */
#define bna_enet_rx_stop(enet)						\
do {									\
	enum bna_rx_type rx_type =					\
		((enet)->type == BNA_ENET_T_REGULAR) ?			\
		BNA_RX_T_REGULAR : BNA_RX_T_LOOPBACK;			\
	bfa_wc_init(&(enet)->chld_stop_wc, bna_enet_cb_chld_stopped, (enet));\
	bfa_wc_up(&(enet)->chld_stop_wc);				\
	bna_rx_mod_stop(&(enet)->bna->rx_mod, rx_type);			\
	bfa_wc_wait(&(enet)->chld_stop_wc);				\
} while (0)
865
/* Invoke and clear the one-shot enet stop callback, if armed. */
#define call_enet_stop_cbfn(enet)					\
do {									\
	if ((enet)->stop_cbfn) {					\
		void (*cbfn)(void *);					\
		void *cbarg;						\
		cbfn = (enet)->stop_cbfn;				\
		cbarg = (enet)->stop_cbarg;				\
		(enet)->stop_cbfn = NULL;				\
		(enet)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)

/* Invoke and clear the one-shot pause-config callback, if armed. */
#define call_enet_pause_cbfn(enet)					\
do {									\
	if ((enet)->pause_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->pause_cbfn;				\
		(enet)->pause_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)

/* Invoke and clear the one-shot MTU-config callback, if armed. */
#define call_enet_mtu_cbfn(enet)					\
do {									\
	if ((enet)->mtu_cbfn) {						\
		void (*cbfn)(struct bnad *);				\
		cbfn = (enet)->mtu_cbfn;				\
		(enet)->mtu_cbfn = NULL;				\
		cbfn((enet)->bna->bnad);				\
	}								\
} while (0)
898
/* Forward declarations used by the enet state machine below. */
static void bna_enet_cb_chld_stopped(void *arg);
static void bna_bfi_pause_set(struct bna_enet *enet);

/* Enet state machine states. */
bfa_fsm_state_decl(bna_enet, stopped, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, pause_init_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, last_resp_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, started, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, cfg_stop_wait, struct bna_enet,
			enum bna_enet_event);
bfa_fsm_state_decl(bna_enet, chld_stop_wait, struct bna_enet,
			enum bna_enet_event);
916
/* stopped: enet idle; flushes any pending config/stop callbacks. */
static void
bna_enet_sm_stopped_entry(struct bna_enet *enet)
{
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
	call_enet_stop_cbfn(enet);
}

static void
bna_enet_sm_stopped(struct bna_enet *enet, enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_START:
		bfa_fsm_set_state(enet, bna_enet_sm_pause_init_wait);
		break;

	case ENET_E_STOP:
		call_enet_stop_cbfn(enet);
		break;

	case ENET_E_FAIL:
		/* No-op */
		break;

	case ENET_E_PAUSE_CFG:
		/* Nothing to program while stopped; just complete. */
		call_enet_pause_cbfn(enet);
		break;

	case ENET_E_MTU_CFG:
		call_enet_mtu_cbfn(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		/**
		 * This event is received due to Ethport, Tx and Rx objects
		 * failing
		 */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
961
/* pause_init_wait: initial pause programming sent; children start once
 * firmware acknowledges (re-sending if the config changed meanwhile).
 */
static void
bna_enet_sm_pause_init_wait_entry(struct bna_enet *enet)
{
	bna_bfi_pause_set(enet);
}

static void
bna_enet_sm_pause_init_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_last_resp_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	case ENET_E_PAUSE_CFG:
		/* A request is in flight; remember to re-send. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		/* No-op */
		break;

	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
			bna_enet_chld_start(enet);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1005
/* last_resp_wait: stop requested while a pause response is pending. */
static void
bna_enet_sm_last_resp_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
}

static void
bna_enet_sm_last_resp_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
	case ENET_E_FWRESP_PAUSE:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1026
/* started: enet and children fully running. */
static void
bna_enet_sm_started_entry(struct bna_enet *enet)
{
	/**
	 * NOTE: Do not call bna_enet_chld_start() here, since it will be
	 * inadvertently called during cfg_wait->started transition as well
	 */
	call_enet_pause_cbfn(enet);
	call_enet_mtu_cbfn(enet);
}

static void
bna_enet_sm_started(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_bfi_pause_set(enet);
		break;

	case ENET_E_MTU_CFG:
		/* MTU change requires the Rx path to be restarted. */
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_wait);
		bna_enet_rx_stop(enet);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1066
/* cfg_wait: a pause or MTU reconfiguration is in progress; queued
 * changes are replayed one at a time as completions arrive.
 */
static void
bna_enet_sm_cfg_wait_entry(struct bna_enet *enet)
{
}

static void
bna_enet_sm_cfg_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_STOP:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_cfg_stop_wait);
		break;

	case ENET_E_FAIL:
		enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
		enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_PAUSE_CFG:
		/* Queue behind the in-flight operation. */
		enet->flags |= BNA_ENET_F_PAUSE_CHANGED;
		break;

	case ENET_E_MTU_CFG:
		enet->flags |= BNA_ENET_F_MTU_CHANGED;
		break;

	case ENET_E_CHLD_STOPPED:
		bna_enet_rx_start(enet);
		/* Fall through */
	case ENET_E_FWRESP_PAUSE:
		if (enet->flags & BNA_ENET_F_PAUSE_CHANGED) {
			enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
			bna_bfi_pause_set(enet);
		} else if (enet->flags & BNA_ENET_F_MTU_CHANGED) {
			enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
			bna_enet_rx_stop(enet);
		} else {
			bfa_fsm_set_state(enet, bna_enet_sm_started);
		}
		break;

	default:
		bfa_sm_fault(event);
	}
}
1117
/* cfg_stop_wait: stop requested during reconfiguration; discard queued
 * changes and wait for the in-flight operation to finish.
 */
static void
bna_enet_sm_cfg_stop_wait_entry(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_PAUSE_CHANGED;
	enet->flags &= ~BNA_ENET_F_MTU_CHANGED;
}

static void
bna_enet_sm_cfg_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_FWRESP_PAUSE:
	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_chld_stop_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1144
/* chld_stop_wait: stopping all children; finish when they report back. */
static void
bna_enet_sm_chld_stop_wait_entry(struct bna_enet *enet)
{
	bna_enet_chld_stop(enet);
}

static void
bna_enet_sm_chld_stop_wait(struct bna_enet *enet,
			enum bna_enet_event event)
{
	switch (event) {
	case ENET_E_FAIL:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		bna_enet_chld_fail(enet);
		break;

	case ENET_E_CHLD_STOPPED:
		bfa_fsm_set_state(enet, bna_enet_sm_stopped);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1169
/* Post the current pause configuration to firmware. */
static void
bna_bfi_pause_set(struct bna_enet *enet)
{
	struct bfi_enet_set_pause_req *pause_req = &enet->pause_req;

	bfi_msgq_mhdr_set(pause_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_SET_PAUSE_REQ, 0, 0);
	pause_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_set_pause_req)));
	pause_req->tx_pause = enet->pause_config.tx_pause;
	pause_req->rx_pause = enet->pause_config.rx_pause;

	bfa_msgq_cmd_set(&enet->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_set_pause_req), &pause_req->mh);
	bfa_msgq_cmd_post(&enet->bna->msgq, &enet->msgq_cmd);
}
1186
/* Wait-counter completion: all enet children have stopped. */
static void
bna_enet_cb_chld_stopped(void *arg)
{
	struct bna_enet *enet = (struct bna_enet *)arg;

	bfa_fsm_send_event(enet, ENET_E_CHLD_STOPPED);
}
1194
/* One-time init of the enet object; leaves the FSM in stopped state. */
static void
bna_enet_init(struct bna_enet *enet, struct bna *bna)
{
	enet->bna = bna;
	enet->flags = 0;
	enet->mtu = 0;
	enet->type = BNA_ENET_T_REGULAR;

	/* No stop/pause/MTU completion callbacks are pending yet. */
	enet->stop_cbfn = NULL;
	enet->stop_cbarg = NULL;

	enet->pause_cbfn = NULL;

	enet->mtu_cbfn = NULL;

	bfa_fsm_set_state(enet, bna_enet_sm_stopped);
}
1212
1213static void
1214bna_enet_uninit(struct bna_enet *enet)
1215{
1216 enet->flags = 0;
1217
1218 enet->bna = NULL;
1219}
1220
1221static void
1222bna_enet_start(struct bna_enet *enet)
1223{
1224 enet->flags |= BNA_ENET_F_IOCETH_READY;
1225 if (enet->flags & BNA_ENET_F_ENABLED)
1226 bfa_fsm_send_event(enet, ENET_E_START);
1227}
1228
1229static void
1230bna_ioceth_cb_enet_stopped(void *arg)
1231{
1232 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1233
1234 bfa_fsm_send_event(ioceth, IOCETH_E_ENET_STOPPED);
1235}
1236
/* Stop the enet on behalf of ioceth; completion is delivered through
 * bna_ioceth_cb_enet_stopped(). */
static void
bna_enet_stop(struct bna_enet *enet)
{
	/* Register the completion callback before raising the event. */
	enet->stop_cbfn = bna_ioceth_cb_enet_stopped;
	enet->stop_cbarg = &enet->bna->ioceth;

	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_STOP);
}

/* Propagate an IOC failure into the enet FSM. */
static void
bna_enet_fail(struct bna_enet *enet)
{
	enet->flags &= ~BNA_ENET_F_IOCETH_READY;
	bfa_fsm_send_event(enet, ENET_E_FAIL);
}
1253
/* Tx reports stop completion: decrement the child-stop wait counter. */
void
bna_enet_cb_tx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

/* Rx reports stop completion: decrement the child-stop wait counter. */
void
bna_enet_cb_rx_stopped(struct bna_enet *enet)
{
	bfa_wc_down(&enet->chld_stop_wc);
}

/* Current MTU (0 until bna_enet_mtu_set() has been called). */
int
bna_enet_mtu_get(struct bna_enet *enet)
{
	return enet->mtu;
}
1271
/* Enable the enet; only acts when the FSM is currently stopped. */
void
bna_enet_enable(struct bna_enet *enet)
{
	if (enet->fsm != (bfa_sm_t)bna_enet_sm_stopped)
		return;

	enet->flags |= BNA_ENET_F_ENABLED;

	/* Start right away only if the IOC is already up; otherwise
	 * bna_enet_start() will fire the event when it comes up. */
	if (enet->flags & BNA_ENET_F_IOCETH_READY)
		bfa_fsm_send_event(enet, ENET_E_START);
}
1283
/*
 * Disable the enet.  BNA_SOFT_CLEANUP invokes @cbfn synchronously and
 * skips the FSM entirely; a hard cleanup registers @cbfn as the stop
 * completion and drives the FSM with ENET_E_STOP.
 */
void
bna_enet_disable(struct bna_enet *enet, enum bna_cleanup_type type,
		void (*cbfn)(void *))
{
	if (type == BNA_SOFT_CLEANUP) {
		(*cbfn)(enet->bna->bnad);
		return;
	}

	enet->stop_cbfn = cbfn;
	enet->stop_cbarg = enet->bna->bnad;

	enet->flags &= ~BNA_ENET_F_ENABLED;

	bfa_fsm_send_event(enet, ENET_E_STOP);
}
1300
/* Record the new pause config and completion cbfn, then notify the FSM. */
void
bna_enet_pause_config(struct bna_enet *enet,
		struct bna_pause_config *pause_config,
		void (*cbfn)(struct bnad *))
{
	enet->pause_config = *pause_config;

	enet->pause_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_PAUSE_CFG);
}

/* Record the new MTU and completion cbfn, then notify the FSM. */
void
bna_enet_mtu_set(struct bna_enet *enet, int mtu,
		void (*cbfn)(struct bnad *))
{
	enet->mtu = mtu;

	enet->mtu_cbfn = cbfn;

	bfa_fsm_send_event(enet, ENET_E_MTU_CFG);
}
1323
/* Fetch the permanent MAC address as reported by the IOC. */
void
bna_enet_perm_mac_get(struct bna_enet *enet, mac_t *mac)
{
	*mac = bfa_nw_ioc_get_mac(&enet->bna->ioceth.ioc);
}
1329
1330/**
1331 * IOCETH
1332 */
/*
 * Enable mailbox interrupts for this function.  The interrupt status is
 * read first — NOTE(review): the read appears to ack/clear pending
 * status before enabling; confirm against bna_intr_status_get().
 */
#define enable_mbox_intr(_ioceth)					\
do {									\
	u32 intr_status;						\
	bna_intr_status_get((_ioceth)->bna, intr_status);		\
	bnad_cb_mbox_intr_enable((_ioceth)->bna->bnad);			\
	bna_mbox_intr_enable((_ioceth)->bna);				\
} while (0)

/* Disable mailbox interrupts (reverse order of enable_mbox_intr). */
#define disable_mbox_intr(_ioceth)					\
do {									\
	bna_mbox_intr_disable((_ioceth)->bna);				\
	bnad_cb_mbox_intr_disable((_ioceth)->bna->bnad);		\
} while (0)
1346
/*
 * Invoke and clear the registered ioceth stop-completion callback, if
 * any.  The fields are cleared before the call so the callback may
 * safely re-register a new completion.
 */
#define call_ioceth_stop_cbfn(_ioceth)					\
do {									\
	if ((_ioceth)->stop_cbfn) {					\
		void (*cbfn)(struct bnad *);				\
		struct bnad *cbarg;					\
		cbfn = (_ioceth)->stop_cbfn;				\
		cbarg = (_ioceth)->stop_cbarg;				\
		(_ioceth)->stop_cbfn = NULL;				\
		(_ioceth)->stop_cbarg = NULL;				\
		cbfn(cbarg);						\
	}								\
} while (0)
1359
/* Stats module currently needs no teardown work. */
#define bna_stats_mod_uninit(_stats_mod)				\
do {									\
} while (0)

/* Mark the IOC ready so stats requests may be posted. */
#define bna_stats_mod_start(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = true;					\
} while (0)

/* Block further stats requests. */
#define bna_stats_mod_stop(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
} while (0)

/* IOC failed: clear ready plus any in-flight get/clear busy flags. */
#define bna_stats_mod_fail(_stats_mod)					\
do {									\
	(_stats_mod)->ioc_ready = false;				\
	(_stats_mod)->stats_get_busy = false;				\
	(_stats_mod)->stats_clr_busy = false;				\
} while (0)
1380
static void bna_bfi_attr_get(struct bna_ioceth *ioceth);

/* ioceth FSM state declarations */
bfa_fsm_state_decl(bna_ioceth, stopped, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_ready_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_attr_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ready, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, last_resp_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, enet_stop_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, ioc_disable_wait, struct bna_ioceth,
			enum bna_ioceth_event);
bfa_fsm_state_decl(bna_ioceth, failed, struct bna_ioceth,
			enum bna_ioceth_event);
1399
/* Entry: deliver any pending stop completion. */
static void
bna_ioceth_sm_stopped_entry(struct bna_ioceth *ioceth)
{
	call_ioceth_stop_cbfn(ioceth);
}

/* FSM: ioceth fully stopped. */
static void
bna_ioceth_sm_stopped(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_ENABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		bfa_nw_ioc_enable(&ioceth->ioc);
		break;

	case IOCETH_E_DISABLE:
		/* Re-enter stopped so the entry hook runs the stop cbfn. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1433
static void
bna_ioceth_sm_ioc_ready_wait_entry(struct bna_ioceth *ioceth)
{
	/*
	 * Do not call bfa_nw_ioc_enable() here. It must be called in the
	 * previous state due to failed -> ioc_ready_wait transition.
	 */
}

/* FSM: waiting for the IOC to come up. */
static void
bna_ioceth_sm_ioc_ready_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		enable_mbox_intr(ioceth);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_IOC_READY:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_attr_wait);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1470
/* Entry: request enet attributes from the firmware. */
static void
bna_ioceth_sm_enet_attr_wait_entry(struct bna_ioceth *ioceth)
{
	bna_bfi_attr_get(ioceth);
}

/* FSM: waiting for the enet attribute response. */
static void
bna_ioceth_sm_enet_attr_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		/* Must drain the outstanding attr response first. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_last_resp_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ready);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1499
/* Entry: start enet and stats, then report readiness to the driver. */
static void
bna_ioceth_sm_ready_entry(struct bna_ioceth *ioceth)
{
	bna_enet_start(&ioceth->bna->enet);
	bna_stats_mod_start(&ioceth->bna->stats_mod);
	bnad_cb_ioceth_ready(ioceth->bna->bnad);
}

/* FSM: fully operational. */
static void
bna_ioceth_sm_ready(struct bna_ioceth *ioceth, enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_enet_stop_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Fail dependent modules before leaving ready. */
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_failed);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1527
/* Entry: nothing to do; just wait for the last firmware response. */
static void
bna_ioceth_sm_last_resp_wait_entry(struct bna_ioceth *ioceth)
{
}

/* FSM: disable requested while an attr request was outstanding. */
static void
bna_ioceth_sm_last_resp_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_ATTR_RESP:
		/* Last response drained; proceed with IOC disable. */
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1553
/* Entry: stop the stats module and ask enet to stop. */
static void
bna_ioceth_sm_enet_stop_wait_entry(struct bna_ioceth *ioceth)
{
	bna_stats_mod_stop(&ioceth->bna->stats_mod);
	bna_enet_stop(&ioceth->bna->enet);
}

/* FSM: waiting for enet stop completion before disabling the IOC. */
static void
bna_ioceth_sm_enet_stop_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_FAILED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		disable_mbox_intr(ioceth);
		bna_enet_fail(&ioceth->bna->enet);
		bna_stats_mod_fail(&ioceth->bna->stats_mod);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_ENET_STOPPED:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	default:
		bfa_sm_fault(event);
	}
}
1583
static void
bna_ioceth_sm_ioc_disable_wait_entry(struct bna_ioceth *ioceth)
{
	/* bfa_nw_ioc_disable() was already issued by the previous state. */
}

/* FSM: waiting for IOC disable completion. */
static void
bna_ioceth_sm_ioc_disable_wait(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_IOC_DISABLED:
		disable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
		break;

	case IOCETH_E_ENET_STOPPED:
		/* This event is received due to enet failing */
		/* No-op */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1608
/* Entry: report the IOC failure to the driver. */
static void
bna_ioceth_sm_failed_entry(struct bna_ioceth *ioceth)
{
	bnad_cb_ioceth_failed(ioceth->bna->bnad);
}

/* FSM: IOC has failed; wait for disable or recovery via reset. */
static void
bna_ioceth_sm_failed(struct bna_ioceth *ioceth,
			enum bna_ioceth_event event)
{
	switch (event) {
	case IOCETH_E_DISABLE:
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_disable_wait);
		bfa_nw_ioc_disable(&ioceth->ioc);
		break;

	case IOCETH_E_IOC_RESET:
		/* IOC is recovering: re-arm the mailbox, wait for ready. */
		enable_mbox_intr(ioceth);
		bfa_fsm_set_state(ioceth, bna_ioceth_sm_ioc_ready_wait);
		break;

	case IOCETH_E_IOC_FAILED:
		/* Already failed; ignore repeats. */
		break;

	default:
		bfa_sm_fault(event);
	}
}
1637
/* Build a GET_ATTR request for enet attributes and post it to firmware. */
static void
bna_bfi_attr_get(struct bna_ioceth *ioceth)
{
	struct bfi_enet_attr_req *attr_req = &ioceth->attr_req;

	bfi_msgq_mhdr_set(attr_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_GET_ATTR_REQ, 0, 0);
	/* num_entries travels big-endian on the wire. */
	attr_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_attr_req)));
	bfa_msgq_cmd_set(&ioceth->msgq_cmd, NULL, NULL,
		sizeof(struct bfi_enet_attr_req), &attr_req->mh);
	bfa_msgq_cmd_post(&ioceth->bna->msgq, &ioceth->msgq_cmd);
}
1651
1652/* IOC callback functions */
1653
1654static void
1655bna_cb_ioceth_enable(void *arg, enum bfa_status error)
1656{
1657 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1658
1659 if (error)
1660 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1661 else
1662 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_READY);
1663}
1664
1665static void
1666bna_cb_ioceth_disable(void *arg)
1667{
1668 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1669
1670 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_DISABLED);
1671}
1672
1673static void
1674bna_cb_ioceth_hbfail(void *arg)
1675{
1676 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1677
1678 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_FAILED);
1679}
1680
1681static void
1682bna_cb_ioceth_reset(void *arg)
1683{
1684 struct bna_ioceth *ioceth = (struct bna_ioceth *)arg;
1685
1686 bfa_fsm_send_event(ioceth, IOCETH_E_IOC_RESET);
1687}
1688
/* IOC -> ioceth callbacks; positional initializers must match the
 * field order of struct bfa_ioc_cbfn (enable, disable, hbfail, reset). */
static struct bfa_ioc_cbfn bna_ioceth_cbfn = {
	bna_cb_ioceth_enable,
	bna_cb_ioceth_disable,
	bna_cb_ioceth_hbfail,
	bna_cb_ioceth_reset
};
1695
/* Attach the IOC and common modules, claiming their DMA/KVA slices in
 * order: IOC attrs, FW trace, CEE, then the message queue. */
static void
bna_ioceth_init(struct bna_ioceth *ioceth, struct bna *bna,
		struct bna_res_info *res_info)
{
	u64 dma;
	u8 *kva;

	ioceth->bna = bna;

	/*
	 * Attach IOC and claim:
	 *	1. DMA memory for IOC attributes
	 *	2. Kernel memory for FW trace
	 */
	bfa_nw_ioc_attach(&ioceth->ioc, ioceth, &bna_ioceth_cbfn);
	bfa_nw_ioc_pci_init(&ioceth->ioc, &bna->pcidev, BFI_PCIFN_CLASS_ETH);

	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mdl[0].kva;
	bfa_nw_ioc_mem_claim(&ioceth->ioc, kva, dma);

	kva = res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mdl[0].kva;

	/*
	 * Attach common modules (Diag, SFP, CEE, Port) and claim respective
	 * DMA memory.
	 */
	BNA_GET_DMA_ADDR(
		&res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].dma, dma);
	kva = res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mdl[0].kva;
	bfa_nw_cee_attach(&bna->cee, &ioceth->ioc, bna);
	bfa_nw_cee_mem_claim(&bna->cee, kva, dma);
	/* Advance past the CEE slice of the shared COM area. */
	kva += bfa_nw_cee_meminfo();
	dma += bfa_nw_cee_meminfo();

	bfa_msgq_attach(&bna->msgq, &ioceth->ioc);
	bfa_msgq_memclaim(&bna->msgq, kva, dma);
	bfa_msgq_regisr(&bna->msgq, BFI_MC_ENET, bna_msgq_rsp_handler, bna);
	kva += bfa_msgq_meminfo();
	dma += bfa_msgq_meminfo();

	ioceth->stop_cbfn = NULL;
	ioceth->stop_cbarg = NULL;

	bfa_fsm_set_state(ioceth, bna_ioceth_sm_stopped);
}
1743
/* Detach the IOC and drop the back-pointer. */
static void
bna_ioceth_uninit(struct bna_ioceth *ioceth)
{
	bfa_nw_ioc_detach(&ioceth->ioc);

	ioceth->bna = NULL;
}
1751
/*
 * Enable the ioceth.  If it is already ready, just replay the ready
 * callback; otherwise only a stopped FSM accepts the enable event.
 */
void
bna_ioceth_enable(struct bna_ioceth *ioceth)
{
	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_ready) {
		bnad_cb_ioceth_ready(ioceth->bna->bnad);
		return;
	}

	if (ioceth->fsm == (bfa_fsm_t)bna_ioceth_sm_stopped)
		bfa_fsm_send_event(ioceth, IOCETH_E_ENABLE);
}
1763
/*
 * Disable the ioceth.  BNA_SOFT_CLEANUP completes synchronously; a hard
 * cleanup registers the disabled callback as the stop completion and
 * drives the FSM with IOCETH_E_DISABLE.
 */
void
bna_ioceth_disable(struct bna_ioceth *ioceth, enum bna_cleanup_type type)
{
	if (type == BNA_SOFT_CLEANUP) {
		bnad_cb_ioceth_disabled(ioceth->bna->bnad);
		return;
	}

	ioceth->stop_cbfn = bnad_cb_ioceth_disabled;
	ioceth->stop_cbarg = ioceth->bna->bnad;

	bfa_fsm_send_event(ioceth, IOCETH_E_DISABLE);
}
1777
/* Carve the ucast MAC array out of preallocated KVA and thread every
 * entry onto the free list. */
static void
bna_ucam_mod_init(struct bna_ucam_mod *ucam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	ucam_mod->ucmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&ucam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_ucmac; i++) {
		bfa_q_qe_init(&ucam_mod->ucmac[i].qe);
		list_add_tail(&ucam_mod->ucmac[i].qe, &ucam_mod->free_q);
	}

	ucam_mod->bna = bna;
}
1795
1796static void
1797bna_ucam_mod_uninit(struct bna_ucam_mod *ucam_mod)
1798{
1799 struct list_head *qe;
1800 int i = 0;
1801
1802 list_for_each(qe, &ucam_mod->free_q)
1803 i++;
1804
1805 ucam_mod->bna = NULL;
1806}
1807
/* Carve the mcast MAC and mcast-handle arrays out of preallocated KVA
 * and thread the entries onto their respective free lists. */
static void
bna_mcam_mod_init(struct bna_mcam_mod *mcam_mod, struct bna *bna,
		struct bna_res_info *res_info)
{
	int i;

	mcam_mod->mcmac = (struct bna_mac *)
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mdl[0].kva;

	INIT_LIST_HEAD(&mcam_mod->free_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mcmac[i].qe);
		list_add_tail(&mcam_mod->mcmac[i].qe, &mcam_mod->free_q);
	}

	mcam_mod->mchandle = (struct bna_mcam_handle *)
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mdl[0].kva;

	/* One handle per possible mcast MAC. */
	INIT_LIST_HEAD(&mcam_mod->free_handle_q);
	for (i = 0; i < bna->ioceth.attr.num_mcmac; i++) {
		bfa_q_qe_init(&mcam_mod->mchandle[i].qe);
		list_add_tail(&mcam_mod->mchandle[i].qe,
			&mcam_mod->free_handle_q);
	}

	mcam_mod->bna = bna;
}
1835
1836static void
1837bna_mcam_mod_uninit(struct bna_mcam_mod *mcam_mod)
1838{
1839 struct list_head *qe;
1840 int i;
1841
1842 i = 0;
1843 list_for_each(qe, &mcam_mod->free_q) i++;
1844
1845 i = 0;
1846 list_for_each(qe, &mcam_mod->free_handle_q) i++;
1847
1848 mcam_mod->bna = NULL;
1849}
1850
/* Build and post a STATS_GET request covering all active Tx/Rx ids. */
static void
bna_bfi_stats_get(struct bna *bna)
{
	struct bfi_enet_stats_req *stats_req = &bna->stats_mod.stats_get;

	/* Marked busy until the firmware response clears it. */
	bna->stats_mod.stats_get_busy = true;

	bfi_msgq_mhdr_set(stats_req->mh, BFI_MC_ENET,
		BFI_ENET_H2I_STATS_GET_REQ, 0, 0);
	stats_req->mh.num_entries = htons(
		bfi_msgq_num_cmd_entries(sizeof(struct bfi_enet_stats_req)));
	stats_req->stats_mask = htons(BFI_ENET_STATS_ALL);
	stats_req->tx_enet_mask = htonl(bna->tx_mod.rid_mask);
	stats_req->rx_enet_mask = htonl(bna->rx_mod.rid_mask);
	/* Host DMA buffer address for the stats block. */
	stats_req->host_buffer.a32.addr_hi = bna->stats.hw_stats_dma.msb;
	stats_req->host_buffer.a32.addr_lo = bna->stats.hw_stats_dma.lsb;

	bfa_msgq_cmd_set(&bna->stats_mod.stats_get_cmd, NULL, NULL,
		sizeof(struct bfi_enet_stats_req), &stats_req->mh);
	bfa_msgq_cmd_post(&bna->msgq, &bna->stats_mod.stats_get_cmd);
}
1872
/* Fill in the fixed (attribute-independent) resource requirements. */
void
bna_res_req(struct bna_res_info *res_info)
{
	/* DMA memory for COMMON_MODULE */
	res_info[BNA_RES_MEM_T_COM].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_COM].res_u.mem_info.len = ALIGN(
		(bfa_nw_cee_meminfo() +
		bfa_msgq_meminfo()), PAGE_SIZE);

	/* DMA memory for retrieving IOC attributes */
	res_info[BNA_RES_MEM_T_ATTR].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_ATTR].res_u.mem_info.len =
		ALIGN(bfa_nw_ioc_meminfo(), PAGE_SIZE);

	/* Virtual memory for retrieving fw_trc (currently unused: 0) */
	res_info[BNA_RES_MEM_T_FWTRC].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.mem_type = BNA_MEM_T_KVA;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.num = 0;
	res_info[BNA_RES_MEM_T_FWTRC].res_u.mem_info.len = 0;

	/* DMA memory for retrieving stats */
	res_info[BNA_RES_MEM_T_STATS].res_type = BNA_RES_T_MEM;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mem_type = BNA_MEM_T_DMA;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.num = 1;
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.len =
		ALIGN(sizeof(struct bfi_enet_stats),
			PAGE_SIZE);
}
1905
/* Fill in the resource requirements that scale with the attributes
 * discovered from the IOC (queue, MAC and handle counts). */
void
bna_mod_res_req(struct bna *bna, struct bna_res_info *res_info)
{
	struct bna_attr *attr = &bna->ioceth.attr;

	/* Virtual memory for Tx objects - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TX_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_tx);

	/* Virtual memory for TxQ - stored by Tx module */
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_TXQ_ARRAY].res_u.mem_info.len =
		attr->num_txq * sizeof(struct bna_txq);

	/* Virtual memory for Rx objects - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RX_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rx);

	/* Virtual memory for RxPath - stored by Rx module */
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXP_ARRAY].res_u.mem_info.len =
		attr->num_rxp * sizeof(struct bna_rxp);

	/* Virtual memory for RxQ - stored by Rx module (2 RxQs per path) */
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_RXQ_ARRAY].res_u.mem_info.len =
		(attr->num_rxp * 2) * sizeof(struct bna_rxq);

	/* Virtual memory for Unicast MAC address - stored by ucam module */
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_UCMAC_ARRAY].res_u.mem_info.len =
		attr->num_ucmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast MAC address - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCMAC_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mac);

	/* Virtual memory for Multicast handle - stored by mcam module */
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_type = BNA_RES_T_MEM;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.mem_type =
		BNA_MEM_T_KVA;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.num = 1;
	res_info[BNA_MOD_RES_MEM_T_MCHANDLE_ARRAY].res_u.mem_info.len =
		attr->num_mcmac * sizeof(struct bna_mcam_handle);
}
1975
/*
 * First-stage init: wire up driver/PCI context, record the stats DMA
 * buffer addresses, initialize register addresses, and set up the
 * ioceth, enet and ethport objects.
 */
void
bna_init(struct bna *bna, struct bnad *bnad,
		struct bfa_pcidev *pcidev, struct bna_res_info *res_info)
{
	bna->bnad = bnad;
	bna->pcidev = *pcidev;

	bna->stats.hw_stats_kva = (struct bfi_enet_stats *)
	res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].kva;
	bna->stats.hw_stats_dma.msb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.msb;
	bna->stats.hw_stats_dma.lsb =
		res_info[BNA_RES_MEM_T_STATS].res_u.mem_info.mdl[0].dma.lsb;

	bna_reg_addr_init(bna, &bna->pcidev);

	/* Also initializes diag, cee, sfp, phy_port, msgq */
	bna_ioceth_init(&bna->ioceth, bna, res_info);

	bna_enet_init(&bna->enet, bna);
	bna_ethport_init(&bna->ethport, bna);
}
1998
/*
 * Second-stage init, run after attributes are known: Tx/Rx/ucam/mcam
 * modules.  Sets BNA_MOD_F_INIT_DONE so bna_uninit() knows to undo it.
 */
void
bna_mod_init(struct bna *bna, struct bna_res_info *res_info)
{
	bna_tx_mod_init(&bna->tx_mod, bna, res_info);

	bna_rx_mod_init(&bna->rx_mod, bna, res_info);

	bna_ucam_mod_init(&bna->ucam_mod, bna, res_info);

	bna_mcam_mod_init(&bna->mcam_mod, bna, res_info);

	bna->default_mode_rid = BFI_INVALID_RID;
	bna->promisc_rid = BFI_INVALID_RID;

	bna->mod_flags |= BNA_MOD_F_INIT_DONE;
}
2015
/* Tear everything down in reverse order of bna_init()/bna_mod_init(). */
void
bna_uninit(struct bna *bna)
{
	/* Module-stage teardown only if bna_mod_init() completed. */
	if (bna->mod_flags & BNA_MOD_F_INIT_DONE) {
		bna_mcam_mod_uninit(&bna->mcam_mod);
		bna_ucam_mod_uninit(&bna->ucam_mod);
		bna_rx_mod_uninit(&bna->rx_mod);
		bna_tx_mod_uninit(&bna->tx_mod);
		bna->mod_flags &= ~BNA_MOD_F_INIT_DONE;
	}

	bna_stats_mod_uninit(&bna->stats_mod);
	bna_ethport_uninit(&bna->ethport);
	bna_enet_uninit(&bna->enet);

	bna_ioceth_uninit(&bna->ioceth);

	bna->bnad = NULL;
}
2035
2036int
2037bna_num_txq_set(struct bna *bna, int num_txq)
2038{
2039 if (num_txq > 0 && (num_txq <= bna->ioceth.attr.num_txq)) {
2040 bna->ioceth.attr.num_txq = num_txq;
2041 return BNA_CB_SUCCESS;
2042 }
2043
2044 return BNA_CB_FAIL;
2045}
2046
2047int
2048bna_num_rxp_set(struct bna *bna, int num_rxp)
2049{
2050 if (num_rxp > 0 && (num_rxp <= bna->ioceth.attr.num_rxp)) {
2051 bna->ioceth.attr.num_rxp = num_rxp;
2052 return BNA_CB_SUCCESS;
2053 }
2054
2055 return BNA_CB_FAIL;
2056}
2057
2058struct bna_mac *
2059bna_ucam_mod_mac_get(struct bna_ucam_mod *ucam_mod)
2060{
2061 struct list_head *qe;
2062
2063 if (list_empty(&ucam_mod->free_q))
2064 return NULL;
2065
2066 bfa_q_deq(&ucam_mod->free_q, &qe);
2067
2068 return (struct bna_mac *)qe;
2069}
2070
/* Return a ucast MAC entry to the free list. */
void
bna_ucam_mod_mac_put(struct bna_ucam_mod *ucam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &ucam_mod->free_q);
}
2076
2077struct bna_mac *
2078bna_mcam_mod_mac_get(struct bna_mcam_mod *mcam_mod)
2079{
2080 struct list_head *qe;
2081
2082 if (list_empty(&mcam_mod->free_q))
2083 return NULL;
2084
2085 bfa_q_deq(&mcam_mod->free_q, &qe);
2086
2087 return (struct bna_mac *)qe;
2088}
2089
/* Return a mcast MAC entry to the free list. */
void
bna_mcam_mod_mac_put(struct bna_mcam_mod *mcam_mod, struct bna_mac *mac)
{
	list_add_tail(&mac->qe, &mcam_mod->free_q);
}
2095
2096struct bna_mcam_handle *
2097bna_mcam_mod_handle_get(struct bna_mcam_mod *mcam_mod)
2098{
2099 struct list_head *qe;
2100
2101 if (list_empty(&mcam_mod->free_handle_q))
2102 return NULL;
2103
2104 bfa_q_deq(&mcam_mod->free_handle_q, &qe);
2105
2106 return (struct bna_mcam_handle *)qe;
2107}
2108
/* Return a mcast handle to the free list. */
void
bna_mcam_mod_handle_put(struct bna_mcam_mod *mcam_mod,
			struct bna_mcam_handle *handle)
{
	list_add_tail(&handle->qe, &mcam_mod->free_handle_q);
}
2115
/*
 * Kick off an asynchronous hardware stats fetch.  Completes immediately
 * via bnad_cb_stats_get() with BNA_CB_FAIL when the IOC is down, or
 * BNA_CB_BUSY when a fetch is already outstanding.
 */
void
bna_hw_stats_get(struct bna *bna)
{
	if (!bna->stats_mod.ioc_ready) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_FAIL, &bna->stats);
		return;
	}
	if (bna->stats_mod.stats_get_busy) {
		bnad_cb_stats_get(bna->bnad, BNA_CB_BUSY, &bna->stats);
		return;
	}

	bna_bfi_stats_get(bna);
}