/*
 * Copyright (c) 2005-2009 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include <bfa.h>
#include <bfa_svc.h>
#include <bfi/bfi_pport.h>
#include <cs/bfa_debug.h>
#include <aen/bfa_aen.h>
#include <cs/bfa_plog.h>
#include <aen/bfa_aen_port.h>

BFA_TRC_FILE(HAL, PPORT);
BFA_MODULE(pport);

#define bfa_pport_callback(__pport, __event) do {			\
	if ((__pport)->bfa->fcs) {					\
		(__pport)->event_cbfn((__pport)->event_cbarg, (__event)); \
	} else {							\
		(__pport)->hcb_event = (__event);			\
		bfa_cb_queue((__pport)->bfa, &(__pport)->hcb_qe,	\
			     __bfa_cb_port_event, (__pport));		\
	}								\
} while (0)

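/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * bfa_pport_callback() above takes one of two paths.  When the BFA instance
 * has an FCS context (bfa->fcs is set), the registered event callback is
 * invoked directly; otherwise the event is stashed in hcb_event and deferred
 * through bfa_cb_queue(), to be delivered later by __bfa_cb_port_event().
 *
 *	bfa_pport_callback(pport, BFA_PPORT_LINKUP);
 *	// fcs context:     pport->event_cbfn(pport->event_cbarg, BFA_PPORT_LINKUP)
 *	// non-fcs context: queued; delivered from __bfa_cb_port_event()
 */
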
/*
 * The port is considered disabled if the corresponding physical port or
 * the IOC has been explicitly disabled.
 */
#define BFA_PORT_IS_DISABLED(bfa) \
	((bfa_pport_is_disabled(bfa) == BFA_TRUE) || \
	 (bfa_ioc_is_disabled(&bfa->ioc) == BFA_TRUE))

/*
 * forward declarations
 */
static bfa_boolean_t bfa_pport_send_enable(struct bfa_pport_s *port);
static bfa_boolean_t bfa_pport_send_disable(struct bfa_pport_s *port);
static void bfa_pport_update_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_reset_linkinfo(struct bfa_pport_s *pport);
static void bfa_pport_set_wwns(struct bfa_pport_s *port);
static void __bfa_cb_port_event(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete);
static void __bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete);
static void bfa_port_stats_timeout(void *cbarg);
static void bfa_port_stats_clr_timeout(void *cbarg);

/**
 *  bfa_pport_private
 */

/**
 * BFA port state machine events
 */
enum bfa_pport_sm_event {
	BFA_PPORT_SM_START = 1,		/*  start port state machine */
	BFA_PPORT_SM_STOP = 2,		/*  stop port state machine */
	BFA_PPORT_SM_ENABLE = 3,	/*  enable port */
	BFA_PPORT_SM_DISABLE = 4,	/*  disable port state machine */
	BFA_PPORT_SM_FWRSP = 5,		/*  firmware enable/disable rsp */
	BFA_PPORT_SM_LINKUP = 6,	/*  firmware linkup event */
	BFA_PPORT_SM_LINKDOWN = 7,	/*  firmware linkdown event */
	BFA_PPORT_SM_QRESUME = 8,	/*  CQ space available */
	BFA_PPORT_SM_HWFAIL = 9,	/*  IOC h/w failure */
};
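
/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * a typical event sequence through the state machine handlers below,
 * assuming request-queue space is available and the link eventually
 * comes up:
 *
 *	uninit --START--> enabling --FWRSP--> linkdown --LINKUP--> linkup
 *	linkup --LINKDOWN--> linkdown
 *	enabling/linkdown/linkup --HWFAIL--> iocdown
 *
 * When bfa_pport_send_enable()/bfa_pport_send_disable() cannot get
 * request-queue space, the *_qwait states wait for BFA_PPORT_SM_QRESUME
 * before re-issuing the request.
 */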

static void bfa_pport_sm_uninit(struct bfa_pport_s *pport,
				enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
					enum bfa_pport_sm_event event);
static void bfa_pport_sm_enabling(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkdown(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_linkup(struct bfa_pport_s *pport,
				enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling(struct bfa_pport_s *pport,
				   enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
					 enum bfa_pport_sm_event event);
static void bfa_pport_sm_disabled(struct bfa_pport_s *pport,
				  enum bfa_pport_sm_event event);
static void bfa_pport_sm_stopped(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocdown(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);
static void bfa_pport_sm_iocfail(struct bfa_pport_s *pport,
				 enum bfa_pport_sm_event event);

static struct bfa_sm_table_s hal_pport_sm_table[] = {
	{BFA_SM(bfa_pport_sm_uninit), BFA_PPORT_ST_UNINIT},
	{BFA_SM(bfa_pport_sm_enabling_qwait), BFA_PPORT_ST_ENABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_enabling), BFA_PPORT_ST_ENABLING},
	{BFA_SM(bfa_pport_sm_linkdown), BFA_PPORT_ST_LINKDOWN},
	{BFA_SM(bfa_pport_sm_linkup), BFA_PPORT_ST_LINKUP},
	{BFA_SM(bfa_pport_sm_disabling_qwait), BFA_PPORT_ST_DISABLING_QWAIT},
	{BFA_SM(bfa_pport_sm_disabling), BFA_PPORT_ST_DISABLING},
	{BFA_SM(bfa_pport_sm_disabled), BFA_PPORT_ST_DISABLED},
	{BFA_SM(bfa_pport_sm_stopped), BFA_PPORT_ST_STOPPED},
	{BFA_SM(bfa_pport_sm_iocdown), BFA_PPORT_ST_IOCDOWN},
	{BFA_SM(bfa_pport_sm_iocfail), BFA_PPORT_ST_IOCDOWN},
};
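
/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the table above maps each state-machine handler to its externally visible
 * BFA_PPORT_ST_* value.  It is consumed through bfa_sm_to_state(), for
 * example in bfa_pport_get_attr() and bfa_pport_is_disabled() below:
 *
 *	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
 */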

static void
bfa_pport_aen_post(struct bfa_pport_s *pport, enum bfa_port_aen_event event)
{
	union bfa_aen_data_u aen_data;
	struct bfa_log_mod_s *logmod = pport->bfa->logm;
	wwn_t pwwn = pport->pwwn;
	char pwwn_ptr[BFA_STRING_32];
	struct bfa_ioc_attr_s ioc_attr;

	wwn2str(pwwn_ptr, pwwn);
	switch (event) {
	case BFA_PORT_AEN_ONLINE:
		bfa_log(logmod, BFA_AEN_PORT_ONLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_OFFLINE:
		bfa_log(logmod, BFA_AEN_PORT_OFFLINE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_ENABLE:
		bfa_log(logmod, BFA_AEN_PORT_ENABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISABLE:
		bfa_log(logmod, BFA_AEN_PORT_DISABLE, pwwn_ptr);
		break;
	case BFA_PORT_AEN_DISCONNECT:
		bfa_log(logmod, BFA_AEN_PORT_DISCONNECT, pwwn_ptr);
		break;
	case BFA_PORT_AEN_QOS_NEG:
		bfa_log(logmod, BFA_AEN_PORT_QOS_NEG, pwwn_ptr);
		break;
	default:
		break;
	}

	bfa_ioc_get_attr(&pport->bfa->ioc, &ioc_attr);
	aen_data.port.ioc_type = ioc_attr.ioc_type;
	aen_data.port.pwwn = pwwn;
}

static void
bfa_pport_sm_uninit(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/**
		 * Start event after IOC is configured and BFA is started.
		 */
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Port is persistently configured to be in enabled state. Do
		 * not change state. Port enabling is done when START event is
		 * received.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * If a port is persistently configured to be disabled, the
		 * first event will be a port disable request.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_enabling_qwait(struct bfa_pport_s *pport,
			    enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		bfa_pport_send_enable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Enable is already in progress.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Just send disable request to firmware when room becomes
		 * available in request queue.
		 */
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_reqq_wcancel(&pport->reqq_wait);
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_enabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		break;

	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);

		bfa_assert(pport->event_cbfn);
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already being enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_linkdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_LINKUP:
		bfa_pport_update_linkinfo(pport);
		bfa_sm_set_state(pport, bfa_pport_sm_linkup);
		bfa_assert(pport->event_cbfn);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkup");
		bfa_pport_callback(pport, BFA_PPORT_LINKUP);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ONLINE);
		/**
		 * If QoS is enabled and it is not yet online,
		 * send a separate event.
		 */
		if ((pport->cfg.qos_enabled)
		    && (bfa_os_ntohl(pport->qos_attr.state) != BFA_QOS_ONLINE))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_QOS_NEG);

		break;

	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link down event.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_linkup(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_ENABLE:
		/**
		 * Already enabled.
		 */
		break;

	case BFA_PPORT_SM_DISABLE:
		if (bfa_pport_send_disable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_disabling_qwait);

		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_DISABLE, 0, "Port Disable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		bfa_pport_aen_post(pport, BFA_PORT_AEN_DISABLE);
		break;

	case BFA_PPORT_SM_LINKDOWN:
		bfa_sm_set_state(pport, bfa_pport_sm_linkdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ST_CHANGE, 0, "Port Linkdown");
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_pport_reset_linkinfo(pport);
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		bfa_pport_reset_linkinfo(pport);
		bfa_pport_callback(pport, BFA_PPORT_LINKDOWN);
		if (BFA_PORT_IS_DISABLED(pport->bfa))
			bfa_pport_aen_post(pport, BFA_PORT_AEN_OFFLINE);
		else
			bfa_pport_aen_post(pport, BFA_PORT_AEN_DISCONNECT);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_disabling_qwait(struct bfa_pport_s *pport,
			     enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_QRESUME:
		bfa_sm_set_state(pport, bfa_pport_sm_disabling);
		bfa_pport_send_disable(pport);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		bfa_reqq_wcancel(&pport->reqq_wait);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_disabling(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_FWRSP:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already being disabled.
		 */
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_LINKUP:
	case BFA_PPORT_SM_LINKDOWN:
		/**
		 * Possible to get link events when doing back-to-back
		 * enable/disables.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_disabled(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		/**
		 * Ignore start event for a port that is disabled.
		 */
		break;

	case BFA_PPORT_SM_STOP:
		bfa_sm_set_state(pport, bfa_pport_sm_stopped);
		break;

	case BFA_PPORT_SM_ENABLE:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);

		bfa_plog_str(pport->bfa->plog, BFA_PL_MID_HAL,
			     BFA_PL_EID_PORT_ENABLE, 0, "Port Enable");
		bfa_pport_aen_post(pport, BFA_PORT_AEN_ENABLE);
		break;

	case BFA_PPORT_SM_DISABLE:
		/**
		 * Already disabled.
		 */
		break;

	case BFA_PPORT_SM_HWFAIL:
		bfa_sm_set_state(pport, bfa_pport_sm_iocfail);
		break;

	default:
		bfa_sm_fault(pport->bfa, event);
	}
}

static void
bfa_pport_sm_stopped(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all other events.
		 */
		;
	}
}

/**
 * Port is enabled. IOC is down/failed.
 */
static void
bfa_pport_sm_iocdown(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		if (bfa_pport_send_enable(pport))
			bfa_sm_set_state(pport, bfa_pport_sm_enabling);
		else
			bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}

/**
 * Port is disabled. IOC is down/failed.
 */
static void
bfa_pport_sm_iocfail(struct bfa_pport_s *pport, enum bfa_pport_sm_event event)
{
	bfa_trc(pport->bfa, event);

	switch (event) {
	case BFA_PPORT_SM_START:
		bfa_sm_set_state(pport, bfa_pport_sm_disabled);
		break;

	case BFA_PPORT_SM_ENABLE:
		bfa_sm_set_state(pport, bfa_pport_sm_iocdown);
		break;

	default:
		/**
		 * Ignore all events.
		 */
		;
	}
}



/**
 *  bfa_pport_private
 */

static void
__bfa_cb_port_event(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *pport = cbarg;

	if (complete)
		pport->event_cbfn(pport->event_cbarg, pport->hcb_event);
}

#define PPORT_STATS_DMA_SZ (BFA_ROUNDUP(sizeof(union bfa_pport_stats_u), \
					BFA_CACHELINE_SZ))

static void
bfa_pport_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *ndm_len,
		  u32 *dm_len)
{
	*dm_len += PPORT_STATS_DMA_SZ;
}

static void
bfa_pport_qresume(void *cbarg)
{
	struct bfa_pport_s *port = cbarg;

	bfa_sm_send_event(port, BFA_PPORT_SM_QRESUME);
}

static void
bfa_pport_mem_claim(struct bfa_pport_s *pport, struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	pport->stats_kva = dm_kva;
	pport->stats_pa = dm_pa;
	pport->stats = (union bfa_pport_stats_u *)dm_kva;

	dm_kva += PPORT_STATS_DMA_SZ;
	dm_pa += PPORT_STATS_DMA_SZ;

	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;
}
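
/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the claim above carves PPORT_STATS_DMA_SZ bytes out of the DMA-able
 * meminfo region for the firmware-updated stats block and advances the
 * meminfo cursors, consuming exactly what bfa_pport_meminfo() reserved:
 *
 *	bfa_pport_meminfo(cfg, &ndm_len, &dm_len); // dm_len += PPORT_STATS_DMA_SZ
 *	bfa_pport_mem_claim(pport, meminfo);       // consumes that same amount
 */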

/**
 * Memory initialization.
 */
static void
bfa_pport_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	struct bfa_pport_cfg_s *port_cfg = &pport->cfg;

	bfa_os_memset(pport, 0, sizeof(struct bfa_pport_s));
	pport->bfa = bfa;

	bfa_pport_mem_claim(pport, meminfo);

	bfa_sm_set_state(pport, bfa_pport_sm_uninit);

	/**
	 * initialize and set default configuration
	 */
	port_cfg->topology = BFA_PPORT_TOPOLOGY_P2P;
	port_cfg->speed = BFA_PPORT_SPEED_AUTO;
	port_cfg->trunked = BFA_FALSE;
	port_cfg->maxfrsize = 0;

	port_cfg->trl_def_speed = BFA_PPORT_SPEED_1GBPS;

	bfa_reqq_winit(&pport->reqq_wait, bfa_pport_qresume, pport);
}

static void
bfa_pport_initdone(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	/**
	 * Initialize port attributes from IOC hardware data.
	 */
	bfa_pport_set_wwns(pport);
	if (pport->cfg.maxfrsize == 0)
		pport->cfg.maxfrsize = bfa_ioc_maxfrsize(&bfa->ioc);
	pport->cfg.rx_bbcredit = bfa_ioc_rx_bbcredit(&bfa->ioc);
	pport->speed_sup = bfa_ioc_speed_sup(&bfa->ioc);

	bfa_assert(pport->cfg.maxfrsize);
	bfa_assert(pport->cfg.rx_bbcredit);
	bfa_assert(pport->speed_sup);
}

static void
bfa_pport_detach(struct bfa_s *bfa)
{
}

/**
 * Called when IOC is ready.
 */
static void
bfa_pport_start(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_START);
}

/**
 * Called before IOC is stopped.
 */
static void
bfa_pport_stop(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_STOP);
}

/**
 * Called when IOC failure is detected.
 */
static void
bfa_pport_iocdisable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_HWFAIL);
}

static void
bfa_pport_update_linkinfo(struct bfa_pport_s *pport)
{
	struct bfi_pport_event_s *pevent = pport->event_arg.i2hmsg.event;

	pport->speed = pevent->link_state.speed;
	pport->topology = pevent->link_state.topology;

	if (pport->topology == BFA_PPORT_TOPOLOGY_LOOP)
		pport->myalpa = pevent->link_state.tl.loop_info.myalpa;

	/*
	 * QoS Details
	 */
	bfa_os_assign(pport->qos_attr, pevent->link_state.qos_attr);
	bfa_os_assign(pport->qos_vc_attr, pevent->link_state.qos_vc_attr);

	bfa_trc(pport->bfa, pport->speed);
	bfa_trc(pport->bfa, pport->topology);
}

static void
bfa_pport_reset_linkinfo(struct bfa_pport_s *pport)
{
	pport->speed = BFA_PPORT_SPEED_UNKNOWN;
	pport->topology = BFA_PPORT_TOPOLOGY_NONE;
}

/**
 * Send port enable message to firmware.
 */
static bfa_boolean_t
bfa_pport_send_enable(struct bfa_pport_s *port)
{
	struct bfi_pport_enable_req_s *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	port->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_ENABLE_REQ,
		    bfa_lpuid(port->bfa));
	m->nwwn = port->nwwn;
	m->pwwn = port->pwwn;
	m->port_cfg = port->cfg;
	m->msgtag = port->msgtag;
	m->port_cfg.maxfrsize = bfa_os_htons(port->cfg.maxfrsize);
	bfa_dma_be_addr_set(m->stats_dma_addr, port->stats_pa);
	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_lo);
	bfa_trc(port->bfa, m->stats_dma_addr.a32.addr_hi);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
	return BFA_TRUE;
}
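
/*
 * Illustrative sketch (added for clarity, not part of the original driver):
 * the BFA_TRUE/BFA_FALSE return value above is how the state machine picks
 * between the normal and the *_qwait states, e.g. in bfa_pport_sm_uninit():
 *
 *	if (bfa_pport_send_enable(pport))
 *		bfa_sm_set_state(pport, bfa_pport_sm_enabling);
 *	else
 *		bfa_sm_set_state(pport, bfa_pport_sm_enabling_qwait);
 *
 * In the qwait case, bfa_reqq_wait() arms reqq_wait so that
 * bfa_pport_qresume() later posts BFA_PPORT_SM_QRESUME and the request
 * is re-issued once queue space is available.
 */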

/**
 * Send port disable message to firmware.
 */
static bfa_boolean_t
bfa_pport_send_disable(struct bfa_pport_s *port)
{
	bfi_pport_disable_req_t *m;

	/**
	 * Increment message tag before queue check, so that responses to old
	 * requests are discarded.
	 */
	port->msgtag++;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->reqq_wait);
		return BFA_FALSE;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_DISABLE_REQ,
		    bfa_lpuid(port->bfa));
	m->msgtag = port->msgtag;

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);

	return BFA_TRUE;
}

static void
bfa_pport_set_wwns(struct bfa_pport_s *port)
{
	port->pwwn = bfa_ioc_get_pwwn(&port->bfa->ioc);
	port->nwwn = bfa_ioc_get_nwwn(&port->bfa->ioc);

	bfa_trc(port->bfa, port->pwwn);
	bfa_trc(port->bfa, port->nwwn);
}

static void
bfa_port_send_txcredit(void *port_cbarg)
{
	struct bfa_pport_s *port = port_cbarg;
	struct bfi_pport_set_svc_params_req_s *m;

	/**
	 * check for room in queue to send request now
	 */
	m = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);
	if (!m) {
		bfa_trc(port->bfa, port->cfg.tx_bbcredit);
		return;
	}

	bfi_h2i_set(m->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_SET_SVC_PARAMS_REQ,
		    bfa_lpuid(port->bfa));
	m->tx_bbcredit = bfa_os_htons((u16) port->cfg.tx_bbcredit);

	/**
	 * queue I/O message to firmware
	 */
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
}



/**
 *  bfa_pport_public
 */

/**
 * Firmware message handler.
 */
void
bfa_pport_isr(struct bfa_s *bfa, struct bfi_msg_s *msg)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	union bfi_pport_i2h_msg_u i2hmsg;

	i2hmsg.msg = msg;
	pport->event_arg.i2hmsg = i2hmsg;

	switch (msg->mhdr.msg_id) {
	case BFI_PPORT_I2H_ENABLE_RSP:
		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
		break;

	case BFI_PPORT_I2H_DISABLE_RSP:
		if (pport->msgtag == i2hmsg.enable_rsp->msgtag)
			bfa_sm_send_event(pport, BFA_PPORT_SM_FWRSP);
		break;

	case BFI_PPORT_I2H_EVENT:
		switch (i2hmsg.event->link_state.linkstate) {
		case BFA_PPORT_LINKUP:
			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKUP);
			break;
		case BFA_PPORT_LINKDOWN:
			bfa_sm_send_event(pport, BFA_PPORT_SM_LINKDOWN);
			break;
		case BFA_PPORT_TRUNK_LINKDOWN:
			/** todo: event notification */
			break;
		}
		break;

	case BFI_PPORT_I2H_GET_STATS_RSP:
	case BFI_PPORT_I2H_GET_QOS_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (pport->stats_busy == BFA_FALSE
		    || pport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&pport->timer);
		pport->stats_status = i2hmsg.getstats_rsp->status;
		bfa_cb_queue(pport->bfa, &pport->hcb_qe, __bfa_cb_port_stats,
			     pport);
		break;
	case BFI_PPORT_I2H_CLEAR_STATS_RSP:
	case BFI_PPORT_I2H_CLEAR_QOS_STATS_RSP:
		/*
		 * check for timer pop before processing the rsp
		 */
		if (pport->stats_busy == BFA_FALSE
		    || pport->stats_status == BFA_STATUS_ETIMER)
			break;

		bfa_timer_stop(&pport->timer);
		pport->stats_status = BFA_STATUS_OK;
		bfa_cb_queue(pport->bfa, &pport->hcb_qe,
			     __bfa_cb_port_stats_clr, pport);
		break;

	default:
		bfa_assert(0);
	}
}



/**
 *  bfa_pport_api
 */

/**
 * Registered callback for port events.
 */
void
bfa_pport_event_register(struct bfa_s *bfa,
			 void (*cbfn) (void *cbarg, bfa_pport_event_t event),
			 void *cbarg)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	pport->event_cbfn = cbfn;
	pport->event_cbarg = cbarg;
}

bfa_status_t
bfa_pport_enable(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	if (pport->diag_busy)
		return BFA_STATUS_DIAG_BUSY;
	else if (bfa_sm_cmp_state
		 (BFA_PORT_MOD(bfa), bfa_pport_sm_disabling_qwait))
		return BFA_STATUS_DEVBUSY;

	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_ENABLE);
	return BFA_STATUS_OK;
}
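
/*
 * Minimal usage sketch (added for clarity, not part of the original driver).
 * The my_drv_* names are hypothetical and error handling is elided; the
 * callback signature follows how event_cbfn is invoked above.
 *
 *	static void my_drv_port_event(void *cbarg, bfa_pport_event_t event)
 *	{
 *		// react to BFA_PPORT_LINKUP / BFA_PPORT_LINKDOWN
 *	}
 *
 *	bfa_pport_event_register(bfa, my_drv_port_event, my_drv);
 *	if (bfa_pport_enable(bfa) != BFA_STATUS_OK)
 *		;	// port busy (diagnostics running or disable pending)
 */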

bfa_status_t
bfa_pport_disable(struct bfa_s *bfa)
{
	bfa_sm_send_event(BFA_PORT_MOD(bfa), BFA_PPORT_SM_DISABLE);
	return BFA_STATUS_OK;
}

/**
 * Configure port speed.
 */
bfa_status_t
bfa_pport_cfg_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, speed);

	if ((speed != BFA_PPORT_SPEED_AUTO) && (speed > pport->speed_sup)) {
		bfa_trc(bfa, pport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	pport->cfg.speed = speed;

	return BFA_STATUS_OK;
}

/**
 * Get current speed.
 */
enum bfa_pport_speed
bfa_pport_get_speed(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->speed;
}

/**
 * Configure port topology.
 */
bfa_status_t
bfa_pport_cfg_topology(struct bfa_s *bfa, enum bfa_pport_topology topology)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, topology);
	bfa_trc(bfa, pport->cfg.topology);

	switch (topology) {
	case BFA_PPORT_TOPOLOGY_P2P:
	case BFA_PPORT_TOPOLOGY_LOOP:
	case BFA_PPORT_TOPOLOGY_AUTO:
		break;

	default:
		return BFA_STATUS_EINVAL;
	}

	pport->cfg.topology = topology;
	return BFA_STATUS_OK;
}

/**
 * Get current topology.
 */
enum bfa_pport_topology
bfa_pport_get_topology(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->topology;
}

bfa_status_t
bfa_pport_cfg_hardalpa(struct bfa_s *bfa, u8 alpa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, alpa);
	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
	bfa_trc(bfa, pport->cfg.hardalpa);

	pport->cfg.cfg_hardalpa = BFA_TRUE;
	pport->cfg.hardalpa = alpa;

	return BFA_STATUS_OK;
}

bfa_status_t
bfa_pport_clr_hardalpa(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.cfg_hardalpa);
	bfa_trc(bfa, pport->cfg.hardalpa);

	pport->cfg.cfg_hardalpa = BFA_FALSE;
	return BFA_STATUS_OK;
}

bfa_boolean_t
bfa_pport_get_hardalpa(struct bfa_s *bfa, u8 *alpa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	*alpa = port->cfg.hardalpa;
	return port->cfg.cfg_hardalpa;
}

u8
bfa_pport_get_myalpa(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->myalpa;
}

bfa_status_t
bfa_pport_cfg_maxfrsize(struct bfa_s *bfa, u16 maxfrsize)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, maxfrsize);
	bfa_trc(bfa, pport->cfg.maxfrsize);

	/*
	 * must be within the supported range
	 */
	if ((maxfrsize > FC_MAX_PDUSZ) || (maxfrsize < FC_MIN_PDUSZ))
		return BFA_STATUS_INVLD_DFSZ;

	/*
	 * must be a power of 2, unless it is the max frame size of 2112
	 */
	if ((maxfrsize != FC_MAX_PDUSZ) && (maxfrsize & (maxfrsize - 1)))
		return BFA_STATUS_INVLD_DFSZ;

	pport->cfg.maxfrsize = maxfrsize;
	return BFA_STATUS_OK;
}

u16
bfa_pport_get_maxfrsize(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->cfg.maxfrsize;
}

u32
bfa_pport_mypid(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->mypid;
}

u8
bfa_pport_get_rx_bbcredit(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return port->cfg.rx_bbcredit;
}

void
bfa_pport_set_tx_bbcredit(struct bfa_s *bfa, u16 tx_bbcredit)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	port->cfg.tx_bbcredit = (u8) tx_bbcredit;
	bfa_port_send_txcredit(port);
}

/**
 * Get port attributes.
 */

wwn_t
bfa_pport_get_wwn(struct bfa_s *bfa, bfa_boolean_t node)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	if (node)
		return pport->nwwn;
	else
		return pport->pwwn;
}

void
bfa_pport_get_attr(struct bfa_s *bfa, struct bfa_pport_attr_s *attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_os_memset(attr, 0, sizeof(struct bfa_pport_attr_s));

	attr->nwwn = pport->nwwn;
	attr->pwwn = pport->pwwn;

	bfa_os_memcpy(&attr->pport_cfg, &pport->cfg,
		      sizeof(struct bfa_pport_cfg_s));
	/*
	 * speed attributes
	 */
	attr->pport_cfg.speed = pport->cfg.speed;
	attr->speed_supported = pport->speed_sup;
	attr->speed = pport->speed;
	attr->cos_supported = FC_CLASS_3;

	/*
	 * topology attributes
	 */
	attr->pport_cfg.topology = pport->cfg.topology;
	attr->topology = pport->topology;

	/*
	 * beacon attributes
	 */
	attr->beacon = pport->beacon;
	attr->link_e2e_beacon = pport->link_e2e_beacon;
	attr->plog_enabled = bfa_plog_get_setting(pport->bfa->plog);

	attr->pport_cfg.path_tov = bfa_fcpim_path_tov_get(bfa);
	attr->pport_cfg.q_depth = bfa_fcpim_qdepth_get(bfa);
	attr->port_state = bfa_sm_to_state(hal_pport_sm_table, pport->sm);
	if (bfa_ioc_is_disabled(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_IOCDIS;
	else if (bfa_ioc_fw_mismatch(&pport->bfa->ioc))
		attr->port_state = BFA_PPORT_ST_FWMISMATCH;
}

static void
bfa_port_stats_query(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_get_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);

	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_query,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_get_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_GET_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);

	return;
}

static void
bfa_port_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);

	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
	return;
}

static void
bfa_port_qos_stats_clear(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;
	bfi_pport_clear_qos_stats_req_t *msg;

	msg = bfa_reqq_next(port->bfa, BFA_REQQ_PORT);

	if (!msg) {
		port->stats_qfull = BFA_TRUE;
		bfa_reqq_winit(&port->stats_reqq_wait, bfa_port_qos_stats_clear,
			       port);
		bfa_reqq_wait(port->bfa, BFA_REQQ_PORT, &port->stats_reqq_wait);
		return;
	}
	port->stats_qfull = BFA_FALSE;

	bfa_os_memset(msg, 0, sizeof(bfi_pport_clear_qos_stats_req_t));
	bfi_h2i_set(msg->mh, BFI_MC_FC_PORT, BFI_PPORT_H2I_CLEAR_QOS_STATS_REQ,
		    bfa_lpuid(port->bfa));
	bfa_reqq_produce(port->bfa, BFA_REQQ_PORT);
	return;
}

static void
bfa_pport_stats_swap(union bfa_pport_stats_u *d, union bfa_pport_stats_u *s)
{
	u32 *dip = (u32 *) d;
	u32 *sip = (u32 *) s;
	int i;

	/*
	 * Swap the 64-bit fields first
	 */
	for (i = 0;
	     i <
	     ((sizeof(union bfa_pport_stats_u) -
	       sizeof(struct bfa_qos_stats_s)) / sizeof(u32)); i = i + 2) {
#ifdef __BIGENDIAN
		dip[i] = bfa_os_ntohl(sip[i]);
		dip[i + 1] = bfa_os_ntohl(sip[i + 1]);
#else
		dip[i] = bfa_os_ntohl(sip[i + 1]);
		dip[i + 1] = bfa_os_ntohl(sip[i]);
#endif
	}

	/*
	 * Now swap the 32 bit fields
	 */
	for (; i < (sizeof(union bfa_pport_stats_u) / sizeof(u32)); ++i)
		dip[i] = bfa_os_ntohl(sip[i]);
}
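
/*
 * Worked example (added for clarity, not part of the original driver):
 * the first loop above treats each 64-bit counter as two 32-bit words.
 * On a little-endian host the words are both byte-swapped and exchanged,
 * so a big-endian value stored as {sip[i] = high word, sip[i+1] = low word}
 * ends up as dip[i] = bfa_os_ntohl(sip[i + 1]) (low word first), which is
 * the correct little-endian 64-bit layout.  The trailing region excluded
 * from that loop (the QoS stats area) holds plain 32-bit counters and only
 * needs the per-word bfa_os_ntohl() in the second loop.
 */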

static void
__bfa_cb_port_stats_clr(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *port = cbarg;

	if (complete) {
		port->stats_cbfn(port->stats_cbarg, port->stats_status);
	} else {
		port->stats_busy = BFA_FALSE;
		port->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_port_stats_clr_timeout(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats_clr, port);
}

static void
__bfa_cb_port_stats(void *cbarg, bfa_boolean_t complete)
{
	struct bfa_pport_s *port = cbarg;

	if (complete) {
		if (port->stats_status == BFA_STATUS_OK)
			bfa_pport_stats_swap(port->stats_ret, port->stats);
		port->stats_cbfn(port->stats_cbarg, port->stats_status);
	} else {
		port->stats_busy = BFA_FALSE;
		port->stats_status = BFA_STATUS_OK;
	}
}

static void
bfa_port_stats_timeout(void *cbarg)
{
	struct bfa_pport_s *port = (struct bfa_pport_s *)cbarg;

	bfa_trc(port->bfa, port->stats_qfull);

	if (port->stats_qfull) {
		bfa_reqq_wcancel(&port->stats_reqq_wait);
		port->stats_qfull = BFA_FALSE;
	}

	port->stats_status = BFA_STATUS_ETIMER;
	bfa_cb_queue(port->bfa, &port->hcb_qe, __bfa_cb_port_stats, port);
}

#define BFA_PORT_STATS_TOV	1000

/**
 * Fetch port statistics.
 */
bfa_status_t
bfa_pport_get_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
		    bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_ret = stats;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_stats_query(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}
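
/*
 * Minimal usage sketch (added for clarity, not part of the original driver).
 * The my_drv_* names are hypothetical; the bfa_cb_pport_t signature is
 * assumed from how stats_cbfn is invoked above (cbarg, status).  The
 * completion callback runs later, either after the firmware response or
 * after the BFA_PORT_STATS_TOV timeout (status BFA_STATUS_ETIMER).
 *
 *	static union bfa_pport_stats_u my_drv_stats;
 *
 *	static void my_drv_stats_done(void *cbarg, enum bfa_status status)
 *	{
 *		if (status == BFA_STATUS_OK)
 *			;	// my_drv_stats now holds host-endian counters
 *	}
 *
 *	bfa_pport_get_stats(bfa, &my_drv_stats, my_drv_stats_done, my_drv);
 */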

bfa_status_t
bfa_pport_clear_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_stats_clear(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}

bfa_status_t
bfa_pport_trunk_enable(struct bfa_s *bfa, u8 bitmap)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, bitmap);
	bfa_trc(bfa, pport->cfg.trunked);
	bfa_trc(bfa, pport->cfg.trunk_ports);

	if (!bitmap || (bitmap & (bitmap - 1)))
		return BFA_STATUS_EINVAL;

	pport->cfg.trunked = BFA_TRUE;
	pport->cfg.trunk_ports = bitmap;

	return BFA_STATUS_OK;
}

void
bfa_pport_qos_get_attr(struct bfa_s *bfa, struct bfa_qos_attr_s *qos_attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	qos_attr->state = bfa_os_ntohl(pport->qos_attr.state);
	qos_attr->total_bb_cr = bfa_os_ntohl(pport->qos_attr.total_bb_cr);
}

void
bfa_pport_qos_get_vc_attr(struct bfa_s *bfa,
			  struct bfa_qos_vc_attr_s *qos_vc_attr)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);
	struct bfa_qos_vc_attr_s *bfa_vc_attr = &pport->qos_vc_attr;
	u32 i = 0;

	qos_vc_attr->total_vc_count = bfa_os_ntohs(bfa_vc_attr->total_vc_count);
	qos_vc_attr->shared_credit = bfa_os_ntohs(bfa_vc_attr->shared_credit);
	qos_vc_attr->elp_opmode_flags =
		bfa_os_ntohl(bfa_vc_attr->elp_opmode_flags);

	/*
	 * Individual VC info
	 */
	while (i < qos_vc_attr->total_vc_count) {
		qos_vc_attr->vc_info[i].vc_credit =
			bfa_vc_attr->vc_info[i].vc_credit;
		qos_vc_attr->vc_info[i].borrow_credit =
			bfa_vc_attr->vc_info[i].borrow_credit;
		qos_vc_attr->vc_info[i].priority =
			bfa_vc_attr->vc_info[i].priority;
		++i;
	}
}

/**
 * Fetch QoS Stats.
 */
bfa_status_t
bfa_pport_get_qos_stats(struct bfa_s *bfa, union bfa_pport_stats_u *stats,
			bfa_cb_pport_t cbfn, void *cbarg)
{
	/*
	 * QoS stats is embedded in port stats
	 */
	return bfa_pport_get_stats(bfa, stats, cbfn, cbarg);
}

bfa_status_t
bfa_pport_clear_qos_stats(struct bfa_s *bfa, bfa_cb_pport_t cbfn, void *cbarg)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	if (port->stats_busy) {
		bfa_trc(bfa, port->stats_busy);
		return BFA_STATUS_DEVBUSY;
	}

	port->stats_busy = BFA_TRUE;
	port->stats_cbfn = cbfn;
	port->stats_cbarg = cbarg;

	bfa_port_qos_stats_clear(port);

	bfa_timer_start(bfa, &port->timer, bfa_port_stats_clr_timeout, port,
			BFA_PORT_STATS_TOV);
	return BFA_STATUS_OK;
}

/**
 * Disable port trunking.
 */
bfa_status_t
bfa_pport_trunk_disable(struct bfa_s *bfa)
{
	return BFA_STATUS_OK;
}

bfa_boolean_t
bfa_pport_trunk_query(struct bfa_s *bfa, u32 *bitmap)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	*bitmap = port->cfg.trunk_ports;
	return port->cfg.trunked;
}

bfa_boolean_t
bfa_pport_is_disabled(struct bfa_s *bfa)
{
	struct bfa_pport_s *port = BFA_PORT_MOD(bfa);

	return bfa_sm_to_state(hal_pport_sm_table, port->sm) ==
		BFA_PPORT_ST_DISABLED;
}

bfa_boolean_t
bfa_pport_is_ratelim(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	return pport->cfg.ratelimit ? BFA_TRUE : BFA_FALSE;
}

void
bfa_pport_cfg_qos(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, pport->cfg.qos_enabled);

	pport->cfg.qos_enabled = on_off;
}

void
bfa_pport_cfg_ratelim(struct bfa_s *bfa, bfa_boolean_t on_off)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, on_off);
	bfa_trc(bfa, pport->cfg.ratelimit);

	pport->cfg.ratelimit = on_off;
	if (pport->cfg.trl_def_speed == BFA_PPORT_SPEED_UNKNOWN)
		pport->cfg.trl_def_speed = BFA_PPORT_SPEED_1GBPS;
}

/**
 * Configure default minimum ratelim speed
 */
bfa_status_t
bfa_pport_cfg_ratelim_speed(struct bfa_s *bfa, enum bfa_pport_speed speed)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, speed);

	/*
	 * Auto and speeds greater than the supported speed are invalid.
	 */
	if ((speed == BFA_PPORT_SPEED_AUTO) || (speed > pport->speed_sup)) {
		bfa_trc(bfa, pport->speed_sup);
		return BFA_STATUS_UNSUPP_SPEED;
	}

	pport->cfg.trl_def_speed = speed;

	return BFA_STATUS_OK;
}

/**
 * Get default minimum ratelim speed
 */
enum bfa_pport_speed
bfa_pport_get_ratelim_speed(struct bfa_s *bfa)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, pport->cfg.trl_def_speed);
	return pport->cfg.trl_def_speed;
}

void
bfa_pport_busy(struct bfa_s *bfa, bfa_boolean_t status)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, status);
	bfa_trc(bfa, pport->diag_busy);

	pport->diag_busy = status;
}

void
bfa_pport_beacon(struct bfa_s *bfa, bfa_boolean_t beacon,
		 bfa_boolean_t link_e2e_beacon)
{
	struct bfa_pport_s *pport = BFA_PORT_MOD(bfa);

	bfa_trc(bfa, beacon);
	bfa_trc(bfa, link_e2e_beacon);
	bfa_trc(bfa, pport->beacon);
	bfa_trc(bfa, pport->link_e2e_beacon);

	pport->beacon = beacon;
	pport->link_e2e_beacon = link_e2e_beacon;
}

bfa_boolean_t
bfa_pport_is_linkup(struct bfa_s *bfa)
{
	return bfa_sm_cmp_state(BFA_PORT_MOD(bfa), bfa_pport_sm_linkup);
}
