/*
 * This file is part of the Chelsio FCoE driver for Linux.
 *
 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 * Redistribution and use in source and binary forms, with or
 * without modification, are permitted provided that the following
 * conditions are met:
 *
 *  - Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 *  - Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport_fc.h>

#include "csio_hw.h"
#include "csio_lnode.h"
#include "csio_rnode.h"
#include "csio_mb.h"
#include "csio_wr.h"

#define csio_mb_is_host_owner(__owner)	((__owner) == CSIO_MBOWNER_PL)

/* MB Command/Response Helpers */
/*
 * csio_mb_fw_retval - FW return value from a mailbox response.
 * @mbp: Mailbox structure
 *
 */
enum fw_retval
csio_mb_fw_retval(struct csio_mb *mbp)
{
	struct fw_cmd_hdr *hdr;

	hdr = (struct fw_cmd_hdr *)(mbp->mb);

	return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
}

/*
 * csio_mb_hello - FW HELLO command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @m_mbox: Master mailbox number, if any.
 * @a_mbox: Mailbox number for async notifications.
 * @master: Device mastership.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	      uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
	      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
				  FW_CMD_REQUEST | FW_CMD_WRITE);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->err_to_clearinit = htonl(
		FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
		FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
		FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
				      m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
		FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
		FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
		FW_HELLO_CMD_CLEARINIT);

}

/*
 * csio_mb_process_hello_rsp - FW HELLO response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @state: State that the function is in.
 * @mpfn: Master pfn
 *
 */
void
csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			  enum fw_retval *retval, enum csio_dev_state *state,
			  uint8_t *mpfn)
{
	struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
	uint32_t value;

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));

	if (*retval == FW_SUCCESS) {
		hw->fwrev = ntohl(rsp->fwrev);

		value = ntohl(rsp->err_to_clearinit);
		*mpfn = FW_HELLO_CMD_MBMASTER_GET(value);

		if (value & FW_HELLO_CMD_INIT)
			*state = CSIO_DEV_STATE_INIT;
		else if (value & FW_HELLO_CMD_ERR)
			*state = CSIO_DEV_STATE_ERR;
		else
			*state = CSIO_DEV_STATE_UNINIT;
	}
}

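/*
 * Illustrative usage (not part of the driver; the argument choices and the
 * CSIO_MB_DEFAULT_TMO / master-mode values shown are assumptions): a
 * polling-mode caller pairs the HELLO helpers above with csio_mb_issue()
 * and then decodes the response in place.
 *
 *	enum fw_retval retval;
 *	enum csio_dev_state state;
 *	uint8_t mpfn;
 *
 *	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, hw->pfn,
 *		      CSIO_MASTER_MUST, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
 */
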
/*
 * csio_mb_bye - FW BYE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
				  FW_CMD_REQUEST | FW_CMD_WRITE);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

}

/*
 * csio_mb_reset - FW RESET command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @reset: Type of reset.
 * @halt: Halt flag.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	      int reset, int halt,
	      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
				  FW_CMD_REQUEST | FW_CMD_WRITE);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->val = htonl(reset);
	cmdp->halt_pkd = htonl(halt);

}

/*
 * csio_mb_params - FW PARAMS command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @pf: PF number.
 * @vf: VF number.
 * @nparams: Number of parameters
 * @params: Parameter mnemonic array.
 * @val: Parameter value array.
 * @wr: Write/Read PARAMS.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	       unsigned int pf, unsigned int vf, unsigned int nparams,
	       const u32 *params, u32 *val, bool wr,
	       void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	uint32_t i;
	uint32_t temp_params = 0, temp_val = 0;
	struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
	__be32 *p = &cmdp->param[0].mnem;

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
				FW_CMD_REQUEST |
				(wr ? FW_CMD_WRITE : FW_CMD_READ) |
				FW_PARAMS_CMD_PFN(pf) |
				FW_PARAMS_CMD_VFN(vf));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

	/* Write Params */
	if (wr) {
		while (nparams--) {
			temp_params = *params++;
			temp_val = *val++;

			*p++ = htonl(temp_params);
			*p++ = htonl(temp_val);
		}
	} else {
		for (i = 0; i < nparams; i++, p += 2) {
			temp_params = *params++;
			*p = htonl(temp_params);
		}
	}

}

/*
 * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @nparams: Number of parameters
 * @val: Parameter value array.
 *
 */
void
csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
				enum fw_retval *retval, unsigned int nparams,
				u32 *val)
{
	struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
	uint32_t i;
	__be32 *p = &rsp->param[0].val;

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));

	if (*retval == FW_SUCCESS)
		for (i = 0; i < nparams; i++, p += 2)
			*val++ = ntohl(*p);
}

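/*
 * Illustrative usage (not part of the driver; the parameter choice and the
 * FW_PARAMS_* encoding shown here are assumptions): reading a single device
 * parameter in polling mode and decoding the response.
 *
 *	enum fw_retval retval;
 *	u32 param[1], val[1];
 *
 *	param[0] = FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) |
 *		   FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_PORTVEC);
 *	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0,
 *		       1, param, val, false, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, val);
 */
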
/*
 * csio_mb_ldst - FW LDST command
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: timeout
 * @reg: register
 *
 */
void
csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
{
	struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
	CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);

	/*
	 * Construct and send the Firmware LDST Command to retrieve the
	 * specified PCI-E Configuration Space register.
	 */
	ldst_cmd->op_to_addrspace =
		htonl(FW_CMD_OP(FW_LDST_CMD) |
		      FW_CMD_REQUEST |
		      FW_CMD_READ |
		      FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
	ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
	ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
	ldst_cmd->u.pcie.ctrl_to_fn =
		(FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
	ldst_cmd->u.pcie.r = (uint8_t)reg;
}

/*
 *
 * csio_mb_caps_config - FW Read/Write Capabilities command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @wr: Write if 1, Read if 0
 * @init: Turn on initiator mode.
 * @tgt: Turn on target mode.
 * @cofld: If 1, Control Offload for FCoE
 * @cbfn: Callback, if any.
 *
 * This helper assumes that cmdp has MB payload from a previous CAPS
 * read command.
 */
void
csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
		    bool wr, bool init, bool tgt, bool cofld,
		    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_caps_config_cmd *cmdp =
		(struct fw_caps_config_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
				  FW_CMD_REQUEST |
				  (wr ? FW_CMD_WRITE : FW_CMD_READ));
	cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

	/* Read config */
	if (!wr)
		return;

	/* Write config */
	cmdp->fcoecaps = 0;

	if (cofld)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
	if (init)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
	if (tgt)
		cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
}

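/*
 * Illustrative usage (not part of the driver; the exact sequence and flag
 * values are assumptions): the write path above expects the mailbox to
 * already hold the payload of a previous CAPS read, so a typical flow is
 * read first, then write back with the desired FCoE capability bits.
 *
 *	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
 *			    false, false, false, false, NULL);	// read caps
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
 *			    true, true, false, true, NULL);	// write back
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 */
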
void
csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
		    uint32_t tmo, uint8_t mode, unsigned int flags,
		    void (*cbfn)(struct csio_hw *, struct csio_mb *))
{
	struct fw_rss_glb_config_cmd *cmdp =
		(struct fw_rss_glb_config_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
				  FW_CMD_REQUEST | FW_CMD_WRITE);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

	if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
		cmdp->u.manual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
	} else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
		cmdp->u.basicvirtual.mode_pkd =
			htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
		cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
	}
}


/*
 * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @pf: PF number.
 * @vf: VF number.
 * @txq: Max number of egress queues.
 * @txq_eth_ctrl: Max number of Ethernet control egress queues.
 * @rxqi: Max number of interrupt-capable ingress queues.
 * @rxq: Max number of interruptless ingress queues.
 * @tc: Max number of traffic classes.
 * @vi: Max number of virtual interfaces.
 * @cmask: Channel access rights mask.
 * @pmask: Port access rights mask.
 * @nexactf: Max number of exact MAC address filters.
 * @rcaps: Read capabilities.
 * @wxcaps: Write/execute capabilities.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	     unsigned int pf, unsigned int vf, unsigned int txq,
	     unsigned int txq_eth_ctrl, unsigned int rxqi,
	     unsigned int rxq, unsigned int tc, unsigned int vi,
	     unsigned int cmask, unsigned int pmask, unsigned int nexactf,
	     unsigned int rcaps, unsigned int wxcaps,
	     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
				FW_CMD_REQUEST |
				FW_CMD_WRITE |
				FW_PFVF_CMD_PFN(pf) |
				FW_PFVF_CMD_VFN(vf));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
				   FW_PFVF_CMD_NIQ(rxq));

	cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
				  FW_PFVF_CMD_CMASK(cmask) |
				  FW_PFVF_CMD_PMASK(pmask) |
				  FW_PFVF_CMD_NEQ(txq));
	cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
				    FW_PFVF_CMD_NVI(vi) |
				    FW_PFVF_CMD_NEXACTF(nexactf));
	cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
					 FW_PFVF_CMD_WX_CAPS(wxcaps) |
					 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
}

#define CSIO_ADVERT_MASK	(FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
				 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)

/*
 * csio_mb_port - FW PORT command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @portid: Port ID to get/set info
 * @wr: Write/Read PORT information.
 * @fc: Flow control
 * @caps: Port capabilities to set.
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
	     uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
	     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
	unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
				   FW_CMD_REQUEST |
				   (wr ? FW_CMD_EXEC : FW_CMD_READ) |
				   FW_PORT_CMD_PORTID(portid));
	if (!wr) {
		cmdp->action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
			FW_CMD_LEN16(sizeof(*cmdp) / 16));
		return;
	}

	/* Set port */
	cmdp->action_to_len16 = htonl(
			FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
			FW_CMD_LEN16(sizeof(*cmdp) / 16));

	if (fc & PAUSE_RX)
		lfc |= FW_PORT_CAP_FC_RX;
	if (fc & PAUSE_TX)
		lfc |= FW_PORT_CAP_FC_TX;

	if (!(caps & FW_PORT_CAP_ANEG))
		cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
	else
		cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
					   lfc | mdi);
}

/*
 * csio_mb_process_read_port_rsp - FW PORT command response processing helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @retval: Mailbox return value from Firmware
 * @caps: port capabilities
 *
 */
void
csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			      enum fw_retval *retval, uint16_t *caps)
{
	struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));

	if (*retval == FW_SUCCESS)
		*caps = ntohs(rsp->u.info.pcap);
}

/*
 * csio_mb_initialize - FW INITIALIZE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @cbfn: Callback, if any.
 *
 */
void
csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
		   void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);

	cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
				  FW_CMD_REQUEST | FW_CMD_WRITE);
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

}

/*
 * csio_mb_iq_alloc - Initializes the mailbox to allocate an
 *			Ingress DMA queue in the firmware.
 *
 * @hw: The hw structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private object
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Ingress queue params needed for allocation.
 * @cbfn: The call-back function
 *
 *
 */
static void
csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		 uint32_t mb_tmo, struct csio_iq_params *iq_params,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
				FW_CMD_REQUEST | FW_CMD_EXEC |
				FW_IQ_CMD_PFN(iq_params->pfn) |
				FW_IQ_CMD_VFN(iq_params->vfn));

	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));

	cmdp->type_to_iqandstindex = htonl(
				FW_IQ_CMD_VIID(iq_params->viid) |
				FW_IQ_CMD_TYPE(iq_params->type) |
				FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));

	cmdp->fl0size = htons(iq_params->fl0size);
	cmdp->fl1size = htons(iq_params->fl1size);

} /* csio_mb_iq_alloc */

/*
 * csio_mb_iq_write - Initializes the mailbox for writing into an
 *			Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private object
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with iq-alloc request.
 * @iq_params: Ingress queue params needed for writing.
 * @cbfn: The call-back function
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this IQ write request can be cascaded with a previous
 * IQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
 */
static void
csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		 uint32_t mb_tmo, bool cascaded_req,
		 struct csio_iq_params *iq_params,
		 void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	uint32_t iq_start_stop = (iq_params->iq_start) ?
					FW_IQ_CMD_IQSTART(1) :
					FW_IQ_CMD_IQSTOP(1);

	/*
	 * If this IQ write is cascaded with IQ alloc request, do not
	 * re-initialize with 0's.
	 *
	 */
	if (!cascaded_req)
		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
				 FW_CMD_REQUEST | FW_CMD_WRITE |
				 FW_IQ_CMD_PFN(iq_params->pfn) |
				 FW_IQ_CMD_VFN(iq_params->vfn));
	cmdp->alloc_to_len16 |= htonl(iq_start_stop |
				      FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->iqid |= htons(iq_params->iqid);
	cmdp->fl0id |= htons(iq_params->fl0id);
	cmdp->fl1id |= htons(iq_params->fl1id);
	cmdp->type_to_iqandstindex |= htonl(
			FW_IQ_CMD_IQANDST(iq_params->iqandst) |
			FW_IQ_CMD_IQANUS(iq_params->iqanus) |
			FW_IQ_CMD_IQANUD(iq_params->iqanud) |
			FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
	cmdp->iqdroprss_to_iqesize |= htons(
			FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
			FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
			FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
			FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
			FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
			FW_IQ_CMD_IQESIZE(iq_params->iqesize));

	cmdp->iqsize |= htons(iq_params->iqsize);
	cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);

	if (iq_params->type == 0) {
		cmdp->iqns_to_fl0congen |= htonl(
			FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
			FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
	}

	if (iq_params->fl0size && iq_params->fl0addr &&
	    (iq_params->fl0id != 0xFFFF)) {

		cmdp->iqns_to_fl0congen |= htonl(
			FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
			FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
			FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
			FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
		cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
			FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
			FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
			FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
			FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
			FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
		cmdp->fl0size |= htons(iq_params->fl0size);
		cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
	}
} /* csio_mb_iq_write */

/*
 * csio_mb_iq_alloc_write - Initializes the mailbox for allocating an
 *			Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Ingress queue params needed for allocation & writing.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		       uint32_t mb_tmo, struct csio_iq_params *iq_params,
		       void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
	csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
} /* csio_mb_iq_alloc_write */

/*
 * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
 *			of ingress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @iq_params: Ingress queue parameters, after allocation and write.
 *
 */
void
csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
			   enum fw_retval *ret_val,
			   struct csio_iq_params *iq_params)
{
	struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
	if (*ret_val == FW_SUCCESS) {
		iq_params->physiqid = ntohs(rsp->physiqid);
		iq_params->iqid = ntohs(rsp->iqid);
		iq_params->fl0id = ntohs(rsp->fl0id);
		iq_params->fl1id = ntohs(rsp->fl1id);
	} else {
		iq_params->physiqid = iq_params->iqid =
			iq_params->fl0id = iq_params->fl1id = 0;
	}
} /* csio_mb_iq_alloc_write_rsp */

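/*
 * Illustrative usage (not part of the driver; the iq_params values shown
 * are assumptions): allocating and starting an ingress queue with one
 * cascaded mailbox, then pulling the FW-assigned IDs out of the response.
 *
 *	struct csio_iq_params iqp = { };
 *	enum fw_retval retval;
 *
 *	iqp.pfn = hw->pfn;
 *	iqp.iq_start = 1;
 *	iqp.iqsize = iq_len;
 *	iqp.iqaddr = iq_dma_addr;
 *	csio_mb_iq_alloc_write(hw, mbp, hw, CSIO_MB_DEFAULT_TMO, &iqp, NULL);
 *	if (csio_mb_issue(hw, mbp))
 *		return;
 *	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, &iqp);
 *
 * On FW_SUCCESS, iqp.iqid and iqp.physiqid hold the FW-assigned queue IDs.
 */
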
/*
 * csio_mb_iq_free - Initializes the mailbox for freeing a
 *			specified Ingress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @iq_params: Parameters of ingress queue, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		uint32_t mb_tmo, struct csio_iq_params *iq_params,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
				FW_CMD_REQUEST | FW_CMD_EXEC |
				FW_IQ_CMD_PFN(iq_params->pfn) |
				FW_IQ_CMD_VFN(iq_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));

	cmdp->iqid = htons(iq_params->iqid);
	cmdp->fl0id = htons(iq_params->fl0id);
	cmdp->fl1id = htons(iq_params->fl1id);

} /* csio_mb_iq_free */

/*
 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
 *			an offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 */
static void
csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		      uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
				FW_CMD_REQUEST | FW_CMD_EXEC |
				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_mb_eq_ofld_alloc */

/*
 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
 *			an allocated offload-egress queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with Eq-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we don't want to over-write the bits set by
 * that request. This logic will work even in a non-cascaded case, since the
 * cmdp structure is zeroed out by CSIO_INIT_MBP.
 */
static void
csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		      uint32_t mb_tmo, bool cascaded_req,
		      struct csio_eq_params *eq_ofld_params,
		      void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
				 FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;

	/*
	 * If this EQ write is cascaded with EQ alloc request, do not
	 * re-initialize with 0's.
	 *
	 */
	if (!cascaded_req)
		CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
				 FW_CMD_REQUEST | FW_CMD_WRITE |
				 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
				 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 |= htonl(eq_start_stop |
				      FW_CMD_LEN16(sizeof(*cmdp) / 16));

	cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));

	cmdp->fetchszm_to_iqid |= htonl(
		FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
		FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
		FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
		FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));

	cmdp->dcaen_to_eqsize |= htonl(
		FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
		FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
		FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
		FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
		FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
		FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
		FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));

	cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);

} /* csio_mb_eq_ofld_write */

/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
 *			writing into an Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
			    void *priv, uint32_t mb_tmo,
			    struct csio_eq_params *eq_ofld_params,
			    void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
	csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
			      eq_ofld_params, cbfn);
} /* csio_mb_eq_ofld_alloc_write */

/*
 * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
 *			& write egress DMA queue mailbox's response.
 *
 * @hw: The HW structure.
 * @mbp: Mailbox structure to initialize.
 * @retval: Firmware return value.
 * @eq_ofld_params: (Offload) Egress queue parameters.
 *
 */
void
csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
				struct csio_mb *mbp, enum fw_retval *ret_val,
				struct csio_eq_params *eq_ofld_params)
{
	struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	*ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));

	if (*ret_val == FW_SUCCESS) {
		eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
						ntohl(rsp->eqid_pkd));
		eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
						ntohl(rsp->physeqid_pkd));
	} else
		eq_ofld_params->eqid = 0;

} /* csio_mb_eq_ofld_alloc_write_rsp */

/*
 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 *			specified Egress DMA Queue.
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @priv: Private data area.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters, that is to be freed.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
		     uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
		     void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);

	cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
				FW_CMD_REQUEST | FW_CMD_EXEC |
				FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
				FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
	cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));

} /* csio_mb_eq_ofld_free */

/*
 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
 *			condition.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @port_id: Port ID.
 * @sub_opcode: Sub opcode of the FCoE link command.
 * @cos: Class of service.
 * @link_status: Link status (up/down).
 * @fcfi: FCF index.
 * @cbfn: The call-back function.
 *
 *
 */
void
csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
			uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
			uint8_t cos, bool link_status, uint32_t fcfi,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_link_cmd *cmdp =
				(struct fw_fcoe_link_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_portid = htonl((
			FW_CMD_OP(FW_FCOE_LINK_CMD) |
			FW_CMD_REQUEST |
			FW_CMD_WRITE |
			FW_FCOE_LINK_CMD_PORTID(port_id)));
	cmdp->sub_opcode_fcfi = htonl(
			FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
			FW_FCOE_LINK_CMD_FCFI(fcfi));
	cmdp->lstatus = link_status;
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_write_fcoe_link_cond_init_mb */

/*
 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 *			resource information (FW_FCOE_RES_INFO_CMD).
 *
 * @hw: The HW structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
			uint32_t mb_tmo,
			void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_res_info_cmd *cmdp =
			(struct fw_fcoe_res_info_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);

	cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
				  FW_CMD_REQUEST |
				  FW_CMD_READ));

	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_res_info_init_mb */

/*
 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
 *			in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @iqid: iqid
 * @vnport_wwnn: vnport WWNN
 * @vnport_wwpn: vnport WWPN
 * @cbfn: The call-back function.
 *
 *
 */
void
csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
		uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
				  FW_CMD_REQUEST |
				  FW_CMD_EXEC |
				  FW_FCOE_VNP_CMD_FCFI(fcfi)));

	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));

	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));

	cmdp->iqid = htons(iqid);

	if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
		cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);

	if (vnport_wwnn)
		memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
	if (vnport_wwpn)
		memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);

} /* csio_fcoe_vnp_alloc_init_mb */

/*
 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF Index.
 * @vnpi: vnpi
 * @cbfn: The call-back handler.
 */
void
csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
				 FW_CMD_REQUEST |
				 FW_CMD_READ |
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 *			allocated VNP in the firmware (FW_FCOE_VNP_CMD).
 *
 * @ln: The Lnode structure.
 * @mbp: Mailbox structure to initialize.
 * @mb_tmo: Mailbox time-out period (in ms).
 * @fcfi: FCF flow id
 * @vnpi: VNP flow id
 * @cbfn: The call-back function.
 * Return: None
 */
void
csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_vnp_cmd *cmdp =
			(struct fw_fcoe_vnp_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
				 FW_CMD_REQUEST |
				 FW_CMD_EXEC |
				 FW_FCOE_VNP_CMD_FCFI(fcfi));
	cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
				     FW_CMD_LEN16(sizeof(*cmdp) / 16));
	cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
}

/*
 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
 *			FCF records.
 *
 * @ln: The Lnode structure
 * @mbp: Mailbox structure to initialize
 * @mb_tmo: Mailbox time-out period (in ms).
 * @portid: Port ID.
 * @fcfi: FCF index.
 * @cbfn: The call-back function
 *
 *
 */
void
csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
		uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
		void (*cbfn) (struct csio_hw *, struct csio_mb *))
{
	struct fw_fcoe_fcf_cmd *cmdp =
			(struct fw_fcoe_fcf_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);

	cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
				 FW_CMD_REQUEST |
				 FW_CMD_READ |
				 FW_FCOE_FCF_CMD_FCFI(fcfi));
	cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));

} /* csio_fcoe_read_fcf_init_mb */

void
csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
				uint32_t mb_tmo,
				struct fw_fcoe_port_cmd_params *portparams,
				void (*cbfn)(struct csio_hw *,
					     struct csio_mb *))
{
	struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);

	CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
	mbp->mb_size = 64;

	cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
				   FW_CMD_REQUEST | FW_CMD_READ);
	cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));

	cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
				  FW_FCOE_STATS_CMD_PORT(portparams->portid);

	cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
				    FW_FCOE_STATS_CMD_PORT_VALID;

} /* csio_fcoe_read_portparams_init_mb */

void
csio_mb_process_portparams_rsp(
	struct csio_hw *hw,
	struct csio_mb *mbp,
	enum fw_retval *retval,
	struct fw_fcoe_port_cmd_params *portparams,
	struct fw_fcoe_port_stats *portstats
	)
{
	struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
	struct fw_fcoe_port_stats stats;
	uint8_t *src;
	uint8_t *dst;

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));

	memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));

	if (*retval == FW_SUCCESS) {
		dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
		src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
		memcpy(dst, src, (portparams->nstats * 8));
		if (portparams->idx == 1) {
			/* Get the first 6 flits from the Mailbox */
			portstats->tx_bcast_bytes =
					be64_to_cpu(stats.tx_bcast_bytes);
			portstats->tx_bcast_frames =
					be64_to_cpu(stats.tx_bcast_frames);
			portstats->tx_mcast_bytes =
					be64_to_cpu(stats.tx_mcast_bytes);
			portstats->tx_mcast_frames =
					be64_to_cpu(stats.tx_mcast_frames);
			portstats->tx_ucast_bytes =
					be64_to_cpu(stats.tx_ucast_bytes);
			portstats->tx_ucast_frames =
					be64_to_cpu(stats.tx_ucast_frames);
		}
		if (portparams->idx == 7) {
			/* Get the second 6 flits from the Mailbox */
			portstats->tx_drop_frames =
					be64_to_cpu(stats.tx_drop_frames);
			portstats->tx_offload_bytes =
					be64_to_cpu(stats.tx_offload_bytes);
			portstats->tx_offload_frames =
					be64_to_cpu(stats.tx_offload_frames);
#if 0
			portstats->rx_pf_bytes =
					be64_to_cpu(stats.rx_pf_bytes);
			portstats->rx_pf_frames =
					be64_to_cpu(stats.rx_pf_frames);
#endif
			portstats->rx_bcast_bytes =
					be64_to_cpu(stats.rx_bcast_bytes);
			portstats->rx_bcast_frames =
					be64_to_cpu(stats.rx_bcast_frames);
			portstats->rx_mcast_bytes =
					be64_to_cpu(stats.rx_mcast_bytes);
		}
		if (portparams->idx == 13) {
			/* Get the last 4 flits from the Mailbox */
			portstats->rx_mcast_frames =
					be64_to_cpu(stats.rx_mcast_frames);
			portstats->rx_ucast_bytes =
					be64_to_cpu(stats.rx_ucast_bytes);
			portstats->rx_ucast_frames =
					be64_to_cpu(stats.rx_ucast_frames);
			portstats->rx_err_frames =
					be64_to_cpu(stats.rx_err_frames);
		}
	}
}

/* Entry points/APIs for MB module */
/*
 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
 */
void
csio_mb_intr_enable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}

/*
 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
 * @hw: The HW structure
 *
 * Disable bit in HostInterruptEnable CIM register.
 */
void
csio_mb_intr_disable(struct csio_hw *hw)
{
	csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
	csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
}

static void
csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
{
	struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;

	if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
		csio_info(hw, "FW print message:\n");
		csio_info(hw, "\tdebug->dprtstridx = %d\n",
			  ntohs(dbg->u.prt.dprtstridx));
		csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam0));
		csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam1));
		csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam2));
		csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
			  ntohl(dbg->u.prt.dprtstrparam3));
	} else {
		/* This is a FW assertion */
		csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
			   dbg->u.assert.filename_0_7,
			   ntohl(dbg->u.assert.line),
			   ntohl(dbg->u.assert.x),
			   ntohl(dbg->u.assert.y));
	}
}

static void
csio_mb_debug_cmd_handler(struct csio_hw *hw)
{
	int i;
	__be64 cmd[CSIO_MB_MAX_REGS];
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int size = sizeof(struct fw_debug_cmd);

	/* Copy mailbox data */
	for (i = 0; i < size; i += 8)
		cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));

	csio_mb_dump_fw_dbg(hw, cmd);

	/* Notify FW of mailbox by setting owner as UP */
	csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
		      ctl_reg);

	csio_rd_reg32(hw, ctl_reg);
	wmb();
}

/*
 * csio_mb_issue - generic routine for issuing Mailbox commands.
 * @hw: The HW structure
 * @mbp: Mailbox command to issue
 *
 * Caller should hold hw lock across this call.
 */
int
csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
{
	uint32_t owner, ctl;
	int i;
	uint32_t ii;
	__be64 *cmd = mbp->mb;
	__be64 hdr;
	struct csio_mbm *mbm = &hw->mbm;
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int size = mbp->mb_size;
	int rv = -EINVAL;
	struct fw_cmd_hdr *fw_hdr;

	/* Determine mode */
	if (mbp->mb_cbfn == NULL) {
		/* Need to issue/get results in the same context */
		if (mbp->tmo < CSIO_MB_POLL_FREQ) {
			csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
			goto error_out;
		}
	} else if (!csio_is_host_intr_enabled(hw) ||
		   !csio_is_hw_intr_enabled(hw)) {
		csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
			 *((uint8_t *)mbp->mb));
		goto error_out;
	}

	if (mbm->mcurrent != NULL) {
		/* Queue mbox cmd, if another mbox cmd is active */
		if (mbp->mb_cbfn == NULL) {
			rv = -EBUSY;
			csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
				 hw->pfn, *((uint8_t *)mbp->mb));

			goto error_out;
		} else {
			list_add_tail(&mbp->list, &mbm->req_q);
			CSIO_INC_STATS(mbm, n_activeq);

			return 0;
		}
	}

	/* Now get ownership of mailbox */
	owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));

	if (!csio_mb_is_host_owner(owner)) {

		for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
			owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
		/*
		 * Mailbox unavailable. In immediate mode, fail the command.
		 * In other modes, enqueue the request.
		 */
		if (!csio_mb_is_host_owner(owner)) {
			if (mbp->mb_cbfn == NULL) {
				rv = owner ? -EBUSY : -ETIMEDOUT;

				csio_dbg(hw,
					 "Couldn't own Mailbox %x op:0x%x "
					 "owner:%x\n",
					 hw->pfn, *((uint8_t *)mbp->mb), owner);
				goto error_out;
			} else {
				if (mbm->mcurrent == NULL) {
					csio_err(hw,
						 "Couldn't own Mailbox %x "
						 "op:0x%x owner:%x\n",
						 hw->pfn, *((uint8_t *)mbp->mb),
						 owner);
					csio_err(hw,
						 "No outstanding driver"
						 " mailbox as well\n");
					goto error_out;
				}
			}
		}
	}

	/* Mailbox is available, copy mailbox data into it */
	for (i = 0; i < size; i += 8) {
		csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
		cmd++;
	}

	CSIO_DUMP_MB(hw, hw->pfn, data_reg);

	/* Start completion timers in non-immediate modes and notify FW */
	if (mbp->mb_cbfn != NULL) {
		mbm->mcurrent = mbp;
		mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
		csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
			      MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
	} else
		csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
			      ctl_reg);

	/* Flush posted writes */
	csio_rd_reg32(hw, ctl_reg);
	wmb();

	CSIO_INC_STATS(mbm, n_req);

	if (mbp->mb_cbfn)
		return 0;

	/* Poll for completion in immediate mode */
	cmd = mbp->mb;

	for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
		mdelay(CSIO_MB_POLL_FREQ);

		/* Check for response */
		ctl = csio_rd_reg32(hw, ctl_reg);
		if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {

			if (!(ctl & MBMSGVALID)) {
				csio_wr_reg32(hw, 0, ctl_reg);
				continue;
			}

			CSIO_DUMP_MB(hw, hw->pfn, data_reg);

			hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
			fw_hdr = (struct fw_cmd_hdr *)&hdr;

			switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
			case FW_DEBUG_CMD:
				csio_mb_debug_cmd_handler(hw);
				continue;
			}

			/* Copy response */
			for (i = 0; i < size; i += 8)
				*cmd++ = cpu_to_be64(csio_rd_reg64
							(hw, data_reg + i));
			csio_wr_reg32(hw, 0, ctl_reg);

			if (FW_CMD_RETVAL_GET(*(mbp->mb)))
				CSIO_INC_STATS(mbm, n_err);

			CSIO_INC_STATS(mbm, n_rsp);
			return 0;
		}
	}

	CSIO_INC_STATS(mbm, n_tmo);

	csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
		 hw->pfn, *((uint8_t *)cmd));

	return -ETIMEDOUT;

error_out:
	CSIO_INC_STATS(mbm, n_err);
	return rv;
}

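/*
 * Illustrative usage (not part of the driver; the callback name and the
 * mempool details are assumptions): in interrupt mode a non-NULL completion
 * callback is passed to the command helper, csio_mb_issue() returns
 * immediately, and the callback later runs from csio_mb_completions() with
 * the FW response already copied into mbp->mb.
 *
 *	static void my_bye_cbfn(struct csio_hw *hw, struct csio_mb *mbp)
 *	{
 *		if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
 *			csio_err(hw, "BYE failed\n");
 *		mempool_free(mbp, hw->mb_mempool);
 *	}
 *
 *	csio_mb_bye(hw, mbp, CSIO_MB_DEFAULT_TMO, my_bye_cbfn);
 *	if (csio_mb_issue(hw, mbp))		// hw lock held by caller
 *		mempool_free(mbp, hw->mb_mempool);
 */
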
/*
 * csio_mb_completions - Completion handler for Mailbox commands
 * @hw: The HW structure
 * @cbfn_q: Completion queue.
 *
 */
void
csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	enum fw_retval rv;

	while (!list_empty(cbfn_q)) {
		mbp = list_first_entry(cbfn_q, struct csio_mb, list);
		list_del_init(&mbp->list);

		rv = csio_mb_fw_retval(mbp);
		if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
			CSIO_INC_STATS(mbm, n_err);
		else if (rv != FW_HOSTERROR)
			CSIO_INC_STATS(mbm, n_rsp);

		if (mbp->mb_cbfn)
			mbp->mb_cbfn(hw, mbp);
	}
}

static void
csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
{
	static char *mod_str[] = {
		NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
	};

	struct csio_pport *port = &hw->pport[port_id];

	if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
		csio_info(hw, "Port:%d - port module unplugged\n", port_id);
	else if (port->mod_type < ARRAY_SIZE(mod_str))
		csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
			  mod_str[port->mod_type]);
	else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
		csio_info(hw,
			  "Port:%d - unsupported optical port module "
			  "inserted\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
		csio_info(hw,
			  "Port:%d - unknown port module inserted, forcing "
			  "TWINAX\n", port_id);
	else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
		csio_info(hw, "Port:%d - transceiver module error\n", port_id);
	else
		csio_info(hw, "Port:%d - unknown module type %d inserted\n",
			  port_id, port->mod_type);
}

int
csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
{
	uint8_t opcode = *(uint8_t *)cmd;
	struct fw_port_cmd *pcmd;
	uint8_t port_id;
	uint32_t link_status;
	uint16_t action;
	uint8_t mod_type;

	if (opcode == FW_PORT_CMD) {
		pcmd = (struct fw_port_cmd *)cmd;
		port_id = FW_PORT_CMD_PORTID_GET(
				ntohl(pcmd->op_to_portid));
		action = FW_PORT_CMD_ACTION_GET(
				ntohl(pcmd->action_to_len16));
		if (action != FW_PORT_ACTION_GET_PORT_INFO) {
			csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
				 action);
			return -EINVAL;
		}

		link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
		mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);

		hw->pport[port_id].link_status =
			FW_PORT_CMD_LSTATUS_GET(link_status);
		hw->pport[port_id].link_speed =
			FW_PORT_CMD_LSPEED_GET(link_status);

		csio_info(hw, "Port:%x - LINK %s\n", port_id,
			  FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");

		if (mod_type != hw->pport[port_id].mod_type) {
			hw->pport[port_id].mod_type = mod_type;
			csio_mb_portmod_changed(hw, port_id);
		}
	} else if (opcode == FW_DEBUG_CMD) {
		csio_mb_dump_fw_dbg(hw, cmd);
	} else {
		csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
		return -EINVAL;
	}

	return 0;
}

/*
 * csio_mb_isr_handler - Handle mailboxes related interrupts.
 * @hw: The HW structure
 *
 * Called from the ISR to handle Mailbox related interrupts.
 * HW Lock should be held across this call.
 */
int
csio_mb_isr_handler(struct csio_hw *hw)
{
	struct csio_mbm *mbm = &hw->mbm;
	struct csio_mb *mbp = mbm->mcurrent;
	__be64 *cmd;
	uint32_t ctl, cim_cause, pl_cause;
	int i;
	uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
	uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
	int size;
	__be64 hdr;
	struct fw_cmd_hdr *fw_hdr;

	pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
	cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));

	if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
		CSIO_INC_STATS(hw, n_mbint_unexp);
		return -EINVAL;
	}

	/*
	 * The cause registers below HAVE to be cleared in the SAME
	 * order as below: The low level cause register followed by
	 * the upper level cause register. In other words, CIM-cause
	 * first followed by PL-Cause next.
	 */
	csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
	csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));

	ctl = csio_rd_reg32(hw, ctl_reg);

	if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {

		CSIO_DUMP_MB(hw, hw->pfn, data_reg);

		if (!(ctl & MBMSGVALID)) {
			csio_warn(hw,
				  "Stray mailbox interrupt received,"
				  " mailbox data not valid\n");
			csio_wr_reg32(hw, 0, ctl_reg);
			/* Flush */
			csio_rd_reg32(hw, ctl_reg);
			return -EINVAL;
		}

		hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
		fw_hdr = (struct fw_cmd_hdr *)&hdr;

		switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
		case FW_DEBUG_CMD:
			csio_mb_debug_cmd_handler(hw);
			return -EINVAL;
#if 0
		case FW_ERROR_CMD:
		case FW_INITIALIZE_CMD: /* When we are not master */
#endif
		}

		CSIO_ASSERT(mbp != NULL);

		cmd = mbp->mb;
		size = mbp->mb_size;
		/* Get response */
		for (i = 0; i < size; i += 8)
			*cmd++ = cpu_to_be64(csio_rd_reg64
						(hw, data_reg + i));

		csio_wr_reg32(hw, 0, ctl_reg);
		/* Flush */
		csio_rd_reg32(hw, ctl_reg);

		mbm->mcurrent = NULL;

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, &mbm->cbfn_q);
		CSIO_INC_STATS(mbm, n_cbfnq);

		/*
		 * Enqueue event to EventQ. Events processing happens
		 * in Event worker thread context
		 */
		if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
			CSIO_INC_STATS(hw, n_evt_drop);

		return 0;

	} else {
		/*
		 * We can get here if mailbox MSIX vector is shared,
		 * or in INTx case. Or a stray interrupt.
		 */
		csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
		CSIO_INC_STATS(hw, n_int_stray);
		return -EINVAL;
	}
}

/*
 * csio_mb_tmo_handler - Timeout handler
 * @hw: The HW structure
 *
 */
struct csio_mb *
csio_mb_tmo_handler(struct csio_hw *hw)
{
	struct csio_mbm *mbm = &hw->mbm;
	struct csio_mb *mbp = mbm->mcurrent;
	struct fw_cmd_hdr *fw_hdr;

	/*
	 * Could be a race between the completion handler and the timer,
	 * and the completion handler won that race.
	 */
	if (mbp == NULL) {
		CSIO_DB_ASSERT(0);
		return NULL;
	}

	fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);

	csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
		 FW_CMD_OP_GET(ntohl(fw_hdr->hi)));

	mbm->mcurrent = NULL;
	CSIO_INC_STATS(mbm, n_tmo);
	fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));

	return mbp;
}

/*
 * csio_mb_cancel_all - Cancel all waiting commands.
 * @hw: The HW structure
 * @cbfn_q: The callback queue.
 *
 * Caller should hold hw lock across this call.
 */
void
csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
{
	struct csio_mb *mbp;
	struct csio_mbm *mbm = &hw->mbm;
	struct fw_cmd_hdr *hdr;
	struct list_head *tmp;

	if (mbm->mcurrent) {
		mbp = mbm->mcurrent;

		/* Stop mailbox completion timer */
		del_timer_sync(&mbm->timer);

		/* Add completion to tail of cbfn queue */
		list_add_tail(&mbp->list, cbfn_q);
		mbm->mcurrent = NULL;
	}

	if (!list_empty(&mbm->req_q)) {
		list_splice_tail_init(&mbm->req_q, cbfn_q);
		mbm->stats.n_activeq = 0;
	}

	if (!list_empty(&mbm->cbfn_q)) {
		list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
		mbm->stats.n_cbfnq = 0;
	}

	if (list_empty(cbfn_q))
		return;

	list_for_each(tmp, cbfn_q) {
		mbp = (struct csio_mb *)tmp;
		hdr = (struct fw_cmd_hdr *)(mbp->mb);

		csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
			 hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));

		CSIO_INC_STATS(mbm, n_cancel);
		hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
	}
}

/*
 * csio_mbm_init - Initialize Mailbox module
 * @mbm: Mailbox module
 * @hw: The HW structure
 * @timer_fn: Timer function to handle mailbox time-outs
 *
 * Initialize timer and the request/response queues.
 */
int
csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
	      void (*timer_fn)(uintptr_t))
{
	struct timer_list *timer = &mbm->timer;

	init_timer(timer);
	timer->function = timer_fn;
	timer->data = (unsigned long)hw;

	INIT_LIST_HEAD(&mbm->req_q);
	INIT_LIST_HEAD(&mbm->cbfn_q);
	csio_set_mb_intr_idx(mbm, -1);

	return 0;
}

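/*
 * Illustrative timer handler (not part of the driver; the function name and
 * the locking details are assumptions): the timer_fn registered through
 * csio_mbm_init() is expected to pick up the timed-out mailbox via
 * csio_mb_tmo_handler() and run its completion, e.g. via
 * csio_mb_completions().
 *
 *	static void my_mb_timer_fn(uintptr_t data)
 *	{
 *		struct csio_hw *hw = (struct csio_hw *)data;
 *		struct csio_mb *mbp;
 *		LIST_HEAD(cbfn_q);
 *
 *		spin_lock_irq(&hw->lock);
 *		mbp = csio_mb_tmo_handler(hw);
 *		spin_unlock_irq(&hw->lock);
 *
 *		if (mbp) {
 *			list_add_tail(&mbp->list, &cbfn_q);
 *			csio_mb_completions(hw, &cbfn_q);
 *		}
 *	}
 */
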
/*
 * csio_mbm_exit - Uninitialize mailbox module
 * @mbm: Mailbox module
 *
 * Stop timer.
 */
void
csio_mbm_exit(struct csio_mbm *mbm)
{
	del_timer_sync(&mbm->timer);

	CSIO_DB_ASSERT(mbm->mcurrent == NULL);
	CSIO_DB_ASSERT(list_empty(&mbm->req_q));
	CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
}