/*
2 * This file is part of the Chelsio FCoE driver for Linux.
3 *
4 * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved.
5 *
6 * This software is available to you under a choice of one of two
7 * licenses. You may choose to be licensed under the terms of the GNU
8 * General Public License (GPL) Version 2, available from the file
9 * COPYING in the main directory of this source tree, or the
10 * OpenIB.org BSD license below:
11 *
12 * Redistribution and use in source and binary forms, with or
13 * without modification, are permitted provided that the following
14 * conditions are met:
15 *
16 * - Redistributions of source code must retain the above
17 * copyright notice, this list of conditions and the following
18 * disclaimer.
19 *
20 * - Redistributions in binary form must reproduce the above
21 * copyright notice, this list of conditions and the following
22 * disclaimer in the documentation and/or other materials
23 * provided with the distribution.
24 *
25 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
26 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
27 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
28 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
29 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
30 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
31 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
32 * SOFTWARE.
33 */
34
35#include <linux/delay.h>
36#include <linux/jiffies.h>
37#include <linux/string.h>
38#include <scsi/scsi_device.h>
39#include <scsi/scsi_transport_fc.h>
40
41#include "csio_hw.h"
42#include "csio_lnode.h"
43#include "csio_rnode.h"
44#include "csio_mb.h"
45#include "csio_wr.h"
46
47#define csio_mb_is_host_owner(__owner) ((__owner) == CSIO_MBOWNER_PL)
48
49/* MB Command/Response Helpers */
50/*
51 * csio_mb_fw_retval - FW return value from a mailbox response.
52 * @mbp: Mailbox structure
53 *
54 */
55enum fw_retval
56csio_mb_fw_retval(struct csio_mb *mbp)
57{
58 struct fw_cmd_hdr *hdr;
59
60 hdr = (struct fw_cmd_hdr *)(mbp->mb);
61
62 return FW_CMD_RETVAL_GET(ntohl(hdr->lo));
63}
64
/*
 * csio_mb_hello - FW HELLO command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @m_mbox: Master mailbox number, if any.
 * @a_mbox: Mailbox number for async notifications.
 * @master: Device mastership.
 * @cbfn: Callback, if any.
 *
 */
75void
76csio_mb_hello(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
77 uint32_t m_mbox, uint32_t a_mbox, enum csio_dev_master master,
78 void (*cbfn) (struct csio_hw *, struct csio_mb *))
79{
80 struct fw_hello_cmd *cmdp = (struct fw_hello_cmd *)(mbp->mb);
81
82 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
83
84 cmdp->op_to_write = htonl(FW_CMD_OP(FW_HELLO_CMD) |
85 FW_CMD_REQUEST | FW_CMD_WRITE);
86 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
87 cmdp->err_to_clearinit = htonl(
88 FW_HELLO_CMD_MASTERDIS(master == CSIO_MASTER_CANT) |
89 FW_HELLO_CMD_MASTERFORCE(master == CSIO_MASTER_MUST) |
90 FW_HELLO_CMD_MBMASTER(master == CSIO_MASTER_MUST ?
91 m_mbox : FW_HELLO_CMD_MBMASTER_MASK) |
92 FW_HELLO_CMD_MBASYNCNOT(a_mbox) |
93 FW_HELLO_CMD_STAGE(fw_hello_cmd_stage_os) |
94 FW_HELLO_CMD_CLEARINIT);
95
96}
97
98/*
99 * csio_mb_process_hello_rsp - FW HELLO response processing helper
100 * @hw: The HW structure
101 * @mbp: Mailbox structure
102 * @retval: Mailbox return value from Firmware
103 * @state: State that the function is in.
104 * @mpfn: Master pfn
105 *
106 */
107void
108csio_mb_process_hello_rsp(struct csio_hw *hw, struct csio_mb *mbp,
109 enum fw_retval *retval, enum csio_dev_state *state,
110 uint8_t *mpfn)
111{
112 struct fw_hello_cmd *rsp = (struct fw_hello_cmd *)(mbp->mb);
113 uint32_t value;
114
115 *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
116
117 if (*retval == FW_SUCCESS) {
118 hw->fwrev = ntohl(rsp->fwrev);
119
120 value = ntohl(rsp->err_to_clearinit);
121 *mpfn = FW_HELLO_CMD_MBMASTER_GET(value);
122
123 if (value & FW_HELLO_CMD_INIT)
124 *state = CSIO_DEV_STATE_INIT;
125 else if (value & FW_HELLO_CMD_ERR)
126 *state = CSIO_DEV_STATE_ERR;
127 else
128 *state = CSIO_DEV_STATE_UNINIT;
129 }
130}
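
/*
 * Example (illustrative sketch only, not used by the driver): a polled
 * FW HELLO exchange built from the two helpers above. Assumes the caller
 * holds the HW lock, that @mbp comes from the driver's mailbox pool, and
 * that CSIO_MB_DEFAULT_TMO is the usual mailbox timeout.
 */
#if 0
static int
csio_example_do_hello(struct csio_hw *hw, struct csio_mb *mbp)
{
	enum fw_retval retval;
	enum csio_dev_state state;
	uint8_t mpfn;
	int rv;

	/* Build the HELLO, requesting mastership on our own mailbox. */
	csio_mb_hello(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, hw->pfn,
		      CSIO_MASTER_MUST, NULL);

	rv = csio_mb_issue(hw, mbp);	/* NULL callback => polled mode */
	if (rv)
		return rv;

	csio_mb_process_hello_rsp(hw, mbp, &retval, &state, &mpfn);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif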
131
/*
 * csio_mb_bye - FW BYE command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @cbfn: Callback, if any.
 *
 */
139void
140csio_mb_bye(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
141 void (*cbfn) (struct csio_hw *, struct csio_mb *))
142{
143 struct fw_bye_cmd *cmdp = (struct fw_bye_cmd *)(mbp->mb);
144
145 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
146
147 cmdp->op_to_write = htonl(FW_CMD_OP(FW_BYE_CMD) |
148 FW_CMD_REQUEST | FW_CMD_WRITE);
149 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
150
151}
152
/*
 * csio_mb_reset - FW RESET command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @reset: Type of reset.
 * @halt: If set, halt the firmware as part of this reset.
 * @cbfn: Callback, if any.
 *
 */
161void
162csio_mb_reset(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
163 int reset, int halt,
164 void (*cbfn) (struct csio_hw *, struct csio_mb *))
165{
166 struct fw_reset_cmd *cmdp = (struct fw_reset_cmd *)(mbp->mb);
167
168 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
169
170 cmdp->op_to_write = htonl(FW_CMD_OP(FW_RESET_CMD) |
171 FW_CMD_REQUEST | FW_CMD_WRITE);
172 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
173 cmdp->val = htonl(reset);
174 cmdp->halt_pkd = htonl(halt);
175
176}
177
178/*
179 * csio_mb_params - FW PARAMS command helper
180 * @hw: The HW structure
181 * @mbp: Mailbox structure
182 * @tmo: Command timeout.
183 * @pf: PF number.
184 * @vf: VF number.
 * @nparams: Number of parameters
186 * @params: Parameter mnemonic array.
187 * @val: Parameter value array.
188 * @wr: Write/Read PARAMS.
189 * @cbfn: Callback, if any.
190 *
191 */
192void
193csio_mb_params(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
194 unsigned int pf, unsigned int vf, unsigned int nparams,
195 const u32 *params, u32 *val, bool wr,
196 void (*cbfn)(struct csio_hw *, struct csio_mb *))
197{
198 uint32_t i;
199 uint32_t temp_params = 0, temp_val = 0;
200 struct fw_params_cmd *cmdp = (struct fw_params_cmd *)(mbp->mb);
201 __be32 *p = &cmdp->param[0].mnem;
202
203 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
204
205 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PARAMS_CMD) |
206 FW_CMD_REQUEST |
207 (wr ? FW_CMD_WRITE : FW_CMD_READ) |
208 FW_PARAMS_CMD_PFN(pf) |
209 FW_PARAMS_CMD_VFN(vf));
210 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
211
212 /* Write Params */
213 if (wr) {
214 while (nparams--) {
215 temp_params = *params++;
216 temp_val = *val++;
217
218 *p++ = htonl(temp_params);
219 *p++ = htonl(temp_val);
220 }
221 } else {
222 for (i = 0; i < nparams; i++, p += 2) {
223 temp_params = *params++;
224 *p = htonl(temp_params);
225 }
226 }
227
228}
229
230/*
231 * csio_mb_process_read_params_rsp - FW PARAMS response processing helper
232 * @hw: The HW structure
233 * @mbp: Mailbox structure
234 * @retval: Mailbox return value from Firmware
235 * @nparams: Number of parameters
236 * @val: Parameter value array.
237 *
238 */
239void
240csio_mb_process_read_params_rsp(struct csio_hw *hw, struct csio_mb *mbp,
241 enum fw_retval *retval, unsigned int nparams,
242 u32 *val)
243{
244 struct fw_params_cmd *rsp = (struct fw_params_cmd *)(mbp->mb);
245 uint32_t i;
246 __be32 *p = &rsp->param[0].val;
247
248 *retval = FW_CMD_RETVAL_GET(ntohl(rsp->retval_len16));
249
250 if (*retval == FW_SUCCESS)
251 for (i = 0; i < nparams; i++, p += 2)
252 *val++ = ntohl(*p);
253}
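
/*
 * Example (illustrative sketch only): reading a single firmware parameter
 * in polled mode with the two helpers above. The parameter mnemonic is
 * whatever FW_PARAMS encoding the caller needs; @mbp is assumed to come
 * from the driver's mailbox pool and CSIO_MB_DEFAULT_TMO to be the usual
 * mailbox timeout.
 */
#if 0
static int
csio_example_read_param(struct csio_hw *hw, struct csio_mb *mbp,
			u32 param, u32 *valp)
{
	enum fw_retval retval;
	int rv;

	/* wr == false: build a PARAMS read for one mnemonic. */
	csio_mb_params(hw, mbp, CSIO_MB_DEFAULT_TMO, hw->pfn, 0, 1,
		       &param, valp, false, NULL);

	rv = csio_mb_issue(hw, mbp);
	if (rv)
		return rv;

	csio_mb_process_read_params_rsp(hw, mbp, &retval, 1, valp);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif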
254
255/*
256 * csio_mb_ldst - FW LDST command
257 * @hw: The HW structure
258 * @mbp: Mailbox structure
259 * @tmo: timeout
260 * @reg: register
261 *
262 */
263void
264csio_mb_ldst(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo, int reg)
265{
266 struct fw_ldst_cmd *ldst_cmd = (struct fw_ldst_cmd *)(mbp->mb);
267 CSIO_INIT_MBP(mbp, ldst_cmd, tmo, hw, NULL, 1);
268
269 /*
270 * Construct and send the Firmware LDST Command to retrieve the
271 * specified PCI-E Configuration Space register.
272 */
273 ldst_cmd->op_to_addrspace =
274 htonl(FW_CMD_OP(FW_LDST_CMD) |
275 FW_CMD_REQUEST |
276 FW_CMD_READ |
277 FW_LDST_CMD_ADDRSPACE(FW_LDST_ADDRSPC_FUNC_PCIE));
278 ldst_cmd->cycles_to_len16 = htonl(FW_LEN16(struct fw_ldst_cmd));
279 ldst_cmd->u.pcie.select_naccess = FW_LDST_CMD_NACCESS(1);
280 ldst_cmd->u.pcie.ctrl_to_fn =
281 (FW_LDST_CMD_LC | FW_LDST_CMD_FN(hw->pfn));
282 ldst_cmd->u.pcie.r = (uint8_t)reg;
283}
284
285/*
286 *
287 * csio_mb_caps_config - FW Read/Write Capabilities command helper
288 * @hw: The HW structure
289 * @mbp: Mailbox structure
290 * @wr: Write if 1, Read if 0
291 * @init: Turn on initiator mode.
292 * @tgt: Turn on target mode.
293 * @cofld: If 1, Control Offload for FCoE
294 * @cbfn: Callback, if any.
295 *
296 * This helper assumes that cmdp has MB payload from a previous CAPS
297 * read command.
298 */
299void
300csio_mb_caps_config(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
301 bool wr, bool init, bool tgt, bool cofld,
302 void (*cbfn) (struct csio_hw *, struct csio_mb *))
303{
304 struct fw_caps_config_cmd *cmdp =
305 (struct fw_caps_config_cmd *)(mbp->mb);
306
307 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, wr ? 0 : 1);
308
309 cmdp->op_to_write = htonl(FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
310 FW_CMD_REQUEST |
311 (wr ? FW_CMD_WRITE : FW_CMD_READ));
312 cmdp->cfvalid_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
313
314 /* Read config */
315 if (!wr)
316 return;
317
318 /* Write config */
319 cmdp->fcoecaps = 0;
320
321 if (cofld)
322 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_CTRL_OFLD);
323 if (init)
324 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_INITIATOR);
325 if (tgt)
326 cmdp->fcoecaps |= htons(FW_CAPS_CONFIG_FCOE_TARGET);
327}
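
/*
 * Example (illustrative sketch only) of the read-modify-write pattern the
 * helper above expects: the CAPS are first read into @mbp, then the same
 * @mbp (whose payload the write path deliberately does not zero) is
 * re-used to write the desired FCoE capabilities back. Polled mode and
 * CSIO_MB_DEFAULT_TMO are assumed.
 */
#if 0
static int
csio_example_config_caps(struct csio_hw *hw, struct csio_mb *mbp)
{
	/* Read the current device capabilities. */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
			    false, false, false, false, NULL);
	if (csio_mb_issue(hw, mbp) || csio_mb_fw_retval(mbp) != FW_SUCCESS)
		return -EINVAL;

	/* Write them back, turning on FCoE initiator mode only. */
	csio_mb_caps_config(hw, mbp, CSIO_MB_DEFAULT_TMO,
			    true, true, false, false, NULL);
	if (csio_mb_issue(hw, mbp) || csio_mb_fw_retval(mbp) != FW_SUCCESS)
		return -EINVAL;

	return 0;
}
#endif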
328
329void
330csio_rss_glb_config(struct csio_hw *hw, struct csio_mb *mbp,
331 uint32_t tmo, uint8_t mode, unsigned int flags,
332 void (*cbfn)(struct csio_hw *, struct csio_mb *))
333{
334 struct fw_rss_glb_config_cmd *cmdp =
335 (struct fw_rss_glb_config_cmd *)(mbp->mb);
336
337 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
338
339 cmdp->op_to_write = htonl(FW_CMD_OP(FW_RSS_GLB_CONFIG_CMD) |
340 FW_CMD_REQUEST | FW_CMD_WRITE);
341 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
342
343 if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_MANUAL) {
344 cmdp->u.manual.mode_pkd =
345 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
346 } else if (mode == FW_RSS_GLB_CONFIG_CMD_MODE_BASICVIRTUAL) {
347 cmdp->u.basicvirtual.mode_pkd =
348 htonl(FW_RSS_GLB_CONFIG_CMD_MODE(mode));
349 cmdp->u.basicvirtual.synmapen_to_hashtoeplitz = htonl(flags);
350 }
351}
352
353
/*
 * csio_mb_pfvf - FW Write PF/VF capabilities command helper.
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout.
 * @pf: PF number.
 * @vf: VF number.
 * @txq: Max number of egress queues.
 * @txq_eth_ctrl: Max number of egress Ethernet control queues.
 * @rxqi: Max number of interrupt-capable ingress queues.
 * @rxq: Max number of interruptless ingress queues.
 * @tc: PCI traffic class.
 * @vi: Max number of virtual interfaces.
 * @cmask: Channel access rights mask.
 * @pmask: Port access rights mask.
 * @nexactf: Max number of exact-match filters.
 * @rcaps: Read capabilities.
 * @wxcaps: Write/execute capabilities.
 * @cbfn: Callback, if any.
 *
 */
372void
373csio_mb_pfvf(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
374 unsigned int pf, unsigned int vf, unsigned int txq,
375 unsigned int txq_eth_ctrl, unsigned int rxqi,
376 unsigned int rxq, unsigned int tc, unsigned int vi,
377 unsigned int cmask, unsigned int pmask, unsigned int nexactf,
378 unsigned int rcaps, unsigned int wxcaps,
379 void (*cbfn) (struct csio_hw *, struct csio_mb *))
380{
381 struct fw_pfvf_cmd *cmdp = (struct fw_pfvf_cmd *)(mbp->mb);
382
383 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
384
385 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_PFVF_CMD) |
386 FW_CMD_REQUEST |
387 FW_CMD_WRITE |
388 FW_PFVF_CMD_PFN(pf) |
389 FW_PFVF_CMD_VFN(vf));
390 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
391 cmdp->niqflint_niq = htonl(FW_PFVF_CMD_NIQFLINT(rxqi) |
392 FW_PFVF_CMD_NIQ(rxq));
393
394 cmdp->type_to_neq = htonl(FW_PFVF_CMD_TYPE |
395 FW_PFVF_CMD_CMASK(cmask) |
396 FW_PFVF_CMD_PMASK(pmask) |
397 FW_PFVF_CMD_NEQ(txq));
398 cmdp->tc_to_nexactf = htonl(FW_PFVF_CMD_TC(tc) |
399 FW_PFVF_CMD_NVI(vi) |
400 FW_PFVF_CMD_NEXACTF(nexactf));
401 cmdp->r_caps_to_nethctrl = htonl(FW_PFVF_CMD_R_CAPS(rcaps) |
402 FW_PFVF_CMD_WX_CAPS(wxcaps) |
403 FW_PFVF_CMD_NETHCTRL(txq_eth_ctrl));
404}
405
406#define CSIO_ADVERT_MASK (FW_PORT_CAP_SPEED_100M | FW_PORT_CAP_SPEED_1G |\
407 FW_PORT_CAP_SPEED_10G | FW_PORT_CAP_ANEG)
408
/*
 * csio_mb_port - FW PORT command helper
 * @hw: The HW structure
 * @mbp: Mailbox structure
 * @tmo: Command timeout
 * @portid: Port ID to get/set info
 * @wr: Write/Read PORT information.
 * @fc: Flow control
 * @caps: Port capabilities to set.
 * @cbfn: Callback, if any.
 *
 */
421void
422csio_mb_port(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
423 uint8_t portid, bool wr, uint32_t fc, uint16_t caps,
424 void (*cbfn) (struct csio_hw *, struct csio_mb *))
425{
426 struct fw_port_cmd *cmdp = (struct fw_port_cmd *)(mbp->mb);
427 unsigned int lfc = 0, mdi = FW_PORT_MDI(FW_PORT_MDI_AUTO);
428
429 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
430
431 cmdp->op_to_portid = htonl(FW_CMD_OP(FW_PORT_CMD) |
432 FW_CMD_REQUEST |
433 (wr ? FW_CMD_EXEC : FW_CMD_READ) |
434 FW_PORT_CMD_PORTID(portid));
435 if (!wr) {
436 cmdp->action_to_len16 = htonl(
437 FW_PORT_CMD_ACTION(FW_PORT_ACTION_GET_PORT_INFO) |
438 FW_CMD_LEN16(sizeof(*cmdp) / 16));
439 return;
440 }
441
442 /* Set port */
443 cmdp->action_to_len16 = htonl(
444 FW_PORT_CMD_ACTION(FW_PORT_ACTION_L1_CFG) |
445 FW_CMD_LEN16(sizeof(*cmdp) / 16));
446
447 if (fc & PAUSE_RX)
448 lfc |= FW_PORT_CAP_FC_RX;
449 if (fc & PAUSE_TX)
450 lfc |= FW_PORT_CAP_FC_TX;
451
452 if (!(caps & FW_PORT_CAP_ANEG))
453 cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) | lfc);
454 else
455 cmdp->u.l1cfg.rcap = htonl((caps & CSIO_ADVERT_MASK) |
456 lfc | mdi);
457}
458
459/*
460 * csio_mb_process_read_port_rsp - FW PORT command response processing helper
461 * @hw: The HW structure
462 * @mbp: Mailbox structure
463 * @retval: Mailbox return value from Firmware
464 * @caps: port capabilities
465 *
466 */
467void
468csio_mb_process_read_port_rsp(struct csio_hw *hw, struct csio_mb *mbp,
469 enum fw_retval *retval, uint16_t *caps)
470{
471 struct fw_port_cmd *rsp = (struct fw_port_cmd *)(mbp->mb);
472
473 *retval = FW_CMD_RETVAL_GET(ntohl(rsp->action_to_len16));
474
475 if (*retval == FW_SUCCESS)
476 *caps = ntohs(rsp->u.info.pcap);
477}
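
/*
 * Example (illustrative sketch only): reading the capabilities of a port
 * in polled mode with csio_mb_port()/csio_mb_process_read_port_rsp().
 * Assumes @mbp comes from the driver's mailbox pool, the HW lock is held,
 * and CSIO_MB_DEFAULT_TMO is the usual mailbox timeout.
 */
#if 0
static int
csio_example_get_port_caps(struct csio_hw *hw, struct csio_mb *mbp,
			   uint8_t portid, uint16_t *caps)
{
	enum fw_retval retval;

	/* wr == false: GET_PORT_INFO; flow control and caps are ignored. */
	csio_mb_port(hw, mbp, CSIO_MB_DEFAULT_TMO, portid, false, 0, 0, NULL);
	if (csio_mb_issue(hw, mbp))
		return -EINVAL;

	csio_mb_process_read_port_rsp(hw, mbp, &retval, caps);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif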
478
479/*
480 * csio_mb_initialize - FW INITIALIZE command helper
481 * @hw: The HW structure
482 * @mbp: Mailbox structure
 * @tmo: Command timeout
484 * @cbfn: Callback, if any.
485 *
486 */
487void
488csio_mb_initialize(struct csio_hw *hw, struct csio_mb *mbp, uint32_t tmo,
489 void (*cbfn) (struct csio_hw *, struct csio_mb *))
490{
491 struct fw_initialize_cmd *cmdp = (struct fw_initialize_cmd *)(mbp->mb);
492
493 CSIO_INIT_MBP(mbp, cmdp, tmo, hw, cbfn, 1);
494
495 cmdp->op_to_write = htonl(FW_CMD_OP(FW_INITIALIZE_CMD) |
496 FW_CMD_REQUEST | FW_CMD_WRITE);
497 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
498
499}
500
501/*
502 * csio_mb_iq_alloc - Initializes the mailbox to allocate an
503 * Ingress DMA queue in the firmware.
504 *
505 * @hw: The hw structure
506 * @mbp: Mailbox structure to initialize
507 * @priv: Private object
508 * @mb_tmo: Mailbox time-out period (in ms).
509 * @iq_params: Ingress queue params needed for allocation.
510 * @cbfn: The call-back function
511 *
512 *
513 */
514static void
515csio_mb_iq_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
516 uint32_t mb_tmo, struct csio_iq_params *iq_params,
517 void (*cbfn) (struct csio_hw *, struct csio_mb *))
518{
519 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
520
521 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
522
523 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
524 FW_CMD_REQUEST | FW_CMD_EXEC |
525 FW_IQ_CMD_PFN(iq_params->pfn) |
526 FW_IQ_CMD_VFN(iq_params->vfn));
527
528 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_ALLOC |
529 FW_CMD_LEN16(sizeof(*cmdp) / 16));
530
531 cmdp->type_to_iqandstindex = htonl(
532 FW_IQ_CMD_VIID(iq_params->viid) |
533 FW_IQ_CMD_TYPE(iq_params->type) |
534 FW_IQ_CMD_IQASYNCH(iq_params->iqasynch));
535
	cmdp->fl0size = htons(iq_params->fl0size);
	cmdp->fl1size = htons(iq_params->fl1size);
538
539} /* csio_mb_iq_alloc */
540
541/*
542 * csio_mb_iq_write - Initializes the mailbox for writing into an
543 * Ingress DMA Queue.
544 *
545 * @hw: The HW structure
546 * @mbp: Mailbox structure to initialize
547 * @priv: Private object
548 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with the IQ-alloc request.
550 * @iq_params: Ingress queue params needed for writing.
551 * @cbfn: The call-back function
552 *
553 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
554 * because this IQ write request can be cascaded with a previous
 * IQ alloc request, and we don't want to over-write the bits set by
556 * that request. This logic will work even in a non-cascaded case, since the
557 * cmdp structure is zeroed out by CSIO_INIT_MBP.
558 */
559static void
560csio_mb_iq_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
561 uint32_t mb_tmo, bool cascaded_req,
562 struct csio_iq_params *iq_params,
563 void (*cbfn) (struct csio_hw *, struct csio_mb *))
564{
565 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
566
567 uint32_t iq_start_stop = (iq_params->iq_start) ?
568 FW_IQ_CMD_IQSTART(1) :
569 FW_IQ_CMD_IQSTOP(1);
570
571 /*
572 * If this IQ write is cascaded with IQ alloc request, do not
573 * re-initialize with 0's.
574 *
575 */
576 if (!cascaded_req)
577 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
578
579 cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_IQ_CMD) |
580 FW_CMD_REQUEST | FW_CMD_WRITE |
581 FW_IQ_CMD_PFN(iq_params->pfn) |
582 FW_IQ_CMD_VFN(iq_params->vfn));
583 cmdp->alloc_to_len16 |= htonl(iq_start_stop |
584 FW_CMD_LEN16(sizeof(*cmdp) / 16));
585 cmdp->iqid |= htons(iq_params->iqid);
586 cmdp->fl0id |= htons(iq_params->fl0id);
587 cmdp->fl1id |= htons(iq_params->fl1id);
588 cmdp->type_to_iqandstindex |= htonl(
589 FW_IQ_CMD_IQANDST(iq_params->iqandst) |
590 FW_IQ_CMD_IQANUS(iq_params->iqanus) |
591 FW_IQ_CMD_IQANUD(iq_params->iqanud) |
592 FW_IQ_CMD_IQANDSTINDEX(iq_params->iqandstindex));
593 cmdp->iqdroprss_to_iqesize |= htons(
594 FW_IQ_CMD_IQPCIECH(iq_params->iqpciech) |
595 FW_IQ_CMD_IQDCAEN(iq_params->iqdcaen) |
596 FW_IQ_CMD_IQDCACPU(iq_params->iqdcacpu) |
597 FW_IQ_CMD_IQINTCNTTHRESH(iq_params->iqintcntthresh) |
598 FW_IQ_CMD_IQCPRIO(iq_params->iqcprio) |
599 FW_IQ_CMD_IQESIZE(iq_params->iqesize));
600
601 cmdp->iqsize |= htons(iq_params->iqsize);
602 cmdp->iqaddr |= cpu_to_be64(iq_params->iqaddr);
603
604 if (iq_params->type == 0) {
605 cmdp->iqns_to_fl0congen |= htonl(
606 FW_IQ_CMD_IQFLINTIQHSEN(iq_params->iqflintiqhsen)|
607 FW_IQ_CMD_IQFLINTCONGEN(iq_params->iqflintcongen));
608 }
609
610 if (iq_params->fl0size && iq_params->fl0addr &&
611 (iq_params->fl0id != 0xFFFF)) {
612
613 cmdp->iqns_to_fl0congen |= htonl(
614 FW_IQ_CMD_FL0HOSTFCMODE(iq_params->fl0hostfcmode)|
615 FW_IQ_CMD_FL0CPRIO(iq_params->fl0cprio) |
616 FW_IQ_CMD_FL0PADEN(iq_params->fl0paden) |
617 FW_IQ_CMD_FL0PACKEN(iq_params->fl0packen));
618 cmdp->fl0dcaen_to_fl0cidxfthresh |= htons(
619 FW_IQ_CMD_FL0DCAEN(iq_params->fl0dcaen) |
620 FW_IQ_CMD_FL0DCACPU(iq_params->fl0dcacpu) |
621 FW_IQ_CMD_FL0FBMIN(iq_params->fl0fbmin) |
622 FW_IQ_CMD_FL0FBMAX(iq_params->fl0fbmax) |
623 FW_IQ_CMD_FL0CIDXFTHRESH(iq_params->fl0cidxfthresh));
624 cmdp->fl0size |= htons(iq_params->fl0size);
625 cmdp->fl0addr |= cpu_to_be64(iq_params->fl0addr);
626 }
627} /* csio_mb_iq_write */
628
629/*
 * csio_mb_iq_alloc_write - Initializes the mailbox for allocating and
 * writing into an Ingress DMA Queue.
632 *
633 * @hw: The HW structure
634 * @mbp: Mailbox structure to initialize
635 * @priv: Private data.
636 * @mb_tmo: Mailbox time-out period (in ms).
637 * @iq_params: Ingress queue params needed for allocation & writing.
638 * @cbfn: The call-back function
639 *
640 *
641 */
642void
643csio_mb_iq_alloc_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
644 uint32_t mb_tmo, struct csio_iq_params *iq_params,
645 void (*cbfn) (struct csio_hw *, struct csio_mb *))
646{
647 csio_mb_iq_alloc(hw, mbp, priv, mb_tmo, iq_params, cbfn);
648 csio_mb_iq_write(hw, mbp, priv, mb_tmo, true, iq_params, cbfn);
649} /* csio_mb_iq_alloc_write */
650
651/*
652 * csio_mb_iq_alloc_write_rsp - Process the allocation & writing
653 * of ingress DMA queue mailbox's response.
654 *
655 * @hw: The HW structure.
656 * @mbp: Mailbox structure to initialize.
657 * @retval: Firmware return value.
658 * @iq_params: Ingress queue parameters, after allocation and write.
659 *
660 */
661void
662csio_mb_iq_alloc_write_rsp(struct csio_hw *hw, struct csio_mb *mbp,
663 enum fw_retval *ret_val,
664 struct csio_iq_params *iq_params)
665{
666 struct fw_iq_cmd *rsp = (struct fw_iq_cmd *)(mbp->mb);
667
668 *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
669 if (*ret_val == FW_SUCCESS) {
670 iq_params->physiqid = ntohs(rsp->physiqid);
671 iq_params->iqid = ntohs(rsp->iqid);
672 iq_params->fl0id = ntohs(rsp->fl0id);
673 iq_params->fl1id = ntohs(rsp->fl1id);
674 } else {
675 iq_params->physiqid = iq_params->iqid =
676 iq_params->fl0id = iq_params->fl1id = 0;
677 }
678} /* csio_mb_iq_alloc_write_rsp */
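
/*
 * Example (illustrative sketch only): allocating and starting an ingress
 * queue with the cascaded alloc+write helper above, then picking up the
 * firmware-assigned IDs from the response. @iq_params is assumed to be
 * filled in by the caller (pfn/vfn, sizes, DMA addresses, iq_start etc.);
 * polled mode and CSIO_MB_DEFAULT_TMO are assumed.
 */
#if 0
static int
csio_example_setup_iq(struct csio_hw *hw, struct csio_mb *mbp,
		      struct csio_iq_params *iq_params)
{
	enum fw_retval retval;

	csio_mb_iq_alloc_write(hw, mbp, hw, CSIO_MB_DEFAULT_TMO,
			       iq_params, NULL);
	if (csio_mb_issue(hw, mbp))
		return -EINVAL;

	/* On success this fills in iqid/physiqid/fl0id/fl1id. */
	csio_mb_iq_alloc_write_rsp(hw, mbp, &retval, iq_params);
	return (retval == FW_SUCCESS) ? 0 : -EINVAL;
}
#endif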
679
680/*
681 * csio_mb_iq_free - Initializes the mailbox for freeing a
682 * specified Ingress DMA Queue.
683 *
684 * @hw: The HW structure
685 * @mbp: Mailbox structure to initialize
686 * @priv: Private data
687 * @mb_tmo: Mailbox time-out period (in ms).
688 * @iq_params: Parameters of ingress queue, that is to be freed.
689 * @cbfn: The call-back function
690 *
691 *
692 */
693void
694csio_mb_iq_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
695 uint32_t mb_tmo, struct csio_iq_params *iq_params,
696 void (*cbfn) (struct csio_hw *, struct csio_mb *))
697{
698 struct fw_iq_cmd *cmdp = (struct fw_iq_cmd *)(mbp->mb);
699
700 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
701
702 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_IQ_CMD) |
703 FW_CMD_REQUEST | FW_CMD_EXEC |
704 FW_IQ_CMD_PFN(iq_params->pfn) |
705 FW_IQ_CMD_VFN(iq_params->vfn));
706 cmdp->alloc_to_len16 = htonl(FW_IQ_CMD_FREE |
707 FW_CMD_LEN16(sizeof(*cmdp) / 16));
708 cmdp->type_to_iqandstindex = htonl(FW_IQ_CMD_TYPE(iq_params->type));
709
710 cmdp->iqid = htons(iq_params->iqid);
711 cmdp->fl0id = htons(iq_params->fl0id);
712 cmdp->fl1id = htons(iq_params->fl1id);
713
714} /* csio_mb_iq_free */
715
716/*
717 * csio_mb_eq_ofld_alloc - Initializes the mailbox for allocating
718 * an offload-egress queue.
719 *
720 * @hw: The HW structure
721 * @mbp: Mailbox structure to initialize
722 * @priv: Private data
723 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
725 * @cbfn: The call-back function
726 *
727 *
728 */
729static void
730csio_mb_eq_ofld_alloc(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
731 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
732 void (*cbfn) (struct csio_hw *, struct csio_mb *))
733{
734 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
735
736 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
737 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
738 FW_CMD_REQUEST | FW_CMD_EXEC |
739 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
740 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
741 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_ALLOC |
742 FW_CMD_LEN16(sizeof(*cmdp) / 16));
743
744} /* csio_mb_eq_ofld_alloc */
745
746/*
747 * csio_mb_eq_ofld_write - Initializes the mailbox for writing
 * an allocated offload-egress queue.
749 *
750 * @hw: The HW structure
751 * @mbp: Mailbox structure to initialize
752 * @priv: Private data
753 * @mb_tmo: Mailbox time-out period (in ms).
 * @cascaded_req: TRUE - if this request is cascaded with the EQ-alloc request.
 * @eq_ofld_params: (Offload) Egress queue parameters.
756 * @cbfn: The call-back function
757 *
758 *
759 * NOTE: We OR relevant bits with cmdp->XXX, instead of just equating,
760 * because this EQ write request can be cascaded with a previous
 * EQ alloc request, and we don't want to over-write the bits set by
762 * that request. This logic will work even in a non-cascaded case, since the
763 * cmdp structure is zeroed out by CSIO_INIT_MBP.
764 */
765static void
766csio_mb_eq_ofld_write(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
767 uint32_t mb_tmo, bool cascaded_req,
768 struct csio_eq_params *eq_ofld_params,
769 void (*cbfn) (struct csio_hw *, struct csio_mb *))
770{
771 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
772
773 uint32_t eq_start_stop = (eq_ofld_params->eqstart) ?
774 FW_EQ_OFLD_CMD_EQSTART : FW_EQ_OFLD_CMD_EQSTOP;
775
776 /*
777 * If this EQ write is cascaded with EQ alloc request, do not
778 * re-initialize with 0's.
779 *
780 */
781 if (!cascaded_req)
782 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
783
784 cmdp->op_to_vfn |= htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
785 FW_CMD_REQUEST | FW_CMD_WRITE |
786 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
787 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
788 cmdp->alloc_to_len16 |= htonl(eq_start_stop |
789 FW_CMD_LEN16(sizeof(*cmdp) / 16));
790
791 cmdp->eqid_pkd |= htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
792
793 cmdp->fetchszm_to_iqid |= htonl(
794 FW_EQ_OFLD_CMD_HOSTFCMODE(eq_ofld_params->hostfcmode) |
795 FW_EQ_OFLD_CMD_CPRIO(eq_ofld_params->cprio) |
796 FW_EQ_OFLD_CMD_PCIECHN(eq_ofld_params->pciechn) |
797 FW_EQ_OFLD_CMD_IQID(eq_ofld_params->iqid));
798
799 cmdp->dcaen_to_eqsize |= htonl(
800 FW_EQ_OFLD_CMD_DCAEN(eq_ofld_params->dcaen) |
801 FW_EQ_OFLD_CMD_DCACPU(eq_ofld_params->dcacpu) |
802 FW_EQ_OFLD_CMD_FBMIN(eq_ofld_params->fbmin) |
803 FW_EQ_OFLD_CMD_FBMAX(eq_ofld_params->fbmax) |
804 FW_EQ_OFLD_CMD_CIDXFTHRESHO(eq_ofld_params->cidxfthresho) |
805 FW_EQ_OFLD_CMD_CIDXFTHRESH(eq_ofld_params->cidxfthresh) |
806 FW_EQ_OFLD_CMD_EQSIZE(eq_ofld_params->eqsize));
807
808 cmdp->eqaddr |= cpu_to_be64(eq_ofld_params->eqaddr);
809
810} /* csio_mb_eq_ofld_write */
811
812/*
 * csio_mb_eq_ofld_alloc_write - Initializes the mailbox for allocating and
 * writing into an Egress DMA Queue.
815 *
816 * @hw: The HW structure
817 * @mbp: Mailbox structure to initialize
818 * @priv: Private data.
819 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters.
821 * @cbfn: The call-back function
822 *
823 *
824 */
825void
826csio_mb_eq_ofld_alloc_write(struct csio_hw *hw, struct csio_mb *mbp,
827 void *priv, uint32_t mb_tmo,
828 struct csio_eq_params *eq_ofld_params,
829 void (*cbfn) (struct csio_hw *, struct csio_mb *))
830{
831 csio_mb_eq_ofld_alloc(hw, mbp, priv, mb_tmo, eq_ofld_params, cbfn);
832 csio_mb_eq_ofld_write(hw, mbp, priv, mb_tmo, true,
833 eq_ofld_params, cbfn);
834} /* csio_mb_eq_ofld_alloc_write */
835
836/*
837 * csio_mb_eq_ofld_alloc_write_rsp - Process the allocation
838 * & write egress DMA queue mailbox's response.
839 *
840 * @hw: The HW structure.
841 * @mbp: Mailbox structure to initialize.
842 * @retval: Firmware return value.
 * @eq_ofld_params: (Offload) Egress queue parameters.
844 *
845 */
846void
847csio_mb_eq_ofld_alloc_write_rsp(struct csio_hw *hw,
848 struct csio_mb *mbp, enum fw_retval *ret_val,
849 struct csio_eq_params *eq_ofld_params)
850{
851 struct fw_eq_ofld_cmd *rsp = (struct fw_eq_ofld_cmd *)(mbp->mb);
852
853 *ret_val = FW_CMD_RETVAL_GET(ntohl(rsp->alloc_to_len16));
854
855 if (*ret_val == FW_SUCCESS) {
856 eq_ofld_params->eqid = FW_EQ_OFLD_CMD_EQID_GET(
857 ntohl(rsp->eqid_pkd));
858 eq_ofld_params->physeqid = FW_EQ_OFLD_CMD_PHYSEQID_GET(
859 ntohl(rsp->physeqid_pkd));
860 } else
861 eq_ofld_params->eqid = 0;
862
863} /* csio_mb_eq_ofld_alloc_write_rsp */
864
865/*
866 * csio_mb_eq_ofld_free - Initializes the mailbox for freeing a
 * specified Egress DMA Queue.
868 *
869 * @hw: The HW structure
870 * @mbp: Mailbox structure to initialize
871 * @priv: Private data area.
872 * @mb_tmo: Mailbox time-out period (in ms).
 * @eq_ofld_params: (Offload) Egress queue parameters of the queue to be freed.
874 * @cbfn: The call-back function
875 *
876 *
877 */
878void
879csio_mb_eq_ofld_free(struct csio_hw *hw, struct csio_mb *mbp, void *priv,
880 uint32_t mb_tmo, struct csio_eq_params *eq_ofld_params,
881 void (*cbfn) (struct csio_hw *, struct csio_mb *))
882{
883 struct fw_eq_ofld_cmd *cmdp = (struct fw_eq_ofld_cmd *)(mbp->mb);
884
885 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, priv, cbfn, 1);
886
887 cmdp->op_to_vfn = htonl(FW_CMD_OP(FW_EQ_OFLD_CMD) |
888 FW_CMD_REQUEST | FW_CMD_EXEC |
889 FW_EQ_OFLD_CMD_PFN(eq_ofld_params->pfn) |
890 FW_EQ_OFLD_CMD_VFN(eq_ofld_params->vfn));
891 cmdp->alloc_to_len16 = htonl(FW_EQ_OFLD_CMD_FREE |
892 FW_CMD_LEN16(sizeof(*cmdp) / 16));
893 cmdp->eqid_pkd = htonl(FW_EQ_OFLD_CMD_EQID(eq_ofld_params->eqid));
894
895} /* csio_mb_eq_ofld_free */
896
897/*
898 * csio_write_fcoe_link_cond_init_mb - Initialize Mailbox to write FCoE link
899 * condition.
900 *
901 * @ln: The Lnode structure
902 * @mbp: Mailbox structure to initialize
903 * @mb_tmo: Mailbox time-out period (in ms).
904 * @cbfn: The call back function.
905 *
906 *
907 */
908void
909csio_write_fcoe_link_cond_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
910 uint32_t mb_tmo, uint8_t port_id, uint32_t sub_opcode,
911 uint8_t cos, bool link_status, uint32_t fcfi,
912 void (*cbfn) (struct csio_hw *, struct csio_mb *))
913{
914 struct fw_fcoe_link_cmd *cmdp =
915 (struct fw_fcoe_link_cmd *)(mbp->mb);
916
917 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
918
919 cmdp->op_to_portid = htonl((
920 FW_CMD_OP(FW_FCOE_LINK_CMD) |
921 FW_CMD_REQUEST |
922 FW_CMD_WRITE |
923 FW_FCOE_LINK_CMD_PORTID(port_id)));
924 cmdp->sub_opcode_fcfi = htonl(
925 FW_FCOE_LINK_CMD_SUB_OPCODE(sub_opcode) |
926 FW_FCOE_LINK_CMD_FCFI(fcfi));
927 cmdp->lstatus = link_status;
928 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
929
930} /* csio_write_fcoe_link_cond_init_mb */
931
932/*
933 * csio_fcoe_read_res_info_init_mb - Initializes the mailbox for reading FCoE
 * resource information (FW_FCOE_RES_INFO_CMD).
935 *
936 * @hw: The HW structure
937 * @mbp: Mailbox structure to initialize
938 * @mb_tmo: Mailbox time-out period (in ms).
939 * @cbfn: The call-back function
940 *
941 *
942 */
943void
944csio_fcoe_read_res_info_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
945 uint32_t mb_tmo,
946 void (*cbfn) (struct csio_hw *, struct csio_mb *))
947{
948 struct fw_fcoe_res_info_cmd *cmdp =
949 (struct fw_fcoe_res_info_cmd *)(mbp->mb);
950
951 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
952
953 cmdp->op_to_read = htonl((FW_CMD_OP(FW_FCOE_RES_INFO_CMD) |
954 FW_CMD_REQUEST |
955 FW_CMD_READ));
956
957 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
958
959} /* csio_fcoe_read_res_info_init_mb */
960
961/*
962 * csio_fcoe_vnp_alloc_init_mb - Initializes the mailbox for allocating VNP
963 * in the firmware (FW_FCOE_VNP_CMD).
964 *
965 * @ln: The Lnode structure.
966 * @mbp: Mailbox structure to initialize.
967 * @mb_tmo: Mailbox time-out period (in ms).
968 * @fcfi: FCF Index.
969 * @vnpi: vnpi
970 * @iqid: iqid
971 * @vnport_wwnn: vnport WWNN
972 * @vnport_wwpn: vnport WWPN
973 * @cbfn: The call-back function.
974 *
975 *
976 */
977void
978csio_fcoe_vnp_alloc_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
979 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi, uint16_t iqid,
980 uint8_t vnport_wwnn[8], uint8_t vnport_wwpn[8],
981 void (*cbfn) (struct csio_hw *, struct csio_mb *))
982{
983 struct fw_fcoe_vnp_cmd *cmdp =
984 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
985
986 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
987
988 cmdp->op_to_fcfi = htonl((FW_CMD_OP(FW_FCOE_VNP_CMD) |
989 FW_CMD_REQUEST |
990 FW_CMD_EXEC |
991 FW_FCOE_VNP_CMD_FCFI(fcfi)));
992
993 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_ALLOC |
994 FW_CMD_LEN16(sizeof(*cmdp) / 16));
995
996 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
997
998 cmdp->iqid = htons(iqid);
999
1000 if (!wwn_to_u64(vnport_wwnn) && !wwn_to_u64(vnport_wwpn))
1001 cmdp->gen_wwn_to_vnpi |= htonl(FW_FCOE_VNP_CMD_GEN_WWN);
1002
1003 if (vnport_wwnn)
1004 memcpy(cmdp->vnport_wwnn, vnport_wwnn, 8);
1005 if (vnport_wwpn)
1006 memcpy(cmdp->vnport_wwpn, vnport_wwpn, 8);
1007
1008} /* csio_fcoe_vnp_alloc_init_mb */
1009
1010/*
1011 * csio_fcoe_vnp_read_init_mb - Prepares VNP read cmd.
1012 * @ln: The Lnode structure.
1013 * @mbp: Mailbox structure to initialize.
1014 * @mb_tmo: Mailbox time-out period (in ms).
1015 * @fcfi: FCF Index.
1016 * @vnpi: vnpi
1017 * @cbfn: The call-back handler.
1018 */
1019void
1020csio_fcoe_vnp_read_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1021 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
1022 void (*cbfn) (struct csio_hw *, struct csio_mb *))
1023{
1024 struct fw_fcoe_vnp_cmd *cmdp =
1025 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
1026
1027 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1028 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
1029 FW_CMD_REQUEST |
1030 FW_CMD_READ |
1031 FW_FCOE_VNP_CMD_FCFI(fcfi));
1032 cmdp->alloc_to_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
1033 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
1034}
1035
1036/*
1037 * csio_fcoe_vnp_free_init_mb - Initializes the mailbox for freeing an
 * allocated VNP in the firmware (FW_FCOE_VNP_CMD).
1039 *
1040 * @ln: The Lnode structure.
1041 * @mbp: Mailbox structure to initialize.
1042 * @mb_tmo: Mailbox time-out period (in ms).
1043 * @fcfi: FCF flow id
1044 * @vnpi: VNP flow id
1045 * @cbfn: The call-back function.
1046 * Return: None
1047 */
1048void
1049csio_fcoe_vnp_free_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1050 uint32_t mb_tmo, uint32_t fcfi, uint32_t vnpi,
1051 void (*cbfn) (struct csio_hw *, struct csio_mb *))
1052{
1053 struct fw_fcoe_vnp_cmd *cmdp =
1054 (struct fw_fcoe_vnp_cmd *)(mbp->mb);
1055
1056 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1057
1058 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_VNP_CMD) |
1059 FW_CMD_REQUEST |
1060 FW_CMD_EXEC |
1061 FW_FCOE_VNP_CMD_FCFI(fcfi));
1062 cmdp->alloc_to_len16 = htonl(FW_FCOE_VNP_CMD_FREE |
1063 FW_CMD_LEN16(sizeof(*cmdp) / 16));
1064 cmdp->gen_wwn_to_vnpi = htonl(FW_FCOE_VNP_CMD_VNPI(vnpi));
1065}
1066
1067/*
1068 * csio_fcoe_read_fcf_init_mb - Initializes the mailbox to read the
1069 * FCF records.
1070 *
1071 * @ln: The Lnode structure
1072 * @mbp: Mailbox structure to initialize
1073 * @mb_tmo: Mailbox time-out period (in ms).
1074 * @fcf_params: FC-Forwarder parameters.
1075 * @cbfn: The call-back function
1076 *
1077 *
1078 */
1079void
1080csio_fcoe_read_fcf_init_mb(struct csio_lnode *ln, struct csio_mb *mbp,
1081 uint32_t mb_tmo, uint32_t portid, uint32_t fcfi,
1082 void (*cbfn) (struct csio_hw *, struct csio_mb *))
1083{
1084 struct fw_fcoe_fcf_cmd *cmdp =
1085 (struct fw_fcoe_fcf_cmd *)(mbp->mb);
1086
1087 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, ln, cbfn, 1);
1088
1089 cmdp->op_to_fcfi = htonl(FW_CMD_OP(FW_FCOE_FCF_CMD) |
1090 FW_CMD_REQUEST |
1091 FW_CMD_READ |
1092 FW_FCOE_FCF_CMD_FCFI(fcfi));
1093 cmdp->retval_len16 = htonl(FW_CMD_LEN16(sizeof(*cmdp) / 16));
1094
1095} /* csio_fcoe_read_fcf_init_mb */
1096
1097void
1098csio_fcoe_read_portparams_init_mb(struct csio_hw *hw, struct csio_mb *mbp,
1099 uint32_t mb_tmo,
1100 struct fw_fcoe_port_cmd_params *portparams,
1101 void (*cbfn)(struct csio_hw *,
1102 struct csio_mb *))
1103{
1104 struct fw_fcoe_stats_cmd *cmdp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
1105
1106 CSIO_INIT_MBP(mbp, cmdp, mb_tmo, hw, cbfn, 1);
1107 mbp->mb_size = 64;
1108
1109 cmdp->op_to_flowid = htonl(FW_CMD_OP(FW_FCOE_STATS_CMD) |
1110 FW_CMD_REQUEST | FW_CMD_READ);
1111 cmdp->free_to_len16 = htonl(FW_CMD_LEN16(CSIO_MAX_MB_SIZE/16));
1112
1113 cmdp->u.ctl.nstats_port = FW_FCOE_STATS_CMD_NSTATS(portparams->nstats) |
1114 FW_FCOE_STATS_CMD_PORT(portparams->portid);
1115
1116 cmdp->u.ctl.port_valid_ix = FW_FCOE_STATS_CMD_IX(portparams->idx) |
1117 FW_FCOE_STATS_CMD_PORT_VALID;
1118
1119} /* csio_fcoe_read_portparams_init_mb */
1120
void
csio_mb_process_portparams_rsp(struct csio_hw *hw,
				struct csio_mb *mbp,
				enum fw_retval *retval,
				struct fw_fcoe_port_cmd_params *portparams,
				struct fw_fcoe_port_stats *portstats)
{
	struct fw_fcoe_stats_cmd *rsp = (struct fw_fcoe_stats_cmd *)(mbp->mb);
	struct fw_fcoe_port_stats stats;
	uint8_t *src;
	uint8_t *dst;

	*retval = FW_CMD_RETVAL_GET(ntohl(rsp->free_to_len16));

	memset(&stats, 0, sizeof(struct fw_fcoe_port_stats));

	if (*retval == FW_SUCCESS) {
		dst = (uint8_t *)(&stats) + ((portparams->idx - 1) * 8);
		src = (uint8_t *)rsp + (CSIO_STATS_OFFSET * 8);
		memcpy(dst, src, (portparams->nstats * 8));
		if (portparams->idx == 1) {
			/* Get the first 6 flits from the Mailbox */
			portstats->tx_bcast_bytes = stats.tx_bcast_bytes;
			portstats->tx_bcast_frames = stats.tx_bcast_frames;
			portstats->tx_mcast_bytes = stats.tx_mcast_bytes;
			portstats->tx_mcast_frames = stats.tx_mcast_frames;
			portstats->tx_ucast_bytes = stats.tx_ucast_bytes;
			portstats->tx_ucast_frames = stats.tx_ucast_frames;
		}
		if (portparams->idx == 7) {
			/* Get the second 6 flits from the Mailbox */
			portstats->tx_drop_frames = stats.tx_drop_frames;
			portstats->tx_offload_bytes = stats.tx_offload_bytes;
			portstats->tx_offload_frames = stats.tx_offload_frames;
#if 0
			portstats->rx_pf_bytes = stats.rx_pf_bytes;
			portstats->rx_pf_frames = stats.rx_pf_frames;
#endif
			portstats->rx_bcast_bytes = stats.rx_bcast_bytes;
			portstats->rx_bcast_frames = stats.rx_bcast_frames;
			portstats->rx_mcast_bytes = stats.rx_mcast_bytes;
		}
		if (portparams->idx == 13) {
			/* Get the last 4 flits from the Mailbox */
			portstats->rx_mcast_frames = stats.rx_mcast_frames;
			portstats->rx_ucast_bytes = stats.rx_ucast_bytes;
			portstats->rx_ucast_frames = stats.rx_ucast_frames;
			portstats->rx_err_frames = stats.rx_err_frames;
		}
	}
}
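
/*
 * Example (illustrative sketch only): the FCoE port statistics span 16
 * flits and are fetched in three chunks (indices 1, 7 and 13 of 6, 6 and
 * 4 statistics respectively), matching the idx checks in
 * csio_mb_process_portparams_rsp() above. Polled mode; @mbp is assumed to
 * come from the driver's mailbox pool and CSIO_MB_DEFAULT_TMO to be the
 * usual mailbox timeout.
 */
#if 0
static int
csio_example_get_port_stats(struct csio_hw *hw, struct csio_mb *mbp,
			    uint8_t portid, struct fw_fcoe_port_stats *stats)
{
	struct fw_fcoe_port_cmd_params portparams;
	enum fw_retval retval;
	int idx;

	portparams.portid = portid;

	for (idx = 1; idx <= 13; idx += 6) {
		portparams.idx = idx;
		portparams.nstats = (idx == 13) ? 4 : 6;

		csio_fcoe_read_portparams_init_mb(hw, mbp, CSIO_MB_DEFAULT_TMO,
						  &portparams, NULL);
		if (csio_mb_issue(hw, mbp))
			return -EINVAL;

		csio_mb_process_portparams_rsp(hw, mbp, &retval, &portparams,
					       stats);
		if (retval != FW_SUCCESS)
			return -EINVAL;
	}

	return 0;
}
#endif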
1172
1173/* Entry points/APIs for MB module */
1174/*
1175 * csio_mb_intr_enable - Enable Interrupts from mailboxes.
1176 * @hw: The HW structure
1177 *
1178 * Enables CIM interrupt bit in appropriate INT_ENABLE registers.
1179 */
1180void
1181csio_mb_intr_enable(struct csio_hw *hw)
1182{
1183 csio_wr_reg32(hw, MBMSGRDYINTEN(1), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1184 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1185}
1186
1187/*
1188 * csio_mb_intr_disable - Disable Interrupts from mailboxes.
1189 * @hw: The HW structure
1190 *
1191 * Disable bit in HostInterruptEnable CIM register.
1192 */
1193void
1194csio_mb_intr_disable(struct csio_hw *hw)
1195{
1196 csio_wr_reg32(hw, MBMSGRDYINTEN(0), MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1197 csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_ENABLE));
1198}
1199
1200static void
1201csio_mb_dump_fw_dbg(struct csio_hw *hw, __be64 *cmd)
1202{
1203 struct fw_debug_cmd *dbg = (struct fw_debug_cmd *)cmd;
1204
1205 if ((FW_DEBUG_CMD_TYPE_GET(ntohl(dbg->op_type))) == 1) {
1206 csio_info(hw, "FW print message:\n");
1207 csio_info(hw, "\tdebug->dprtstridx = %d\n",
1208 ntohs(dbg->u.prt.dprtstridx));
1209 csio_info(hw, "\tdebug->dprtstrparam0 = 0x%x\n",
1210 ntohl(dbg->u.prt.dprtstrparam0));
1211 csio_info(hw, "\tdebug->dprtstrparam1 = 0x%x\n",
1212 ntohl(dbg->u.prt.dprtstrparam1));
1213 csio_info(hw, "\tdebug->dprtstrparam2 = 0x%x\n",
1214 ntohl(dbg->u.prt.dprtstrparam2));
1215 csio_info(hw, "\tdebug->dprtstrparam3 = 0x%x\n",
1216 ntohl(dbg->u.prt.dprtstrparam3));
1217 } else {
1218 /* This is a FW assertion */
1219 csio_fatal(hw, "FW assertion at %.16s:%u, val0 %#x, val1 %#x\n",
1220 dbg->u.assert.filename_0_7,
1221 ntohl(dbg->u.assert.line),
1222 ntohl(dbg->u.assert.x),
1223 ntohl(dbg->u.assert.y));
1224 }
1225}
1226
1227static void
1228csio_mb_debug_cmd_handler(struct csio_hw *hw)
1229{
1230 int i;
1231 __be64 cmd[CSIO_MB_MAX_REGS];
1232 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1233 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1234 int size = sizeof(struct fw_debug_cmd);
1235
1236 /* Copy mailbox data */
1237 for (i = 0; i < size; i += 8)
1238 cmd[i / 8] = cpu_to_be64(csio_rd_reg64(hw, data_reg + i));
1239
1240 csio_mb_dump_fw_dbg(hw, cmd);
1241
1242 /* Notify FW of mailbox by setting owner as UP */
1243 csio_wr_reg32(hw, MBMSGVALID | MBINTREQ | MBOWNER(CSIO_MBOWNER_FW),
1244 ctl_reg);
1245
1246 csio_rd_reg32(hw, ctl_reg);
1247 wmb();
1248}
1249
1250/*
1251 * csio_mb_issue - generic routine for issuing Mailbox commands.
1252 * @hw: The HW structure
1253 * @mbp: Mailbox command to issue
1254 *
1255 * Caller should hold hw lock across this call.
1256 */
1257int
1258csio_mb_issue(struct csio_hw *hw, struct csio_mb *mbp)
1259{
1260 uint32_t owner, ctl;
1261 int i;
1262 uint32_t ii;
1263 __be64 *cmd = mbp->mb;
1264 __be64 hdr;
1265 struct csio_mbm *mbm = &hw->mbm;
1266 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1267 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1268 int size = mbp->mb_size;
1269 int rv = -EINVAL;
1270 struct fw_cmd_hdr *fw_hdr;
1271
1272 /* Determine mode */
1273 if (mbp->mb_cbfn == NULL) {
1274 /* Need to issue/get results in the same context */
1275 if (mbp->tmo < CSIO_MB_POLL_FREQ) {
1276 csio_err(hw, "Invalid tmo: 0x%x\n", mbp->tmo);
1277 goto error_out;
1278 }
1279 } else if (!csio_is_host_intr_enabled(hw) ||
1280 !csio_is_hw_intr_enabled(hw)) {
1281 csio_err(hw, "Cannot issue mailbox in interrupt mode 0x%x\n",
1282 *((uint8_t *)mbp->mb));
1283 goto error_out;
1284 }
1285
1286 if (mbm->mcurrent != NULL) {
1287 /* Queue mbox cmd, if another mbox cmd is active */
1288 if (mbp->mb_cbfn == NULL) {
1289 rv = -EBUSY;
			csio_dbg(hw, "Couldn't own Mailbox %x op:0x%x\n",
1291 hw->pfn, *((uint8_t *)mbp->mb));
1292
1293 goto error_out;
1294 } else {
1295 list_add_tail(&mbp->list, &mbm->req_q);
1296 CSIO_INC_STATS(mbm, n_activeq);
1297
1298 return 0;
1299 }
1300 }
1301
1302 /* Now get ownership of mailbox */
1303 owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
1304
1305 if (!csio_mb_is_host_owner(owner)) {
1306
1307 for (i = 0; (owner == CSIO_MBOWNER_NONE) && (i < 3); i++)
1308 owner = MBOWNER_GET(csio_rd_reg32(hw, ctl_reg));
1309 /*
1310 * Mailbox unavailable. In immediate mode, fail the command.
1311 * In other modes, enqueue the request.
1312 */
1313 if (!csio_mb_is_host_owner(owner)) {
1314 if (mbp->mb_cbfn == NULL) {
1315 rv = owner ? -EBUSY : -ETIMEDOUT;
1316
1317 csio_dbg(hw,
1318 "Couldnt own Mailbox %x op:0x%x "
1319 "owner:%x\n",
1320 hw->pfn, *((uint8_t *)mbp->mb), owner);
1321 goto error_out;
1322 } else {
1323 if (mbm->mcurrent == NULL) {
1324 csio_err(hw,
1325 "Couldnt own Mailbox %x "
1326 "op:0x%x owner:%x\n",
1327 hw->pfn, *((uint8_t *)mbp->mb),
1328 owner);
					csio_err(hw,
						 "No outstanding driver"
						 " mailbox either\n");
1332 goto error_out;
1333 }
1334 }
1335 }
1336 }
1337
1338 /* Mailbox is available, copy mailbox data into it */
1339 for (i = 0; i < size; i += 8) {
1340 csio_wr_reg64(hw, be64_to_cpu(*cmd), data_reg + i);
1341 cmd++;
1342 }
1343
1344 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1345
1346 /* Start completion timers in non-immediate modes and notify FW */
1347 if (mbp->mb_cbfn != NULL) {
1348 mbm->mcurrent = mbp;
1349 mod_timer(&mbm->timer, jiffies + msecs_to_jiffies(mbp->tmo));
1350 csio_wr_reg32(hw, MBMSGVALID | MBINTREQ |
1351 MBOWNER(CSIO_MBOWNER_FW), ctl_reg);
1352 } else
1353 csio_wr_reg32(hw, MBMSGVALID | MBOWNER(CSIO_MBOWNER_FW),
1354 ctl_reg);
1355
1356 /* Flush posted writes */
1357 csio_rd_reg32(hw, ctl_reg);
1358 wmb();
1359
1360 CSIO_INC_STATS(mbm, n_req);
1361
1362 if (mbp->mb_cbfn)
1363 return 0;
1364
1365 /* Poll for completion in immediate mode */
1366 cmd = mbp->mb;
1367
1368 for (ii = 0; ii < mbp->tmo; ii += CSIO_MB_POLL_FREQ) {
1369 mdelay(CSIO_MB_POLL_FREQ);
1370
1371 /* Check for response */
1372 ctl = csio_rd_reg32(hw, ctl_reg);
1373 if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
1374
1375 if (!(ctl & MBMSGVALID)) {
1376 csio_wr_reg32(hw, 0, ctl_reg);
1377 continue;
1378 }
1379
1380 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1381
1382 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1383 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1384
1385 switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
1386 case FW_DEBUG_CMD:
1387 csio_mb_debug_cmd_handler(hw);
1388 continue;
1389 }
1390
1391 /* Copy response */
1392 for (i = 0; i < size; i += 8)
1393 *cmd++ = cpu_to_be64(csio_rd_reg64
1394 (hw, data_reg + i));
1395 csio_wr_reg32(hw, 0, ctl_reg);
1396
			if (csio_mb_fw_retval(mbp) != FW_SUCCESS)
				CSIO_INC_STATS(mbm, n_err);
1399
1400 CSIO_INC_STATS(mbm, n_rsp);
1401 return 0;
1402 }
1403 }
1404
1405 CSIO_INC_STATS(mbm, n_tmo);
1406
1407 csio_err(hw, "Mailbox %x op:0x%x timed out!\n",
1408 hw->pfn, *((uint8_t *)cmd));
1409
1410 return -ETIMEDOUT;
1411
1412error_out:
1413 CSIO_INC_STATS(mbm, n_err);
1414 return rv;
1415}
1416
1417/*
1418 * csio_mb_completions - Completion handler for Mailbox commands
1419 * @hw: The HW structure
1420 * @cbfn_q: Completion queue.
1421 *
1422 */
1423void
1424csio_mb_completions(struct csio_hw *hw, struct list_head *cbfn_q)
1425{
1426 struct csio_mb *mbp;
1427 struct csio_mbm *mbm = &hw->mbm;
1428 enum fw_retval rv;
1429
1430 while (!list_empty(cbfn_q)) {
1431 mbp = list_first_entry(cbfn_q, struct csio_mb, list);
1432 list_del_init(&mbp->list);
1433
1434 rv = csio_mb_fw_retval(mbp);
1435 if ((rv != FW_SUCCESS) && (rv != FW_HOSTERROR))
1436 CSIO_INC_STATS(mbm, n_err);
1437 else if (rv != FW_HOSTERROR)
1438 CSIO_INC_STATS(mbm, n_rsp);
1439
1440 if (mbp->mb_cbfn)
1441 mbp->mb_cbfn(hw, mbp);
1442 }
1443}
1444
1445static void
1446csio_mb_portmod_changed(struct csio_hw *hw, uint8_t port_id)
1447{
1448 static char *mod_str[] = {
1449 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
1450 };
1451
1452 struct csio_pport *port = &hw->pport[port_id];
1453
1454 if (port->mod_type == FW_PORT_MOD_TYPE_NONE)
1455 csio_info(hw, "Port:%d - port module unplugged\n", port_id);
1456 else if (port->mod_type < ARRAY_SIZE(mod_str))
1457 csio_info(hw, "Port:%d - %s port module inserted\n", port_id,
1458 mod_str[port->mod_type]);
1459 else if (port->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
1460 csio_info(hw,
1461 "Port:%d - unsupported optical port module "
1462 "inserted\n", port_id);
1463 else if (port->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
1464 csio_info(hw,
1465 "Port:%d - unknown port module inserted, forcing "
1466 "TWINAX\n", port_id);
1467 else if (port->mod_type == FW_PORT_MOD_TYPE_ERROR)
1468 csio_info(hw, "Port:%d - transceiver module error\n", port_id);
1469 else
1470 csio_info(hw, "Port:%d - unknown module type %d inserted\n",
1471 port_id, port->mod_type);
1472}
1473
1474int
1475csio_mb_fwevt_handler(struct csio_hw *hw, __be64 *cmd)
1476{
1477 uint8_t opcode = *(uint8_t *)cmd;
1478 struct fw_port_cmd *pcmd;
1479 uint8_t port_id;
1480 uint32_t link_status;
1481 uint16_t action;
1482 uint8_t mod_type;
1483
1484 if (opcode == FW_PORT_CMD) {
1485 pcmd = (struct fw_port_cmd *)cmd;
1486 port_id = FW_PORT_CMD_PORTID_GET(
1487 ntohl(pcmd->op_to_portid));
1488 action = FW_PORT_CMD_ACTION_GET(
1489 ntohl(pcmd->action_to_len16));
1490 if (action != FW_PORT_ACTION_GET_PORT_INFO) {
1491 csio_err(hw, "Unhandled FW_PORT_CMD action: %u\n",
1492 action);
1493 return -EINVAL;
1494 }
1495
1496 link_status = ntohl(pcmd->u.info.lstatus_to_modtype);
1497 mod_type = FW_PORT_CMD_MODTYPE_GET(link_status);
1498
1499 hw->pport[port_id].link_status =
1500 FW_PORT_CMD_LSTATUS_GET(link_status);
1501 hw->pport[port_id].link_speed =
1502 FW_PORT_CMD_LSPEED_GET(link_status);
1503
1504 csio_info(hw, "Port:%x - LINK %s\n", port_id,
1505 FW_PORT_CMD_LSTATUS_GET(link_status) ? "UP" : "DOWN");
1506
1507 if (mod_type != hw->pport[port_id].mod_type) {
1508 hw->pport[port_id].mod_type = mod_type;
1509 csio_mb_portmod_changed(hw, port_id);
1510 }
1511 } else if (opcode == FW_DEBUG_CMD) {
1512 csio_mb_dump_fw_dbg(hw, cmd);
1513 } else {
1514 csio_dbg(hw, "Gen MB can't handle op:0x%x on evtq.\n", opcode);
1515 return -EINVAL;
1516 }
1517
1518 return 0;
1519}
1520
1521/*
1522 * csio_mb_isr_handler - Handle mailboxes related interrupts.
1523 * @hw: The HW structure
1524 *
1525 * Called from the ISR to handle Mailbox related interrupts.
1526 * HW Lock should be held across this call.
1527 */
1528int
1529csio_mb_isr_handler(struct csio_hw *hw)
1530{
1531 struct csio_mbm *mbm = &hw->mbm;
1532 struct csio_mb *mbp = mbm->mcurrent;
1533 __be64 *cmd;
1534 uint32_t ctl, cim_cause, pl_cause;
1535 int i;
1536 uint32_t ctl_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_CTRL);
1537 uint32_t data_reg = PF_REG(hw->pfn, CIM_PF_MAILBOX_DATA);
1538 int size;
1539 __be64 hdr;
1540 struct fw_cmd_hdr *fw_hdr;
1541
1542 pl_cause = csio_rd_reg32(hw, MYPF_REG(PL_PF_INT_CAUSE));
1543 cim_cause = csio_rd_reg32(hw, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
1544
1545 if (!(pl_cause & PFCIM) || !(cim_cause & MBMSGRDYINT)) {
1546 CSIO_INC_STATS(hw, n_mbint_unexp);
1547 return -EINVAL;
1548 }
1549
1550 /*
1551 * The cause registers below HAVE to be cleared in the SAME
1552 * order as below: The low level cause register followed by
1553 * the upper level cause register. In other words, CIM-cause
1554 * first followed by PL-Cause next.
1555 */
1556 csio_wr_reg32(hw, MBMSGRDYINT, MYPF_REG(CIM_PF_HOST_INT_CAUSE));
1557 csio_wr_reg32(hw, PFCIM, MYPF_REG(PL_PF_INT_CAUSE));
1558
1559 ctl = csio_rd_reg32(hw, ctl_reg);
1560
1561 if (csio_mb_is_host_owner(MBOWNER_GET(ctl))) {
1562
1563 CSIO_DUMP_MB(hw, hw->pfn, data_reg);
1564
1565 if (!(ctl & MBMSGVALID)) {
1566 csio_warn(hw,
1567 "Stray mailbox interrupt recvd,"
1568 " mailbox data not valid\n");
1569 csio_wr_reg32(hw, 0, ctl_reg);
1570 /* Flush */
1571 csio_rd_reg32(hw, ctl_reg);
1572 return -EINVAL;
1573 }
1574
1575 hdr = cpu_to_be64(csio_rd_reg64(hw, data_reg));
1576 fw_hdr = (struct fw_cmd_hdr *)&hdr;
1577
1578 switch (FW_CMD_OP_GET(ntohl(fw_hdr->hi))) {
1579 case FW_DEBUG_CMD:
1580 csio_mb_debug_cmd_handler(hw);
1581 return -EINVAL;
1582#if 0
1583 case FW_ERROR_CMD:
1584 case FW_INITIALIZE_CMD: /* When we are not master */
1585#endif
1586 }
1587
1588 CSIO_ASSERT(mbp != NULL);
1589
1590 cmd = mbp->mb;
1591 size = mbp->mb_size;
1592 /* Get response */
1593 for (i = 0; i < size; i += 8)
1594 *cmd++ = cpu_to_be64(csio_rd_reg64
1595 (hw, data_reg + i));
1596
1597 csio_wr_reg32(hw, 0, ctl_reg);
1598 /* Flush */
1599 csio_rd_reg32(hw, ctl_reg);
1600
1601 mbm->mcurrent = NULL;
1602
1603 /* Add completion to tail of cbfn queue */
1604 list_add_tail(&mbp->list, &mbm->cbfn_q);
1605 CSIO_INC_STATS(mbm, n_cbfnq);
1606
1607 /*
1608 * Enqueue event to EventQ. Events processing happens
1609 * in Event worker thread context
1610 */
1611 if (csio_enqueue_evt(hw, CSIO_EVT_MBX, mbp, sizeof(mbp)))
1612 CSIO_INC_STATS(hw, n_evt_drop);
1613
1614 return 0;
1615
1616 } else {
1617 /*
1618 * We can get here if mailbox MSIX vector is shared,
1619 * or in INTx case. Or a stray interrupt.
1620 */
1621 csio_dbg(hw, "Host not owner, no mailbox interrupt\n");
1622 CSIO_INC_STATS(hw, n_int_stray);
1623 return -EINVAL;
1624 }
1625}
1626
1627/*
1628 * csio_mb_tmo_handler - Timeout handler
1629 * @hw: The HW structure
1630 *
1631 */
1632struct csio_mb *
1633csio_mb_tmo_handler(struct csio_hw *hw)
1634{
1635 struct csio_mbm *mbm = &hw->mbm;
1636 struct csio_mb *mbp = mbm->mcurrent;
1637 struct fw_cmd_hdr *fw_hdr;
1638
1639 /*
1640 * Could be a race b/w the completion handler and the timer
1641 * and the completion handler won that race.
1642 */
1643 if (mbp == NULL) {
1644 CSIO_DB_ASSERT(0);
1645 return NULL;
1646 }
1647
1648 fw_hdr = (struct fw_cmd_hdr *)(mbp->mb);
1649
1650 csio_dbg(hw, "Mailbox num:%x op:0x%x timed out\n", hw->pfn,
1651 FW_CMD_OP_GET(ntohl(fw_hdr->hi)));
1652
1653 mbm->mcurrent = NULL;
1654 CSIO_INC_STATS(mbm, n_tmo);
1655 fw_hdr->lo = htonl(FW_CMD_RETVAL(FW_ETIMEDOUT));
1656
1657 return mbp;
1658}
1659
1660/*
1661 * csio_mb_cancel_all - Cancel all waiting commands.
1662 * @hw: The HW structure
1663 * @cbfn_q: The callback queue.
1664 *
1665 * Caller should hold hw lock across this call.
1666 */
1667void
1668csio_mb_cancel_all(struct csio_hw *hw, struct list_head *cbfn_q)
1669{
1670 struct csio_mb *mbp;
1671 struct csio_mbm *mbm = &hw->mbm;
1672 struct fw_cmd_hdr *hdr;
1673 struct list_head *tmp;
1674
1675 if (mbm->mcurrent) {
1676 mbp = mbm->mcurrent;
1677
1678 /* Stop mailbox completion timer */
1679 del_timer_sync(&mbm->timer);
1680
1681 /* Add completion to tail of cbfn queue */
1682 list_add_tail(&mbp->list, cbfn_q);
1683 mbm->mcurrent = NULL;
1684 }
1685
1686 if (!list_empty(&mbm->req_q)) {
1687 list_splice_tail_init(&mbm->req_q, cbfn_q);
1688 mbm->stats.n_activeq = 0;
1689 }
1690
1691 if (!list_empty(&mbm->cbfn_q)) {
1692 list_splice_tail_init(&mbm->cbfn_q, cbfn_q);
1693 mbm->stats.n_cbfnq = 0;
1694 }
1695
1696 if (list_empty(cbfn_q))
1697 return;
1698
1699 list_for_each(tmp, cbfn_q) {
1700 mbp = (struct csio_mb *)tmp;
1701 hdr = (struct fw_cmd_hdr *)(mbp->mb);
1702
1703 csio_dbg(hw, "Cancelling pending mailbox num %x op:%x\n",
1704 hw->pfn, FW_CMD_OP_GET(ntohl(hdr->hi)));
1705
1706 CSIO_INC_STATS(mbm, n_cancel);
1707 hdr->lo = htonl(FW_CMD_RETVAL(FW_HOSTERROR));
1708 }
1709}
1710
1711/*
1712 * csio_mbm_init - Initialize Mailbox module
1713 * @mbm: Mailbox module
1714 * @hw: The HW structure
 * @timer_fn: Timer handler for mailbox time-outs
1716 *
1717 * Initialize timer and the request/response queues.
1718 */
1719int
1720csio_mbm_init(struct csio_mbm *mbm, struct csio_hw *hw,
1721 void (*timer_fn)(uintptr_t))
1722{
1723 struct timer_list *timer = &mbm->timer;
1724
1725 init_timer(timer);
1726 timer->function = timer_fn;
1727 timer->data = (unsigned long)hw;
1728
1729 INIT_LIST_HEAD(&mbm->req_q);
1730 INIT_LIST_HEAD(&mbm->cbfn_q);
1731 csio_set_mb_intr_idx(mbm, -1);
1732
1733 return 0;
1734}
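
/*
 * Example (illustrative sketch only) of a timer_fn suitable for
 * csio_mbm_init(): on expiry it claims the timed-out mailbox (unless the
 * completion path already did) and runs its callback. Locking is driver
 * specific and only hinted at in the comments.
 */
#if 0
static void
csio_example_mb_timer_fn(uintptr_t data)
{
	struct csio_hw *hw = (struct csio_hw *)data;
	struct csio_mb *mbp;

	/* Driver specific: acquire the HW lock here. */
	mbp = csio_mb_tmo_handler(hw);
	/* Driver specific: release the HW lock here. */

	if (mbp && mbp->mb_cbfn)
		mbp->mb_cbfn(hw, mbp);
}
#endif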
1735
1736/*
1737 * csio_mbm_exit - Uninitialize mailbox module
1738 * @mbm: Mailbox module
1739 *
1740 * Stop timer.
1741 */
1742void
1743csio_mbm_exit(struct csio_mbm *mbm)
1744{
1745 del_timer_sync(&mbm->timer);
1746
1747 CSIO_DB_ASSERT(mbm->mcurrent == NULL);
1748 CSIO_DB_ASSERT(list_empty(&mbm->req_q));
1749 CSIO_DB_ASSERT(list_empty(&mbm->cbfn_q));
1750}