blob: 04d362085360023506dad6f0b681e29129796118 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
/*
 * BFA module list terminated by NULL. Order matters: entries are walked
 * front-to-back by bfa_iocfc_start_submod() (->start) and
 * bfa_iocfc_disable_submod() (->iocdisable).
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
/*
 * Message handlers for various modules: dispatch table indexed by the
 * BFI message class (m->mhdr.msg_class) of each response-queue message;
 * consumed by bfa_isr_rspq(). Classes with no handler land in
 * bfa_isr_unhandled(), which traces the message and WARNs.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes, indexed by BFI message
 * class. Only BFI_MC_IOCFC is serviced here; NOTE(review): this table is
 * not referenced in the visible code - presumably it is registered with
 * the IOC mailbox layer; confirm against bfa_ioc mbox registration.
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
91static void
Krishna Gudipati45070252011-06-24 20:24:29 -070092bfa_com_port_attach(struct bfa_s *bfa)
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080093{
94 struct bfa_port_s *port = &bfa->modules.port;
Krishna Gudipati45070252011-06-24 20:24:29 -070095 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080096
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080097 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
Krishna Gudipati45070252011-06-24 20:24:29 -070098 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080099}
100
101/*
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700102 * ablk module attach
103 */
104static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700105bfa_com_ablk_attach(struct bfa_s *bfa)
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700106{
107 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
Krishna Gudipati45070252011-06-24 20:24:29 -0700108 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700109
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700110 bfa_ablk_attach(ablk, &bfa->ioc);
Krishna Gudipati45070252011-06-24 20:24:29 -0700111 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700112}
113
Krishna Gudipati148d6102011-06-24 20:25:36 -0700114static void
115bfa_com_cee_attach(struct bfa_s *bfa)
116{
117 struct bfa_cee_s *cee = &bfa->modules.cee;
118 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
119
120 cee->trcmod = bfa->trcmod;
121 bfa_cee_attach(cee, &bfa->ioc, bfa);
122 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
123}
124
Krishna Gudipati51e569a2011-06-24 20:26:25 -0700125static void
126bfa_com_sfp_attach(struct bfa_s *bfa)
127{
128 struct bfa_sfp_s *sfp = BFA_SFP_MOD(bfa);
129 struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
130
131 bfa_sfp_attach(sfp, &bfa->ioc, bfa, bfa->trcmod);
132 bfa_sfp_memclaim(sfp, sfp_dma->kva_curp, sfp_dma->dma_curp);
133}
134
Krishna Gudipati5a54b1d2011-06-24 20:27:13 -0700135static void
136bfa_com_flash_attach(struct bfa_s *bfa, bfa_boolean_t mincfg)
137{
138 struct bfa_flash_s *flash = BFA_FLASH(bfa);
139 struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);
140
141 bfa_flash_attach(flash, &bfa->ioc, bfa, bfa->trcmod, mincfg);
142 bfa_flash_memclaim(flash, flash_dma->kva_curp,
143 flash_dma->dma_curp, mincfg);
144}
145
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700146/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700147 * BFA IOC FC related definitions
148 */
149
/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/*
 * iocfc->action states; drive which completion callback is queued once
 * firmware configuration finishes (see bfa_iocfc_cfgrsp()).
 */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

/* Default driver configuration limits */
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16
179
Jing Huang5fbe25c2010-10-18 17:17:23 -0700180/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700181 * forward declaration for IOC FC functions
182 */
183static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
184static void bfa_iocfc_disable_cbfn(void *bfa_arg);
185static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
186static void bfa_iocfc_reset_cbfn(void *bfa_arg);
187static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
188
Jing Huang5fbe25c2010-10-18 17:17:23 -0700189/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700190 * BFA Interrupt handling functions
191 */
/*
 * Resume waiters on a request queue: pop entries off the reqq wait list
 * and invoke each waiter's qresume callback, stopping as soon as the
 * request queue fills up again.
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		/*
		 * NOTE(review): qresume presumably posts the deferred
		 * request, consuming reqq space - hence the full-check above.
		 */
		wqe->qresume(wqe->cbarg);
	}
}
211
/*
 * Drain one response queue: ack the interrupt, dispatch every message
 * between the consumer (CI) and producer (PI) indices to its per-class
 * handler in bfa_isrs[], publish the new CI to hardware, then kick any
 * requests waiting for room on the matching request queue.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		/* a class >= BFI_MC_MAX would index past bfa_isrs[] */
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();	/* keep the CI update ordered ahead of later MMIO */

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
246
/*
 * Service a request-queue interrupt: acknowledge it, then resume any
 * requests that were waiting for room on this queue.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	bfa_isr_reqq_ack(bfa, qid);

	if (!list_empty(bfa_reqq(bfa, qid)))
		bfa_reqq_resume(bfa, qid);
}
261
/*
 * Interrupt handler used when all sources share a single MSI-X vector:
 * service RME (response) queues first, then CPE (request) queues, and
 * hand any remaining error/mailbox bits to bfa_msix_lpu_err().
 * Queue processing is skipped while bfa->queue_process is false.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	bfa_msix_lpu_err(bfa, intr);
}
299
/*
 * INTx-mode interrupt handler. Returns BFA_FALSE when no interrupt is
 * pending, BFA_TRUE otherwise. Queue interrupt bits are acknowledged by
 * writing them back to the interrupt status register before the queues
 * are drained; error/mailbox bits go to bfa_msix_lpu_err().
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
343
/*
 * Enable host interrupts: install the control-vector handler, build the
 * unmask set for this ASIC generation and PCI function, clear any stale
 * status bits, program the interrupt mask, and select the interrupt
 * mode (MSI-X if any vectors were allocated).
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* clear stale status, then unmask exactly the bits in umsk */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
368
/*
 * Disable host interrupts: drop back to INTx mode selection, mask every
 * interrupt source, and remove the installed MSI-X handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);	/* mask all sources */
	bfa_msix_uninstall(bfa);
}
376
377void
Krishna Gudipati11189202011-06-13 15:50:35 -0700378bfa_msix_reqq(struct bfa_s *bfa, int vec)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700379{
Krishna Gudipati11189202011-06-13 15:50:35 -0700380 bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700381}
382
/*
 * Catch-all handler for message classes with no registered handler:
 * trace the offending message header fields, flag the bug with WARN_ON,
 * and freeze the trace buffer for post-mortem analysis.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
392
393void
Krishna Gudipati11189202011-06-13 15:50:35 -0700394bfa_msix_rspq(struct bfa_s *bfa, int vec)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700395{
Krishna Gudipati11189202011-06-13 15:50:35 -0700396 bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700397}
398
/*
 * Handle the error/mailbox interrupt vector: dispatch LPU mailbox
 * messages, clear HALT and PSS error side-conditions so the status
 * register can actually clear, then forward remaining error bits to the
 * IOC error handler.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	/* bit layout differs between CT2 and earlier ASICs */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
451
Jing Huang5fbe25c2010-10-18 17:17:23 -0700452/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700453 * BFA IOC FC related functions
454 */
455
Jing Huang5fbe25c2010-10-18 17:17:23 -0700456/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800457 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700458 */
459
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ: reset the
 * circular queues, fill in the DMA-mapped config-info page (queue base
 * addresses, shadow pointers, element counts - all in big-endian as the
 * firmware expects), and post the config request to the IOC mailbox.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* cleared here; set again when the firmware response arrives */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
528
/*
 * Cache driver handles, copy the caller's configuration, and hook up
 * the ASIC-specific hardware callbacks (bfa_hwct_* for CT-class parts,
 * bfa_hwcb_* otherwise, with CT2 overrides applied last).
 * NOTE(review): the pcidev parameter is not used in this body.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;	/* struct copy of caller's configuration */

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		/* CB vectors are partitioned per PCI function */
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
583
/*
 * Claim the DMA-able and KVA memory pre-carved for IOC/IOCFC use:
 * IOC memory, the request/response rings (zeroed), the shadow CI/PI
 * cachelines, the config info/response pages, and - when auto-recovery
 * is on - the firmware trace buffer.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8 *dm_kva = NULL;
	u64 dm_pa = 0;
	int i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			  bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			      BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}
656
Jing Huang5fbe25c2010-10-18 17:17:23 -0700657/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700658 * Start BFA submodules.
659 */
660static void
661bfa_iocfc_start_submod(struct bfa_s *bfa)
662{
663 int i;
664
Krishna Gudipati775c7742011-06-13 15:52:12 -0700665 bfa->queue_process = BFA_TRUE;
Krishna Gudipati11189202011-06-13 15:50:35 -0700666 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700667 bfa_isr_rspq_ack(bfa, i);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700668
669 for (i = 0; hal_mods[i]; i++)
670 hal_mods[i]->start(bfa);
671}
672
Jing Huang5fbe25c2010-10-18 17:17:23 -0700673/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700674 * Disable BFA submodules.
675 */
676static void
677bfa_iocfc_disable_submod(struct bfa_s *bfa)
678{
679 int i;
680
681 for (i = 0; hal_mods[i]; i++)
682 hal_mods[i]->iocdisable(bfa);
683}
684
685static void
686bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
687{
688 struct bfa_s *bfa = bfa_arg;
689
690 if (complete) {
691 if (bfa->iocfc.cfgdone)
692 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
693 else
694 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
695 } else {
696 if (bfa->iocfc.cfgdone)
697 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
698 }
699}
700
701static void
702bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
703{
704 struct bfa_s *bfa = bfa_arg;
705 struct bfad_s *bfad = bfa->bfad;
706
707 if (compl)
708 complete(&bfad->comp);
709 else
710 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
711}
712
713static void
Krishna Gudipati60138062011-06-24 20:25:15 -0700714bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
715{
716 struct bfa_s *bfa = bfa_arg;
717 struct bfad_s *bfad = bfa->bfad;
718
719 if (compl)
720 complete(&bfad->enable_comp);
721}
722
723static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700724bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
725{
726 struct bfa_s *bfa = bfa_arg;
727 struct bfad_s *bfad = bfa->bfad;
728
729 if (compl)
730 complete(&bfad->disable_comp);
731}
732
/*
 * Configure queue registers from firmware response: record the hardware
 * queue ids and translate the firmware-reported register offsets into
 * mapped addresses off BAR0 for the CPE/RME CI, PI and control
 * registers of every queue.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
753
/*
 * Re-configure per-module resource counts with the values reported by
 * firmware in the configuration response (NOTE(review): presumably the
 * firmware may grant fewer resources than requested - confirm).
 */
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
763
Jing Huang5fbe25c2010-10-18 17:17:23 -0700764/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700765 * Update BFA configuration from firmware configuration.
766 */
767static void
768bfa_iocfc_cfgrsp(struct bfa_s *bfa)
769{
770 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
771 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
772 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
773
774 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700775 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700776 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
Jing Huangba816ea2010-10-18 17:10:50 -0700777 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
778 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
779 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
780 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700781
782 iocfc->cfgdone = BFA_TRUE;
783
Jing Huang5fbe25c2010-10-18 17:17:23 -0700784 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700785 * configure queue register offsets as learnt from firmware
786 */
787 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
788
789 /*
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700790 * Re-configure resources as learnt from Firmware
791 */
792 bfa_iocfc_res_recfg(bfa, fwcfg);
793
794 /*
Krishna Gudipati775c7742011-06-13 15:52:12 -0700795 * Install MSIX queue handlers
796 */
797 bfa_msix_queue_install(bfa);
798
799 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700800 * Configuration is complete - initialize/start submodules
801 */
802 bfa_fcport_init(bfa);
803
804 if (iocfc->action == BFA_IOCFC_ACT_INIT)
805 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -0700806 else {
807 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
808 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
809 bfa_iocfc_enable_cb, bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700810 bfa_iocfc_start_submod(bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -0700811 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700812}
813void
814bfa_iocfc_reset_queues(struct bfa_s *bfa)
815{
816 int q;
817
818 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
819 bfa_reqq_ci(bfa, q) = 0;
820 bfa_reqq_pi(bfa, q) = 0;
821 bfa_rspq_ci(bfa, q) = 0;
822 bfa_rspq_pi(bfa, q) = 0;
823 }
824}
825
Krishna Gudipatia7141342011-06-24 20:23:19 -0700826/* Fabric Assigned Address specific functions */
827
828/*
829 * Check whether IOC is ready before sending command down
830 */
831static bfa_status_t
832bfa_faa_validate_request(struct bfa_s *bfa)
833{
834 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
835 u32 card_type = bfa->ioc.attr->card_type;
836
837 if (bfa_ioc_is_operational(&bfa->ioc)) {
838 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
839 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
840 } else {
841 if (!bfa_ioc_is_acq_addr(&bfa->ioc))
842 return BFA_STATUS_IOC_NON_OP;
843 }
844
845 return BFA_STATUS_OK;
846}
847
848bfa_status_t
849bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
850{
851 struct bfi_faa_en_dis_s faa_enable_req;
852 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
853 bfa_status_t status;
854
855 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
856 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
857
858 status = bfa_faa_validate_request(bfa);
859 if (status != BFA_STATUS_OK)
860 return status;
861
862 if (iocfc->faa_args.busy == BFA_TRUE)
863 return BFA_STATUS_DEVBUSY;
864
865 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
866 return BFA_STATUS_FAA_ENABLED;
867
868 if (bfa_fcport_is_trunk_enabled(bfa))
869 return BFA_STATUS_ERROR_TRUNK_ENABLED;
870
871 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
872 iocfc->faa_args.busy = BFA_TRUE;
873
874 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
875 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700876 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700877
878 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
879 sizeof(struct bfi_faa_en_dis_s));
880
881 return BFA_STATUS_OK;
882}
883
884bfa_status_t
885bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
886 void *cbarg)
887{
888 struct bfi_faa_en_dis_s faa_disable_req;
889 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
890 bfa_status_t status;
891
892 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
893 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
894
895 status = bfa_faa_validate_request(bfa);
896 if (status != BFA_STATUS_OK)
897 return status;
898
899 if (iocfc->faa_args.busy == BFA_TRUE)
900 return BFA_STATUS_DEVBUSY;
901
902 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
903 return BFA_STATUS_FAA_DISABLED;
904
905 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
906 iocfc->faa_args.busy = BFA_TRUE;
907
908 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
909 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700910 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700911
912 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
913 sizeof(struct bfi_faa_en_dis_s));
914
915 return BFA_STATUS_OK;
916}
917
918bfa_status_t
919bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
920 bfa_cb_iocfc_t cbfn, void *cbarg)
921{
922 struct bfi_faa_query_s faa_attr_req;
923 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
924 bfa_status_t status;
925
926 iocfc->faa_args.faa_attr = attr;
927 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
928 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
929
930 status = bfa_faa_validate_request(bfa);
931 if (status != BFA_STATUS_OK)
932 return status;
933
934 if (iocfc->faa_args.busy == BFA_TRUE)
935 return BFA_STATUS_DEVBUSY;
936
937 iocfc->faa_args.busy = BFA_TRUE;
938 memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
939 bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700940 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700941
942 bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
943 sizeof(struct bfi_faa_query_s));
944
945 return BFA_STATUS_OK;
946}
947
948/*
949 * FAA enable response
950 */
951static void
952bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
953 struct bfi_faa_en_dis_rsp_s *rsp)
954{
955 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
956 bfa_status_t status = rsp->status;
957
958 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
959
960 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
961 iocfc->faa_args.busy = BFA_FALSE;
962}
963
964/*
965 * FAA disable response
966 */
967static void
968bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
969 struct bfi_faa_en_dis_rsp_s *rsp)
970{
971 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
972 bfa_status_t status = rsp->status;
973
974 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
975
976 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
977 iocfc->faa_args.busy = BFA_FALSE;
978}
979
980/*
981 * FAA query response
982 */
983static void
984bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
985 bfi_faa_query_rsp_t *rsp)
986{
987 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
988
989 if (iocfc->faa_args.faa_attr) {
990 iocfc->faa_args.faa_attr->faa = rsp->faa;
991 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
992 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
993 }
994
995 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
996
997 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
998 iocfc->faa_args.busy = BFA_FALSE;
999}
1000
Jing Huang5fbe25c2010-10-18 17:17:23 -07001001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001002 * IOC enable request is complete
1003 */
1004static void
1005bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
1006{
1007 struct bfa_s *bfa = bfa_arg;
1008
Krishna Gudipatia7141342011-06-24 20:23:19 -07001009 if (status == BFA_STATUS_FAA_ACQ_ADDR) {
1010 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1011 bfa_iocfc_init_cb, bfa);
1012 return;
1013 }
1014
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001015 if (status != BFA_STATUS_OK) {
1016 bfa_isr_disable(bfa);
1017 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1018 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
1019 bfa_iocfc_init_cb, bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -07001020 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
1021 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
1022 bfa_iocfc_enable_cb, bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001023 return;
1024 }
1025
1026 bfa_iocfc_send_cfg(bfa);
1027}
1028
Jing Huang5fbe25c2010-10-18 17:17:23 -07001029/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001030 * IOC disable request is complete
1031 */
1032static void
1033bfa_iocfc_disable_cbfn(void *bfa_arg)
1034{
1035 struct bfa_s *bfa = bfa_arg;
1036
1037 bfa_isr_disable(bfa);
1038 bfa_iocfc_disable_submod(bfa);
1039
1040 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
1041 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
1042 bfa);
1043 else {
Jing Huangd4b671c2010-12-26 21:46:35 -08001044 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001045 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
1046 bfa);
1047 }
1048}
1049
Jing Huang5fbe25c2010-10-18 17:17:23 -07001050/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001051 * Notify sub-modules of hardware failure.
1052 */
/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Stop request-queue processing first */
	bfa->queue_process = BFA_FALSE;

	/* Quiesce interrupts, then tear down the sub-modules */
	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	/* If the failure hit during INIT, complete the init callback */
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			bfa);
}
1067
Jing Huang5fbe25c2010-10-18 17:17:23 -07001068/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001069 * Actions on chip-reset completion.
1070 */
/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Clear all queue indexes before interrupts are re-enabled */
	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
1079
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001080
Jing Huang5fbe25c2010-10-18 17:17:23 -07001081/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001082 * Query IOC memory requirement information.
1083 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	/* one request and one response queue per configured CQ */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC: firmware-trace buffer is only
	 * reserved when auto-recovery is enabled */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
		((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
1127
/*
 * Attach the IOCFC sub-module: register IOC callbacks, hook up PCI and
 * mailbox handlers, and claim the memory set aside for IOCFC.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* Register the IOCFC callbacks invoked on IOC state transitions */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	/* Attach the IOC and hook it up to PCI and the mailbox ISRs */
	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	/* Claim the memory carved out for IOCFC and init bookkeeping */
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
1157
/*
 * Start IOC initialization: record the INIT action and enable the IOC.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	/* Mark an INIT in progress and enable the IOC; completion is
	 * reported through the callbacks registered in bfa_iocfc_attach() */
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
1167
Jing Huang5fbe25c2010-10-18 17:17:23 -07001168/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001169 * IOC start called from bfa_start(). Called to start IOC operations
1170 * at driver instantiation for this instance.
1171 */
1172void
1173bfa_iocfc_start(struct bfa_s *bfa)
1174{
1175 if (bfa->iocfc.cfgdone)
1176 bfa_iocfc_start_submod(bfa);
1177}
1178
Jing Huang5fbe25c2010-10-18 17:17:23 -07001179/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001180 * IOC stop called from bfa_stop(). Called only when driver is unloaded
1181 * for this instance.
1182 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	/* Halt request-queue processing before disabling the IOC */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1191
/* Dispatch firmware-to-host IOCFC mailbox messages by message id. */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		/* firmware config response - completes initialization */
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		/* unexpected message id from firmware */
		WARN_ON(1);
	}
}
1224
1225void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001226bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1227{
1228 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1229
1230 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1231
1232 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -07001233 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1234 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001235
1236 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -07001237 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1238 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001239
1240 attr->config = iocfc->cfg;
1241}
1242
1243bfa_status_t
1244bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
1245{
1246 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1247 struct bfi_iocfc_set_intr_req_s *m;
1248
1249 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
Jing Huangba816ea2010-10-18 17:10:50 -07001250 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
1251 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001252
1253 if (!bfa_iocfc_is_operational(bfa))
1254 return BFA_STATUS_OK;
1255
1256 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
1257 if (!m)
1258 return BFA_STATUS_DEVBUSY;
1259
1260 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001261 bfa_fn_lpu(bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001262 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
1263 m->delay = iocfc->cfginfo->intr_attr.delay;
1264 m->latency = iocfc->cfginfo->intr_attr.latency;
1265
1266 bfa_trc(bfa, attr->delay);
1267 bfa_trc(bfa, attr->latency);
1268
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001269 bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001270 return BFA_STATUS_OK;
1271}
1272
1273void
Krishna Gudipati45070252011-06-24 20:24:29 -07001274bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001275{
1276 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1277
1278 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
Krishna Gudipati45070252011-06-24 20:24:29 -07001279 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001280}
Jing Huang5fbe25c2010-10-18 17:17:23 -07001281/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001282 * Enable IOC after it is disabled.
1283 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	/* Log the request, then kick the IOC enable state machine */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}
1292
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	/* Log the request and mark a DISABLE in progress */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* Halt request-queue processing before disabling the IOC */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1303
1304
1305bfa_boolean_t
1306bfa_iocfc_is_operational(struct bfa_s *bfa)
1307{
1308 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1309}
1310
Jing Huang5fbe25c2010-10-18 17:17:23 -07001311/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001312 * Return boot target port wwns -- read from boot information in flash.
1313 */
1314void
1315bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1316{
1317 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1318 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1319 int i;
1320
1321 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1322 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1323 *nwwns = cfgrsp->pbc_cfg.nbluns;
1324 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1325 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1326
1327 return;
1328 }
1329
1330 *nwwns = cfgrsp->bootwwns.nwwns;
1331 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1332}
1333
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001334int
1335bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1336{
1337 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1338 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1339
1340 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1341 return cfgrsp->pbc_cfg.nvports;
1342}
1343
Jing Huang7725ccf2009-09-23 17:46:15 -07001344
Jing Huang5fbe25c2010-10-18 17:17:23 -07001345/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001346 * Use this function query the memory requirement of the BFA library.
1347 * This function needs to be called before bfa_attach() to get the
1348 * memory required of the BFA layer for a given driver configuration.
1349 *
1350 * This call will fail, if the cap is out of range compared to pre-defined
1351 * values within the BFA library
1352 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001353 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
1354 * its configuration in this structure.
Jing Huang7725ccf2009-09-23 17:46:15 -07001355 * The default values for struct bfa_iocfc_cfg_s can be
1356 * fetched using bfa_cfg_get_default() API.
1357 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001358 * If cap's boundary check fails, the library will use
Jing Huang7725ccf2009-09-23 17:46:15 -07001359 * the default bfa_cap_t values (and log a warning msg).
1360 *
1361 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001362 * indicates the memory type (see bfa_mem_type_t) and
Jing Huang7725ccf2009-09-23 17:46:15 -07001363 * amount of memory required.
1364 *
1365 * Driver should allocate the memory, populate the
1366 * starting address for each block and provide the same
1367 * structure as input parameter to bfa_attach() call.
1368 *
Krishna Gudipati45070252011-06-24 20:24:29 -07001369 * @param[in] bfa - pointer to the bfa structure, used while fetching the
1370 * dma, kva memory information of the bfa sub-modules.
1371 *
Jing Huang7725ccf2009-09-23 17:46:15 -07001372 * @return void
1373 *
1374 * Special Considerations: @note
1375 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
	struct bfa_mem_dma_s *sfp_dma = BFA_MEM_SFP_DMA(bfa);
	struct bfa_mem_dma_s *flash_dma = BFA_MEM_FLASH_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	/* Collect IOCFC's own requirements first */
	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	/* ...then each HAL sub-module's requirements */
	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup for the common (non-HAL) sub-modules */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
	bfa_mem_dma_setup(meminfo, sfp_dma, bfa_sfp_meminfo());
	bfa_mem_dma_setup(meminfo, flash_dma,
		bfa_flash_meminfo(cfg->drvcfg.min_cfg));
}
1408
Jing Huang5fbe25c2010-10-18 17:17:23 -07001409/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001410 * Use this function to do attach the driver instance with the BFA
1411 * library. This function will not trigger any HW initialization
1412 * process (which will be done in bfa_init() call)
1413 *
1414 * This call will fail, if the cap is out of range compared to
1415 * pre-defined values within the BFA library
1416 *
1417 * @param[out] bfa Pointer to bfa_t.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001418 * @param[in] bfad Opaque handle back to the driver's IOC structure
Jing Huang7725ccf2009-09-23 17:46:15 -07001419 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001420 * that was used in bfa_cfg_get_meminfo().
1421 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
1422 * use the bfa_cfg_get_meminfo() call to
1423 * find the memory blocks required, allocate the
1424 * required memory and provide the starting addresses.
1425 * @param[in] pcidev pointer to struct bfa_pcidev_s
Jing Huang7725ccf2009-09-23 17:46:15 -07001426 *
1427 * @return
1428 * void
1429 *
1430 * Special Considerations:
1431 *
1432 * @note
1433 *
1434 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	/* Reset every DMA element's allocation cursors to its base */
	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	/* Reset every KVA element's allocation cursor to its base */
	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	/* Attach the IOCFC sub-module, then every HAL sub-module */
	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	/* Attach the common (non-HAL) sub-modules */
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
	bfa_com_sfp_attach(bfa);
	bfa_com_flash_attach(bfa, cfg->drvcfg.min_cfg);
}
1478
Jing Huang5fbe25c2010-10-18 17:17:23 -07001479/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001480 * Use this function to delete a BFA IOC. IOC should be stopped (by
1481 * calling bfa_stop()) before this function call.
1482 *
1483 * @param[in] bfa - pointer to bfa_t.
1484 *
1485 * @return
1486 * void
1487 *
1488 * Special Considerations:
1489 *
1490 * @note
1491 */
1492void
1493bfa_detach(struct bfa_s *bfa)
1494{
1495 int i;
1496
1497 for (i = 0; hal_mods[i]; i++)
1498 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001499 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001500}
1501
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	/* Move all pending completions from bfa->comp_q onto comp_q,
	 * leaving bfa->comp_q empty. */
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1508
1509void
1510bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1511{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001512 struct list_head *qe;
1513 struct list_head *qen;
1514 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001515
1516 list_for_each_safe(qe, qen, comp_q) {
1517 hcb_qe = (struct bfa_cb_qe_s *) qe;
1518 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1519 }
1520}
1521
1522void
1523bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1524{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001525 struct list_head *qe;
1526 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001527
1528 while (!list_empty(comp_q)) {
1529 bfa_q_deq(comp_q, &qe);
1530 hcb_qe = (struct bfa_cb_qe_s *) qe;
1531 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1532 }
1533}
1534
Jing Huang7725ccf2009-09-23 17:46:15 -07001535
Jing Huang5fbe25c2010-10-18 17:17:23 -07001536/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001537 * Return the list of PCI vendor/device id lists supported by this
1538 * BFA instance.
1539 */
1540void
1541bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1542{
1543 static struct bfa_pciid_s __pciids[] = {
1544 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1545 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1546 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
Jing Huang293f82d2010-07-08 19:45:20 -07001547 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
Jing Huang7725ccf2009-09-23 17:46:15 -07001548 };
1549
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001550 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001551 *pciids = __pciids;
1552}
1553
Jing Huang5fbe25c2010-10-18 17:17:23 -07001554/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001555 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
1556 * into BFA layer). The OS driver can then turn back and overwrite entries that
1557 * have been configured by the user.
1558 *
1559 * @param[in] cfg - pointer to bfa_ioc_cfg_t
1560 *
1561 * @return
1562 * void
1563 *
1564 * Special Considerations:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001565 * note
Jing Huang7725ccf2009-09-23 17:46:15 -07001566 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware-side resource defaults */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	/* no firmware-target IO requests by default */
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side defaults */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1590
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	/* Start from the defaults, then clamp resources to their minimums */
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	/* no firmware-target IO requests in min config either */
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	/* mark this as a minimum-resource configuration */
	cfg->drvcfg.min_cfg = BFA_TRUE;
}