blob: ec987248471143cfba15dc84d159904eefa6056a [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
/*
 * BFA module list terminated by NULL
 *
 * Each submodule is started/iocdisabled in array order by
 * bfa_iocfc_start_submod()/bfa_iocfc_disable_submod(), so the
 * ordering here is significant.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
/*
 * Message handlers for various modules.
 *
 * Indexed by the BFI message class of an incoming response-queue
 * message (m->mhdr.msg_class); see bfa_isr_rspq().  Classes with no
 * handler route to bfa_isr_unhandled(), which traces and WARNs.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
/*
 * Message handlers for mailbox command classes
 *
 * Indexed by BFI message class like bfa_isrs[], but for messages that
 * arrive over the IOC mailbox (bfa_ioc_mbox_isr()) rather than the
 * response queues.  NULL means the class is not handled here.
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
91static void
Krishna Gudipati45070252011-06-24 20:24:29 -070092bfa_com_port_attach(struct bfa_s *bfa)
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080093{
94 struct bfa_port_s *port = &bfa->modules.port;
Krishna Gudipati45070252011-06-24 20:24:29 -070095 struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080096
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080097 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
Krishna Gudipati45070252011-06-24 20:24:29 -070098 bfa_port_mem_claim(port, port_dma->kva_curp, port_dma->dma_curp);
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080099}
100
101/*
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700102 * ablk module attach
103 */
104static void
Krishna Gudipati45070252011-06-24 20:24:29 -0700105bfa_com_ablk_attach(struct bfa_s *bfa)
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700106{
107 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
Krishna Gudipati45070252011-06-24 20:24:29 -0700108 struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700109
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700110 bfa_ablk_attach(ablk, &bfa->ioc);
Krishna Gudipati45070252011-06-24 20:24:29 -0700111 bfa_ablk_memclaim(ablk, ablk_dma->kva_curp, ablk_dma->dma_curp);
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700112}
113
Krishna Gudipati148d6102011-06-24 20:25:36 -0700114static void
115bfa_com_cee_attach(struct bfa_s *bfa)
116{
117 struct bfa_cee_s *cee = &bfa->modules.cee;
118 struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);
119
120 cee->trcmod = bfa->trcmod;
121 bfa_cee_attach(cee, &bfa->ioc, bfa);
122 bfa_cee_mem_claim(cee, cee_dma->kva_curp, cee_dma->dma_curp);
123}
124
/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/*
 * Pending IOCFC action, tracked in iocfc->action.
 */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
	BFA_IOCFC_ACT_ENABLE	= 4,
};

/*
 * Default driver configuration sizing values.
 */
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
167
Jing Huang5fbe25c2010-10-18 17:17:23 -0700168/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700169 * BFA Interrupt handling functions
170 */
171static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700172bfa_reqq_resume(struct bfa_s *bfa, int qid)
173{
174 struct list_head *waitq, *qe, *qen;
175 struct bfa_reqq_wait_s *wqe;
176
177 waitq = bfa_reqq(bfa, qid);
178 list_for_each_safe(qe, qen, waitq) {
Jing Huang5fbe25c2010-10-18 17:17:23 -0700179 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700180 * Callback only as long as there is room in request queue
181 */
182 if (bfa_reqq_full(bfa, qid))
183 break;
184
185 list_del(qe);
186 wqe = (struct bfa_reqq_wait_s *) qe;
187 wqe->qresume(wqe->cbarg);
188 }
189}
190
/*
 * Drain response queue @qid: ack the interrupt, dispatch every message
 * between the consumer (CI) and producer (PI) indices through bfa_isrs[],
 * publish the new CI to the hardware, then resume any requests waiting
 * for space on the matching request queue.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	/* Ack first so new completions re-raise the interrupt. */
	bfa_isr_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		/* Out-of-range class would index past bfa_isrs[]. */
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
225
/*
 * Request queue interrupt for queue @qid: ack it, then kick any
 * requests that were waiting for queue space.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *wait_list;

	bfa_isr_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	wait_list = bfa_reqq(bfa, qid);
	if (!list_empty(wait_list))
		bfa_reqq_resume(bfa, qid);
}
240
/*
 * Single-vector MSIX handler: service every interrupt source visible in
 * the interrupt status register — RME (response) queues first, then CPE
 * (request) queues, and finally any remaining LPU/error bits.
 * Queue processing is skipped unless bfa->queue_process is set.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	u32	intr, qintr;
	int	queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return;

	/* Whatever is left is an LPU mailbox / error condition. */
	bfa_msix_lpu_err(bfa, intr);
}
278
/*
 * Legacy (INTx) interrupt handler.
 *
 * Returns BFA_FALSE when the status register shows no pending source
 * (interrupt not ours), BFA_TRUE otherwise.  Queue interrupt bits are
 * acknowledged by writing them back to the status register before the
 * queues are processed.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	qintr = intr & (__HFN_INT_RME_MASK | __HFN_INT_CPE_MASK);
	if (qintr)
		writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_rspq(bfa, queue);
	}

	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	if (qintr && bfa->queue_process) {
		for (queue = 0; queue < BFI_IOC_MAX_CQS; queue++)
			bfa_isr_reqq(bfa, queue);
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* Remaining bits are LPU mailbox / error conditions. */
	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
322
/*
 * Enable host interrupts: install the control MSIX vector, build the
 * unmask bit-set for this PCI function (CT2 vs. older ASIC layouts
 * differ), clear any stale status, program the mask register with the
 * complement of the unmask set, and cache that mask value.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* Write-1-to-clear pending status, then unmask the chosen bits. */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
347
/*
 * Disable host interrupts: revert ISR mode, mask every interrupt bit
 * and tear down the installed MSIX vectors.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
355
/*
 * MSIX handler for a request (CPE) queue vector; translates the vector
 * number into a queue id relative to the first CPE vector.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
361
/*
 * Catch-all handler for message classes with no registered ISR
 * (see bfa_isrs[]): trace the offending message header, WARN, and
 * freeze the trace buffer for post-mortem analysis.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
371
/*
 * MSIX handler for a response (RME) queue vector; translates the vector
 * number into a queue id relative to the first RME vector.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
377
/*
 * Handler for the LPU mailbox / error interrupt vector.
 *
 * Decodes the status bits (CT2 vs. older ASIC layouts differ), forwards
 * mailbox interrupts to the IOC, and on error clears the auxiliary halt
 * and PSS registers before acknowledging the status and notifying the
 * IOC error path.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrups are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
430
Jing Huang5fbe25c2010-10-18 17:17:23 -0700431/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700432 * BFA IOC FC related functions
433 */
434
Jing Huang5fbe25c2010-10-18 17:17:23 -0700435/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800436 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700437 */
438
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 *
 * Builds the shared configuration page (cfg_info) with the DMA
 * addresses of all request/response queues and their shadow pointers,
 * then posts a mailbox request pointing the firmware at that page.
 * Called with @bfa_arg being the struct bfa_s being configured.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->single_msix_vec = 0;
	if (bfa->msix.nvecs == 1)
		cfg_info->single_msix_vec = 1;
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	/* Multi-byte counts go to the firmware in big-endian order. */
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* Cleared here; set again by bfa_iocfc_cfgrsp() on fw response. */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_fn_lpu(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
507
/*
 * First-stage IOCFC initialization: record driver handles, copy the
 * caller's configuration, and select the chip-specific hardware
 * interface (hwif) function table — Catapult (CT/CT2) vs. Crossbow —
 * before running the chosen register initializer.
 * Note: @pcidev is currently unused here.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	/* Struct copy of the caller-supplied configuration. */
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		/* Crossbow vectors are per-PCI-function blocks. */
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* CT2 overrides a subset of the CT handlers chosen above. */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
		iocfc->hwif.hw_rspq_ack = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
562
/*
 * Carve up the pre-allocated memory segments: give the IOC its DMA
 * block, assign DMA memory to each request/response queue, then walk
 * the IOCFC DMA segment handing out shadow CI/PI slots, the config-info
 * page and the config-response page.  Finally claim kva space for the
 * firmware debug trace when auto-recovery is enabled.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg)
{
	u8	*dm_kva = NULL;
	u64	dm_pa = 0;
	int	i, per_reqq_sz, per_rspq_sz, dbgsz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_dma_s *reqq_dma, *rspq_dma;

	/* First allocate dma memory for IOC */
	bfa_ioc_mem_claim(&bfa->ioc, bfa_mem_dma_virt(ioc_dma),
			bfa_mem_dma_phys(ioc_dma));

	/* Claim DMA-able memory for the request/response queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		reqq_dma = BFA_MEM_REQQ_DMA(bfa, i);
		iocfc->req_cq_ba[i].kva = bfa_mem_dma_virt(reqq_dma);
		iocfc->req_cq_ba[i].pa = bfa_mem_dma_phys(reqq_dma);
		memset(iocfc->req_cq_ba[i].kva, 0, per_reqq_sz);

		rspq_dma = BFA_MEM_RSPQ_DMA(bfa, i);
		iocfc->rsp_cq_ba[i].kva = bfa_mem_dma_virt(rspq_dma);
		iocfc->rsp_cq_ba[i].pa = bfa_mem_dma_phys(rspq_dma);
		memset(iocfc->rsp_cq_ba[i].kva, 0, per_rspq_sz);
	}

	/* Claim IOCFC dma memory - for shadow CI/PI */
	dm_kva = bfa_mem_dma_virt(iocfc_dma);
	dm_pa = bfa_mem_dma_phys(iocfc_dma);

	/* One cacheline per shadow pointer to avoid false sharing. */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/* Claim IOCFC dma memory - for the config info page */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/* Claim IOCFC dma memory - for the config response */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* Claim IOCFC kva memory */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_mem_kva_curp(iocfc));
		bfa_mem_kva_curp(iocfc) += dbgsz;
	}
}
635
Jing Huang5fbe25c2010-10-18 17:17:23 -0700636/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700637 * Start BFA submodules.
638 */
639static void
640bfa_iocfc_start_submod(struct bfa_s *bfa)
641{
642 int i;
643
Krishna Gudipati775c7742011-06-13 15:52:12 -0700644 bfa->queue_process = BFA_TRUE;
Krishna Gudipati11189202011-06-13 15:50:35 -0700645 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700646 bfa_isr_rspq_ack(bfa, i);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700647
648 for (i = 0; hal_mods[i]; i++)
649 hal_mods[i]->start(bfa);
650}
651
Jing Huang5fbe25c2010-10-18 17:17:23 -0700652/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700653 * Disable BFA submodules.
654 */
655static void
656bfa_iocfc_disable_submod(struct bfa_s *bfa)
657{
658 int i;
659
660 for (i = 0; hal_mods[i]; i++)
661 hal_mods[i]->iocdisable(bfa);
662}
663
664static void
665bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
666{
667 struct bfa_s *bfa = bfa_arg;
668
669 if (complete) {
670 if (bfa->iocfc.cfgdone)
671 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
672 else
673 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
674 } else {
675 if (bfa->iocfc.cfgdone)
676 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
677 }
678}
679
680static void
681bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
682{
683 struct bfa_s *bfa = bfa_arg;
684 struct bfad_s *bfad = bfa->bfad;
685
686 if (compl)
687 complete(&bfad->comp);
688 else
689 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
690}
691
692static void
Krishna Gudipati60138062011-06-24 20:25:15 -0700693bfa_iocfc_enable_cb(void *bfa_arg, bfa_boolean_t compl)
694{
695 struct bfa_s *bfa = bfa_arg;
696 struct bfad_s *bfad = bfa->bfad;
697
698 if (compl)
699 complete(&bfad->enable_comp);
700}
701
702static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700703bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
704{
705 struct bfa_s *bfa = bfa_arg;
706 struct bfad_s *bfad = bfa->bfad;
707
708 if (compl)
709 complete(&bfad->disable_comp);
710}
711
/**
 * configure queue registers from firmware response
 *
 * For every queue, record the hardware queue id and convert the
 * big-endian register offsets supplied by firmware in @qreg into
 * absolute ioremapped addresses off BAR0.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s	*r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		bfa->iocfc.hw_qid[i] = qreg->hw_qid[i];
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
732
/*
 * Re-size the driver's resource pools (FCXP, UF, rport, FCP I/O and
 * task-management requests) to the counts the firmware reported in its
 * configuration response.
 */
static void
bfa_iocfc_res_recfg(struct bfa_s *bfa, struct bfa_iocfc_fwcfg_s *fwcfg)
{
	bfa_fcxp_res_recfg(bfa, fwcfg->num_fcxp_reqs);
	bfa_uf_res_recfg(bfa, fwcfg->num_uf_bufs);
	bfa_rport_res_recfg(bfa, fwcfg->num_rports);
	bfa_fcp_res_recfg(bfa, fwcfg->num_ioim_reqs);
	bfa_tskim_res_recfg(bfa, fwcfg->num_tskim_reqs);
}
742
Jing Huang5fbe25c2010-10-18 17:17:23 -0700743/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700744 * Update BFA configuration from firmware configuration.
745 */
746static void
747bfa_iocfc_cfgrsp(struct bfa_s *bfa)
748{
749 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
750 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
751 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
752
753 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700754 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700755 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
Jing Huangba816ea2010-10-18 17:10:50 -0700756 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
757 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
758 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
759 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700760
761 iocfc->cfgdone = BFA_TRUE;
762
Jing Huang5fbe25c2010-10-18 17:17:23 -0700763 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700764 * configure queue register offsets as learnt from firmware
765 */
766 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
767
768 /*
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700769 * Re-configure resources as learnt from Firmware
770 */
771 bfa_iocfc_res_recfg(bfa, fwcfg);
772
773 /*
Krishna Gudipati775c7742011-06-13 15:52:12 -0700774 * Install MSIX queue handlers
775 */
776 bfa_msix_queue_install(bfa);
777
778 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700779 * Configuration is complete - initialize/start submodules
780 */
781 bfa_fcport_init(bfa);
782
783 if (iocfc->action == BFA_IOCFC_ACT_INIT)
784 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -0700785 else {
786 if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
787 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
788 bfa_iocfc_enable_cb, bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700789 bfa_iocfc_start_submod(bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -0700790 }
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700791}
792void
793bfa_iocfc_reset_queues(struct bfa_s *bfa)
794{
795 int q;
796
797 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
798 bfa_reqq_ci(bfa, q) = 0;
799 bfa_reqq_pi(bfa, q) = 0;
800 bfa_rspq_ci(bfa, q) = 0;
801 bfa_rspq_pi(bfa, q) = 0;
802 }
803}
804
Krishna Gudipatia7141342011-06-24 20:23:19 -0700805/* Fabric Assigned Address specific functions */
806
807/*
808 * Check whether IOC is ready before sending command down
809 */
810static bfa_status_t
811bfa_faa_validate_request(struct bfa_s *bfa)
812{
813 enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
814 u32 card_type = bfa->ioc.attr->card_type;
815
816 if (bfa_ioc_is_operational(&bfa->ioc)) {
817 if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
818 return BFA_STATUS_FEATURE_NOT_SUPPORTED;
819 } else {
820 if (!bfa_ioc_is_acq_addr(&bfa->ioc))
821 return BFA_STATUS_IOC_NON_OP;
822 }
823
824 return BFA_STATUS_OK;
825}
826
827bfa_status_t
828bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
829{
830 struct bfi_faa_en_dis_s faa_enable_req;
831 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
832 bfa_status_t status;
833
834 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
835 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
836
837 status = bfa_faa_validate_request(bfa);
838 if (status != BFA_STATUS_OK)
839 return status;
840
841 if (iocfc->faa_args.busy == BFA_TRUE)
842 return BFA_STATUS_DEVBUSY;
843
844 if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
845 return BFA_STATUS_FAA_ENABLED;
846
847 if (bfa_fcport_is_trunk_enabled(bfa))
848 return BFA_STATUS_ERROR_TRUNK_ENABLED;
849
850 bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
851 iocfc->faa_args.busy = BFA_TRUE;
852
853 memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
854 bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700855 BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700856
857 bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
858 sizeof(struct bfi_faa_en_dis_s));
859
860 return BFA_STATUS_OK;
861}
862
863bfa_status_t
864bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
865 void *cbarg)
866{
867 struct bfi_faa_en_dis_s faa_disable_req;
868 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
869 bfa_status_t status;
870
871 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
872 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
873
874 status = bfa_faa_validate_request(bfa);
875 if (status != BFA_STATUS_OK)
876 return status;
877
878 if (iocfc->faa_args.busy == BFA_TRUE)
879 return BFA_STATUS_DEVBUSY;
880
881 if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
882 return BFA_STATUS_FAA_DISABLED;
883
884 bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
885 iocfc->faa_args.busy = BFA_TRUE;
886
887 memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
888 bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700889 BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700890
891 bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
892 sizeof(struct bfi_faa_en_dis_s));
893
894 return BFA_STATUS_OK;
895}
896
897bfa_status_t
898bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
899 bfa_cb_iocfc_t cbfn, void *cbarg)
900{
901 struct bfi_faa_query_s faa_attr_req;
902 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
903 bfa_status_t status;
904
905 iocfc->faa_args.faa_attr = attr;
906 iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
907 iocfc->faa_args.faa_cb.faa_cbarg = cbarg;
908
909 status = bfa_faa_validate_request(bfa);
910 if (status != BFA_STATUS_OK)
911 return status;
912
913 if (iocfc->faa_args.busy == BFA_TRUE)
914 return BFA_STATUS_DEVBUSY;
915
916 iocfc->faa_args.busy = BFA_TRUE;
917 memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
918 bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
Krishna Gudipati3fd45982011-06-24 20:24:08 -0700919 BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_fn_lpu(bfa));
Krishna Gudipatia7141342011-06-24 20:23:19 -0700920
921 bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
922 sizeof(struct bfi_faa_query_s));
923
924 return BFA_STATUS_OK;
925}
926
927/*
928 * FAA enable response
929 */
930static void
931bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
932 struct bfi_faa_en_dis_rsp_s *rsp)
933{
934 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
935 bfa_status_t status = rsp->status;
936
937 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
938
939 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
940 iocfc->faa_args.busy = BFA_FALSE;
941}
942
943/*
944 * FAA disable response
945 */
946static void
947bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
948 struct bfi_faa_en_dis_rsp_s *rsp)
949{
950 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
951 bfa_status_t status = rsp->status;
952
953 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
954
955 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
956 iocfc->faa_args.busy = BFA_FALSE;
957}
958
959/*
960 * FAA query response
961 */
962static void
963bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
964 bfi_faa_query_rsp_t *rsp)
965{
966 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
967
968 if (iocfc->faa_args.faa_attr) {
969 iocfc->faa_args.faa_attr->faa = rsp->faa;
970 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
971 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
972 }
973
974 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
975
976 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
977 iocfc->faa_args.busy = BFA_FALSE;
978}
979
Jing Huang5fbe25c2010-10-18 17:17:23 -0700980/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700981 * IOC enable request is complete
982 */
983static void
984bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
985{
986 struct bfa_s *bfa = bfa_arg;
987
Krishna Gudipatia7141342011-06-24 20:23:19 -0700988 if (status == BFA_STATUS_FAA_ACQ_ADDR) {
989 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
990 bfa_iocfc_init_cb, bfa);
991 return;
992 }
993
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700994 if (status != BFA_STATUS_OK) {
995 bfa_isr_disable(bfa);
996 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
997 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
998 bfa_iocfc_init_cb, bfa);
Krishna Gudipati60138062011-06-24 20:25:15 -0700999 else if (bfa->iocfc.action == BFA_IOCFC_ACT_ENABLE)
1000 bfa_cb_queue(bfa, &bfa->iocfc.en_hcb_qe,
1001 bfa_iocfc_enable_cb, bfa);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001002 return;
1003 }
1004
1005 bfa_iocfc_send_cfg(bfa);
1006}
1007
Jing Huang5fbe25c2010-10-18 17:17:23 -07001008/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001009 * IOC disable request is complete
1010 */
1011static void
1012bfa_iocfc_disable_cbfn(void *bfa_arg)
1013{
1014 struct bfa_s *bfa = bfa_arg;
1015
1016 bfa_isr_disable(bfa);
1017 bfa_iocfc_disable_submod(bfa);
1018
1019 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
1020 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
1021 bfa);
1022 else {
Jing Huangd4b671c2010-12-26 21:46:35 -08001023 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001024 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
1025 bfa);
1026 }
1027}
1028
Jing Huang5fbe25c2010-10-18 17:17:23 -07001029/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001030 * Notify sub-modules of hardware failure.
1031 */
1032static void
1033bfa_iocfc_hbfail_cbfn(void *bfa_arg)
1034{
1035 struct bfa_s *bfa = bfa_arg;
1036
Krishna Gudipati775c7742011-06-13 15:52:12 -07001037 bfa->queue_process = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001038
1039 bfa_isr_disable(bfa);
1040 bfa_iocfc_disable_submod(bfa);
1041
1042 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
1043 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
1044 bfa);
1045}
1046
Jing Huang5fbe25c2010-10-18 17:17:23 -07001047/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001048 * Actions on chip-reset completion.
1049 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Chip reset done: clear all queue indices, then re-arm ISRs. */
	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
1058
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001059
Jing Huang5fbe25c2010-10-18 17:17:23 -07001060/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001061 * Query IOC memory requirement information.
1062 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	/* Registers the DMA and KVA memory requirements of the IOC and
	 * IOCFC with the meminfo queues; the driver allocates these
	 * blocks before bfa_attach(). */
	int q, per_reqq_sz, per_rspq_sz;
	struct bfa_mem_dma_s *ioc_dma = BFA_MEM_IOC_DMA(bfa);
	struct bfa_mem_dma_s *iocfc_dma = BFA_MEM_IOCFC_DMA(bfa);
	struct bfa_mem_kva_s *iocfc_kva = BFA_MEM_IOCFC_KVA(bfa);
	u32 dm_len = 0;

	/* dma memory setup for IOC */
	bfa_mem_dma_setup(meminfo, ioc_dma,
		BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ));

	/* dma memory setup for REQ/RSP queues */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				BFA_DMA_ALIGN_SZ);

	/* One DMA block per request queue and per response queue. */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++) {
		bfa_mem_dma_setup(meminfo, BFA_MEM_REQQ_DMA(bfa, q),
				per_reqq_sz);
		bfa_mem_dma_setup(meminfo, BFA_MEM_RSPQ_DMA(bfa, q),
				per_rspq_sz);
	}

	/* IOCFC dma memory - calculate Shadow CI/PI size */
	for (q = 0; q < cfg->fwcfg.num_cqs; q++)
		dm_len += (2 * BFA_CACHELINE_SZ);

	/* IOCFC dma memory - calculate config info / rsp size */
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_len += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			BFA_CACHELINE_SZ);

	/* dma memory setup for IOCFC */
	bfa_mem_dma_setup(meminfo, iocfc_dma, dm_len);

	/* kva memory setup for IOCFC (firmware trace buffer only when
	 * auto-recovery is enabled) */
	bfa_mem_kva_setup(meminfo, iocfc_kva,
		((bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0));
}
1106
/*
 * Attach and initialize the IOCFC module: register IOC callbacks,
 * attach the IOC, and claim the memory blocks for the sub-modules.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* Register the IOCFC completion handlers with the IOC before
	 * attaching it, so no event can be missed. */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	/* Share the trace module with the IOC. */
	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	/* Set up and claim the IOCFC memory blocks sized earlier by
	 * bfa_iocfc_meminfo(). */
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	/* Completion queue and per-queue wait lists start empty. */
	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
1136
/*
 * Kick off IOC enable; BFA initialization continues from the IOC
 * enable completion callback (bfa_iocfc_enable_cbfn).
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	/* Record that a full init (not a plain re-enable) is in flight;
	 * bfa_iocfc_enable_cbfn/bfa_iocfc_cfgrsp check this action. */
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
1146
Jing Huang5fbe25c2010-10-18 17:17:23 -07001147/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001148 * IOC start called from bfa_start(). Called to start IOC operations
1149 * at driver instantiation for this instance.
1150 */
1151void
1152bfa_iocfc_start(struct bfa_s *bfa)
1153{
1154 if (bfa->iocfc.cfgdone)
1155 bfa_iocfc_start_submod(bfa);
1156}
1157
Jing Huang5fbe25c2010-10-18 17:17:23 -07001158/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001159 * IOC stop called from bfa_stop(). Called only when driver is unloaded
1160 * for this instance.
1161 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	/* Halt request-queue processing before disabling the IOC;
	 * completion is signalled via bfa_iocfc_disable_cbfn. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1170
1171void
1172bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
1173{
1174 struct bfa_s *bfa = bfaarg;
1175 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1176 union bfi_iocfc_i2h_msg_u *msg;
1177
1178 msg = (union bfi_iocfc_i2h_msg_u *) m;
1179 bfa_trc(bfa, msg->mh.msg_id);
1180
1181 switch (msg->mh.msg_id) {
1182 case BFI_IOCFC_I2H_CFG_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001183 bfa_iocfc_cfgrsp(bfa);
1184 break;
1185 case BFI_IOCFC_I2H_UPDATEQ_RSP:
1186 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
1187 break;
Krishna Gudipatia7141342011-06-24 20:23:19 -07001188 case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
1189 bfa_faa_enable_reply(iocfc,
1190 (struct bfi_faa_en_dis_rsp_s *)msg);
1191 break;
1192 case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
1193 bfa_faa_disable_reply(iocfc,
1194 (struct bfi_faa_en_dis_rsp_s *)msg);
1195 break;
1196 case BFI_IOCFC_I2H_FAA_QUERY_RSP:
1197 bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
1198 break;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001199 default:
Jing Huangd4b671c2010-12-26 21:46:35 -08001200 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001201 }
1202}
1203
1204void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001205bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1206{
1207 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1208
1209 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1210
1211 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -07001212 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1213 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001214
1215 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -07001216 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1217 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001218
1219 attr->config = iocfc->cfg;
1220}
1221
1222bfa_status_t
1223bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
1224{
1225 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1226 struct bfi_iocfc_set_intr_req_s *m;
1227
1228 iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
Jing Huangba816ea2010-10-18 17:10:50 -07001229 iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
1230 iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001231
1232 if (!bfa_iocfc_is_operational(bfa))
1233 return BFA_STATUS_OK;
1234
1235 m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
1236 if (!m)
1237 return BFA_STATUS_DEVBUSY;
1238
1239 bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001240 bfa_fn_lpu(bfa));
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001241 m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
1242 m->delay = iocfc->cfginfo->intr_attr.delay;
1243 m->latency = iocfc->cfginfo->intr_attr.latency;
1244
1245 bfa_trc(bfa, attr->delay);
1246 bfa_trc(bfa, attr->latency);
1247
Krishna Gudipati3fd45982011-06-24 20:24:08 -07001248 bfa_reqq_produce(bfa, BFA_REQQ_IOC, m->mh);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001249 return BFA_STATUS_OK;
1250}
1251
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, int seg_no, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	/* Sense buffer length is programmed as len - 1 — presumably a
	 * firmware convention; TODO confirm against the BFI spec. */
	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	/* Record the segment's sense-buffer DMA base address. */
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase[seg_no], snsbase_pa);
}
Jing Huang5fbe25c2010-10-18 17:17:23 -07001260/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001261 * Enable IOC after it is disabled.
1262 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	/* Log the enable event before kicking the IOC. */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	/* Distinguish a plain re-enable from a full init; checked in the
	 * enable-completion paths (bfa_iocfc_enable_cbfn/cfgrsp). */
	bfa->iocfc.action = BFA_IOCFC_ACT_ENABLE;
	bfa_ioc_enable(&bfa->ioc);
}
1271
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	/* Log the disable event before taking the IOC down. */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* Halt request-queue processing before disabling the IOC. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1282
1283
1284bfa_boolean_t
1285bfa_iocfc_is_operational(struct bfa_s *bfa)
1286{
1287 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1288}
1289
Jing Huang5fbe25c2010-10-18 17:17:23 -07001290/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001291 * Return boot target port wwns -- read from boot information in flash.
1292 */
1293void
1294bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1295{
1296 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1297 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1298 int i;
1299
1300 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1301 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1302 *nwwns = cfgrsp->pbc_cfg.nbluns;
1303 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1304 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1305
1306 return;
1307 }
1308
1309 *nwwns = cfgrsp->bootwwns.nwwns;
1310 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1311}
1312
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	/* Copy the entire pre-boot vport table; the caller's buffer must
	 * hold sizeof(cfgrsp->pbc_cfg.vport) bytes. Returns the number
	 * of valid entries. */
	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
1322
Jing Huang7725ccf2009-09-23 17:46:15 -07001323
Jing Huang5fbe25c2010-10-18 17:17:23 -07001324/*
 * Use this function to query the memory requirement of the BFA library.
1326 * This function needs to be called before bfa_attach() to get the
1327 * memory required of the BFA layer for a given driver configuration.
1328 *
1329 * This call will fail, if the cap is out of range compared to pre-defined
1330 * values within the BFA library
1331 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001332 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
1333 * its configuration in this structure.
Jing Huang7725ccf2009-09-23 17:46:15 -07001334 * The default values for struct bfa_iocfc_cfg_s can be
1335 * fetched using bfa_cfg_get_default() API.
1336 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001337 * If cap's boundary check fails, the library will use
Jing Huang7725ccf2009-09-23 17:46:15 -07001338 * the default bfa_cap_t values (and log a warning msg).
1339 *
1340 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001341 * indicates the memory type (see bfa_mem_type_t) and
Jing Huang7725ccf2009-09-23 17:46:15 -07001342 * amount of memory required.
1343 *
1344 * Driver should allocate the memory, populate the
1345 * starting address for each block and provide the same
1346 * structure as input parameter to bfa_attach() call.
1347 *
Krishna Gudipati45070252011-06-24 20:24:29 -07001348 * @param[in] bfa - pointer to the bfa structure, used while fetching the
1349 * dma, kva memory information of the bfa sub-modules.
1350 *
Jing Huang7725ccf2009-09-23 17:46:15 -07001351 * @return void
1352 *
1353 * Special Considerations: @note
1354 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo,
		struct bfa_s *bfa)
{
	int i;
	struct bfa_mem_dma_s *port_dma = BFA_MEM_PORT_DMA(bfa);
	struct bfa_mem_dma_s *ablk_dma = BFA_MEM_ABLK_DMA(bfa);
	struct bfa_mem_dma_s *cee_dma = BFA_MEM_CEE_DMA(bfa);

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));

	/* Initialize the DMA & KVA meminfo queues */
	INIT_LIST_HEAD(&meminfo->dma_info.qe);
	INIT_LIST_HEAD(&meminfo->kva_info.qe);

	/* IOC/IOCFC requirements first, then every registered hal module. */
	bfa_iocfc_meminfo(cfg, meminfo, bfa);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, meminfo, bfa);

	/* dma info setup for the port, ASIC-block and CEE common modules */
	bfa_mem_dma_setup(meminfo, port_dma, bfa_port_meminfo());
	bfa_mem_dma_setup(meminfo, ablk_dma, bfa_ablk_meminfo());
	bfa_mem_dma_setup(meminfo, cee_dma, bfa_cee_meminfo());
}
1382
Jing Huang5fbe25c2010-10-18 17:17:23 -07001383/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001384 * Use this function to do attach the driver instance with the BFA
1385 * library. This function will not trigger any HW initialization
1386 * process (which will be done in bfa_init() call)
1387 *
1388 * This call will fail, if the cap is out of range compared to
1389 * pre-defined values within the BFA library
1390 *
1391 * @param[out] bfa Pointer to bfa_t.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001392 * @param[in] bfad Opaque handle back to the driver's IOC structure
Jing Huang7725ccf2009-09-23 17:46:15 -07001393 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001394 * that was used in bfa_cfg_get_meminfo().
1395 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
1396 * use the bfa_cfg_get_meminfo() call to
1397 * find the memory blocks required, allocate the
1398 * required memory and provide the starting addresses.
1399 * @param[in] pcidev pointer to struct bfa_pcidev_s
Jing Huang7725ccf2009-09-23 17:46:15 -07001400 *
1401 * @return
1402 * void
1403 *
1404 * Special Considerations:
1405 *
1406 * @note
1407 *
1408 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_dma_s *dma_info, *dma_elem;
	struct bfa_mem_kva_s *kva_info, *kva_elem;
	struct list_head *dm_qe, *km_qe;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* Initialize memory pointers for iterative allocation:
	 * reset each element's current pointer to the start of the
	 * driver-allocated block before the sub-modules claim memory. */
	dma_info = &meminfo->dma_info;
	dma_info->kva_curp = dma_info->kva;
	dma_info->dma_curp = dma_info->dma;

	kva_info = &meminfo->kva_info;
	kva_info->kva_curp = kva_info->kva;

	list_for_each(dm_qe, &dma_info->qe) {
		dma_elem = (struct bfa_mem_dma_s *) dm_qe;
		dma_elem->kva_curp = dma_elem->kva;
		dma_elem->dma_curp = dma_elem->dma;
	}

	list_for_each(km_qe, &kva_info->qe) {
		kva_elem = (struct bfa_mem_kva_s *) km_qe;
		kva_elem->kva_curp = kva_elem->kva;
	}

	/* IOCFC first, then every registered hal module, in list order. */
	bfa_iocfc_attach(bfa, bfad, cfg, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, pcidev);

	/* Common (non-hal_mods) modules: port, ASIC block, CEE. */
	bfa_com_port_attach(bfa);
	bfa_com_ablk_attach(bfa);
	bfa_com_cee_attach(bfa);
}
1450
Jing Huang5fbe25c2010-10-18 17:17:23 -07001451/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001452 * Use this function to delete a BFA IOC. IOC should be stopped (by
1453 * calling bfa_stop()) before this function call.
1454 *
1455 * @param[in] bfa - pointer to bfa_t.
1456 *
1457 * @return
1458 * void
1459 *
1460 * Special Considerations:
1461 *
1462 * @note
1463 */
1464void
1465bfa_detach(struct bfa_s *bfa)
1466{
1467 int i;
1468
1469 for (i = 0; hal_mods[i]; i++)
1470 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001471 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001472}
1473
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	/* Move all pending completions from bfa->comp_q onto the
	 * caller-provided list, leaving bfa->comp_q empty. */
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1480
1481void
1482bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1483{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001484 struct list_head *qe;
1485 struct list_head *qen;
1486 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001487
1488 list_for_each_safe(qe, qen, comp_q) {
1489 hcb_qe = (struct bfa_cb_qe_s *) qe;
1490 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1491 }
1492}
1493
1494void
1495bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1496{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001497 struct list_head *qe;
1498 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001499
1500 while (!list_empty(comp_q)) {
1501 bfa_q_deq(comp_q, &qe);
1502 hcb_qe = (struct bfa_cb_qe_s *) qe;
1503 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1504 }
1505}
1506
Jing Huang7725ccf2009-09-23 17:46:15 -07001507
Jing Huang5fbe25c2010-10-18 17:17:23 -07001508/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001509 * Return the list of PCI vendor/device id lists supported by this
1510 * BFA instance.
1511 */
1512void
1513bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1514{
1515 static struct bfa_pciid_s __pciids[] = {
1516 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1517 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1518 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
Jing Huang293f82d2010-07-08 19:45:20 -07001519 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
Jing Huang7725ccf2009-09-23 17:46:15 -07001520 };
1521
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001522 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001523 *pciids = __pciids;
1524}
1525
Jing Huang5fbe25c2010-10-18 17:17:23 -07001526/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
1528 * into BFA layer). The OS driver can then turn back and overwrite entries that
1529 * have been configured by the user.
1530 *
1531 * @param[in] cfg - pointer to bfa_ioc_cfg_t
1532 *
1533 * @return
1534 * void
1535 *
1536 * Special Considerations:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001537 * note
Jing Huang7725ccf2009-09-23 17:46:15 -07001538 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware-side resource counts. */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	/* num_fwtio_reqs defaults to 0. */
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side defaults. */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1562
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	/* Start from the defaults, then clamp to the library minimums. */
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	/* Mark this as a minimal configuration. */
	cfg->drvcfg.min_cfg = BFA_TRUE;
}