/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */

#include "bfad_drv.h"
#include "bfa_modules.h"
#include "bfi_ctreg.h"

BFA_TRC_FILE(HAL, CORE);

/*
 * BFA module list terminated by NULL
 */
static struct bfa_module_s *hal_mods[] = {
        &hal_mod_sgpg,
        &hal_mod_fcport,
        &hal_mod_fcxp,
        &hal_mod_lps,
        &hal_mod_uf,
        &hal_mod_rport,
        &hal_mod_fcpim,
        NULL
};

/*
 * Message handlers for various modules.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
        bfa_isr_unhandled,      /* NONE */
        bfa_isr_unhandled,      /* BFI_MC_IOC */
        bfa_isr_unhandled,      /* BFI_MC_DIAG */
        bfa_isr_unhandled,      /* BFI_MC_FLASH */
        bfa_isr_unhandled,      /* BFI_MC_CEE */
        bfa_fcport_isr,         /* BFI_MC_FCPORT */
        bfa_isr_unhandled,      /* BFI_MC_IOCFC */
        bfa_isr_unhandled,      /* BFI_MC_LL */
        bfa_uf_isr,             /* BFI_MC_UF */
        bfa_fcxp_isr,           /* BFI_MC_FCXP */
        bfa_lps_isr,            /* BFI_MC_LPS */
        bfa_rport_isr,          /* BFI_MC_RPORT */
        bfa_itnim_isr,          /* BFI_MC_ITNIM */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_READ */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_WRITE */
        bfa_isr_unhandled,      /* BFI_MC_IOIM_IO */
        bfa_ioim_isr,           /* BFI_MC_IOIM */
        bfa_ioim_good_comp_isr, /* BFI_MC_IOIM_IOCOM */
        bfa_tskim_isr,          /* BFI_MC_TSKIM */
        bfa_isr_unhandled,      /* BFI_MC_SBOOT */
        bfa_isr_unhandled,      /* BFI_MC_IPFC */
        bfa_isr_unhandled,      /* BFI_MC_PORT */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
        bfa_isr_unhandled,      /* --------- */
};

/*
 * Message handlers for mailbox command classes
 */
static bfa_ioc_mbox_mcfunc_t bfa_mbox_isrs[BFI_MC_MAX] = {
        NULL,
        NULL,           /* BFI_MC_IOC */
        NULL,           /* BFI_MC_DIAG */
        NULL,           /* BFI_MC_FLASH */
        NULL,           /* BFI_MC_CEE */
        NULL,           /* BFI_MC_PORT */
        bfa_iocfc_isr,  /* BFI_MC_IOCFC */
        NULL,
};
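
/*
 * Attach the common port module and carve out its DMA memory from the
 * meminfo pool.
 */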
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
        struct bfa_port_s *port = &bfa->modules.port;
        u32 dm_len;
        u8 *dm_kva;
        u64 dm_pa;

        dm_len = bfa_port_meminfo();
        dm_kva = bfa_meminfo_dma_virt(mi);
        dm_pa  = bfa_meminfo_dma_phys(mi);

        memset(port, 0, sizeof(struct bfa_port_s));
        bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
        bfa_port_mem_claim(port, dm_kva, dm_pa);

        bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
        bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}

/*
 * BFA IOC FC related definitions
 */

/*
 * IOC local definitions
 */
#define BFA_IOCFC_TOV           5000    /* msecs */

enum {
        BFA_IOCFC_ACT_NONE      = 0,
        BFA_IOCFC_ACT_INIT      = 1,
        BFA_IOCFC_ACT_STOP      = 2,
        BFA_IOCFC_ACT_DISABLE   = 3,
};

#define DEF_CFG_NUM_FABRICS     1
#define DEF_CFG_NUM_LPORTS      256
#define DEF_CFG_NUM_CQS         4
#define DEF_CFG_NUM_IOIM_REQS   (BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS  128
#define DEF_CFG_NUM_FCXP_REQS   64
#define DEF_CFG_NUM_UF_BUFS     64
#define DEF_CFG_NUM_RPORTS      1024
#define DEF_CFG_NUM_ITNIMS      (DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS        256

#define DEF_CFG_NUM_SGPGS       2048
#define DEF_CFG_NUM_REQQ_ELEMS  256
#define DEF_CFG_NUM_RSPQ_ELEMS  64
#define DEF_CFG_NUM_SBOOT_TGTS  16
#define DEF_CFG_NUM_SBOOT_LUNS  16

/*
 * forward declaration for IOC FC functions
 */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;

/*
 * BFA Interrupt handling functions
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
        struct list_head *waitq, *qe, *qen;
        struct bfa_reqq_wait_s *wqe;

        waitq = bfa_reqq(bfa, qid);
        list_for_each_safe(qe, qen, waitq) {
                /*
                 * Callback only as long as there is room in request queue
                 */
                if (bfa_reqq_full(bfa, qid))
                        break;

                list_del(qe);
                wqe = (struct bfa_reqq_wait_s *) qe;
                wqe->qresume(wqe->cbarg);
        }
}
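
/*
 * All-vector MSI-X handler: processes every pending event through the
 * INTx path.
 */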
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
        bfa_intx(bfa);
}
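
/*
 * INTx interrupt handler: ack and process RME (response) and CPE (request)
 * queue interrupts, then any remaining LPU/error interrupts. Returns
 * BFA_TRUE if an interrupt was pending.
 */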
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
        u32 intr, qintr;
        int queue;

        intr = readl(bfa->iocfc.bfa_regs.intr_status);
        if (!intr)
                return BFA_FALSE;

        /*
         * RME completion queue interrupt
         */
        qintr = intr & __HFN_INT_RME_MASK;
        writel(qintr, bfa->iocfc.bfa_regs.intr_status);

        for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
                if (intr & (__HFN_INT_RME_Q0 << queue))
                        bfa_msix_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
        }
        intr &= ~qintr;
        if (!intr)
                return BFA_TRUE;

        /*
         * CPE completion queue interrupt
         */
        qintr = intr & __HFN_INT_CPE_MASK;
        writel(qintr, bfa->iocfc.bfa_regs.intr_status);

        for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
                if (intr & (__HFN_INT_CPE_Q0 << queue))
                        bfa_msix_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
        }
        intr &= ~qintr;
        if (!intr)
                return BFA_TRUE;

        bfa_msix_lpu_err(bfa, intr);

        return BFA_TRUE;
}
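
/*
 * Install the MSI-X handlers and unmask the interrupt sources owned by
 * this PCI function.
 */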
void
bfa_isr_enable(struct bfa_s *bfa)
{
        u32 intr_unmask;
        int pci_func = bfa_ioc_pcifn(&bfa->ioc);

        bfa_trc(bfa, pci_func);

        bfa_msix_install(bfa);
        intr_unmask = (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
                       __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS |
                       __HFN_INT_LL_HALT);

        if (pci_func == 0)
                intr_unmask |= (__HFN_INT_CPE_Q0 | __HFN_INT_CPE_Q1 |
                                __HFN_INT_CPE_Q2 | __HFN_INT_CPE_Q3 |
                                __HFN_INT_RME_Q0 | __HFN_INT_RME_Q1 |
                                __HFN_INT_RME_Q2 | __HFN_INT_RME_Q3 |
                                __HFN_INT_MBOX_LPU0);
        else
                intr_unmask |= (__HFN_INT_CPE_Q4 | __HFN_INT_CPE_Q5 |
                                __HFN_INT_CPE_Q6 | __HFN_INT_CPE_Q7 |
                                __HFN_INT_RME_Q4 | __HFN_INT_RME_Q5 |
                                __HFN_INT_RME_Q6 | __HFN_INT_RME_Q7 |
                                __HFN_INT_MBOX_LPU1);

        writel(intr_unmask, bfa->iocfc.bfa_regs.intr_status);
        writel(~intr_unmask, bfa->iocfc.bfa_regs.intr_mask);
        bfa->iocfc.intr_mask = ~intr_unmask;
        bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}

void
bfa_isr_disable(struct bfa_s *bfa)
{
        bfa_isr_mode_set(bfa, BFA_FALSE);
        writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
        bfa_msix_uninstall(bfa);
}
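
/*
 * MSI-X handler for a request (CPE) queue: ack the queue interrupt and
 * resume any requests waiting for queue space.
 */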
void
bfa_msix_reqq(struct bfa_s *bfa, int qid)
{
        struct list_head *waitq;

        qid &= (BFI_IOC_MAX_CQS - 1);

        bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

        /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
        if (!list_empty(waitq))
                bfa_reqq_resume(bfa, qid);
}
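
/*
 * Catch-all for message classes without a registered handler: trace the
 * offending message header and assert.
 */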
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
        bfa_trc(bfa, m->mhdr.msg_class);
        bfa_trc(bfa, m->mhdr.msg_id);
        bfa_trc(bfa, m->mhdr.mtag.i2htok);
        bfa_assert(0);
        bfa_trc_stop(bfa->trcmod);
}
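
/*
 * MSI-X handler for a response (RME) queue: dispatch each firmware message
 * to its class handler, publish the new consumer index, and resume any
 * waiting requests.
 */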
void
bfa_msix_rspq(struct bfa_s *bfa, int qid)
{
        struct bfi_msg_s *m;
        u32 pi, ci;
        struct list_head *waitq;

        bfa_trc_fp(bfa, qid);

        qid &= (BFI_IOC_MAX_CQS - 1);

        bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

        ci = bfa_rspq_ci(bfa, qid);
        pi = bfa_rspq_pi(bfa, qid);

        bfa_trc_fp(bfa, ci);
        bfa_trc_fp(bfa, pi);

        if (bfa->rme_process) {
                while (ci != pi) {
                        m = bfa_rspq_elem(bfa, qid, ci);
                        bfa_assert_fp(m->mhdr.msg_class < BFI_MC_MAX);

                        bfa_isrs[m->mhdr.msg_class](bfa, m);

                        CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
                }
        }

        /*
         * update CI
         */
        bfa_rspq_ci(bfa, qid) = pi;
        writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
        mmiowb();

        /*
         * Resume any pending requests in the corresponding reqq.
         */
        waitq = bfa_reqq(bfa, qid);
        if (!list_empty(waitq))
                bfa_reqq_resume(bfa, qid);
}
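
/*
 * MSI-X handler for mailbox and LPU/hardware error interrupts.
 */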
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
        u32 intr, curr_value;

        intr = readl(bfa->iocfc.bfa_regs.intr_status);

        if (intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1))
                bfa_ioc_mbox_isr(&bfa->ioc);

        intr &= (__HFN_INT_ERR_EMC | __HFN_INT_ERR_LPU0 |
                 __HFN_INT_ERR_LPU1 | __HFN_INT_ERR_PSS | __HFN_INT_LL_HALT);

        if (intr) {
                if (intr & __HFN_INT_LL_HALT) {
                        /*
                         * If the LL_HALT bit is set then the FW Init Halt
                         * LL Port Register needs to be cleared as well so
                         * the Interrupt Status Register will be cleared.
                         */
                        curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
                        curr_value &= ~__FW_INIT_HALT_P;
                        writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
                }

                if (intr & __HFN_INT_ERR_PSS) {
                        /*
                         * The ERR_PSS bit needs to be cleared as well in
                         * case interrupts are shared, so the driver's
                         * interrupt handler is still called even though the
                         * source is already masked out.
                         */
                        curr_value = readl(
                                        bfa->ioc.ioc_regs.pss_err_status_reg);
                        curr_value &= __PSS_ERR_STATUS_SET;
                        writel(curr_value,
                               bfa->ioc.ioc_regs.pss_err_status_reg);
                }

                writel(intr, bfa->iocfc.bfa_regs.intr_status);
                bfa_ioc_error_isr(&bfa->ioc);
        }
}

/*
 * BFA IOC FC related functions
 */

/*
 * BFA IOC private functions
 */
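
/*
 * Compute the DMA memory needed for the request/response queues and their
 * shadow CI/PI locations.
 */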
static void
bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        int i, per_reqq_sz, per_rspq_sz;

        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        /*
         * Calculate CQ size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                *dm_len = *dm_len + per_reqq_sz;
                *dm_len = *dm_len + per_rspq_sz;
        }

        /*
         * Calculate Shadow CI/PI size
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++)
                *dm_len += (2 * BFA_CACHELINE_SZ);
}
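
/*
 * Compute the DMA memory needed for the firmware configuration request
 * and response pages.
 */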
static void
bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
{
        *dm_len +=
                BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        *dm_len +=
                BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                            BFA_CACHELINE_SZ);
}

/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfg_req_s cfg_req;
        struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
        struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
        int i;

        bfa_assert(cfg->fwcfg.num_cqs <= BFI_IOC_MAX_CQS);
        bfa_trc(bfa, cfg->fwcfg.num_cqs);

        bfa_iocfc_reset_queues(bfa);

        /*
         * initialize IOC configuration info
         */
        cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
        cfg_info->num_cqs = cfg->fwcfg.num_cqs;

        bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);

        /*
         * dma map REQ and RSP circular queues and shadow pointers
         */
        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
                                    iocfc->req_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
                                    iocfc->req_cq_shadow_ci[i].pa);
                cfg_info->req_cq_elems[i] =
                        cpu_to_be16(cfg->drvcfg.num_reqq_elems);

                bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
                                    iocfc->rsp_cq_ba[i].pa);
                bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
                                    iocfc->rsp_cq_shadow_pi[i].pa);
                cfg_info->rsp_cq_elems[i] =
                        cpu_to_be16(cfg->drvcfg.num_rspq_elems);
        }

        /*
         * Enable interrupt coalescing if it is driver init path
         * and not ioc disable/enable path.
         */
        if (!iocfc->cfgdone)
                cfg_info->intr_attr.coalesce = BFA_TRUE;

        iocfc->cfgdone = BFA_FALSE;

        /*
         * dma map IOC configuration itself
         */
        bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
                    bfa_lpuid(bfa));
        bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

        bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
                          sizeof(struct bfi_iocfc_cfg_req_s));
}
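
/*
 * Initialize IOCFC state and hook up the chip-specific hardware interface
 * (hwct handlers for CT ASICs, hwcb handlers otherwise).
 */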
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                   struct bfa_pcidev_s *pcidev)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        bfa->bfad = bfad;
        iocfc->bfa = bfa;
        iocfc->action = BFA_IOCFC_ACT_NONE;

        iocfc->cfg = *cfg;

        /*
         * Initialize chip specific handlers.
         */
        if (bfa_asic_id_ct(bfa_ioc_devid(&bfa->ioc))) {
                iocfc->hwif.hw_reginit = bfa_hwct_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
        } else {
                iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
                iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
                iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
                iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
                iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
                iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
                iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
                iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
                iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
        }

        iocfc->hwif.hw_reginit(bfa);
        bfa->msix.nvecs = 0;
}
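
/*
 * Claim the DMA-able and kernel-virtual memory carved out for the IOC,
 * the circular queues, the shadow CI/PI, the config pages and the
 * firmware trace buffer from the meminfo pool.
 */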
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
                    struct bfa_meminfo_s *meminfo)
{
        u8 *dm_kva;
        u64 dm_pa;
        int i, per_reqq_sz, per_rspq_sz;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        int dbgsz;

        dm_kva = bfa_meminfo_dma_virt(meminfo);
        dm_pa = bfa_meminfo_dma_phys(meminfo);

        /*
         * First allocate dma memory for IOC.
         */
        bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
        dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

        /*
         * Claim DMA-able memory for the request/response queues and for shadow
         * ci/pi registers
         */
        per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);
        per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
                                  BFA_DMA_ALIGN_SZ);

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_ba[i].kva = dm_kva;
                iocfc->req_cq_ba[i].pa = dm_pa;
                memset(dm_kva, 0, per_reqq_sz);
                dm_kva += per_reqq_sz;
                dm_pa += per_reqq_sz;

                iocfc->rsp_cq_ba[i].kva = dm_kva;
                iocfc->rsp_cq_ba[i].pa = dm_pa;
                memset(dm_kva, 0, per_rspq_sz);
                dm_kva += per_rspq_sz;
                dm_pa += per_rspq_sz;
        }

        for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
                iocfc->req_cq_shadow_ci[i].kva = dm_kva;
                iocfc->req_cq_shadow_ci[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;

                iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
                iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
                dm_kva += BFA_CACHELINE_SZ;
                dm_pa += BFA_CACHELINE_SZ;
        }

        /*
         * Claim DMA-able memory for the config info page
         */
        bfa->iocfc.cfg_info.kva = dm_kva;
        bfa->iocfc.cfg_info.pa = dm_pa;
        bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
        dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

        /*
         * Claim DMA-able memory for the config response
         */
        bfa->iocfc.cfgrsp_dma.kva = dm_kva;
        bfa->iocfc.cfgrsp_dma.pa = dm_pa;
        bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

        dm_kva +=
                BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                            BFA_CACHELINE_SZ);
        dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
                             BFA_CACHELINE_SZ);

        bfa_meminfo_dma_virt(meminfo) = dm_kva;
        bfa_meminfo_dma_phys(meminfo) = dm_pa;

        dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
        if (dbgsz > 0) {
                bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
                bfa_meminfo_kva(meminfo) += dbgsz;
        }
}

/*
 * Start BFA submodules.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
        int i;

        bfa->rme_process = BFA_TRUE;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->start(bfa);
}

/*
 * Disable BFA submodules.
 */
static void
bfa_iocfc_disable_submod(struct bfa_s *bfa)
{
        int i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->iocdisable(bfa);
}
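
/*
 * Init-complete callback, queued on the driver callback queue: reports
 * BFA_STATUS_OK or BFA_STATUS_FAILED to the driver based on whether
 * firmware configuration completed.
 */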
static void
bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
{
        struct bfa_s *bfa = bfa_arg;

        if (complete) {
                if (bfa->iocfc.cfgdone)
                        bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
                else
                        bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
        } else {
                if (bfa->iocfc.cfgdone)
                        bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
        }
}

static void
bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->comp);
        else
                bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
}

static void
bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
{
        struct bfa_s *bfa = bfa_arg;
        struct bfad_s *bfad = bfa->bfad;

        if (compl)
                complete(&bfad->disable_comp);
}

/*
 * Update BFA configuration from firmware configuration.
 */
static void
bfa_iocfc_cfgrsp(struct bfa_s *bfa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
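
        /*
         * num_cqs is a single byte, so unlike the fields below it needs no
         * endianness fix-up; the self-assignment is effectively a no-op,
         * presumably kept for symmetry with the conversions that follow.
         */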
        fwcfg->num_cqs = fwcfg->num_cqs;
        fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
        fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
        fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
        fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
        fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);

        iocfc->cfgdone = BFA_TRUE;

        /*
         * Configuration is complete - initialize/start submodules
         */
        bfa_fcport_init(bfa);

        if (iocfc->action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
        else
                bfa_iocfc_start_submod(bfa);
}
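
/*
 * Reset the consumer and producer indices of all request/response queues.
 */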
void
bfa_iocfc_reset_queues(struct bfa_s *bfa)
{
        int q;

        for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
                bfa_reqq_ci(bfa, q) = 0;
                bfa_reqq_pi(bfa, q) = 0;
                bfa_rspq_ci(bfa, q) = 0;
                bfa_rspq_pi(bfa, q) = 0;
        }
}

/*
 * IOC enable request is complete
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
        struct bfa_s *bfa = bfa_arg;

        if (status != BFA_STATUS_OK) {
                bfa_isr_disable(bfa);
                if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                        bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
                                     bfa_iocfc_init_cb, bfa);
                return;
        }

        bfa_iocfc_send_cfg(bfa);
}

/*
 * IOC disable request is complete
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
                bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
                             bfa);
        else {
                bfa_assert(bfa->iocfc.action == BFA_IOCFC_ACT_DISABLE);
                bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
                             bfa);
        }
}

/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa->rme_process = BFA_FALSE;

        bfa_isr_disable(bfa);
        bfa_iocfc_disable_submod(bfa);

        if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
                bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
                             bfa);
}

/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
        struct bfa_s *bfa = bfa_arg;

        bfa_iocfc_reset_queues(bfa);
        bfa_isr_enable(bfa);
}

/*
 * Query IOC memory requirement information.
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
                  u32 *dm_len)
{
        /* dma memory for IOC */
        *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

        bfa_iocfc_fw_cfg_sz(cfg, dm_len);
        bfa_iocfc_cqs_sz(cfg, dm_len);
        *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}

/*
 * Attach the IOC FC subsystem: register IOC callbacks, set up the PCI and
 * mailbox interfaces, and claim memory for all IOCFC resources.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
                 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        int i;
        struct bfa_ioc_s *ioc = &bfa->ioc;

        bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
        bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
        bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
        bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

        ioc->trcmod = bfa->trcmod;
        bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

        /*
         * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
         */
        if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
                bfa_ioc_set_fcmode(&bfa->ioc);

        bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_MC_IOCFC);
        bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

        bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
        bfa_iocfc_mem_claim(bfa, cfg, meminfo);
        INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

        INIT_LIST_HEAD(&bfa->comp_q);
        for (i = 0; i < BFI_IOC_MAX_CQS; i++)
                INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}

/*
 * Kick off IOC FC initialization by enabling the IOC; the configuration
 * is sent once the IOC enable completes.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
        bfa_ioc_enable(&bfa->ioc);
}

/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
        if (bfa->iocfc.cfgdone)
                bfa_iocfc_start_submod(bfa);
}

/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
        bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}
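
/*
 * Mailbox message handler for the IOCFC class (registered through
 * bfa_mbox_isrs above).
 */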
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
        struct bfa_s *bfa = bfaarg;
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        union bfi_iocfc_i2h_msg_u *msg;

        msg = (union bfi_iocfc_i2h_msg_u *) m;
        bfa_trc(bfa, msg->mh.msg_id);

        switch (msg->mh.msg_id) {
        case BFI_IOCFC_I2H_CFG_REPLY:
                iocfc->cfg_reply = &msg->cfg_reply;
                bfa_iocfc_cfgrsp(bfa);
                break;
        case BFI_IOCFC_I2H_UPDATEQ_RSP:
                iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
                break;
        default:
                bfa_assert(0);
        }
}
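
/*
 * Return the current interrupt-moderation attributes and configuration.
 */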
void
bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;

        attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
                        be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
                        be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);

        attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
                        be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
                        be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);

        attr->config = iocfc->cfg;
}
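
/*
 * Program new interrupt coalescing/delay/latency attributes into the
 * firmware. Returns BFA_STATUS_DEVBUSY if no request-queue element is
 * available.
 */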
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_set_intr_req_s *m;

        iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
        iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
        iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

        if (!bfa_iocfc_is_operational(bfa))
                return BFA_STATUS_OK;

        m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
        if (!m)
                return BFA_STATUS_DEVBUSY;

        bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
                    bfa_lpuid(bfa));
        m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
        m->delay = iocfc->cfginfo->intr_attr.delay;
        m->latency = iocfc->cfginfo->intr_attr.latency;

        bfa_trc(bfa, attr->delay);
        bfa_trc(bfa, attr->latency);

        bfa_reqq_produce(bfa, BFA_REQQ_IOC);
        return BFA_STATUS_OK;
}
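
/*
 * Record the DMA base address of the SCSI sense buffers in the firmware
 * configuration.
 */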
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;

        iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
        bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}

/*
 * Enable IOC after it is disabled.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Enable");
        bfa_ioc_enable(&bfa->ioc);
}

void
bfa_iocfc_disable(struct bfa_s *bfa)
{
        bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
                     "IOC Disable");
        bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

        bfa->rme_process = BFA_FALSE;
        bfa_ioc_disable(&bfa->ioc);
}

bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
        return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}

/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
        int i;

        if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
                bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
                *nwwns = cfgrsp->pbc_cfg.nbluns;
                for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
                        wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

                return;
        }

        *nwwns = cfgrsp->bootwwns.nwwns;
        memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
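
/*
 * Return the pre-boot (PBC) boot configuration read from flash.
 */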
void
bfa_iocfc_get_pbc_boot_cfg(struct bfa_s *bfa, struct bfa_boot_pbc_s *pbcfg)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
        pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
        pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
        memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
}
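
/*
 * Copy out the pre-boot virtual port list and return the vport count.
 */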
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
        struct bfa_iocfc_s *iocfc = &bfa->iocfc;
        struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

        memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
        return cfgrsp->pbc_cfg.nvports;
}

/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail if the cap is out of range compared to pre-defined
 * values within the BFA library.
 *
 * @param[in] cfg -     pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *                      its configuration in this structure.
 *                      The default values for struct bfa_iocfc_cfg_s can be
 *                      fetched using bfa_cfg_get_default() API.
 *
 *                      If cap's boundary check fails, the library will use
 *                      the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *                      indicates the memory type (see bfa_mem_type_t) and
 *                      amount of memory required.
 *
 *                      Driver should allocate the memory, populate the
 *                      starting address for each block and provide the same
 *                      structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
        int i;
        u32 km_len = 0, dm_len = 0;

        bfa_assert((cfg != NULL) && (meminfo != NULL));

        memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
        meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
                BFA_MEM_TYPE_KVA;
        meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
                BFA_MEM_TYPE_DMA;

        bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

        dm_len += bfa_port_meminfo();

        meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
        meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
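
/*
 * Typical driver-side bring-up sequence (an illustrative sketch only; the
 * allocation step and the bfa/bfad/pcidev handles are driver-specific):
 *
 *      struct bfa_iocfc_cfg_s cfg;
 *      struct bfa_meminfo_s meminfo;
 *
 *      bfa_cfg_get_default(&cfg);
 *      bfa_cfg_get_meminfo(&cfg, &meminfo);
 *      (allocate mem_len bytes for each meminfo element and fill in the
 *       starting kva/dma addresses)
 *      bfa_attach(bfa, bfad, &cfg, &meminfo, pcidev);
 *      bfa_init(bfa);
 *      bfa_start(bfa);
 */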

/*
 * Use this function to attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out] bfa      Pointer to bfa_t.
 * @param[in] bfad      Opaque handle back to the driver's IOC structure
 * @param[in] cfg       Pointer to bfa_ioc_cfg_t. Should be same structure
 *                      that was used in bfa_cfg_get_meminfo().
 * @param[in] meminfo   Pointer to bfa_meminfo_t. The driver should
 *                      use the bfa_cfg_get_meminfo() call to
 *                      find the memory blocks required, allocate the
 *                      required memory and provide the starting addresses.
 * @param[in] pcidev    pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
           struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
        int i;
        struct bfa_mem_elem_s *melem;

        bfa->fcs = BFA_FALSE;

        bfa_assert((cfg != NULL) && (meminfo != NULL));

        /*
         * initialize all memory pointers for iterative allocation
         */
        for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
                melem = meminfo->meminfo + i;
                melem->kva_curp = melem->kva;
                melem->dma_curp = melem->dma;
        }

        bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

        bfa_com_port_attach(bfa, meminfo);
}

/*
 * Use this function to delete a BFA IOC. IOC should be stopped (by
 * calling bfa_stop()) before this function call.
 *
 * @param[in] bfa - pointer to bfa_t.
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 */
void
bfa_detach(struct bfa_s *bfa)
{
        int i;

        for (i = 0; hal_mods[i]; i++)
                hal_mods[i]->detach(bfa);
        bfa_ioc_detach(&bfa->ioc);
}
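
/*
 * Move all pending completion callbacks from the BFA completion queue to
 * the caller-supplied list.
 */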
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
        INIT_LIST_HEAD(comp_q);
        list_splice_tail_init(&bfa->comp_q, comp_q);
}
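
/*
 * Run the queued completion callbacks with a "done" (BFA_TRUE) status.
 */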
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
        struct list_head *qe;
        struct list_head *qen;
        struct bfa_cb_qe_s *hcb_qe;

        list_for_each_safe(qe, qen, comp_q) {
                hcb_qe = (struct bfa_cb_qe_s *) qe;
                hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
        }
}
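
/*
 * Flush the queued completion callbacks with a "not done" (BFA_FALSE)
 * status.
 */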
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
        struct list_head *qe;
        struct bfa_cb_qe_s *hcb_qe;

        while (!list_empty(comp_q)) {
                bfa_q_deq(comp_q, &qe);
                hcb_qe = (struct bfa_cb_qe_s *) qe;
                hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
        }
}

/*
 * Return the list of PCI vendor/device id lists supported by this
 * BFA instance.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
        static struct bfa_pciid_s __pciids[] = {
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
                {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
        };

        *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
        *pciids = __pciids;
}

/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
        cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
        cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
        cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
        cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
        cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
        cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
        cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
        cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

        cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
        cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
        cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
        cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
        cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
        cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
        cfg->drvcfg.ioc_recover = BFA_FALSE;
        cfg->drvcfg.delay_comp = BFA_FALSE;
}
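
/*
 * Populate cfg with the minimum supported resource configuration.
 */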
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
        bfa_cfg_get_default(cfg);
        cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
        cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
        cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
        cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
        cfg->fwcfg.num_rports = BFA_RPORT_MIN;

        cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
        cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
        cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
        cfg->drvcfg.min_cfg = BFA_TRUE;
}