blob: 6da615f48822daee870735ea31ad6d42cba9ee21 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
Jing Huang5fbe25c2010-10-18 17:17:23 -070024/*
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080025 * BFA module list terminated by NULL
26 */
/*
 * BFA submodule table; walked in array order by bfa_iocfc_start_submod()
 * and bfa_iocfc_disable_submod(), with NULL terminating the iteration.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
38/*
39 * Message handlers for various modules.
40 */
/*
 * Message handlers for various modules, indexed by BFI message class
 * (bfa_isr_rspq() dispatches via m->mhdr.msg_class).  Classes with no
 * handler land in bfa_isr_unhandled(), which warns and stops tracing.
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
75/*
76 * Message handlers for mailbox command classes
77 */
/*
 * Message handlers for mailbox command classes, indexed by BFI message
 * class; registered with the IOC via bfa_ioc_mbox_register() in
 * bfa_iocfc_attach().  NULL entries have no mailbox handler.
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
91static void
92bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
93{
94 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len;
96 u8 *dm_kva;
97 u64 dm_pa;
98
99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi);
102
103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa);
106
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
109}
110
111/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700112 * BFA IOC FC related definitions
113 */
114
Jing Huang5fbe25c2010-10-18 17:17:23 -0700115/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700116 * IOC local definitions
117 */
118#define BFA_IOCFC_TOV 5000 /* msecs */
119
120enum {
121 BFA_IOCFC_ACT_NONE = 0,
122 BFA_IOCFC_ACT_INIT = 1,
123 BFA_IOCFC_ACT_STOP = 2,
124 BFA_IOCFC_ACT_DISABLE = 3,
125};
126
127#define DEF_CFG_NUM_FABRICS 1
128#define DEF_CFG_NUM_LPORTS 256
129#define DEF_CFG_NUM_CQS 4
130#define DEF_CFG_NUM_IOIM_REQS (BFA_IOIM_MAX)
131#define DEF_CFG_NUM_TSKIM_REQS 128
132#define DEF_CFG_NUM_FCXP_REQS 64
133#define DEF_CFG_NUM_UF_BUFS 64
134#define DEF_CFG_NUM_RPORTS 1024
135#define DEF_CFG_NUM_ITNIMS (DEF_CFG_NUM_RPORTS)
136#define DEF_CFG_NUM_TINS 256
137
138#define DEF_CFG_NUM_SGPGS 2048
139#define DEF_CFG_NUM_REQQ_ELEMS 256
140#define DEF_CFG_NUM_RSPQ_ELEMS 64
141#define DEF_CFG_NUM_SBOOT_TGTS 16
142#define DEF_CFG_NUM_SBOOT_LUNS 16
143
Jing Huang5fbe25c2010-10-18 17:17:23 -0700144/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700145 * forward declaration for IOC FC functions
146 */
147static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
148static void bfa_iocfc_disable_cbfn(void *bfa_arg);
149static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
150static void bfa_iocfc_reset_cbfn(void *bfa_arg);
151static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
152
Jing Huang5fbe25c2010-10-18 17:17:23 -0700153/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700154 * BFA Interrupt handling functions
155 */
156static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700157bfa_reqq_resume(struct bfa_s *bfa, int qid)
158{
159 struct list_head *waitq, *qe, *qen;
160 struct bfa_reqq_wait_s *wqe;
161
162 waitq = bfa_reqq(bfa, qid);
163 list_for_each_safe(qe, qen, waitq) {
Jing Huang5fbe25c2010-10-18 17:17:23 -0700164 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700165 * Callback only as long as there is room in request queue
166 */
167 if (bfa_reqq_full(bfa, qid))
168 break;
169
170 list_del(qe);
171 wqe = (struct bfa_reqq_wait_s *) qe;
172 wqe->qresume(wqe->cbarg);
173 }
174}
175
/*
 * Service one response (RME) queue: ack the queue interrupt, drain all
 * messages between the shadow consumer index and the hardware producer
 * index, dispatching each by BFI message class, then publish the new CI
 * and resume any requests waiting on the paired request queue.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		/* firmware must never send a class outside the table */
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI: shadow copy first, then the hardware register;
	 * mmiowb() orders the MMIO write before later lock release.
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
210
211static inline void
212bfa_isr_reqq(struct bfa_s *bfa, int qid)
213{
214 struct list_head *waitq;
215
216 qid &= (BFI_IOC_MAX_CQS - 1);
217
218 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
219
220 /*
221 * Resume any pending requests in the corresponding reqq.
222 */
223 waitq = bfa_reqq(bfa, qid);
224 if (!list_empty(waitq))
225 bfa_reqq_resume(bfa, qid);
226}
227
/*
 * Single-vector MSI-X handler: service every interrupt source through
 * the INTx poll path; @vec is unused.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
233
/*
 * INTx/poll-mode interrupt entry point.  Services RME (response) queues,
 * then CPE (request) queues, then any remaining LPU/error bits.
 * Returns BFA_FALSE if the interrupt was not ours (shared line),
 * BFA_TRUE otherwise.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;	/* nothing pending for us */

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	/* ack the RME bits before processing */
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		/* skip queue processing once queue_process is off */
		if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	/* ack the CPE bits before processing */
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* whatever is left is a mailbox or error interrupt */
	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
276
/*
 * Enable interrupts for this PCI function: install the control MSI-X
 * vector, then unmask the error bits plus this function's own
 * mailbox/queue bits (bit layout differs on CT2 ASICs).
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	/* build the set of bits to leave unmasked */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* ack any stale status, then unmask only the selected bits */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
301
/*
 * Disable all interrupts for this function and tear down the MSI-X
 * handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	/* mask every interrupt source */
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
309
/*
 * MSI-X handler for a request (CPE) queue vector: translate the vector
 * number to a queue id relative to the first CPE vector.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
315
/*
 * Catch-all for BFI message classes with no registered handler: trace
 * the offending message header, warn loudly, and stop the trace module
 * so the evidence is preserved for debugging.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
325
/*
 * MSI-X handler for a response (RME) queue vector: translate the vector
 * number to a queue id relative to the first RME vector.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
331
/*
 * Handler for mailbox (LPU) and error interrupts.  Splits the raw
 * status into mailbox, HALT and PSS-error bits (bit positions differ
 * on CT2 ASICs), services the mailbox first, then clears/acks any
 * error condition and notifies the IOC error path.  @vec is unused.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		/* ack the error bits, then run the IOC error handler */
		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
384
Jing Huang5fbe25c2010-10-18 17:17:23 -0700385/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700386 * BFA IOC FC related functions
387 */
388
Jing Huang5fbe25c2010-10-18 17:17:23 -0700389/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800390 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700391 */
392
393static void
394bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
395{
396 int i, per_reqq_sz, per_rspq_sz;
397
398 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
399 BFA_DMA_ALIGN_SZ);
400 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
401 BFA_DMA_ALIGN_SZ);
402
403 /*
404 * Calculate CQ size
405 */
406 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
407 *dm_len = *dm_len + per_reqq_sz;
408 *dm_len = *dm_len + per_rspq_sz;
409 }
410
411 /*
412 * Calculate Shadow CI/PI size
413 */
414 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
415 *dm_len += (2 * BFA_CACHELINE_SZ);
416}
417
418static void
419bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
420{
421 *dm_len +=
422 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
423 *dm_len +=
424 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
425 BFA_CACHELINE_SZ);
426}
427
Jing Huang5fbe25c2010-10-18 17:17:23 -0700428/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700429 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
430 */
/*
 * Build the IOC FC configuration page (queue base addresses, shadow
 * pointer addresses, element counts) and send BFI_IOCFC_H2I_CFG_REQ to
 * the firmware over the mailbox.  The firmware replies through
 * bfa_iocfc_cfgrsp().
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s *cfg = &iocfc->cfg;
	int i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	/* 16-bit counts go to firmware in big-endian */
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* set again by bfa_iocfc_cfgrsp() when firmware responds */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
493
/*
 * Initialize the IOC FC state: record the driver handle, snapshot the
 * caller's configuration, and select the chip-specific hardware
 * interface (register init, queue ack, MSI-X install hooks) based on
 * the ASIC family.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	/* snapshot the caller's configuration by value */
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		/* CT-family ASIC: bfa_hwct_* hooks, fixed vector bases */
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		/* CB-family ASIC: per-PCI-function vector bases */
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* CT2 overrides register init and has no isr_mode_set hook */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
547
/*
 * Carve the pre-allocated DMA region in @meminfo into the pieces the
 * IOC FC layer needs — IOC attribute page, CQ rings, shadow CI/PI
 * cachelines, config page and config response page — advancing the
 * kva/pa cursors in lock-step, then write the cursors back.  Also
 * claims kernel memory for the firmware trace buffer when
 * auto-recovery is enabled.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8 *dm_kva;
	u64 dm_pa;
	int i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	int dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	/* one cacheline per shadow index to avoid false sharing */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);

	/* publish the advanced cursors back to the caller */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	/* kernel-memory claim for the firmware debug trace buffer */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
635
Jing Huang5fbe25c2010-10-18 17:17:23 -0700636/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700637 * Start BFA submodules.
638 */
639static void
640bfa_iocfc_start_submod(struct bfa_s *bfa)
641{
642 int i;
643
Krishna Gudipati775c7742011-06-13 15:52:12 -0700644 bfa->queue_process = BFA_TRUE;
Krishna Gudipati11189202011-06-13 15:50:35 -0700645 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
646 bfa->iocfc.hwif.hw_rspq_ack(bfa, i);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700647
648 for (i = 0; hal_mods[i]; i++)
649 hal_mods[i]->start(bfa);
650}
651
Jing Huang5fbe25c2010-10-18 17:17:23 -0700652/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700653 * Disable BFA submodules.
654 */
655static void
656bfa_iocfc_disable_submod(struct bfa_s *bfa)
657{
658 int i;
659
660 for (i = 0; hal_mods[i]; i++)
661 hal_mods[i]->iocdisable(bfa);
662}
663
664static void
665bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
666{
667 struct bfa_s *bfa = bfa_arg;
668
669 if (complete) {
670 if (bfa->iocfc.cfgdone)
671 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
672 else
673 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
674 } else {
675 if (bfa->iocfc.cfgdone)
676 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
677 }
678}
679
680static void
681bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
682{
683 struct bfa_s *bfa = bfa_arg;
684 struct bfad_s *bfad = bfa->bfad;
685
686 if (compl)
687 complete(&bfad->comp);
688 else
689 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
690}
691
692static void
693bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
694{
695 struct bfa_s *bfa = bfa_arg;
696 struct bfad_s *bfad = bfa->bfad;
697
698 if (compl)
699 complete(&bfad->disable_comp);
700}
701
/*
 * Configure queue register offsets as reported by firmware.
 */
705static void
706bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
707{
708 int i;
709 struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
710 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
711
712 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
713 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
714 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
715 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
716 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
717 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
718 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
719 }
720}
721
Jing Huang5fbe25c2010-10-18 17:17:23 -0700722/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700723 * Update BFA configuration from firmware configuration.
724 */
725static void
726bfa_iocfc_cfgrsp(struct bfa_s *bfa)
727{
728 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
729 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
730 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
731
732 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700733 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700734 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
Jing Huangba816ea2010-10-18 17:10:50 -0700735 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
736 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
737 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
738 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700739
740 iocfc->cfgdone = BFA_TRUE;
741
Jing Huang5fbe25c2010-10-18 17:17:23 -0700742 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700743 * configure queue register offsets as learnt from firmware
744 */
745 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
746
747 /*
Krishna Gudipati775c7742011-06-13 15:52:12 -0700748 * Install MSIX queue handlers
749 */
750 bfa_msix_queue_install(bfa);
751
752 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700753 * Configuration is complete - initialize/start submodules
754 */
755 bfa_fcport_init(bfa);
756
757 if (iocfc->action == BFA_IOCFC_ACT_INIT)
758 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
759 else
760 bfa_iocfc_start_submod(bfa);
761}
762void
763bfa_iocfc_reset_queues(struct bfa_s *bfa)
764{
765 int q;
766
767 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
768 bfa_reqq_ci(bfa, q) = 0;
769 bfa_reqq_pi(bfa, q) = 0;
770 bfa_rspq_ci(bfa, q) = 0;
771 bfa_rspq_pi(bfa, q) = 0;
772 }
773}
774
Jing Huang5fbe25c2010-10-18 17:17:23 -0700775/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700776 * IOC enable request is complete
777 */
778static void
779bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
780{
781 struct bfa_s *bfa = bfa_arg;
782
783 if (status != BFA_STATUS_OK) {
784 bfa_isr_disable(bfa);
785 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
786 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
787 bfa_iocfc_init_cb, bfa);
788 return;
789 }
790
791 bfa_iocfc_send_cfg(bfa);
792}
793
Jing Huang5fbe25c2010-10-18 17:17:23 -0700794/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700795 * IOC disable request is complete
796 */
797static void
798bfa_iocfc_disable_cbfn(void *bfa_arg)
799{
800 struct bfa_s *bfa = bfa_arg;
801
802 bfa_isr_disable(bfa);
803 bfa_iocfc_disable_submod(bfa);
804
805 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
806 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
807 bfa);
808 else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800809 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700810 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
811 bfa);
812 }
813}
814
Jing Huang5fbe25c2010-10-18 17:17:23 -0700815/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700816 * Notify sub-modules of hardware failure.
817 */
818static void
819bfa_iocfc_hbfail_cbfn(void *bfa_arg)
820{
821 struct bfa_s *bfa = bfa_arg;
822
Krishna Gudipati775c7742011-06-13 15:52:12 -0700823 bfa->queue_process = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700824
825 bfa_isr_disable(bfa);
826 bfa_iocfc_disable_submod(bfa);
827
828 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
829 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
830 bfa);
831}
832
Jing Huang5fbe25c2010-10-18 17:17:23 -0700833/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700834 * Actions on chip-reset completion.
835 */
/*
 * Chip reset completed: queues must be rewound to empty before
 * interrupts are re-enabled.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
844
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700845
Jing Huang5fbe25c2010-10-18 17:17:23 -0700846/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700847 * Query IOC memory requirement information.
848 */
849void
850bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
851 u32 *dm_len)
852{
853 /* dma memory for IOC */
Maggie Zhangf7f738122010-12-09 19:08:43 -0800854 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700855
856 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
857 bfa_iocfc_cqs_sz(cfg, dm_len);
Maggie Zhangf7f738122010-12-09 19:08:43 -0800858 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700859}
860
/*
 * Attach the IOC FC subsystem: register IOC callbacks, initialize PCI
 * and mailbox interfaces, claim memory and set up per-CQ wait queues.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* wire IOC event callbacks into the file-scope callback struct */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	/* per-CQ wait queues for callers blocked on a full request queue */
	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
896
/*
 * Start IOC initialization by enabling the IOC.
 */
900void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700901bfa_iocfc_init(struct bfa_s *bfa)
902{
903 bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
904 bfa_ioc_enable(&bfa->ioc);
905}
906
Jing Huang5fbe25c2010-10-18 17:17:23 -0700907/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700908 * IOC start called from bfa_start(). Called to start IOC operations
909 * at driver instantiation for this instance.
910 */
911void
912bfa_iocfc_start(struct bfa_s *bfa)
913{
914 if (bfa->iocfc.cfgdone)
915 bfa_iocfc_start_submod(bfa);
916}
917
Jing Huang5fbe25c2010-10-18 17:17:23 -0700918/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700919 * IOC stop called from bfa_stop(). Called only when driver is unloaded
920 * for this instance.
921 */
922void
923bfa_iocfc_stop(struct bfa_s *bfa)
924{
925 bfa->iocfc.action = BFA_IOCFC_ACT_STOP;
926
Krishna Gudipati775c7742011-06-13 15:52:12 -0700927 bfa->queue_process = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700928 bfa_ioc_disable(&bfa->ioc);
929}
930
/*
 * Mailbox ISR for IOCFC: dispatch firmware-to-host (I2H) messages.
 */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		/* Firmware has answered the config request. */
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		/* Complete a pending queue-update request. */
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	default:
		/* Unexpected message id from firmware. */
		WARN_ON(1);
	}
}
952
953void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700954bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
955{
956 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
957
958 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
959
960 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -0700961 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
962 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700963
964 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -0700965 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
966 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700967
968 attr->config = iocfc->cfg;
969}
970
/*
 * Set interrupt coalescing attributes. The values are cached in cfginfo
 * (big-endian) and, if the IOC is operational, also pushed to firmware
 * through a mailbox request.
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_DEVBUSY if no request queue
 * element is available.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	/* Cache the new settings; delay/latency kept in wire (BE) order. */
	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/* Not operational yet: the cached values are applied later. */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	/* Build and queue the SET_INTR request for firmware. */
	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
1000
1001void
1002bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
1003{
1004 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1005
1006 iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
1007 bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
1008}
Jing Huang5fbe25c2010-10-18 17:17:23 -07001009/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001010 * Enable IOC after it is disabled.
1011 */
1012void
1013bfa_iocfc_enable(struct bfa_s *bfa)
1014{
1015 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1016 "IOC Enable");
1017 bfa_ioc_enable(&bfa->ioc);
1018}
1019
1020void
1021bfa_iocfc_disable(struct bfa_s *bfa)
1022{
1023 bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
1024 "IOC Disable");
1025 bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;
1026
Krishna Gudipati775c7742011-06-13 15:52:12 -07001027 bfa->queue_process = BFA_FALSE;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001028 bfa_ioc_disable(&bfa->ioc);
1029}
1030
1031
1032bfa_boolean_t
1033bfa_iocfc_is_operational(struct bfa_s *bfa)
1034{
1035 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1036}
1037
Jing Huang5fbe25c2010-10-18 17:17:23 -07001038/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001039 * Return boot target port wwns -- read from boot information in flash.
1040 */
1041void
1042bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1043{
1044 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1045 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1046 int i;
1047
1048 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1049 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1050 *nwwns = cfgrsp->pbc_cfg.nbluns;
1051 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1052 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1053
1054 return;
1055 }
1056
1057 *nwwns = cfgrsp->bootwwns.nwwns;
1058 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1059}
1060
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001061int
1062bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1063{
1064 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1065 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1066
1067 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1068 return cfgrsp->pbc_cfg.nvports;
1069}
1070
Jing Huang7725ccf2009-09-23 17:46:15 -07001071
Jing Huang5fbe25c2010-10-18 17:17:23 -07001072/*
 * Use this function to query the memory requirement of the BFA library.
1074 * This function needs to be called before bfa_attach() to get the
1075 * memory required of the BFA layer for a given driver configuration.
1076 *
1077 * This call will fail, if the cap is out of range compared to pre-defined
1078 * values within the BFA library
1079 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001080 * @param[in] cfg - pointer to bfa_ioc_cfg_t. Driver layer should indicate
1081 * its configuration in this structure.
Jing Huang7725ccf2009-09-23 17:46:15 -07001082 * The default values for struct bfa_iocfc_cfg_s can be
1083 * fetched using bfa_cfg_get_default() API.
1084 *
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001085 * If cap's boundary check fails, the library will use
Jing Huang7725ccf2009-09-23 17:46:15 -07001086 * the default bfa_cap_t values (and log a warning msg).
1087 *
1088 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001089 * indicates the memory type (see bfa_mem_type_t) and
Jing Huang7725ccf2009-09-23 17:46:15 -07001090 * amount of memory required.
1091 *
1092 * Driver should allocate the memory, populate the
1093 * starting address for each block and provide the same
1094 * structure as input parameter to bfa_attach() call.
1095 *
1096 * @return void
1097 *
1098 * Special Considerations: @note
1099 */
1100void
1101bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
1102{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001103 int i;
1104 u32 km_len = 0, dm_len = 0;
Jing Huang7725ccf2009-09-23 17:46:15 -07001105
Jing Huangd4b671c2010-12-26 21:46:35 -08001106 WARN_ON((cfg == NULL) || (meminfo == NULL));
Jing Huang7725ccf2009-09-23 17:46:15 -07001107
Jing Huang6a18b162010-10-18 17:08:54 -07001108 memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
Jing Huang7725ccf2009-09-23 17:46:15 -07001109 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
1110 BFA_MEM_TYPE_KVA;
1111 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
1112 BFA_MEM_TYPE_DMA;
1113
1114 bfa_iocfc_meminfo(cfg, &km_len, &dm_len);
1115
1116 for (i = 0; hal_mods[i]; i++)
1117 hal_mods[i]->meminfo(cfg, &km_len, &dm_len);
1118
Krishna Gudipati7873ca42010-05-21 14:39:45 -07001119 dm_len += bfa_port_meminfo();
Jing Huang7725ccf2009-09-23 17:46:15 -07001120
1121 meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
1122 meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
1123}
1124
Jing Huang5fbe25c2010-10-18 17:17:23 -07001125/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001126 * Use this function to do attach the driver instance with the BFA
1127 * library. This function will not trigger any HW initialization
1128 * process (which will be done in bfa_init() call)
1129 *
1130 * This call will fail, if the cap is out of range compared to
1131 * pre-defined values within the BFA library
1132 *
1133 * @param[out] bfa Pointer to bfa_t.
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001134 * @param[in] bfad Opaque handle back to the driver's IOC structure
Jing Huang7725ccf2009-09-23 17:46:15 -07001135 * @param[in] cfg Pointer to bfa_ioc_cfg_t. Should be same structure
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001136 * that was used in bfa_cfg_get_meminfo().
1137 * @param[in] meminfo Pointer to bfa_meminfo_t. The driver should
1138 * use the bfa_cfg_get_meminfo() call to
1139 * find the memory blocks required, allocate the
1140 * required memory and provide the starting addresses.
1141 * @param[in] pcidev pointer to struct bfa_pcidev_s
Jing Huang7725ccf2009-09-23 17:46:15 -07001142 *
1143 * @return
1144 * void
1145 *
1146 * Special Considerations:
1147 *
1148 * @note
1149 *
1150 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	/* Cleared here; presumably set later by the FCS attach path — TODO confirm. */
	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	/* IOCFC must attach first; submodules claim memory after it. */
	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
1178
Jing Huang5fbe25c2010-10-18 17:17:23 -07001179/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001180 * Use this function to delete a BFA IOC. IOC should be stopped (by
1181 * calling bfa_stop()) before this function call.
1182 *
1183 * @param[in] bfa - pointer to bfa_t.
1184 *
1185 * @return
1186 * void
1187 *
1188 * Special Considerations:
1189 *
1190 * @note
1191 */
1192void
1193bfa_detach(struct bfa_s *bfa)
1194{
1195 int i;
1196
1197 for (i = 0; hal_mods[i]; i++)
1198 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001199 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001200}
1201
1202void
1203bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
1204{
1205 INIT_LIST_HEAD(comp_q);
1206 list_splice_tail_init(&bfa->comp_q, comp_q);
1207}
1208
1209void
1210bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1211{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001212 struct list_head *qe;
1213 struct list_head *qen;
1214 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001215
1216 list_for_each_safe(qe, qen, comp_q) {
1217 hcb_qe = (struct bfa_cb_qe_s *) qe;
1218 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1219 }
1220}
1221
1222void
1223bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1224{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001225 struct list_head *qe;
1226 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001227
1228 while (!list_empty(comp_q)) {
1229 bfa_q_deq(comp_q, &qe);
1230 hcb_qe = (struct bfa_cb_qe_s *) qe;
1231 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1232 }
1233}
1234
Jing Huang7725ccf2009-09-23 17:46:15 -07001235
Jing Huang5fbe25c2010-10-18 17:17:23 -07001236/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001237 * Return the list of PCI vendor/device id lists supported by this
1238 * BFA instance.
1239 */
1240void
1241bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1242{
1243 static struct bfa_pciid_s __pciids[] = {
1244 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1245 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1246 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
Jing Huang293f82d2010-07-08 19:45:20 -07001247 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
Jing Huang7725ccf2009-09-23 17:46:15 -07001248 };
1249
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001250 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001251 *pciids = __pciids;
1252}
1253
Jing Huang5fbe25c2010-10-18 17:17:23 -07001254/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value (compiled
1256 * into BFA layer). The OS driver can then turn back and overwrite entries that
1257 * have been configured by the user.
1258 *
1259 * @param[in] cfg - pointer to bfa_ioc_cfg_t
1260 *
1261 * @return
1262 * void
1263 *
1264 * Special Considerations:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001265 * note
Jing Huang7725ccf2009-09-23 17:46:15 -07001266 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware-visible resource counts (default sizing). */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	/* Firmware-side target IO requests are not used by default. */
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side defaults: queue depths, SG pages, boot targets/LUNs. */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1290
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	/* Start from the defaults, then shrink to the minimum footprint. */
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	/* Flag that the minimal configuration is in effect. */
	cfg->drvcfg.min_cfg = BFA_TRUE;
}