blob: 2b497243baf488f208125d137deffb36331e7f12 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
/* Register this file with the BFA trace facility under the HAL/CORE id */
BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
Jing Huang5fbe25c2010-10-18 17:17:23 -070024/*
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080025 * BFA module list terminated by NULL
26 */
/*
 * Ordered table of BFA submodules; iterated by the iocfc start/stop/
 * disable paths below.  NULL-terminated.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
38/*
39 * Message handlers for various modules.
40 */
/*
 * Response-queue dispatch table, indexed by BFI message class
 * (m->mhdr.msg_class in bfa_isr_rspq).  Classes with no handler trap
 * into bfa_isr_unhandled, which logs and asserts.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
75/*
76 * Message handlers for mailbox command classes
77 */
/*
 * Mailbox dispatch table, indexed by BFI message class; registered with
 * the IOC in bfa_iocfc_attach().  Only IOCFC traffic is handled here;
 * NULL entries are classes serviced elsewhere (or not at all).
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC */
	NULL,		/* BFI_MC_DIAG */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE */
	NULL,		/* BFI_MC_PORT */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
/*
 * Attach the port submodule: carve its DMA memory out of the shared
 * meminfo pool and advance the pool cursors past the claimed region.
 */
static void
bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_port_s	*port = &bfa->modules.port;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	/* snapshot current DMA cursor (virtual + physical) */
	dm_len = bfa_port_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(port, 0, sizeof(struct bfa_port_s));
	bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
	bfa_port_mem_claim(port, dm_kva, dm_pa);

	/* advance the pool past what the port module claimed */
	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}
110
111/*
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700112 * ablk module attach
113 */
/*
 * ablk module attach: same DMA-carving pattern as bfa_com_port_attach,
 * but for the adapter-block (ablk) submodule.
 */
static void
bfa_com_ablk_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
{
	struct bfa_ablk_s	*ablk = &bfa->modules.ablk;
	u32			dm_len;
	u8			*dm_kva;
	u64			dm_pa;

	dm_len = bfa_ablk_meminfo();
	dm_kva = bfa_meminfo_dma_virt(mi);
	dm_pa  = bfa_meminfo_dma_phys(mi);

	memset(ablk, 0, sizeof(struct bfa_ablk_s));
	bfa_ablk_attach(ablk, &bfa->ioc);
	bfa_ablk_memclaim(ablk, dm_kva, dm_pa);

	/* advance the pool past the ablk claim */
	bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
	bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
}
133
134/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700135 * BFA IOC FC related definitions
136 */
137
Jing Huang5fbe25c2010-10-18 17:17:23 -0700138/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700139 * IOC local definitions
140 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/* iocfc.action values: what operation is currently in flight */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

/* Default driver/firmware resource configuration */
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16
166
Jing Huang5fbe25c2010-10-18 17:17:23 -0700167/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700168 * forward declaration for IOC FC functions
169 */
/* IOC event callbacks wired into bfa_iocfc_cbfn in bfa_iocfc_attach() */
static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
static void bfa_iocfc_disable_cbfn(void *bfa_arg);
static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
static void bfa_iocfc_reset_cbfn(void *bfa_arg);
static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
175
Jing Huang5fbe25c2010-10-18 17:17:23 -0700176/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700177 * BFA Interrupt handling functions
178 */
/*
 * Drain the wait queue for request queue @qid: as long as the reqq has
 * room, pop the next waiter and invoke its qresume callback.
 */
static void
bfa_reqq_resume(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq, *qe, *qen;
	struct bfa_reqq_wait_s *wqe;

	waitq = bfa_reqq(bfa, qid);
	list_for_each_safe(qe, qen, waitq) {
		/*
		 * Callback only as long as there is room in request queue
		 */
		if (bfa_reqq_full(bfa, qid))
			break;

		/* unlink before calling back; qresume may requeue */
		list_del(qe);
		wqe = (struct bfa_reqq_wait_s *) qe;
		wqe->qresume(wqe->cbarg);
	}
}
198
/*
 * Service response queue @qid: ack the interrupt, dispatch every message
 * between the consumer and producer indices, publish the new CI to the
 * hardware, then resume any requests stalled on the matching reqq.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		/* dispatch by message class via bfa_isrs[] */
		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
233
/*
 * Request-queue interrupt: ack it and resume waiters now that the
 * hardware has consumed entries.  @qid is masked into the valid range.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
250
/*
 * Single-vector MSI-X handler: fall back to the INTx-style poll of all
 * interrupt sources.  @vec is unused.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
256
/*
 * Legacy/INTx interrupt handler.  Returns BFA_FALSE if the interrupt is
 * not ours (status register reads zero), BFA_TRUE otherwise.  Services
 * RME (response) queues first, then CPE (request) queues, then any
 * remaining bits as LPU/error interrupts.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		/* only process queues while queue_process is enabled */
		if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* whatever is left is an LPU mailbox or error interrupt */
	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
299
/*
 * Enable interrupts: install the control MSI-X vector, build the unmask
 * bit set for this PCI function (CT2 vs older ASICs use different
 * masks), clear stale status, program the mask register, and finally
 * switch the ISR mode to match MSI-X availability.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* clear pending status, then unmask (mask register is inverted) */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
324
/*
 * Disable all interrupts: drop back to INTx mode, mask everything,
 * and remove the MSI-X handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
332
/*
 * MSI-X handler for a CPE (request) queue vector; translate the vector
 * number to a queue id relative to the first CPE vector.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
338
/*
 * Catch-all for message classes with no registered handler: record the
 * offending header in the trace buffer, warn, and freeze the trace so
 * the evidence survives for debugging.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
348
/*
 * MSI-X handler for an RME (response) queue vector; translate the
 * vector number to a queue id relative to the first RME vector.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
354
/*
 * LPU mailbox / error interrupt handler.  Decodes the status register
 * (CT2 vs older ASIC bit layouts differ), services mailbox traffic,
 * clears the auxiliary halt/PSS conditions, and escalates remaining
 * error bits to the IOC error handler.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr  = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				   __HFN_INT_MBOX_LPU1_CT2);
		intr    &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr  = intr & __HFN_INT_ERR_PSS;
		lpu_isr  = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr    &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt
			 * handler is still called even though it is already
			 * masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
407
Jing Huang5fbe25c2010-10-18 17:17:23 -0700408/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700409 * BFA IOC FC related functions
410 */
411
Jing Huang5fbe25c2010-10-18 17:17:23 -0700412/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800413 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700414 */
415
416static void
417bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
418{
419 int i, per_reqq_sz, per_rspq_sz;
420
421 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
422 BFA_DMA_ALIGN_SZ);
423 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
424 BFA_DMA_ALIGN_SZ);
425
426 /*
427 * Calculate CQ size
428 */
429 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
430 *dm_len = *dm_len + per_reqq_sz;
431 *dm_len = *dm_len + per_rspq_sz;
432 }
433
434 /*
435 * Calculate Shadow CI/PI size
436 */
437 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
438 *dm_len += (2 * BFA_CACHELINE_SZ);
439}
440
441static void
442bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
443{
444 *dm_len +=
445 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
446 *dm_len +=
447 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
448 BFA_CACHELINE_SZ);
449}
450
Jing Huang5fbe25c2010-10-18 17:17:23 -0700451/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700452 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
453 */
/*
 * Build the BFI_IOCFC_H2I_CFG_REQ message describing driver queue
 * layout and resource counts, and send it to firmware over the mailbox.
 * All multi-byte fields are converted to big-endian for the firmware.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* cleared here; set again when the cfg response arrives */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
516
/*
 * Initialize iocfc software state and bind the chip-specific hardware
 * interface: Catapult (CT) vs Crossbow (CB) function tables, with a
 * further override of reginit/isr_mode_set for CT2 ASICs.  Finishes by
 * running the selected register-init routine.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		/* CB vectors are per PCI function */
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
570
/*
 * Carve the iocfc DMA/KVA requirements out of the meminfo pool, in the
 * same order that bfa_iocfc_meminfo() sized them: IOC attributes, then
 * req/rsp rings, shadow CI/PI, config info page, config response page,
 * and finally (KVA) the optional firmware trace buffer.  The cursor
 * advance must mirror the sizing functions exactly.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8	*dm_kva;
	u64	dm_pa;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	int	dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);


	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	/* firmware trace buffer is claimed only when auto-recovery is on */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
658
Jing Huang5fbe25c2010-10-18 17:17:23 -0700659/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700660 * Start BFA submodules.
661 */
662static void
663bfa_iocfc_start_submod(struct bfa_s *bfa)
664{
665 int i;
666
Krishna Gudipati775c7742011-06-13 15:52:12 -0700667 bfa->queue_process = BFA_TRUE;
Krishna Gudipati11189202011-06-13 15:50:35 -0700668 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
669 bfa->iocfc.hwif.hw_rspq_ack(bfa, i);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700670
671 for (i = 0; hal_mods[i]; i++)
672 hal_mods[i]->start(bfa);
673}
674
Jing Huang5fbe25c2010-10-18 17:17:23 -0700675/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700676 * Disable BFA submodules.
677 */
678static void
679bfa_iocfc_disable_submod(struct bfa_s *bfa)
680{
681 int i;
682
683 for (i = 0; hal_mods[i]; i++)
684 hal_mods[i]->iocdisable(bfa);
685}
686
687static void
688bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
689{
690 struct bfa_s *bfa = bfa_arg;
691
692 if (complete) {
693 if (bfa->iocfc.cfgdone)
694 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
695 else
696 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
697 } else {
698 if (bfa->iocfc.cfgdone)
699 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
700 }
701}
702
703static void
704bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
705{
706 struct bfa_s *bfa = bfa_arg;
707 struct bfad_s *bfad = bfa->bfad;
708
709 if (compl)
710 complete(&bfad->comp);
711 else
712 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
713}
714
715static void
716bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
717{
718 struct bfa_s *bfa = bfa_arg;
719 struct bfad_s *bfad = bfa->bfad;
720
721 if (compl)
722 complete(&bfad->disable_comp);
723}
724
/*
 * Configure queue registers from the firmware response: each CQ's
 * CI/PI/control register address is the BAR0 base plus the
 * (big-endian) offset reported by firmware.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
744
Jing Huang5fbe25c2010-10-18 17:17:23 -0700745/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700746 * Update BFA configuration from firmware configuration.
747 */
748static void
749bfa_iocfc_cfgrsp(struct bfa_s *bfa)
750{
751 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
752 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
753 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
754
755 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700756 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700757 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
Jing Huangba816ea2010-10-18 17:10:50 -0700758 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
759 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
760 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
761 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700762
763 iocfc->cfgdone = BFA_TRUE;
764
Jing Huang5fbe25c2010-10-18 17:17:23 -0700765 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700766 * configure queue register offsets as learnt from firmware
767 */
768 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
769
770 /*
Krishna Gudipati775c7742011-06-13 15:52:12 -0700771 * Install MSIX queue handlers
772 */
773 bfa_msix_queue_install(bfa);
774
775 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700776 * Configuration is complete - initialize/start submodules
777 */
778 bfa_fcport_init(bfa);
779
780 if (iocfc->action == BFA_IOCFC_ACT_INIT)
781 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
782 else
783 bfa_iocfc_start_submod(bfa);
784}
785void
786bfa_iocfc_reset_queues(struct bfa_s *bfa)
787{
788 int q;
789
790 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
791 bfa_reqq_ci(bfa, q) = 0;
792 bfa_reqq_pi(bfa, q) = 0;
793 bfa_rspq_ci(bfa, q) = 0;
794 bfa_rspq_pi(bfa, q) = 0;
795 }
796}
797
Jing Huang5fbe25c2010-10-18 17:17:23 -0700798/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700799 * IOC enable request is complete
800 */
/*
 * IOC enable request is complete.  On success, proceed to send the
 * firmware configuration; on failure during driver init, disable
 * interrupts and report init failure via the callback queue.
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s	*bfa = bfa_arg;

	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				     bfa_iocfc_init_cb, bfa);
		return;
	}

	bfa_iocfc_send_cfg(bfa);
}
816
Jing Huang5fbe25c2010-10-18 17:17:23 -0700817/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700818 * IOC disable request is complete
819 */
/*
 * IOC disable request is complete.  Quiesce interrupts and submodules,
 * then queue the stop or disable completion callback depending on the
 * action that triggered the disable.
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			     bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			     bfa);
	}
}
837
Jing Huang5fbe25c2010-10-18 17:17:23 -0700838/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700839 * Notify sub-modules of hardware failure.
840 */
/*
 * Notify sub-modules of hardware failure: stop queue processing,
 * disable interrupts and submodules, and fail a pending init.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			     bfa);
}
855
Jing Huang5fbe25c2010-10-18 17:17:23 -0700856/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700857 * Actions on chip-reset completion.
858 */
/*
 * Actions on chip-reset completion: rewind all queue indices and
 * re-enable interrupts.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
867
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700868
Jing Huang5fbe25c2010-10-18 17:17:23 -0700869/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700870 * Query IOC memory requirement information.
871 */
/*
 * Accumulate iocfc memory requirements: DMA for IOC attributes, the
 * firmware config pages and the CQs; KVA for the optional firmware
 * trace buffer.  Must stay in lock-step with bfa_iocfc_mem_claim().
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		  u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
883
/*
 * Attach the IOCFC module: register IOC event callbacks, initialize the
 * IOC/PCI plumbing and claim the memory reserved by bfa_cfg_get_meminfo().
 * (Previous header comment was a copy-paste of the meminfo one.)
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* Hook IOCFC handlers into the IOC event callback table. */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	/* Claim DMA/KVA memory carved out by bfa_cfg_get_meminfo(). */
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
919
/*
 * Kick off IOC enable for driver instantiation; completion is delivered
 * asynchronously via the registered IOC callbacks.
 * (Previous header comment was a copy-paste of the meminfo one.)
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
929
Jing Huang5fbe25c2010-10-18 17:17:23 -0700930/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700931 * IOC start called from bfa_start(). Called to start IOC operations
932 * at driver instantiation for this instance.
933 */
934void
935bfa_iocfc_start(struct bfa_s *bfa)
936{
937 if (bfa->iocfc.cfgdone)
938 bfa_iocfc_start_submod(bfa);
939}
940
/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	/* Halt request-queue processing; IOC disable completes async. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
953
954void
955bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
956{
957 struct bfa_s *bfa = bfaarg;
958 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
959 union bfi_iocfc_i2h_msg_u *msg;
960
961 msg = (union bfi_iocfc_i2h_msg_u *) m;
962 bfa_trc(bfa, msg->mh.msg_id);
963
964 switch (msg->mh.msg_id) {
965 case BFI_IOCFC_I2H_CFG_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700966 bfa_iocfc_cfgrsp(bfa);
967 break;
968 case BFI_IOCFC_I2H_UPDATEQ_RSP:
969 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
970 break;
971 default:
Jing Huangd4b671c2010-12-26 21:46:35 -0800972 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700973 }
974}
975
976void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700977bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
978{
979 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
980
981 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
982
983 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -0700984 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
985 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700986
987 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -0700988 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
989 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700990
991 attr->config = iocfc->cfg;
992}
993
/*
 * Set interrupt moderation attributes, forwarding them to firmware
 * when the IOC is operational.
 *
 * Returns BFA_STATUS_OK, or BFA_STATUS_DEVBUSY when no request-queue
 * element is available for the mailbox message.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	/* Cache new settings in wire (big-endian) format. */
	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/* Not operational yet: cached values are applied at fw config time. */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	/* Build and post the SET_INTR request to firmware. */
	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
1023
/*
 * Record the sense-buffer length and DMA base address in the firmware
 * configuration block.
 */
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/*
 * Enable IOC after it is disabled. Logs the event to the port log
 * before initiating the (asynchronous) enable.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
1042
/*
 * Disable the IOC; completion is delivered via the disable callback.
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* Stop request-queue processing before taking the IOC down. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1053
1054
1055bfa_boolean_t
1056bfa_iocfc_is_operational(struct bfa_s *bfa)
1057{
1058 return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
1059}
1060
/*
 * Return boot target port wwns -- read from boot information in flash.
 */
void
bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
	int i;

	/* Pre-boot (PBC) configuration takes precedence when present. */
	if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
		bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
		*nwwns = cfgrsp->pbc_cfg.nbluns;
		for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
			wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;

		return;
	}

	/* Otherwise fall back to the boot wwn table from firmware.
	 * NOTE(review): assumes wwns[] holds at least as many entries as
	 * cfgrsp->bootwwns.wwn -- caller contract, confirm at call sites. */
	*nwwns = cfgrsp->bootwwns.nwwns;
	memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
}
1083
/*
 * Copy the pre-boot vport table into @pbc_vport and return the number
 * of pre-boot vports reported by firmware.
 */
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	/* Whole table is copied; only the first nvports entries are valid. */
	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
1093
Jing Huang7725ccf2009-09-23 17:46:15 -07001094
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	/* Accumulate requirements from IOCFC and every HAL sub-module. */
	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	/* Extra DMA memory for the port and ablk sub-components. */
	dm_len += bfa_port_meminfo();
	dm_len += bfa_ablk_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
1148
/*
 * Use this function to do attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 *	void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	/* IOCFC first, then each HAL sub-module in hal_mods[] order. */
	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
	bfa_com_ablk_attach(bfa, meminfo);
}
1203
Jing Huang5fbe25c2010-10-18 17:17:23 -07001204/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001205 * Use this function to delete a BFA IOC. IOC should be stopped (by
1206 * calling bfa_stop()) before this function call.
1207 *
1208 * @param[in] bfa - pointer to bfa_t.
1209 *
1210 * @return
1211 * void
1212 *
1213 * Special Considerations:
1214 *
1215 * @note
1216 */
1217void
1218bfa_detach(struct bfa_s *bfa)
1219{
1220 int i;
1221
1222 for (i = 0; hal_mods[i]; i++)
1223 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001224 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001225}
1226
/*
 * Move all pending completion callbacks from the BFA instance onto
 * the caller-supplied @comp_q.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1233
/*
 * Run every queued completion callback with completion status BFA_TRUE.
 */
void
bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct list_head *qen;
	struct bfa_cb_qe_s *hcb_qe;

	/* _safe variant: a callback may remove its own queue element. */
	list_for_each_safe(qe, qen, comp_q) {
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
	}
}
1246
/*
 * Drain @comp_q, invoking each callback with BFA_FALSE.
 * NOTE(review): BFA_FALSE presumably signals "not completed" (flush /
 * discard path) -- confirm against the callback implementations.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
1259
Jing Huang7725ccf2009-09-23 17:46:15 -07001260
Jing Huang5fbe25c2010-10-18 17:17:23 -07001261/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001262 * Return the list of PCI vendor/device id lists supported by this
1263 * BFA instance.
1264 */
1265void
1266bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1267{
1268 static struct bfa_pciid_s __pciids[] = {
1269 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1270 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1271 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
Jing Huang293f82d2010-07-08 19:45:20 -07001272 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
Jing Huang7725ccf2009-09-23 17:46:15 -07001273 };
1274
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001275 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001276 *pciids = __pciids;
1277}
1278
/*
 * Use this function query the default struct bfa_iocfc_cfg_s value (compiled
 * into BFA layer). The OS driver can then turn back and overwrite entries that
 * have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 *	void
 *
 * Special Considerations:
 *	note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware-visible resource counts. */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side defaults. */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;
}
1315
/*
 * Fill @cfg with the minimum supported configuration: start from the
 * defaults, then shrink every tunable to its floor value.
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	/* Mark this as a minimum configuration for downstream consumers. */
	cfg->drvcfg.min_cfg = BFA_TRUE;
}