blob: 4befbf9fd88848ab8f92145aa1458574626cd1c1 [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
Jing Huang5fbe25c2010-10-18 17:17:23 -070024/*
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080025 * BFA module list terminated by NULL
26 */
/*
 * BFA module list terminated by NULL.  Iterated in array order by
 * bfa_iocfc_start_submod() (->start) and bfa_iocfc_disable_submod()
 * (->iocdisable), so ordering here defines start/teardown ordering.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcp,
	NULL
};
37
38/*
39 * Message handlers for various modules.
40 */
/*
 * Message handlers for various modules, indexed by BFI message class
 * (mhdr.msg_class) — see the dispatch in bfa_isr_rspq().  Classes with
 * no response-queue traffic route to bfa_isr_unhandled(), which asserts.
 */
static bfa_isr_func_t  bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itn_isr,		/* BFI_MC_ITN */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
75/*
76 * Message handlers for mailbox command classes
77 */
/*
 * Message handlers for mailbox command classes, indexed by BFI message
 * class.  Only BFI_MC_IOCFC is serviced here; the remaining slots (the
 * array is sized BFI_MC_MAX) default to NULL.
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
91static void
92bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
93{
94 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len;
96 u8 *dm_kva;
97 u64 dm_pa;
98
99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi);
102
103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa);
106
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
109}
110
111/*
Krishna Gudipati1a4d8e12011-06-24 20:22:28 -0700112 * ablk module attach
113 */
114static void
115bfa_com_ablk_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
116{
117 struct bfa_ablk_s *ablk = &bfa->modules.ablk;
118 u32 dm_len;
119 u8 *dm_kva;
120 u64 dm_pa;
121
122 dm_len = bfa_ablk_meminfo();
123 dm_kva = bfa_meminfo_dma_virt(mi);
124 dm_pa = bfa_meminfo_dma_phys(mi);
125
126 memset(ablk, 0, sizeof(struct bfa_ablk_s));
127 bfa_ablk_attach(ablk, &bfa->ioc);
128 bfa_ablk_memclaim(ablk, dm_kva, dm_pa);
129
130 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
131 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
132}
133
134/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700135 * BFA IOC FC related definitions
136 */
137
Jing Huang5fbe25c2010-10-18 17:17:23 -0700138/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700139 * IOC local definitions
140 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/* iocfc->action values: what operation completion callbacks report on */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

/*
 * Default driver/firmware configuration limits (object counts).
 */
#define DEF_CFG_NUM_FABRICS	1
#define DEF_CFG_NUM_LPORTS	256
#define DEF_CFG_NUM_CQS		4
#define DEF_CFG_NUM_IOIM_REQS	(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS	128
#define DEF_CFG_NUM_FCXP_REQS	64
#define DEF_CFG_NUM_UF_BUFS	64
#define DEF_CFG_NUM_RPORTS	1024
#define DEF_CFG_NUM_ITNIMS	(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS	256

#define DEF_CFG_NUM_SGPGS	2048
#define DEF_CFG_NUM_REQQ_ELEMS	256
#define DEF_CFG_NUM_RSPQ_ELEMS	64
#define DEF_CFG_NUM_SBOOT_TGTS	16
#define DEF_CFG_NUM_SBOOT_LUNS	16
166
Jing Huang5fbe25c2010-10-18 17:17:23 -0700167/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700168 * forward declaration for IOC FC functions
169 */
170static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
171static void bfa_iocfc_disable_cbfn(void *bfa_arg);
172static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
173static void bfa_iocfc_reset_cbfn(void *bfa_arg);
174static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
175
Jing Huang5fbe25c2010-10-18 17:17:23 -0700176/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700177 * BFA Interrupt handling functions
178 */
179static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700180bfa_reqq_resume(struct bfa_s *bfa, int qid)
181{
182 struct list_head *waitq, *qe, *qen;
183 struct bfa_reqq_wait_s *wqe;
184
185 waitq = bfa_reqq(bfa, qid);
186 list_for_each_safe(qe, qen, waitq) {
Jing Huang5fbe25c2010-10-18 17:17:23 -0700187 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700188 * Callback only as long as there is room in request queue
189 */
190 if (bfa_reqq_full(bfa, qid))
191 break;
192
193 list_del(qe);
194 wqe = (struct bfa_reqq_wait_s *) qe;
195 wqe->qresume(wqe->cbarg);
196 }
197}
198
/*
 * Service one RME (response) queue: ack the interrupt, dispatch every
 * message between CI and PI to its class handler from bfa_isrs[], publish
 * the new CI back to hardware, then resume any requests waiting on the
 * paired request queue.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32	pi, ci;
	struct list_head *waitq;

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		/* firmware must never hand us an out-of-range class */
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
233
/*
 * Service one CPE (request) queue interrupt: ack it and resume any
 * requesters waiting for queue space.  The vector id is masked down to
 * a valid queue index.
 */
static inline void
bfa_isr_reqq(struct bfa_s *bfa, int qid)
{
	struct list_head *waitq;

	qid &= (BFI_IOC_MAX_CQS - 1);

	bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
250
/*
 * MSI-X handler used when all interrupt sources share a single vector:
 * service everything through the INTx poll path.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
256
/*
 * INTx-mode interrupt service routine.  Reads the interrupt status,
 * services RME (response) queues first, then CPE (request) queues, and
 * finally forwards any remaining (error/mailbox) bits to
 * bfa_msix_lpu_err().  Returns BFA_FALSE if no interrupt was pending
 * (shared-line case), BFA_TRUE otherwise.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		/* only touch queues while queue processing is enabled */
		if ((intr & (__HFN_INT_RME_Q0 << queue)) && bfa->queue_process)
			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if ((intr & (__HFN_INT_CPE_Q0 << queue)) && bfa->queue_process)
			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
299
/*
 * Enable interrupts: install the control vector handler, build the
 * per-ASIC unmask bit set (error bits plus the bits owned by this PCI
 * function), clear stale status, program the interrupt mask register,
 * and select INTx vs MSI-X delivery mode.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_ctrl_install(bfa);

	/* CT2 ASICs use a different error/function bit layout */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/* clear any stale status, then unmask only the bits we service */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
324
/*
 * Disable interrupts: fall back to INTx mode, mask every interrupt
 * source, and remove the MSI-X handlers.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
332
/*
 * MSI-X handler for a CPE (request) queue vector; translate the vector
 * number to a queue id relative to the first CPE vector.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
338
/*
 * Catch-all handler for message classes with no registered ISR: trace
 * the offending header, assert, and freeze the trace buffer for
 * post-mortem analysis.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
348
/*
 * MSI-X handler for an RME (response) queue vector; translate the
 * vector number to a queue id relative to the first RME vector.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
354
/*
 * Handler for LPU mailbox and error interrupts.  Splits the status into
 * mailbox (lpu), firmware-halt (halt) and PSS-error (pss) conditions
 * using the per-ASIC bit layout, services the mailbox, clears the extra
 * registers the hardware requires, and escalates remaining error bits
 * to the IOC error path.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler
			 * is still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
407
Jing Huang5fbe25c2010-10-18 17:17:23 -0700408/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700409 * BFA IOC FC related functions
410 */
411
Jing Huang5fbe25c2010-10-18 17:17:23 -0700412/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800413 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700414 */
415
416static void
417bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
418{
419 int i, per_reqq_sz, per_rspq_sz;
420
421 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
422 BFA_DMA_ALIGN_SZ);
423 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
424 BFA_DMA_ALIGN_SZ);
425
426 /*
427 * Calculate CQ size
428 */
429 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
430 *dm_len = *dm_len + per_reqq_sz;
431 *dm_len = *dm_len + per_rspq_sz;
432 }
433
434 /*
435 * Calculate Shadow CI/PI size
436 */
437 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
438 *dm_len += (2 * BFA_CACHELINE_SZ);
439}
440
441static void
442bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
443{
444 *dm_len +=
445 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
446 *dm_len +=
447 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
448 BFA_CACHELINE_SZ);
449}
450
Jing Huang5fbe25c2010-10-18 17:17:23 -0700451/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700452 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
453 */
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ.  cfg_info
 * lives in DMA memory shared with firmware, so all multi-byte fields
 * are converted to big-endian (cpu_to_be16 / bfa_dma_be_addr_set)
 * before the request is posted.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;
	cfg_info->num_ioim_reqs = cpu_to_be16(cfg->fwcfg.num_ioim_reqs);
	cfg_info->num_fwtio_reqs = cpu_to_be16(cfg->fwcfg.num_fwtio_reqs);

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* cleared here; set again when the firmware response arrives */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
516
/*
 * Initialize IOCFC software state and select the chip-specific hardware
 * interface: Catapult (hwct) vs Crossbow (hwcb) function tables, with a
 * CT2 override for register init.  Also records the base MSI-X vector
 * numbers for the CPE/RME queues.  pcidev is currently unused here.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	/* keep a private copy of the caller's configuration */
	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwct_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwct_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_ctrl_install = bfa_hwcb_msix_ctrl_install;
		iocfc->hwif.hw_msix_queue_install = bfa_hwcb_msix_queue_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		/* Crossbow vectors are partitioned per PCI function */
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* CT2 overrides: its own reginit; isr_mode_set not applicable */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
570
/*
 * Carve the pre-sized DMA region (see bfa_iocfc_cqs_sz/fw_cfg_sz) into
 * its consumers, in a fixed order that must match the sizing functions:
 * IOC attributes, CQ rings, shadow CI/PI cache lines, config-request
 * page, config-response page.  Finally claims KVA-only memory for the
 * firmware trace buffer when auto-recovery is enabled.
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8	*dm_kva;
	u64	dm_pa;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	int	dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa  += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	/* one cache line each for the shadow consumer/producer indices */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);


	/* publish the advanced cursor back for later claimers */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
658
Jing Huang5fbe25c2010-10-18 17:17:23 -0700659/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700660 * Start BFA submodules.
661 */
662static void
663bfa_iocfc_start_submod(struct bfa_s *bfa)
664{
665 int i;
666
Krishna Gudipati775c7742011-06-13 15:52:12 -0700667 bfa->queue_process = BFA_TRUE;
Krishna Gudipati11189202011-06-13 15:50:35 -0700668 for (i = 0; i < BFI_IOC_MAX_CQS; i++)
669 bfa->iocfc.hwif.hw_rspq_ack(bfa, i);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700670
671 for (i = 0; hal_mods[i]; i++)
672 hal_mods[i]->start(bfa);
673}
674
Jing Huang5fbe25c2010-10-18 17:17:23 -0700675/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700676 * Disable BFA submodules.
677 */
678static void
679bfa_iocfc_disable_submod(struct bfa_s *bfa)
680{
681 int i;
682
683 for (i = 0; hal_mods[i]; i++)
684 hal_mods[i]->iocdisable(bfa);
685}
686
687static void
688bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
689{
690 struct bfa_s *bfa = bfa_arg;
691
692 if (complete) {
693 if (bfa->iocfc.cfgdone)
694 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
695 else
696 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
697 } else {
698 if (bfa->iocfc.cfgdone)
699 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
700 }
701}
702
703static void
704bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
705{
706 struct bfa_s *bfa = bfa_arg;
707 struct bfad_s *bfad = bfa->bfad;
708
709 if (compl)
710 complete(&bfad->comp);
711 else
712 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
713}
714
715static void
716bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
717{
718 struct bfa_s *bfa = bfa_arg;
719 struct bfad_s *bfad = bfa->bfad;
720
721 if (compl)
722 complete(&bfad->disable_comp);
723}
724
/*
 * Configure queue registers from the firmware response.  Firmware
 * reports each queue register location as a big-endian byte offset
 * from BAR0; store the resolved ioremapped addresses.
 */
static void
bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
{
	int	i;
	struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
	void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);

	for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
		r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
		r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
		r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
		r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
		r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
		r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
	}
}
744
Jing Huang5fbe25c2010-10-18 17:17:23 -0700745/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700746 * Update BFA configuration from firmware configuration.
747 */
748static void
749bfa_iocfc_cfgrsp(struct bfa_s *bfa)
750{
751 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
752 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
753 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
754
755 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700756 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
Krishna Gudipatie2187d72011-06-13 15:53:58 -0700757 fwcfg->num_fwtio_reqs = be16_to_cpu(fwcfg->num_fwtio_reqs);
Jing Huangba816ea2010-10-18 17:10:50 -0700758 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
759 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
760 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
761 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700762
763 iocfc->cfgdone = BFA_TRUE;
764
Jing Huang5fbe25c2010-10-18 17:17:23 -0700765 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700766 * configure queue register offsets as learnt from firmware
767 */
768 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
769
770 /*
Krishna Gudipati775c7742011-06-13 15:52:12 -0700771 * Install MSIX queue handlers
772 */
773 bfa_msix_queue_install(bfa);
774
775 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700776 * Configuration is complete - initialize/start submodules
777 */
778 bfa_fcport_init(bfa);
779
780 if (iocfc->action == BFA_IOCFC_ACT_INIT)
781 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
782 else
783 bfa_iocfc_start_submod(bfa);
784}
785void
786bfa_iocfc_reset_queues(struct bfa_s *bfa)
787{
788 int q;
789
790 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
791 bfa_reqq_ci(bfa, q) = 0;
792 bfa_reqq_pi(bfa, q) = 0;
793 bfa_rspq_ci(bfa, q) = 0;
794 bfa_rspq_pi(bfa, q) = 0;
795 }
796}
797
Krishna Gudipatia7141342011-06-24 20:23:19 -0700798/* Fabric Assigned Address specific functions */
799
800/*
801 * Check whether IOC is ready before sending command down
802 */
/*
 * Check whether the IOC is ready before sending an FAA command down.
 * FAA is supported only on operational FC (non-mezzanine) adapters;
 * when the IOC is not yet operational the command is allowed only while
 * address acquisition is still in progress.
 */
static bfa_status_t
bfa_faa_validate_request(struct bfa_s *bfa)
{
	enum bfa_ioc_type_e ioc_type = bfa_get_type(bfa);
	u32	card_type = bfa->ioc.attr->card_type;

	if (bfa_ioc_is_operational(&bfa->ioc)) {
		if ((ioc_type != BFA_IOC_TYPE_FC) || bfa_mfg_is_mezz(card_type))
			return BFA_STATUS_FEATURE_NOT_SUPPORTED;
	} else {
		if (!bfa_ioc_is_acq_addr(&bfa->ioc))
			return BFA_STATUS_IOC_NON_OP;
	}

	return BFA_STATUS_OK;
}
819
/*
 * Enable Fabric Assigned Address.  Validates adapter state, rejects the
 * request if an FAA mailbox exchange is already in flight, FAA is
 * already enabled, or trunking is enabled; otherwise records the
 * completion callback, marks the exchange busy and posts the mailbox
 * request.  The callback fires from the reply handler.
 */
bfa_status_t
bfa_faa_enable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_en_dis_s faa_enable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_ENABLED)
		return BFA_STATUS_FAA_ENABLED;

	/* FAA and trunking are mutually exclusive */
	if (bfa_fcport_is_trunk_enabled(bfa))
		return BFA_STATUS_ERROR_TRUNK_ENABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_ENABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_enable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_enable_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_ENABLE_REQ, bfa_lpuid(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_enable_req,
			sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
855
/*
 * Disable Fabric Assigned Address.  Mirror of bfa_faa_enable(): checks
 * adapter state, busy flag and current FAA state, then posts the
 * mailbox disable request; the recorded callback fires from the reply
 * handler.
 */
bfa_status_t
bfa_faa_disable(struct bfa_s *bfa, bfa_cb_iocfc_t cbfn,
		void *cbarg)
{
	struct bfi_faa_en_dis_s faa_disable_req;
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;
	bfa_status_t		status;

	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	if (iocfc->faa_args.faa_state == BFA_FAA_DISABLED)
		return BFA_STATUS_FAA_DISABLED;

	bfa_fcport_cfg_faa(bfa, BFA_FAA_DISABLED);
	iocfc->faa_args.busy = BFA_TRUE;

	memset(&faa_disable_req, 0, sizeof(struct bfi_faa_en_dis_s));
	bfi_h2i_set(faa_disable_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_DISABLE_REQ, bfa_lpuid(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_disable_req,
		sizeof(struct bfi_faa_en_dis_s));

	return BFA_STATUS_OK;
}
889
/*
 * Query FAA attributes.  Records the caller's attribute buffer and
 * completion callback, validates adapter state and the busy flag, then
 * posts the mailbox query; results are filled in by the reply handler
 * before the callback fires.
 */
bfa_status_t
bfa_faa_query(struct bfa_s *bfa, struct bfa_faa_attr_s *attr,
		bfa_cb_iocfc_t cbfn, void *cbarg)
{
	struct bfi_faa_query_s  faa_attr_req;
	struct bfa_iocfc_s      *iocfc = &bfa->iocfc;
	bfa_status_t            status;

	iocfc->faa_args.faa_attr = attr;
	iocfc->faa_args.faa_cb.faa_cbfn = cbfn;
	iocfc->faa_args.faa_cb.faa_cbarg = cbarg;

	status = bfa_faa_validate_request(bfa);
	if (status != BFA_STATUS_OK)
		return status;

	if (iocfc->faa_args.busy == BFA_TRUE)
		return BFA_STATUS_DEVBUSY;

	iocfc->faa_args.busy = BFA_TRUE;
	memset(&faa_attr_req, 0, sizeof(struct bfi_faa_query_s));
	bfi_h2i_set(faa_attr_req.mh, BFI_MC_IOCFC,
		BFI_IOCFC_H2I_FAA_QUERY_REQ, bfa_lpuid(bfa));

	bfa_ioc_mbox_send(&bfa->ioc, &faa_attr_req,
		sizeof(struct bfi_faa_query_s));

	return BFA_STATUS_OK;
}
919
920/*
921 * FAA enable response
922 */
923static void
924bfa_faa_enable_reply(struct bfa_iocfc_s *iocfc,
925 struct bfi_faa_en_dis_rsp_s *rsp)
926{
927 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
928 bfa_status_t status = rsp->status;
929
930 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
931
932 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
933 iocfc->faa_args.busy = BFA_FALSE;
934}
935
936/*
937 * FAA disable response
938 */
939static void
940bfa_faa_disable_reply(struct bfa_iocfc_s *iocfc,
941 struct bfi_faa_en_dis_rsp_s *rsp)
942{
943 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
944 bfa_status_t status = rsp->status;
945
946 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
947
948 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, status);
949 iocfc->faa_args.busy = BFA_FALSE;
950}
951
952/*
953 * FAA query response
954 */
955static void
956bfa_faa_query_reply(struct bfa_iocfc_s *iocfc,
957 bfi_faa_query_rsp_t *rsp)
958{
959 void *cbarg = iocfc->faa_args.faa_cb.faa_cbarg;
960
961 if (iocfc->faa_args.faa_attr) {
962 iocfc->faa_args.faa_attr->faa = rsp->faa;
963 iocfc->faa_args.faa_attr->faa_state = rsp->faa_status;
964 iocfc->faa_args.faa_attr->pwwn_source = rsp->addr_source;
965 }
966
967 WARN_ON(!iocfc->faa_args.faa_cb.faa_cbfn);
968
969 iocfc->faa_args.faa_cb.faa_cbfn(cbarg, BFA_STATUS_OK);
970 iocfc->faa_args.busy = BFA_FALSE;
971}
972
/*
 * IOC enable request is complete.
 */
static void
bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
{
	struct bfa_s *bfa = bfa_arg;

	/*
	 * FAA address acquisition still pending: complete the init callback
	 * to the driver now. NOTE(review): exact FAA_ACQ_ADDR semantics
	 * inferred from the status name -- confirm against bfa_defs.
	 */
	if (status == BFA_STATUS_FAA_ACQ_ADDR) {
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
			bfa_iocfc_init_cb, bfa);
		return;
	}

	/*
	 * Enable failed: quiesce interrupts; if this enable was part of an
	 * init request, still deliver the (failed) init completion.
	 */
	if (status != BFA_STATUS_OK) {
		bfa_isr_disable(bfa);
		if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
			bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
				bfa_iocfc_init_cb, bfa);
		return;
	}

	/* IOC is up: push the IOCFC configuration to firmware. */
	bfa_iocfc_send_cfg(bfa);
}
997
/*
 * IOC disable request is complete.
 */
static void
bfa_iocfc_disable_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Quiesce interrupts and tear down sub-modules before reporting. */
	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	/* Complete whichever operation (stop vs. disable) requested this. */
	if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
		bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
			bfa);
	else {
		WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
		bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
			bfa);
	}
}
1018
/*
 * Notify sub-modules of hardware failure.
 */
static void
bfa_iocfc_hbfail_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Stop request-queue processing before tearing anything down. */
	bfa->queue_process = BFA_FALSE;

	bfa_isr_disable(bfa);
	bfa_iocfc_disable_submod(bfa);

	/* An in-flight init must still be completed back to the driver. */
	if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
		bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
			bfa);
}
1036
/*
 * Actions on chip-reset completion.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;

	/* Queues must be reset before interrupts are re-enabled. */
	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
1048
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001049
/*
 * Query IOC memory requirement information. The KVA and DMA lengths are
 * accumulated into *km_len / *dm_len (callers pre-initialize them).
 */
void
bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
		u32 *dm_len)
{
	/* dma memory for IOC */
	*dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/* DMA memory for firmware config and completion queues. */
	bfa_iocfc_fw_cfg_sz(cfg, dm_len);
	bfa_iocfc_cqs_sz(cfg, dm_len);
	/* Firmware trace buffer only when auto-recovery is enabled. */
	*km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
}
1064
/*
 * Attach the IOCFC module: register IOC event callbacks, initialize the
 * PCI/mailbox interface, claim memory and set up internal queues.
 * (Previous comment here was a copy-paste of the meminfo description.)
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* Hook IOCFC handlers into the IOC event callback table. */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	/* Carve up the driver-provided memory and init timer/queue lists. */
	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
1100
/*
 * Kick off IOCFC initialization: enabling the IOC starts the hardware
 * init sequence; completion arrives via bfa_iocfc_enable_cbfn().
 * (Previous comment here was a copy-paste of the meminfo description.)
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
1110
/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	/* Sub-modules start only after the firmware config handshake. */
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}
1121
/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	/* Halt request-queue processing before disabling the IOC. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1134
/*
 * IOCFC mailbox handler: dispatch firmware-to-host (I2H) IOCFC messages
 * to their respective reply handlers.
 */
void
bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
{
	struct bfa_s *bfa = bfaarg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	union bfi_iocfc_i2h_msg_u *msg;

	msg = (union bfi_iocfc_i2h_msg_u *) m;
	bfa_trc(bfa, msg->mh.msg_id);

	switch (msg->mh.msg_id) {
	case BFI_IOCFC_I2H_CFG_REPLY:
		bfa_iocfc_cfgrsp(bfa);
		break;
	case BFI_IOCFC_I2H_UPDATEQ_RSP:
		iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
		break;
	case BFI_IOCFC_I2H_FAA_ENABLE_RSP:
		bfa_faa_enable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_DISABLE_RSP:
		bfa_faa_disable_reply(iocfc,
			(struct bfi_faa_en_dis_rsp_s *)msg);
		break;
	case BFI_IOCFC_I2H_FAA_QUERY_RSP:
		bfa_faa_query_reply(iocfc, (bfi_faa_query_rsp_t *)msg);
		break;
	default:
		WARN_ON(1);	/* unexpected message id from firmware */
	}
}
1167
1168void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001169bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
1170{
1171 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1172
1173 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
1174
1175 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -07001176 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
1177 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001178
1179 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -07001180 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
1181 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001182
1183 attr->config = iocfc->cfg;
1184}
1185
/*
 * Set interrupt coalescing attributes. The values are cached in cfginfo
 * (big-endian, so they ride along in the firmware config) and, when the
 * IOC is already operational, also pushed via a SET_INTR request.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/* Not operational yet: cached values go out with the next config. */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;	/* already BE */
	m->latency = iocfc->cfginfo->intr_attr.latency;	/* already BE */

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
1215
/*
 * Record the sense-buffer DMA base address and per-IO sense length in
 * the firmware configuration (picked up by the next config exchange).
 */
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/*
 * Enable IOC after it is disabled. Completion is reported through the
 * IOC enable callback (bfa_iocfc_enable_cbfn).
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	/* Log the request in the port log before acting on it. */
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		"IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
1234
/*
 * Disable the IOC. Completion is reported through the IOC disable
 * callback (bfa_iocfc_disable_cbfn).
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		"IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* Halt request-queue processing before disabling the IOC. */
	bfa->queue_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1245
1246
/*
 * IOCFC is operational only when the underlying IOC is up AND the
 * firmware configuration handshake has completed (cfgdone).
 */
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
1252
Jing Huang5fbe25c2010-10-18 17:17:23 -07001253/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001254 * Return boot target port wwns -- read from boot information in flash.
1255 */
1256void
1257bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1258{
1259 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1260 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1261 int i;
1262
1263 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1264 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1265 *nwwns = cfgrsp->pbc_cfg.nbluns;
1266 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1267 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1268
1269 return;
1270 }
1271
1272 *nwwns = cfgrsp->bootwwns.nwwns;
1273 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1274}
1275
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001276int
1277bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
1278{
1279 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1280 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1281
1282 memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
1283 return cfgrsp->pbc_cfg.nvports;
1284}
1285
Jing Huang7725ccf2009-09-23 17:46:15 -07001286
/*
 * Use this function to query the memory requirement of the BFA library.
 * This function needs to be called before bfa_attach() to get the
 * memory required of the BFA layer for a given driver configuration.
 *
 * This call will fail, if the cap is out of range compared to pre-defined
 * values within the BFA library
 *
 * @param[in] cfg -	pointer to bfa_ioc_cfg_t. Driver layer should indicate
 *			its configuration in this structure.
 *			The default values for struct bfa_iocfc_cfg_s can be
 *			fetched using bfa_cfg_get_default() API.
 *
 *			If cap's boundary check fails, the library will use
 *			the default bfa_cap_t values (and log a warning msg).
 *
 * @param[out] meminfo - pointer to bfa_meminfo_t. This content
 *			indicates the memory type (see bfa_mem_type_t) and
 *			amount of memory required.
 *
 *			Driver should allocate the memory, populate the
 *			starting address for each block and provide the same
 *			structure as input parameter to bfa_attach() call.
 *
 * @return void
 *
 * Special Considerations: @note
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	/* Base IOCFC requirement, then each HAL sub-module's share. */
	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	/* Port and ASIC-block modules are attached outside hal_mods[]. */
	dm_len += bfa_port_meminfo();
	dm_len += bfa_ablk_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
1340
/*
 * Use this function to attach the driver instance with the BFA
 * library. This function will not trigger any HW initialization
 * process (which will be done in bfa_init() call)
 *
 * This call will fail, if the cap is out of range compared to
 * pre-defined values within the BFA library
 *
 * @param[out]	bfa	Pointer to bfa_t.
 * @param[in]	bfad	Opaque handle back to the driver's IOC structure
 * @param[in]	cfg	Pointer to bfa_ioc_cfg_t. Should be same structure
 *			that was used in bfa_cfg_get_meminfo().
 * @param[in]	meminfo	Pointer to bfa_meminfo_t. The driver should
 *			use the bfa_cfg_get_meminfo() call to
 *			find the memory blocks required, allocate the
 *			required memory and provide the starting addresses.
 * @param[in]	pcidev	pointer to struct bfa_pcidev_s
 *
 * @return
 * void
 *
 * Special Considerations:
 *
 * @note
 *
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	       struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	/* Attach IOCFC first, then every HAL sub-module in table order. */
	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	/* Port and ASIC-block modules live outside hal_mods[]. */
	bfa_com_port_attach(bfa, meminfo);
	bfa_com_ablk_attach(bfa, meminfo);
}
1395
Jing Huang5fbe25c2010-10-18 17:17:23 -07001396/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001397 * Use this function to delete a BFA IOC. IOC should be stopped (by
1398 * calling bfa_stop()) before this function call.
1399 *
1400 * @param[in] bfa - pointer to bfa_t.
1401 *
1402 * @return
1403 * void
1404 *
1405 * Special Considerations:
1406 *
1407 * @note
1408 */
1409void
1410bfa_detach(struct bfa_s *bfa)
1411{
1412 int i;
1413
1414 for (i = 0; hal_mods[i]; i++)
1415 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001416 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001417}
1418
/*
 * Move all pending completions from the bfa completion queue onto the
 * caller's list; comp_q is (re-)initialized before the splice.
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1425
1426void
1427bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1428{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001429 struct list_head *qe;
1430 struct list_head *qen;
1431 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001432
1433 list_for_each_safe(qe, qen, comp_q) {
1434 hcb_qe = (struct bfa_cb_qe_s *) qe;
1435 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1436 }
1437}
1438
1439void
1440bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
1441{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001442 struct list_head *qe;
1443 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001444
1445 while (!list_empty(comp_q)) {
1446 bfa_q_deq(comp_q, &qe);
1447 hcb_qe = (struct bfa_cb_qe_s *) qe;
1448 hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
1449 }
1450}
1451
Jing Huang7725ccf2009-09-23 17:46:15 -07001452
Jing Huang5fbe25c2010-10-18 17:17:23 -07001453/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001454 * Return the list of PCI vendor/device id lists supported by this
1455 * BFA instance.
1456 */
1457void
1458bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
1459{
1460 static struct bfa_pciid_s __pciids[] = {
1461 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
1462 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
1463 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
Jing Huang293f82d2010-07-08 19:45:20 -07001464 {BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
Jing Huang7725ccf2009-09-23 17:46:15 -07001465 };
1466
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001467 *npciids = sizeof(__pciids) / sizeof(__pciids[0]);
Jing Huang7725ccf2009-09-23 17:46:15 -07001468 *pciids = __pciids;
1469}
1470
/*
 * Use this function to query the default struct bfa_iocfc_cfg_s value
 * (compiled into the BFA layer). The OS driver can then turn back and
 * overwrite entries that have been configured by the user.
 *
 * @param[in] cfg - pointer to bfa_ioc_cfg_t
 *
 * @return
 * void
 *
 * Special Considerations:
 * @note
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* Firmware resource pool defaults. */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;
	cfg->fwcfg.num_fwtio_reqs = 0;

	/* Driver-side defaults. */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1507
/*
 * Populate cfg with the minimal resource configuration: start from the
 * defaults, clamp each firmware resource pool to its minimum, and flag
 * the result as a min-config (min_cfg).
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;
	cfg->fwcfg.num_fwtio_reqs = 0;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}