blob: 5a00217ee5be55f60aa7fbbbc8b1fbd204b7659c [file] [log] [blame]
Jing Huang7725ccf2009-09-23 17:46:15 -07001/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07002 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
Jing Huang7725ccf2009-09-23 17:46:15 -07003 * All rights reserved
4 * www.brocade.com
5 *
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 */
17
Maggie Zhangf16a1752010-12-09 19:12:32 -080018#include "bfad_drv.h"
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070019#include "bfa_modules.h"
Krishna Gudipati11189202011-06-13 15:50:35 -070020#include "bfi_reg.h"
Jing Huang7725ccf2009-09-23 17:46:15 -070021
Krishna Gudipatia36c61f2010-09-15 11:50:55 -070022BFA_TRC_FILE(HAL, CORE);
Jing Huang7725ccf2009-09-23 17:46:15 -070023
Jing Huang5fbe25c2010-10-18 17:17:23 -070024/*
Maggie Zhangb77ee1f2010-12-09 19:09:26 -080025 * BFA module list terminated by NULL
26 */
/*
 * Submodules are attached/started/stopped in array order; iteration in the
 * start/disable loops terminates at the NULL sentinel.
 */
static struct bfa_module_s *hal_mods[] = {
	&hal_mod_sgpg,
	&hal_mod_fcport,
	&hal_mod_fcxp,
	&hal_mod_lps,
	&hal_mod_uf,
	&hal_mod_rport,
	&hal_mod_fcpim,
	NULL	/* terminator */
};
37
38/*
39 * Message handlers for various modules.
40 */
/*
 * Response-queue message dispatch table, indexed by BFI message class
 * (m->mhdr.msg_class); invoked from bfa_isr_rspq(). Classes without a
 * dedicated handler fall through to bfa_isr_unhandled().
 */
static bfa_isr_func_t bfa_isrs[BFI_MC_MAX] = {
	bfa_isr_unhandled,	/* NONE */
	bfa_isr_unhandled,	/* BFI_MC_IOC */
	bfa_isr_unhandled,	/* BFI_MC_DIAG */
	bfa_isr_unhandled,	/* BFI_MC_FLASH */
	bfa_isr_unhandled,	/* BFI_MC_CEE */
	bfa_fcport_isr,		/* BFI_MC_FCPORT */
	bfa_isr_unhandled,	/* BFI_MC_IOCFC */
	bfa_isr_unhandled,	/* BFI_MC_LL */
	bfa_uf_isr,		/* BFI_MC_UF */
	bfa_fcxp_isr,		/* BFI_MC_FCXP */
	bfa_lps_isr,		/* BFI_MC_LPS */
	bfa_rport_isr,		/* BFI_MC_RPORT */
	bfa_itnim_isr,		/* BFI_MC_ITNIM */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_READ */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_WRITE */
	bfa_isr_unhandled,	/* BFI_MC_IOIM_IO */
	bfa_ioim_isr,		/* BFI_MC_IOIM */
	bfa_ioim_good_comp_isr,	/* BFI_MC_IOIM_IOCOM */
	bfa_tskim_isr,		/* BFI_MC_TSKIM */
	bfa_isr_unhandled,	/* BFI_MC_SBOOT */
	bfa_isr_unhandled,	/* BFI_MC_IPFC */
	bfa_isr_unhandled,	/* BFI_MC_PORT */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
	bfa_isr_unhandled,	/* --------- */
};
75/*
76 * Message handlers for mailbox command classes
77 */
/*
 * Mailbox message dispatch table, indexed by message class; registered
 * with the IOC via bfa_ioc_mbox_register() in bfa_iocfc_attach().
 * Only the IOCFC class is handled here.
 */
static bfa_ioc_mbox_mcfunc_t  bfa_mbox_isrs[BFI_MC_MAX] = {
	NULL,
	NULL,		/* BFI_MC_IOC   */
	NULL,		/* BFI_MC_DIAG  */
	NULL,		/* BFI_MC_FLASH */
	NULL,		/* BFI_MC_CEE   */
	NULL,		/* BFI_MC_PORT  */
	bfa_iocfc_isr,	/* BFI_MC_IOCFC */
	NULL,
};
88
89
90
91static void
92bfa_com_port_attach(struct bfa_s *bfa, struct bfa_meminfo_s *mi)
93{
94 struct bfa_port_s *port = &bfa->modules.port;
95 u32 dm_len;
96 u8 *dm_kva;
97 u64 dm_pa;
98
99 dm_len = bfa_port_meminfo();
100 dm_kva = bfa_meminfo_dma_virt(mi);
101 dm_pa = bfa_meminfo_dma_phys(mi);
102
103 memset(port, 0, sizeof(struct bfa_port_s));
104 bfa_port_attach(port, &bfa->ioc, bfa, bfa->trcmod);
105 bfa_port_mem_claim(port, dm_kva, dm_pa);
106
107 bfa_meminfo_dma_virt(mi) = dm_kva + dm_len;
108 bfa_meminfo_dma_phys(mi) = dm_pa + dm_len;
109}
110
111/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700112 * BFA IOC FC related definitions
113 */
114
Jing Huang5fbe25c2010-10-18 17:17:23 -0700115/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700116 * IOC local definitions
117 */
#define BFA_IOCFC_TOV		5000	/* msecs */

/*
 * IOCFC action states, tracked in iocfc->action across the
 * init/stop/disable completion callbacks.
 */
enum {
	BFA_IOCFC_ACT_NONE	= 0,
	BFA_IOCFC_ACT_INIT	= 1,
	BFA_IOCFC_ACT_STOP	= 2,
	BFA_IOCFC_ACT_DISABLE	= 3,
};

/*
 * Default configuration limits (resource counts).
 */
#define DEF_CFG_NUM_FABRICS		1
#define DEF_CFG_NUM_LPORTS		256
#define DEF_CFG_NUM_CQS			4
#define DEF_CFG_NUM_IOIM_REQS		(BFA_IOIM_MAX)
#define DEF_CFG_NUM_TSKIM_REQS		128
#define DEF_CFG_NUM_FCXP_REQS		64
#define DEF_CFG_NUM_UF_BUFS		64
#define DEF_CFG_NUM_RPORTS		1024
#define DEF_CFG_NUM_ITNIMS		(DEF_CFG_NUM_RPORTS)
#define DEF_CFG_NUM_TINS		256

#define DEF_CFG_NUM_SGPGS		2048
#define DEF_CFG_NUM_REQQ_ELEMS		256
#define DEF_CFG_NUM_RSPQ_ELEMS		64
#define DEF_CFG_NUM_SBOOT_TGTS		16
#define DEF_CFG_NUM_SBOOT_LUNS		16
143
Jing Huang5fbe25c2010-10-18 17:17:23 -0700144/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700145 * forward declaration for IOC FC functions
146 */
147static void bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status);
148static void bfa_iocfc_disable_cbfn(void *bfa_arg);
149static void bfa_iocfc_hbfail_cbfn(void *bfa_arg);
150static void bfa_iocfc_reset_cbfn(void *bfa_arg);
151static struct bfa_ioc_cbfn_s bfa_iocfc_cbfn;
152
Jing Huang5fbe25c2010-10-18 17:17:23 -0700153/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700154 * BFA Interrupt handling functions
155 */
156static void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700157bfa_reqq_resume(struct bfa_s *bfa, int qid)
158{
159 struct list_head *waitq, *qe, *qen;
160 struct bfa_reqq_wait_s *wqe;
161
162 waitq = bfa_reqq(bfa, qid);
163 list_for_each_safe(qe, qen, waitq) {
Jing Huang5fbe25c2010-10-18 17:17:23 -0700164 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700165 * Callback only as long as there is room in request queue
166 */
167 if (bfa_reqq_full(bfa, qid))
168 break;
169
170 list_del(qe);
171 wqe = (struct bfa_reqq_wait_s *) qe;
172 wqe->qresume(wqe->cbarg);
173 }
174}
175
/*
 * Drain one response queue: ack the interrupt, dispatch each queued
 * message to its class handler, publish the new consumer index to the
 * hardware, and resume any requesters waiting on the paired reqq.
 */
static inline void
bfa_isr_rspq(struct bfa_s *bfa, int qid)
{
	struct bfi_msg_s *m;
	u32 pi, ci;
	struct list_head *waitq;

	bfa->iocfc.hwif.hw_rspq_ack(bfa, qid);

	ci = bfa_rspq_ci(bfa, qid);
	pi = bfa_rspq_pi(bfa, qid);

	/* consume every message between CI and the firmware's PI */
	while (ci != pi) {
		m = bfa_rspq_elem(bfa, qid, ci);
		WARN_ON(m->mhdr.msg_class >= BFI_MC_MAX);

		bfa_isrs[m->mhdr.msg_class] (bfa, m);
		CQ_INCR(ci, bfa->iocfc.cfg.drvcfg.num_rspq_elems);
	}

	/*
	 * update CI, both the shadow copy and the hardware register
	 */
	bfa_rspq_ci(bfa, qid) = pi;
	writel(pi, bfa->iocfc.bfa_regs.rme_q_ci[qid]);
	mmiowb();

	/*
	 * Resume any pending requests in the corresponding reqq.
	 */
	waitq = bfa_reqq(bfa, qid);
	if (!list_empty(waitq))
		bfa_reqq_resume(bfa, qid);
}
210
211static inline void
212bfa_isr_reqq(struct bfa_s *bfa, int qid)
213{
214 struct list_head *waitq;
215
216 qid &= (BFI_IOC_MAX_CQS - 1);
217
218 bfa->iocfc.hwif.hw_reqq_ack(bfa, qid);
219
220 /*
221 * Resume any pending requests in the corresponding reqq.
222 */
223 waitq = bfa_reqq(bfa, qid);
224 if (!list_empty(waitq))
225 bfa_reqq_resume(bfa, qid);
226}
227
/*
 * Single-vector MSI-X handler: all event sources share one vector, so
 * delegate to the INTx-style poll of every queue.
 */
void
bfa_msix_all(struct bfa_s *bfa, int vec)
{
	bfa_intx(bfa);
}
233
/*
 * Legacy (INTx) interrupt service: handles RME (response) queues first,
 * then CPE (request) queue events, then LPU mailbox/error interrupts.
 * Returns BFA_TRUE if any interrupt was pending, BFA_FALSE otherwise.
 */
bfa_boolean_t
bfa_intx(struct bfa_s *bfa)
{
	u32 intr, qintr;
	int queue;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);
	if (!intr)
		return BFA_FALSE;

	/*
	 * RME completion queue interrupt: clear the status bits first,
	 * then drain each flagged queue.
	 */
	qintr = intr & __HFN_INT_RME_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_RME_Q0 << queue))
			bfa_isr_rspq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/*
	 * CPE completion queue interrupt
	 */
	qintr = intr & __HFN_INT_CPE_MASK;
	writel(qintr, bfa->iocfc.bfa_regs.intr_status);

	for (queue = 0; queue < BFI_IOC_MAX_CQS_ASIC; queue++) {
		if (intr & (__HFN_INT_CPE_Q0 << queue))
			bfa_isr_reqq(bfa, queue & (BFI_IOC_MAX_CQS - 1));
	}
	intr &= ~qintr;
	if (!intr)
		return BFA_TRUE;

	/* anything left over is an LPU mailbox or error interrupt */
	bfa_msix_lpu_err(bfa, intr);

	return BFA_TRUE;
}
276
/*
 * Install MSI-X vectors and unmask the interrupt sources relevant to
 * this ASIC type and PCI function.
 */
void
bfa_isr_enable(struct bfa_s *bfa)
{
	u32 umsk;
	int pci_func = bfa_ioc_pcifn(&bfa->ioc);

	bfa_trc(bfa, pci_func);

	bfa_msix_install(bfa);

	/* build the set of interrupt bits to enable (error + per-function) */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		umsk = __HFN_INT_ERR_MASK_CT2;
		umsk |= pci_func == 0 ?
			__HFN_INT_FN0_MASK_CT2 : __HFN_INT_FN1_MASK_CT2;
	} else {
		umsk = __HFN_INT_ERR_MASK;
		umsk |= pci_func == 0 ? __HFN_INT_FN0_MASK : __HFN_INT_FN1_MASK;
	}

	/*
	 * Clear any stale status bits, then write the complement of the
	 * enabled set to the mask register (a cached copy is kept in
	 * iocfc.intr_mask).
	 */
	writel(umsk, bfa->iocfc.bfa_regs.intr_status);
	writel(~umsk, bfa->iocfc.bfa_regs.intr_mask);
	bfa->iocfc.intr_mask = ~umsk;
	bfa_isr_mode_set(bfa, bfa->msix.nvecs != 0);
}
301
/*
 * Mask all interrupt sources and tear down the MSI-X vectors.
 */
void
bfa_isr_disable(struct bfa_s *bfa)
{
	bfa_isr_mode_set(bfa, BFA_FALSE);
	/* all-ones mask value disables every source */
	writel(-1L, bfa->iocfc.bfa_regs.intr_mask);
	bfa_msix_uninstall(bfa);
}
309
/*
 * MSI-X handler for a CPE (request) queue vector: translate the vector
 * number to a zero-based queue id and service it.
 */
void
bfa_msix_reqq(struct bfa_s *bfa, int vec)
{
	bfa_isr_reqq(bfa, vec - bfa->iocfc.hwif.cpe_vec_q0);
}
315
/*
 * Catch-all handler for message classes without a real ISR: trace the
 * offending message header, warn, and freeze the trace buffer so the
 * evidence is preserved for debugging.
 */
void
bfa_isr_unhandled(struct bfa_s *bfa, struct bfi_msg_s *m)
{
	bfa_trc(bfa, m->mhdr.msg_class);
	bfa_trc(bfa, m->mhdr.msg_id);
	bfa_trc(bfa, m->mhdr.mtag.i2htok);
	WARN_ON(1);
	bfa_trc_stop(bfa->trcmod);
}
325
/*
 * MSI-X handler for an RME (response) queue vector. Processing is
 * gated on rme_process, which is only set once submodules are started.
 */
void
bfa_msix_rspq(struct bfa_s *bfa, int vec)
{
	if (!bfa->rme_process)
		return;

	/* translate vector number to a zero-based response queue id */
	bfa_isr_rspq(bfa, vec - bfa->iocfc.hwif.rme_vec_q0);
}
334
/*
 * Handle LPU mailbox and error interrupts. Mailbox bits are forwarded
 * to the IOC mailbox ISR; remaining error bits may require clearing
 * additional chip registers (LL halt, PSS error) before the error is
 * reported to the IOC.
 */
void
bfa_msix_lpu_err(struct bfa_s *bfa, int vec)
{
	u32 intr, curr_value;
	bfa_boolean_t lpu_isr, halt_isr, pss_isr;

	intr = readl(bfa->iocfc.bfa_regs.intr_status);

	/* decode the status bits according to the ASIC generation */
	if (bfa_asic_id_ct2(bfa->ioc.pcidev.device_id)) {
		halt_isr = intr & __HFN_INT_CPQ_HALT_CT2;
		pss_isr = intr & __HFN_INT_ERR_PSS_CT2;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0_CT2 |
				  __HFN_INT_MBOX_LPU1_CT2);
		intr &= __HFN_INT_ERR_MASK_CT2;
	} else {
		halt_isr = intr & __HFN_INT_LL_HALT;
		pss_isr = intr & __HFN_INT_ERR_PSS;
		lpu_isr = intr & (__HFN_INT_MBOX_LPU0 | __HFN_INT_MBOX_LPU1);
		intr &= __HFN_INT_ERR_MASK;
	}

	if (lpu_isr)
		bfa_ioc_mbox_isr(&bfa->ioc);

	if (intr) {
		if (halt_isr) {
			/*
			 * If LL_HALT bit is set then FW Init Halt LL Port
			 * Register needs to be cleared as well so Interrupt
			 * Status Register will be cleared.
			 */
			curr_value = readl(bfa->ioc.ioc_regs.ll_halt);
			curr_value &= ~__FW_INIT_HALT_P;
			writel(curr_value, bfa->ioc.ioc_regs.ll_halt);
		}

		if (pss_isr) {
			/*
			 * ERR_PSS bit needs to be cleared as well in case
			 * interrupts are shared so driver's interrupt handler is
			 * still called even though it is already masked out.
			 */
			curr_value = readl(
					bfa->ioc.ioc_regs.pss_err_status_reg);
			writel(curr_value,
				bfa->ioc.ioc_regs.pss_err_status_reg);
		}

		/* ack the error bits, then notify the IOC state machine */
		writel(intr, bfa->iocfc.bfa_regs.intr_status);
		bfa_ioc_error_isr(&bfa->ioc);
	}
}
387
Jing Huang5fbe25c2010-10-18 17:17:23 -0700388/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700389 * BFA IOC FC related functions
390 */
391
Jing Huang5fbe25c2010-10-18 17:17:23 -0700392/*
Maggie Zhangdf0f1932010-12-09 19:07:46 -0800393 * BFA IOC private functions
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700394 */
395
396static void
397bfa_iocfc_cqs_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
398{
399 int i, per_reqq_sz, per_rspq_sz;
400
401 per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
402 BFA_DMA_ALIGN_SZ);
403 per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
404 BFA_DMA_ALIGN_SZ);
405
406 /*
407 * Calculate CQ size
408 */
409 for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
410 *dm_len = *dm_len + per_reqq_sz;
411 *dm_len = *dm_len + per_rspq_sz;
412 }
413
414 /*
415 * Calculate Shadow CI/PI size
416 */
417 for (i = 0; i < cfg->fwcfg.num_cqs; i++)
418 *dm_len += (2 * BFA_CACHELINE_SZ);
419}
420
421static void
422bfa_iocfc_fw_cfg_sz(struct bfa_iocfc_cfg_s *cfg, u32 *dm_len)
423{
424 *dm_len +=
425 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
426 *dm_len +=
427 BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
428 BFA_CACHELINE_SZ);
429}
430
Jing Huang5fbe25c2010-10-18 17:17:23 -0700431/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700432 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ
433 */
/*
 * Use the Mailbox interface to send BFI_IOCFC_H2I_CFG_REQ: reset the
 * queues, fill in the DMA-mapped config page (queue base addresses,
 * shadow pointers, element counts in big-endian), then post the mailbox
 * request pointing at that page.
 */
static void
bfa_iocfc_send_cfg(void *bfa_arg)
{
	struct bfa_s *bfa = bfa_arg;
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfg_req_s cfg_req;
	struct bfi_iocfc_cfg_s *cfg_info = iocfc->cfginfo;
	struct bfa_iocfc_cfg_s	*cfg = &iocfc->cfg;
	int		i;

	WARN_ON(cfg->fwcfg.num_cqs > BFI_IOC_MAX_CQS);
	bfa_trc(bfa, cfg->fwcfg.num_cqs);

	bfa_iocfc_reset_queues(bfa);

	/*
	 * initialize IOC configuration info
	 */
	cfg_info->endian_sig = BFI_IOC_ENDIAN_SIG;
	cfg_info->num_cqs = cfg->fwcfg.num_cqs;

	bfa_dma_be_addr_set(cfg_info->cfgrsp_addr, iocfc->cfgrsp_dma.pa);
	/*
	 * dma map REQ and RSP circular queues and shadow pointers
	 */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		bfa_dma_be_addr_set(cfg_info->req_cq_ba[i],
				    iocfc->req_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->req_shadow_ci[i],
				    iocfc->req_cq_shadow_ci[i].pa);
		cfg_info->req_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_reqq_elems);

		bfa_dma_be_addr_set(cfg_info->rsp_cq_ba[i],
				    iocfc->rsp_cq_ba[i].pa);
		bfa_dma_be_addr_set(cfg_info->rsp_shadow_pi[i],
				    iocfc->rsp_cq_shadow_pi[i].pa);
		cfg_info->rsp_cq_elems[i] =
			cpu_to_be16(cfg->drvcfg.num_rspq_elems);
	}

	/*
	 * Enable interrupt coalescing if it is driver init path
	 * and not ioc disable/enable path.
	 */
	if (!iocfc->cfgdone)
		cfg_info->intr_attr.coalesce = BFA_TRUE;

	/* cfgdone is set again when the firmware's CFG_REPLY arrives */
	iocfc->cfgdone = BFA_FALSE;

	/*
	 * dma map IOC configuration itself
	 */
	bfi_h2i_set(cfg_req.mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_CFG_REQ,
		    bfa_lpuid(bfa));
	bfa_dma_be_addr_set(cfg_req.ioc_cfg_dma_addr, iocfc->cfg_info.pa);

	bfa_ioc_mbox_send(&bfa->ioc, &cfg_req,
			  sizeof(struct bfi_iocfc_cfg_req_s));
}
494
/*
 * Initialize the IOCFC software state: save the driver handle and the
 * configuration, select the chip-specific hardware interface function
 * table, and run the chip's register initialization.
 */
static void
bfa_iocfc_init_mem(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		   struct bfa_pcidev_s *pcidev)
{
	struct bfa_iocfc_s	*iocfc = &bfa->iocfc;

	bfa->bfad = bfad;
	iocfc->bfa = bfa;
	iocfc->action = BFA_IOCFC_ACT_NONE;

	iocfc->cfg = *cfg;

	/*
	 * Initialize chip specific handlers.
	 */
	if (bfa_asic_id_ctc(bfa_ioc_devid(&bfa->ioc))) {
		/* Catapult family (ct/ct2) handlers */
		iocfc->hwif.hw_reginit = bfa_hwct_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwct_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwct_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwct_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwct_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwct_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwct_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwct_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwct_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CT;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CT;
	} else {
		/* Crossbow (cb) handlers; vector bases are per PCI function */
		iocfc->hwif.hw_reginit = bfa_hwcb_reginit;
		iocfc->hwif.hw_reqq_ack = bfa_hwcb_reqq_ack;
		iocfc->hwif.hw_rspq_ack = bfa_hwcb_rspq_ack;
		iocfc->hwif.hw_msix_init = bfa_hwcb_msix_init;
		iocfc->hwif.hw_msix_install = bfa_hwcb_msix_install;
		iocfc->hwif.hw_msix_uninstall = bfa_hwcb_msix_uninstall;
		iocfc->hwif.hw_isr_mode_set = bfa_hwcb_isr_mode_set;
		iocfc->hwif.hw_msix_getvecs = bfa_hwcb_msix_getvecs;
		iocfc->hwif.hw_msix_get_rme_range = bfa_hwcb_msix_get_rme_range;
		iocfc->hwif.rme_vec_q0 = BFI_MSIX_RME_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
		iocfc->hwif.cpe_vec_q0 = BFI_MSIX_CPE_QMIN_CB +
			bfa_ioc_pcifn(&bfa->ioc) * BFI_IOC_MAX_CQS;
	}

	/* ct2 overrides: its own reginit, and no ISR mode toggling */
	if (bfa_asic_id_ct2(bfa_ioc_devid(&bfa->ioc))) {
		iocfc->hwif.hw_reginit = bfa_hwct2_reginit;
		iocfc->hwif.hw_isr_mode_set = NULL;
	}

	iocfc->hwif.hw_reginit(bfa);
	bfa->msix.nvecs = 0;
}
546
/*
 * Carve the pre-allocated DMA region (tracked by the meminfo cursor)
 * into the IOC attributes page, the request/response rings, the shadow
 * CI/PI cachelines, the config page and the config response page, then
 * optionally claim kernel memory for the firmware trace buffer.
 * Carving order must match the sizing in bfa_iocfc_meminfo().
 */
static void
bfa_iocfc_mem_claim(struct bfa_s *bfa, struct bfa_iocfc_cfg_s *cfg,
		    struct bfa_meminfo_s *meminfo)
{
	u8	*dm_kva;
	u64	dm_pa;
	int	i, per_reqq_sz, per_rspq_sz;
	struct bfa_iocfc_s  *iocfc = &bfa->iocfc;
	int	dbgsz;

	dm_kva = bfa_meminfo_dma_virt(meminfo);
	dm_pa = bfa_meminfo_dma_phys(meminfo);

	/*
	 * First allocate dma memory for IOC.
	 */
	bfa_ioc_mem_claim(&bfa->ioc, dm_kva, dm_pa);
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);

	/*
	 * Claim DMA-able memory for the request/response queues and for shadow
	 * ci/pi registers
	 */
	per_reqq_sz = BFA_ROUNDUP((cfg->drvcfg.num_reqq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);
	per_rspq_sz = BFA_ROUNDUP((cfg->drvcfg.num_rspq_elems * BFI_LMSG_SZ),
				  BFA_DMA_ALIGN_SZ);

	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_ba[i].kva = dm_kva;
		iocfc->req_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_reqq_sz);
		dm_kva += per_reqq_sz;
		dm_pa += per_reqq_sz;

		iocfc->rsp_cq_ba[i].kva = dm_kva;
		iocfc->rsp_cq_ba[i].pa = dm_pa;
		memset(dm_kva, 0, per_rspq_sz);
		dm_kva += per_rspq_sz;
		dm_pa += per_rspq_sz;
	}

	/* one cacheline each for the per-queue shadow CI and PI */
	for (i = 0; i < cfg->fwcfg.num_cqs; i++) {
		iocfc->req_cq_shadow_ci[i].kva = dm_kva;
		iocfc->req_cq_shadow_ci[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;

		iocfc->rsp_cq_shadow_pi[i].kva = dm_kva;
		iocfc->rsp_cq_shadow_pi[i].pa = dm_pa;
		dm_kva += BFA_CACHELINE_SZ;
		dm_pa += BFA_CACHELINE_SZ;
	}

	/*
	 * Claim DMA-able memory for the config info page
	 */
	bfa->iocfc.cfg_info.kva = dm_kva;
	bfa->iocfc.cfg_info.pa = dm_pa;
	bfa->iocfc.cfginfo = (struct bfi_iocfc_cfg_s *) dm_kva;
	dm_kva += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfg_s), BFA_CACHELINE_SZ);

	/*
	 * Claim DMA-able memory for the config response
	 */
	bfa->iocfc.cfgrsp_dma.kva = dm_kva;
	bfa->iocfc.cfgrsp_dma.pa = dm_pa;
	bfa->iocfc.cfgrsp = (struct bfi_iocfc_cfgrsp_s *) dm_kva;

	dm_kva +=
		BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			    BFA_CACHELINE_SZ);
	dm_pa += BFA_ROUNDUP(sizeof(struct bfi_iocfc_cfgrsp_s),
			     BFA_CACHELINE_SZ);


	/* publish the advanced cursor back to the meminfo descriptor */
	bfa_meminfo_dma_virt(meminfo) = dm_kva;
	bfa_meminfo_dma_phys(meminfo) = dm_pa;

	/* firmware trace buffer is only kept when auto-recovery is enabled */
	dbgsz = (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
	if (dbgsz > 0) {
		bfa_ioc_debug_memclaim(&bfa->ioc, bfa_meminfo_kva(meminfo));
		bfa_meminfo_kva(meminfo) += dbgsz;
	}
}
634
Jing Huang5fbe25c2010-10-18 17:17:23 -0700635/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700636 * Start BFA submodules.
637 */
/*
 * Start BFA submodules: enable response-queue processing, re-arm every
 * RME queue, then start each registered submodule in table order.
 */
static void
bfa_iocfc_start_submod(struct bfa_s *bfa)
{
	int		i;

	bfa->rme_process = BFA_TRUE;
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		bfa->iocfc.hwif.hw_rspq_ack(bfa, i);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->start(bfa);
}
650
Jing Huang5fbe25c2010-10-18 17:17:23 -0700651/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700652 * Disable BFA submodules.
653 */
654static void
655bfa_iocfc_disable_submod(struct bfa_s *bfa)
656{
657 int i;
658
659 for (i = 0; hal_mods[i]; i++)
660 hal_mods[i]->iocdisable(bfa);
661}
662
663static void
664bfa_iocfc_init_cb(void *bfa_arg, bfa_boolean_t complete)
665{
666 struct bfa_s *bfa = bfa_arg;
667
668 if (complete) {
669 if (bfa->iocfc.cfgdone)
670 bfa_cb_init(bfa->bfad, BFA_STATUS_OK);
671 else
672 bfa_cb_init(bfa->bfad, BFA_STATUS_FAILED);
673 } else {
674 if (bfa->iocfc.cfgdone)
675 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
676 }
677}
678
679static void
680bfa_iocfc_stop_cb(void *bfa_arg, bfa_boolean_t compl)
681{
682 struct bfa_s *bfa = bfa_arg;
683 struct bfad_s *bfad = bfa->bfad;
684
685 if (compl)
686 complete(&bfad->comp);
687 else
688 bfa->iocfc.action = BFA_IOCFC_ACT_NONE;
689}
690
691static void
692bfa_iocfc_disable_cb(void *bfa_arg, bfa_boolean_t compl)
693{
694 struct bfa_s *bfa = bfa_arg;
695 struct bfad_s *bfad = bfa->bfad;
696
697 if (compl)
698 complete(&bfad->disable_comp);
699}
700
Krishna Gudipati11189202011-06-13 15:50:35 -0700701/**
702 * configure queue registers from firmware response
703 */
704static void
705bfa_iocfc_qreg(struct bfa_s *bfa, struct bfi_iocfc_qreg_s *qreg)
706{
707 int i;
708 struct bfa_iocfc_regs_s *r = &bfa->iocfc.bfa_regs;
709 void __iomem *kva = bfa_ioc_bar0(&bfa->ioc);
710
711 for (i = 0; i < BFI_IOC_MAX_CQS; i++) {
712 r->cpe_q_ci[i] = kva + be32_to_cpu(qreg->cpe_q_ci_off[i]);
713 r->cpe_q_pi[i] = kva + be32_to_cpu(qreg->cpe_q_pi_off[i]);
714 r->cpe_q_ctrl[i] = kva + be32_to_cpu(qreg->cpe_qctl_off[i]);
715 r->rme_q_ci[i] = kva + be32_to_cpu(qreg->rme_q_ci_off[i]);
716 r->rme_q_pi[i] = kva + be32_to_cpu(qreg->rme_q_pi_off[i]);
717 r->rme_q_ctrl[i] = kva + be32_to_cpu(qreg->rme_qctl_off[i]);
718 }
719}
720
Jing Huang5fbe25c2010-10-18 17:17:23 -0700721/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700722 * Update BFA configuration from firmware configuration.
723 */
724static void
725bfa_iocfc_cfgrsp(struct bfa_s *bfa)
726{
727 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
728 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
729 struct bfa_iocfc_fwcfg_s *fwcfg = &cfgrsp->fwcfg;
730
731 fwcfg->num_cqs = fwcfg->num_cqs;
Jing Huangba816ea2010-10-18 17:10:50 -0700732 fwcfg->num_ioim_reqs = be16_to_cpu(fwcfg->num_ioim_reqs);
733 fwcfg->num_tskim_reqs = be16_to_cpu(fwcfg->num_tskim_reqs);
734 fwcfg->num_fcxp_reqs = be16_to_cpu(fwcfg->num_fcxp_reqs);
735 fwcfg->num_uf_bufs = be16_to_cpu(fwcfg->num_uf_bufs);
736 fwcfg->num_rports = be16_to_cpu(fwcfg->num_rports);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700737
738 iocfc->cfgdone = BFA_TRUE;
739
Jing Huang5fbe25c2010-10-18 17:17:23 -0700740 /*
Krishna Gudipati11189202011-06-13 15:50:35 -0700741 * configure queue register offsets as learnt from firmware
742 */
743 bfa_iocfc_qreg(bfa, &cfgrsp->qreg);
744
745 /*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700746 * Configuration is complete - initialize/start submodules
747 */
748 bfa_fcport_init(bfa);
749
750 if (iocfc->action == BFA_IOCFC_ACT_INIT)
751 bfa_cb_queue(bfa, &iocfc->init_hcb_qe, bfa_iocfc_init_cb, bfa);
752 else
753 bfa_iocfc_start_submod(bfa);
754}
755void
756bfa_iocfc_reset_queues(struct bfa_s *bfa)
757{
758 int q;
759
760 for (q = 0; q < BFI_IOC_MAX_CQS; q++) {
761 bfa_reqq_ci(bfa, q) = 0;
762 bfa_reqq_pi(bfa, q) = 0;
763 bfa_rspq_ci(bfa, q) = 0;
764 bfa_rspq_pi(bfa, q) = 0;
765 }
766}
767
Jing Huang5fbe25c2010-10-18 17:17:23 -0700768/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700769 * IOC enable request is complete
770 */
771static void
772bfa_iocfc_enable_cbfn(void *bfa_arg, enum bfa_status status)
773{
774 struct bfa_s *bfa = bfa_arg;
775
776 if (status != BFA_STATUS_OK) {
777 bfa_isr_disable(bfa);
778 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
779 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe,
780 bfa_iocfc_init_cb, bfa);
781 return;
782 }
783
784 bfa_iocfc_send_cfg(bfa);
785}
786
Jing Huang5fbe25c2010-10-18 17:17:23 -0700787/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700788 * IOC disable request is complete
789 */
790static void
791bfa_iocfc_disable_cbfn(void *bfa_arg)
792{
793 struct bfa_s *bfa = bfa_arg;
794
795 bfa_isr_disable(bfa);
796 bfa_iocfc_disable_submod(bfa);
797
798 if (bfa->iocfc.action == BFA_IOCFC_ACT_STOP)
799 bfa_cb_queue(bfa, &bfa->iocfc.stop_hcb_qe, bfa_iocfc_stop_cb,
800 bfa);
801 else {
Jing Huangd4b671c2010-12-26 21:46:35 -0800802 WARN_ON(bfa->iocfc.action != BFA_IOCFC_ACT_DISABLE);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700803 bfa_cb_queue(bfa, &bfa->iocfc.dis_hcb_qe, bfa_iocfc_disable_cb,
804 bfa);
805 }
806}
807
Jing Huang5fbe25c2010-10-18 17:17:23 -0700808/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700809 * Notify sub-modules of hardware failure.
810 */
811static void
812bfa_iocfc_hbfail_cbfn(void *bfa_arg)
813{
814 struct bfa_s *bfa = bfa_arg;
815
816 bfa->rme_process = BFA_FALSE;
817
818 bfa_isr_disable(bfa);
819 bfa_iocfc_disable_submod(bfa);
820
821 if (bfa->iocfc.action == BFA_IOCFC_ACT_INIT)
822 bfa_cb_queue(bfa, &bfa->iocfc.init_hcb_qe, bfa_iocfc_init_cb,
823 bfa);
824}
825
Jing Huang5fbe25c2010-10-18 17:17:23 -0700826/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700827 * Actions on chip-reset completion.
828 */
/*
 * Actions on chip-reset completion: return all queues to an empty
 * state and re-arm the interrupts.
 */
static void
bfa_iocfc_reset_cbfn(void *bfa_arg)
{
	struct bfa_s	*bfa = bfa_arg;

	bfa_iocfc_reset_queues(bfa);
	bfa_isr_enable(bfa);
}
837
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700838
Jing Huang5fbe25c2010-10-18 17:17:23 -0700839/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700840 * Query IOC memory requirement information.
841 */
842void
843bfa_iocfc_meminfo(struct bfa_iocfc_cfg_s *cfg, u32 *km_len,
844 u32 *dm_len)
845{
846 /* dma memory for IOC */
Maggie Zhangf7f738122010-12-09 19:08:43 -0800847 *dm_len += BFA_ROUNDUP(sizeof(struct bfi_ioc_attr_s), BFA_DMA_ALIGN_SZ);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700848
849 bfa_iocfc_fw_cfg_sz(cfg, dm_len);
850 bfa_iocfc_cqs_sz(cfg, dm_len);
Maggie Zhangf7f738122010-12-09 19:08:43 -0800851 *km_len += (bfa_auto_recover) ? BFA_DBG_FWTRC_LEN : 0;
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700852}
853
/*
 * Attach the IOCFC module: register IOC callbacks, select chip-specific
 * handlers and claim the module's memory.
 */
void
bfa_iocfc_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
		 struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int		i;
	struct bfa_ioc_s *ioc = &bfa->ioc;

	/* register IOCFC completion callbacks with the IOC */
	bfa_iocfc_cbfn.enable_cbfn = bfa_iocfc_enable_cbfn;
	bfa_iocfc_cbfn.disable_cbfn = bfa_iocfc_disable_cbfn;
	bfa_iocfc_cbfn.hbfail_cbfn = bfa_iocfc_hbfail_cbfn;
	bfa_iocfc_cbfn.reset_cbfn = bfa_iocfc_reset_cbfn;

	ioc->trcmod = bfa->trcmod;
	bfa_ioc_attach(&bfa->ioc, bfa, &bfa_iocfc_cbfn, &bfa->timer_mod);

	/*
	 * Set FC mode for BFA_PCI_DEVICE_ID_CT_FC.
	 */
	if (pcidev->device_id == BFA_PCI_DEVICE_ID_CT_FC)
		bfa_ioc_set_fcmode(&bfa->ioc);

	bfa_ioc_pci_init(&bfa->ioc, pcidev, BFI_PCIFN_CLASS_FC);
	bfa_ioc_mbox_register(&bfa->ioc, bfa_mbox_isrs);

	bfa_iocfc_init_mem(bfa, bfad, cfg, pcidev);
	bfa_iocfc_mem_claim(bfa, cfg, meminfo);
	INIT_LIST_HEAD(&bfa->timer_mod.timer_q);

	/* completion queue and per-reqq wait lists start empty */
	INIT_LIST_HEAD(&bfa->comp_q);
	for (i = 0; i < BFI_IOC_MAX_CQS; i++)
		INIT_LIST_HEAD(&bfa->reqq_waitq[i]);
}
889
/*
 * Kick off IOCFC initialization by enabling the IOC; configuration
 * continues asynchronously in the enable completion callback.
 */
void
bfa_iocfc_init(struct bfa_s *bfa)
{
	/* record the in-flight action; completion paths check it */
	bfa->iocfc.action = BFA_IOCFC_ACT_INIT;
	bfa_ioc_enable(&bfa->ioc);
}
899
/*
 * IOC start called from bfa_start(). Called to start IOC operations
 * at driver instantiation for this instance.
 */
void
bfa_iocfc_start(struct bfa_s *bfa)
{
	/* sub-modules are started only after IOCFC configuration completed */
	if (bfa->iocfc.cfgdone)
		bfa_iocfc_start_submod(bfa);
}
910
/*
 * IOC stop called from bfa_stop(). Called only when driver is unloaded
 * for this instance.
 */
void
bfa_iocfc_stop(struct bfa_s *bfa)
{
	bfa->iocfc.action = BFA_IOCFC_ACT_STOP;

	/* stop reply-queue processing before starting the IOC disable */
	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
923
924void
925bfa_iocfc_isr(void *bfaarg, struct bfi_mbmsg_s *m)
926{
927 struct bfa_s *bfa = bfaarg;
928 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
929 union bfi_iocfc_i2h_msg_u *msg;
930
931 msg = (union bfi_iocfc_i2h_msg_u *) m;
932 bfa_trc(bfa, msg->mh.msg_id);
933
934 switch (msg->mh.msg_id) {
935 case BFI_IOCFC_I2H_CFG_REPLY:
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700936 bfa_iocfc_cfgrsp(bfa);
937 break;
938 case BFI_IOCFC_I2H_UPDATEQ_RSP:
939 iocfc->updateq_cbfn(iocfc->updateq_cbarg, BFA_STATUS_OK);
940 break;
941 default:
Jing Huangd4b671c2010-12-26 21:46:35 -0800942 WARN_ON(1);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700943 }
944}
945
946void
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700947bfa_iocfc_get_attr(struct bfa_s *bfa, struct bfa_iocfc_attr_s *attr)
948{
949 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
950
951 attr->intr_attr.coalesce = iocfc->cfginfo->intr_attr.coalesce;
952
953 attr->intr_attr.delay = iocfc->cfginfo->intr_attr.delay ?
Jing Huangba816ea2010-10-18 17:10:50 -0700954 be16_to_cpu(iocfc->cfginfo->intr_attr.delay) :
955 be16_to_cpu(iocfc->cfgrsp->intr_attr.delay);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700956
957 attr->intr_attr.latency = iocfc->cfginfo->intr_attr.latency ?
Jing Huangba816ea2010-10-18 17:10:50 -0700958 be16_to_cpu(iocfc->cfginfo->intr_attr.latency) :
959 be16_to_cpu(iocfc->cfgrsp->intr_attr.latency);
Krishna Gudipatia36c61f2010-09-15 11:50:55 -0700960
961 attr->config = iocfc->cfg;
962}
963
/*
 * Set the interrupt coalescing attributes. The values are cached in the
 * host-side config info (big-endian, as the firmware expects) and, if the
 * IOC is already operational, also pushed to firmware via a SET_INTR
 * request on the IOC request queue.
 *
 * Returns BFA_STATUS_OK on success, BFA_STATUS_DEVBUSY if no request
 * queue element is available.
 */
bfa_status_t
bfa_iocfc_israttr_set(struct bfa_s *bfa, struct bfa_iocfc_intr_attr_s *attr)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_set_intr_req_s *m;

	/* cache requested values; stored big-endian for the firmware */
	iocfc->cfginfo->intr_attr.coalesce = attr->coalesce;
	iocfc->cfginfo->intr_attr.delay = cpu_to_be16(attr->delay);
	iocfc->cfginfo->intr_attr.latency = cpu_to_be16(attr->latency);

	/*
	 * Not operational yet: nothing to post now. Presumably the cached
	 * values are delivered with the config message when the IOC comes
	 * up -- verify against bfa_iocfc_cfgrsp()/config send path.
	 */
	if (!bfa_iocfc_is_operational(bfa))
		return BFA_STATUS_OK;

	m = bfa_reqq_next(bfa, BFA_REQQ_IOC);
	if (!m)
		return BFA_STATUS_DEVBUSY;

	/* build and post the host-to-firmware SET_INTR request */
	bfi_h2i_set(m->mh, BFI_MC_IOCFC, BFI_IOCFC_H2I_SET_INTR_REQ,
		    bfa_lpuid(bfa));
	m->coalesce = iocfc->cfginfo->intr_attr.coalesce;
	m->delay = iocfc->cfginfo->intr_attr.delay;
	m->latency = iocfc->cfginfo->intr_attr.latency;

	bfa_trc(bfa, attr->delay);
	bfa_trc(bfa, attr->latency);

	bfa_reqq_produce(bfa, BFA_REQQ_IOC);
	return BFA_STATUS_OK;
}
993
/*
 * Record the DMA base address of the SCSI sense-buffer area and the
 * per-IO sense length in the config info handed to firmware.
 * NOTE(review): length is programmed as (BFI_IOIM_SNSLEN - 1) --
 * presumably the firmware field is 0-based; verify against the BFI spec.
 */
void
bfa_iocfc_set_snsbase(struct bfa_s *bfa, u64 snsbase_pa)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;

	iocfc->cfginfo->sense_buf_len = (BFI_IOIM_SNSLEN - 1);
	bfa_dma_be_addr_set(iocfc->cfginfo->ioim_snsbase, snsbase_pa);
}
/*
 * Enable IOC after it is disabled. Logs the request to the port log and
 * starts the asynchronous IOC enable sequence.
 */
void
bfa_iocfc_enable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Enable");
	bfa_ioc_enable(&bfa->ioc);
}
1012
/*
 * Disable the IOC. Logs the request, records the DISABLE action so the
 * completion callback can tell it apart from a stop, halts reply-queue
 * processing, and starts the asynchronous IOC disable sequence.
 */
void
bfa_iocfc_disable(struct bfa_s *bfa)
{
	bfa_plog_str(bfa->plog, BFA_PL_MID_HAL, BFA_PL_EID_MISC, 0,
		     "IOC Disable");
	bfa->iocfc.action = BFA_IOCFC_ACT_DISABLE;

	/* stop reply-queue processing before the IOC goes down */
	bfa->rme_process = BFA_FALSE;
	bfa_ioc_disable(&bfa->ioc);
}
1023
1024
/*
 * True only when the IOC itself is operational AND the IOCFC
 * configuration handshake with firmware has completed.
 */
bfa_boolean_t
bfa_iocfc_is_operational(struct bfa_s *bfa)
{
	return bfa_ioc_is_operational(&bfa->ioc) && bfa->iocfc.cfgdone;
}
1030
Jing Huang5fbe25c2010-10-18 17:17:23 -07001031/*
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001032 * Return boot target port wwns -- read from boot information in flash.
1033 */
1034void
1035bfa_iocfc_get_bootwwns(struct bfa_s *bfa, u8 *nwwns, wwn_t *wwns)
1036{
1037 struct bfa_iocfc_s *iocfc = &bfa->iocfc;
1038 struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;
1039 int i;
1040
1041 if (cfgrsp->pbc_cfg.boot_enabled && cfgrsp->pbc_cfg.nbluns) {
1042 bfa_trc(bfa, cfgrsp->pbc_cfg.nbluns);
1043 *nwwns = cfgrsp->pbc_cfg.nbluns;
1044 for (i = 0; i < cfgrsp->pbc_cfg.nbluns; i++)
1045 wwns[i] = cfgrsp->pbc_cfg.blun[i].tgt_pwwn;
1046
1047 return;
1048 }
1049
1050 *nwwns = cfgrsp->bootwwns.nwwns;
1051 memcpy(wwns, cfgrsp->bootwwns.wwn, sizeof(cfgrsp->bootwwns.wwn));
1052}
1053
/*
 * Copy the pre-boot (PBC) vport table reported by firmware into the
 * caller's buffer (which must hold the whole fixed-size table) and
 * return the number of valid entries.
 */
int
bfa_iocfc_get_pbc_vports(struct bfa_s *bfa, struct bfi_pbc_vport_s *pbc_vport)
{
	struct bfa_iocfc_s *iocfc = &bfa->iocfc;
	struct bfi_iocfc_cfgrsp_s *cfgrsp = iocfc->cfgrsp;

	memcpy(pbc_vport, cfgrsp->pbc_cfg.vport, sizeof(cfgrsp->pbc_cfg.vport));
	return cfgrsp->pbc_cfg.nvports;
}
1063
Jing Huang7725ccf2009-09-23 17:46:15 -07001064
/*
 * Query the memory requirement of the BFA library. Must be called before
 * bfa_attach(): the driver allocates the reported KVA and DMA blocks,
 * fills in their starting addresses, and passes the same structure to
 * bfa_attach().
 *
 * @param[in]  cfg     - driver configuration (defaults available via
 *                       bfa_cfg_get_default()). If a capability is out of
 *                       the library's supported range, defaults are used
 *                       and a warning is logged.
 * @param[out] meminfo - per-type (see bfa_mem_type_t) memory requirements.
 *
 * @return void
 */
void
bfa_cfg_get_meminfo(struct bfa_iocfc_cfg_s *cfg, struct bfa_meminfo_s *meminfo)
{
	int i;
	u32 km_len = 0, dm_len = 0;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/* meminfo[] is indexed by (mem_type - 1); types start at 1 */
	memset((void *)meminfo, 0, sizeof(struct bfa_meminfo_s));
	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_type =
		BFA_MEM_TYPE_KVA;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_type =
		BFA_MEM_TYPE_DMA;

	/* accumulate KVA/DMA needs of IOCFC plus every HAL sub-module */
	bfa_iocfc_meminfo(cfg, &km_len, &dm_len);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->meminfo(cfg, &km_len, &dm_len);

	dm_len += bfa_port_meminfo();

	meminfo->meminfo[BFA_MEM_TYPE_KVA - 1].mem_len = km_len;
	meminfo->meminfo[BFA_MEM_TYPE_DMA - 1].mem_len = dm_len;
}
1117
/*
 * Attach the driver instance to the BFA library. Does not trigger any HW
 * initialization (that happens in bfa_init()).
 *
 * @param[out] bfa     Pointer to bfa_t.
 * @param[in]  bfad    Opaque handle back to the driver's IOC structure.
 * @param[in]  cfg     Same bfa_iocfc_cfg_s used in bfa_cfg_get_meminfo().
 * @param[in]  meminfo Memory blocks sized by bfa_cfg_get_meminfo(),
 *                     allocated by the driver with starting addresses
 *                     filled in.
 * @param[in]  pcidev  Pointer to struct bfa_pcidev_s.
 *
 * @return void
 *
 * Special Considerations: fails to configure properly if cfg capabilities
 * are out of the library's pre-defined range (defaults are then used).
 */
void
bfa_attach(struct bfa_s *bfa, void *bfad, struct bfa_iocfc_cfg_s *cfg,
	   struct bfa_meminfo_s *meminfo, struct bfa_pcidev_s *pcidev)
{
	int i;
	struct bfa_mem_elem_s *melem;

	bfa->fcs = BFA_FALSE;

	WARN_ON((cfg == NULL) || (meminfo == NULL));

	/*
	 * initialize all memory pointers for iterative allocation
	 */
	for (i = 0; i < BFA_MEM_TYPE_MAX; i++) {
		melem = meminfo->meminfo + i;
		melem->kva_curp = melem->kva;
		melem->dma_curp = melem->dma;
	}

	/* IOCFC first, then each HAL sub-module claims its share */
	bfa_iocfc_attach(bfa, bfad, cfg, meminfo, pcidev);

	for (i = 0; hal_mods[i]; i++)
		hal_mods[i]->attach(bfa, bfad, cfg, meminfo, pcidev);

	bfa_com_port_attach(bfa, meminfo);
}
1171
Jing Huang5fbe25c2010-10-18 17:17:23 -07001172/*
Jing Huang7725ccf2009-09-23 17:46:15 -07001173 * Use this function to delete a BFA IOC. IOC should be stopped (by
1174 * calling bfa_stop()) before this function call.
1175 *
1176 * @param[in] bfa - pointer to bfa_t.
1177 *
1178 * @return
1179 * void
1180 *
1181 * Special Considerations:
1182 *
1183 * @note
1184 */
1185void
1186bfa_detach(struct bfa_s *bfa)
1187{
1188 int i;
1189
1190 for (i = 0; hal_mods[i]; i++)
1191 hal_mods[i]->detach(bfa);
Maggie Zhangf7f738122010-12-09 19:08:43 -08001192 bfa_ioc_detach(&bfa->ioc);
Jing Huang7725ccf2009-09-23 17:46:15 -07001193}
1194
/*
 * Drain all pending completion callbacks from the BFA completion queue
 * onto the caller-supplied list (caller's list is reinitialized first;
 * BFA's queue is left empty).
 */
void
bfa_comp_deq(struct bfa_s *bfa, struct list_head *comp_q)
{
	INIT_LIST_HEAD(comp_q);
	list_splice_tail_init(&bfa->comp_q, comp_q);
}
1201
1202void
1203bfa_comp_process(struct bfa_s *bfa, struct list_head *comp_q)
1204{
Krishna Gudipatia36c61f2010-09-15 11:50:55 -07001205 struct list_head *qe;
1206 struct list_head *qen;
1207 struct bfa_cb_qe_s *hcb_qe;
Jing Huang7725ccf2009-09-23 17:46:15 -07001208
1209 list_for_each_safe(qe, qen, comp_q) {
1210 hcb_qe = (struct bfa_cb_qe_s *) qe;
1211 hcb_qe->cbfn(hcb_qe->cbarg, BFA_TRUE);
1212 }
1213}
1214
/*
 * Flush every completion callback queued on comp_q, invoking each with
 * BFA_FALSE -- presumably signalling "flushed, not completed" to the
 * callback (cf. bfa_comp_process(), which passes BFA_TRUE); verify
 * against the bfa_cb_cbfn_t contract.
 */
void
bfa_comp_free(struct bfa_s *bfa, struct list_head *comp_q)
{
	struct list_head *qe;
	struct bfa_cb_qe_s *hcb_qe;

	while (!list_empty(comp_q)) {
		bfa_q_deq(comp_q, &qe);
		hcb_qe = (struct bfa_cb_qe_s *) qe;
		hcb_qe->cbfn(hcb_qe->cbarg, BFA_FALSE);
	}
}
1227
Jing Huang7725ccf2009-09-23 17:46:15 -07001228
/*
 * Return the list of PCI vendor/device id pairs supported by this
 * BFA instance. The table is static, so the returned pointer remains
 * valid after this call.
 */
void
bfa_get_pciids(struct bfa_pciid_s **pciids, int *npciids)
{
	static struct bfa_pciid_s __pciids[] = {
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G2P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_FC_8G1P},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT},
		{BFA_PCI_VENDOR_ID_BROCADE, BFA_PCI_DEVICE_ID_CT_FC},
	};

	/* element-count idiom; could use the kernel's ARRAY_SIZE() macro */
	*npciids = sizeof(__pciids) / sizeof(__pciids[0]);
	*pciids = __pciids;
}
1246
/*
 * Fill cfg with the default bfa_iocfc_cfg_s values compiled into the BFA
 * layer. The OS driver can then overwrite entries that have been
 * configured by the user.
 *
 * @param[in] cfg - pointer to bfa_iocfc_cfg_s
 *
 * @return void
 */
void
bfa_cfg_get_default(struct bfa_iocfc_cfg_s *cfg)
{
	/* firmware-side resource counts */
	cfg->fwcfg.num_fabrics = DEF_CFG_NUM_FABRICS;
	cfg->fwcfg.num_lports = DEF_CFG_NUM_LPORTS;
	cfg->fwcfg.num_rports = DEF_CFG_NUM_RPORTS;
	cfg->fwcfg.num_ioim_reqs = DEF_CFG_NUM_IOIM_REQS;
	cfg->fwcfg.num_tskim_reqs = DEF_CFG_NUM_TSKIM_REQS;
	cfg->fwcfg.num_fcxp_reqs = DEF_CFG_NUM_FCXP_REQS;
	cfg->fwcfg.num_uf_bufs = DEF_CFG_NUM_UF_BUFS;
	cfg->fwcfg.num_cqs = DEF_CFG_NUM_CQS;

	/* driver-side queue/resource sizing and policy */
	cfg->drvcfg.num_reqq_elems = DEF_CFG_NUM_REQQ_ELEMS;
	cfg->drvcfg.num_rspq_elems = DEF_CFG_NUM_RSPQ_ELEMS;
	cfg->drvcfg.num_sgpgs = DEF_CFG_NUM_SGPGS;
	cfg->drvcfg.num_sboot_tgts = DEF_CFG_NUM_SBOOT_TGTS;
	cfg->drvcfg.num_sboot_luns = DEF_CFG_NUM_SBOOT_LUNS;
	cfg->drvcfg.path_tov = BFA_FCPIM_PATHTOV_DEF;
	cfg->drvcfg.ioc_recover = BFA_FALSE;
	cfg->drvcfg.delay_comp = BFA_FALSE;

}
1282
/*
 * Fill cfg with the minimum supported configuration: start from the
 * defaults, then clamp the sizable resources to their library minimums
 * and flag min_cfg so attach-time sizing knows this is a minimal setup.
 */
void
bfa_cfg_get_min(struct bfa_iocfc_cfg_s *cfg)
{
	bfa_cfg_get_default(cfg);
	cfg->fwcfg.num_ioim_reqs = BFA_IOIM_MIN;
	cfg->fwcfg.num_tskim_reqs = BFA_TSKIM_MIN;
	cfg->fwcfg.num_fcxp_reqs = BFA_FCXP_MIN;
	cfg->fwcfg.num_uf_bufs = BFA_UF_MIN;
	cfg->fwcfg.num_rports = BFA_RPORT_MIN;

	cfg->drvcfg.num_sgpgs = BFA_SGPG_MIN;
	cfg->drvcfg.num_reqq_elems = BFA_REQQ_NELEMS_MIN;
	cfg->drvcfg.num_rspq_elems = BFA_RSPQ_NELEMS_MIN;
	cfg->drvcfg.min_cfg = BFA_TRUE;
}