/*
 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
 * All rights reserved
 * www.brocade.com
 *
 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License (GPL) Version 2 as
 * published by the Free Software Foundation
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */

#include "bfa_ioc.h"
#include "bfi_ctreg.h"
#include "bfa_defs.h"

BFA_TRC_FILE(CNA, IOC_CT);

/*
 * forward declarations
 */
static bfa_boolean_t bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix);
static void bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc);
static void bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc);

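/**
 * Hardware interface function table for the CT (Catapult) ASIC;
 * populated by bfa_ioc_set_ct_hwif() and hooked into the IOC there.
 */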
struct bfa_ioc_hwif_s hwif_ct;

/**
 * Called from bfa_ioc_attach() to map asic specific calls.
 */
void
bfa_ioc_set_ct_hwif(struct bfa_ioc_s *ioc)
{
        hwif_ct.ioc_pll_init = bfa_ioc_ct_pll_init;
        hwif_ct.ioc_firmware_lock = bfa_ioc_ct_firmware_lock;
        hwif_ct.ioc_firmware_unlock = bfa_ioc_ct_firmware_unlock;
        hwif_ct.ioc_reg_init = bfa_ioc_ct_reg_init;
        hwif_ct.ioc_map_port = bfa_ioc_ct_map_port;
        hwif_ct.ioc_isr_mode_set = bfa_ioc_ct_isr_mode_set;
        hwif_ct.ioc_notify_hbfail = bfa_ioc_ct_notify_hbfail;
        hwif_ct.ioc_ownership_reset = bfa_ioc_ct_ownership_reset;

        ioc->ioc_hwif = &hwif_ct;
}

/**
 * Return true if firmware of current driver matches the running firmware.
 */
static bfa_boolean_t
bfa_ioc_ct_firmware_lock(struct bfa_ioc_s *ioc)
{
        enum bfi_ioc_state ioc_fwstate;
        u32 usecnt;
        struct bfi_ioc_image_hdr_s fwhdr;

        /**
         * Firmware match check is relevant only for CNA.
         */
        if (!ioc->cna)
                return BFA_TRUE;

        /**
         * If bios boot (flash based) -- do not increment usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                BFA_IOC_FWIMG_MINSZ)
                return BFA_TRUE;

        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);

        /**
         * If usage count is 0, always return TRUE.
         */
        if (usecnt == 0) {
                bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 1);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_TRUE;
        }

        ioc_fwstate = bfa_reg_read(ioc->ioc_regs.ioc_fwstate);
        bfa_trc(ioc, ioc_fwstate);

        /**
         * Use count cannot be non-zero while the chip is in the
         * uninitialized state.
         */
        bfa_assert(ioc_fwstate != BFI_IOC_UNINIT);

        /**
         * Check if another driver with a different firmware is active
         */
        bfa_ioc_fwver_get(ioc, &fwhdr);
        if (!bfa_ioc_fwver_cmp(ioc, &fwhdr)) {
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_trc(ioc, usecnt);
                return BFA_FALSE;
        }

        /**
         * Same firmware version. Increment the reference count.
         */
        usecnt++;
        bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        bfa_trc(ioc, usecnt);
        return BFA_TRUE;
}

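/**
 * Release the firmware usage count taken in bfa_ioc_ct_firmware_lock().
 */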
static void
bfa_ioc_ct_firmware_unlock(struct bfa_ioc_s *ioc)
{
        u32 usecnt;

        /**
         * Firmware lock is relevant only for CNA.
         */
        if (!ioc->cna)
                return;

        /**
         * If bios boot (flash based) -- do not decrement usage count
         */
        if (bfa_cb_image_get_size(BFA_IOC_FWIMG_TYPE(ioc)) <
                BFA_IOC_FWIMG_MINSZ)
                return;

        /**
         * decrement usage count
         */
        bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
        usecnt = bfa_reg_read(ioc->ioc_regs.ioc_usage_reg);
        bfa_assert(usecnt > 0);

        usecnt--;
        bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, usecnt);
        bfa_trc(ioc, usecnt);

        bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
}

/**
 * Notify other functions on HB failure.
 */
static void
bfa_ioc_ct_notify_hbfail(struct bfa_ioc_s *ioc)
{
        if (ioc->cna) {
                bfa_reg_write(ioc->ioc_regs.ll_halt, __FW_INIT_HALT_P);
                /* Wait for halt to take effect */
                bfa_reg_read(ioc->ioc_regs.ll_halt);
        } else {
                bfa_reg_write(ioc->ioc_regs.err_set, __PSS_ERR_STATUS_SET);
                bfa_reg_read(ioc->ioc_regs.err_set);
        }
}

/**
 * Host to LPU mailbox message addresses
 */
static struct { u32 hfn_mbox, lpu_mbox, hfn_pgn; } iocreg_fnreg[] = {
        { HOSTFN0_LPU_MBOX0_0, LPU_HOSTFN0_MBOX0_0, HOST_PAGE_NUM_FN0 },
        { HOSTFN1_LPU_MBOX0_8, LPU_HOSTFN1_MBOX0_8, HOST_PAGE_NUM_FN1 },
        { HOSTFN2_LPU_MBOX0_0, LPU_HOSTFN2_MBOX0_0, HOST_PAGE_NUM_FN2 },
        { HOSTFN3_LPU_MBOX0_8, LPU_HOSTFN3_MBOX0_8, HOST_PAGE_NUM_FN3 }
};

/**
 * Host <-> LPU mailbox command/status registers - port 0
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p0[] = {
        { HOSTFN0_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU0_MBOX0_CMD_STAT, LPU0_HOSTFN3_MBOX0_CMD_STAT }
};

/**
 * Host <-> LPU mailbox command/status registers - port 1
 */
static struct { u32 hfn, lpu; } iocreg_mbcmd_p1[] = {
        { HOSTFN0_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN0_MBOX0_CMD_STAT },
        { HOSTFN1_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN1_MBOX0_CMD_STAT },
        { HOSTFN2_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN2_MBOX0_CMD_STAT },
        { HOSTFN3_LPU1_MBOX0_CMD_STAT, LPU1_HOSTFN3_MBOX0_CMD_STAT }
};

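/**
 * Initialize chip register addresses for this IOC based on its PCI
 * function and port.
 */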
static void
bfa_ioc_ct_reg_init(struct bfa_ioc_s *ioc)
{
        bfa_os_addr_t   rb;
        int             pcifn = bfa_ioc_pcifn(ioc);

        rb = bfa_ioc_bar0(ioc);

        ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;
        ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;
        ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;

        if (ioc->port_id == 0) {
                ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;
                ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p0[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P0;
        } else {
                ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);
                ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);
                ioc->ioc_regs.hfn_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].hfn;
                ioc->ioc_regs.lpu_mbox_cmd = rb + iocreg_mbcmd_p1[pcifn].lpu;
                ioc->ioc_regs.ll_halt = rb + FW_INIT_HALT_P1;
        }

        /*
         * PSS control registers
         */
        ioc->ioc_regs.pss_ctl_reg = (rb + PSS_CTL_REG);
        ioc->ioc_regs.pss_err_status_reg = (rb + PSS_ERR_STATUS_REG);
        ioc->ioc_regs.app_pll_fast_ctl_reg = (rb + APP_PLL_425_CTL_REG);
        ioc->ioc_regs.app_pll_slow_ctl_reg = (rb + APP_PLL_312_CTL_REG);

        /*
         * IOC semaphore registers and serialization
         */
        ioc->ioc_regs.ioc_sem_reg = (rb + HOST_SEM0_REG);
        ioc->ioc_regs.ioc_usage_sem_reg = (rb + HOST_SEM1_REG);
        ioc->ioc_regs.ioc_init_sem_reg = (rb + HOST_SEM2_REG);
        ioc->ioc_regs.ioc_usage_reg = (rb + BFA_FW_USE_COUNT);

        /**
         * sram memory access
         */
        ioc->ioc_regs.smem_page_start = (rb + PSS_SMEM_PAGE_START);
        ioc->ioc_regs.smem_pg0 = BFI_IOC_SMEM_PG0_CT;

        /*
         * err set reg : for notification of hb failure in fcmode
         */
        ioc->ioc_regs.err_set = (rb + ERR_SET_REG);
}

/**
 * Initialize IOC to port mapping.
 */

#define FNC_PERS_FN_SHIFT(__fn)        ((__fn) * 8)
static void
bfa_ioc_ct_map_port(struct bfa_ioc_s *ioc)
{
        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
        u32             r32;

        /**
         * For catapult, base port id on personality register and IOC type
         */
        r32 = bfa_reg_read(rb + FNC_PERS_REG);
        r32 >>= FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc));
        ioc->port_id = (r32 & __F0_PORT_MAP_MK) >> __F0_PORT_MAP_SH;

        bfa_trc(ioc, bfa_ioc_pcifn(ioc));
        bfa_trc(ioc, ioc->port_id);
}

/**
 * Set interrupt mode for a function: INTX or MSIX
 */
static void
bfa_ioc_ct_isr_mode_set(struct bfa_ioc_s *ioc, bfa_boolean_t msix)
{
        bfa_os_addr_t   rb = ioc->pcidev.pci_bar_kva;
        u32             r32, mode;

        r32 = bfa_reg_read(rb + FNC_PERS_REG);
        bfa_trc(ioc, r32);

        mode = (r32 >> FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc))) &
                __F0_INTX_STATUS;

        /**
         * If already in desired mode, do not change anything
         */
        if (!msix && mode)
                return;

        if (msix)
                mode = __F0_INTX_STATUS_MSIX;
        else
                mode = __F0_INTX_STATUS_INTA;

        r32 &= ~(__F0_INTX_STATUS << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        r32 |= (mode << FNC_PERS_FN_SHIFT(bfa_ioc_pcifn(ioc)));
        bfa_trc(ioc, r32);

        bfa_reg_write(rb + FNC_PERS_REG, r32);
}

/**
 * Cleanup hw semaphore and usecnt registers
 */
static void
bfa_ioc_ct_ownership_reset(struct bfa_ioc_s *ioc)
{

        if (ioc->cna) {
                bfa_ioc_sem_get(ioc->ioc_regs.ioc_usage_sem_reg);
                bfa_reg_write(ioc->ioc_regs.ioc_usage_reg, 0);
                bfa_ioc_sem_release(ioc->ioc_regs.ioc_usage_sem_reg);
        }

        /*
         * Read the hw sem reg to make sure that it is locked
         * before we clear it. If it is not locked, writing 1
         * will lock it instead of clearing it.
         */
        bfa_reg_read(ioc->ioc_regs.ioc_sem_reg);
        bfa_ioc_hw_sem_release(ioc);
}

/*
 * Check the firmware state to know if pll_init has been completed already
 */
bfa_boolean_t
bfa_ioc_ct_pll_init_complete(bfa_os_addr_t rb)
{
        if ((bfa_reg_read(rb + BFA_IOC0_STATE_REG) == BFI_IOC_OP) ||
            (bfa_reg_read(rb + BFA_IOC1_STATE_REG) == BFI_IOC_OP))
                return BFA_TRUE;

        return BFA_FALSE;
}

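/**
 * Bring up the ASIC PLLs and on-chip memories. Operates directly on the
 * BAR0 base address (rb) rather than through the per-IOC register map.
 * The step comments in the body describe the register sequence as coded;
 * the hardware-level intent is inferred from the register names.
 */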
bfa_status_t
bfa_ioc_ct_pll_init(bfa_os_addr_t rb, bfa_boolean_t fcmode)
{
        u32     pll_sclk, pll_fclk, r32;

        pll_sclk = __APP_PLL_312_LRESETN | __APP_PLL_312_ENARST |
                __APP_PLL_312_RSEL200500 | __APP_PLL_312_P0_1(3U) |
                __APP_PLL_312_JITLMT0_1(3U) |
                __APP_PLL_312_CNTLMT0_1(1U);
        pll_fclk = __APP_PLL_425_LRESETN | __APP_PLL_425_ENARST |
                __APP_PLL_425_RSEL200500 | __APP_PLL_425_P0_1(3U) |
                __APP_PLL_425_JITLMT0_1(3U) |
                __APP_PLL_425_CNTLMT0_1(1U);
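        /*
         * Select the global operating mode (FC vs. FCoE) and, per the
         * register names, the Ethernet MAC serdes reference clock setup.
         */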
        if (fcmode) {
                bfa_reg_write((rb + OP_MODE), 0);
                bfa_reg_write((rb + ETH_MAC_SER_REG),
                                __APP_EMS_CMLCKSEL |
                                __APP_EMS_REFCKBUFEN2 |
                                __APP_EMS_CHANNEL_SEL);
        } else {
                bfa_reg_write((rb + OP_MODE), __GLOBAL_FCOE_MODE);
                bfa_reg_write((rb + ETH_MAC_SER_REG),
                                __APP_EMS_REFCKBUFEN1);
        }
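        /*
         * Force both IOC state machines to UNINIT and mask/clear all host
         * function interrupts before reprogramming the PLLs.
         */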
        bfa_reg_write((rb + BFA_IOC0_STATE_REG), BFI_IOC_UNINIT);
        bfa_reg_write((rb + BFA_IOC1_STATE_REG), BFI_IOC_UNINIT);
        bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN0_INT_MSK), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_MSK), 0xffffffffU);
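        /*
         * Program both PLLs with the logic soft reset asserted, then
         * enable them while still held in soft reset. The readback
         * flushes the posted writes; the 2ms delay presumably lets the
         * PLLs lock.
         */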
        bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
                __APP_PLL_312_LOGIC_SOFT_RESET);
        bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
                __APP_PLL_425_LOGIC_SOFT_RESET);
        bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
                __APP_PLL_312_LOGIC_SOFT_RESET | __APP_PLL_312_ENABLE);
        bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
                __APP_PLL_425_LOGIC_SOFT_RESET | __APP_PLL_425_ENABLE);
        bfa_reg_read(rb + HOSTFN0_INT_MSK);
        bfa_os_udelay(2000);
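        /*
         * Clear interrupt status latched during the reset, then release
         * the PLL logic soft reset, leaving both PLLs enabled.
         */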
        bfa_reg_write((rb + HOSTFN0_INT_STATUS), 0xffffffffU);
        bfa_reg_write((rb + HOSTFN1_INT_STATUS), 0xffffffffU);
        bfa_reg_write(rb + APP_PLL_312_CTL_REG, pll_sclk |
                __APP_PLL_312_ENABLE);
        bfa_reg_write(rb + APP_PLL_425_CTL_REG, pll_fclk |
                __APP_PLL_425_ENABLE);
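        /*
         * In FCoE mode, hold what the register names suggest are per-port
         * 1T memory blocks in reset while LMEM is released from reset
         * below, then take them back out of reset after the 1ms delay.
         */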
        if (!fcmode) {
                bfa_reg_write((rb + PMM_1T_RESET_REG_P0), __PMM_1T_RESET_P);
                bfa_reg_write((rb + PMM_1T_RESET_REG_P1), __PMM_1T_RESET_P);
        }
        r32 = bfa_reg_read((rb + PSS_CTL_REG));
        r32 &= ~__PSS_LMEM_RESET;
        bfa_reg_write((rb + PSS_CTL_REG), r32);
        bfa_os_udelay(1000);
        if (!fcmode) {
                bfa_reg_write((rb + PMM_1T_RESET_REG_P0), 0);
                bfa_reg_write((rb + PMM_1T_RESET_REG_P1), 0);
        }

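        /*
         * Run the memory BIST: start it, wait 1ms, sample the status
         * register (the result is read but not checked here) and stop it.
         */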
        bfa_reg_write((rb + MBIST_CTL_REG), __EDRAM_BISTR_START);
        bfa_os_udelay(1000);
        r32 = bfa_reg_read((rb + MBIST_STAT_REG));
        bfa_reg_write((rb + MBIST_CTL_REG), 0);
        return BFA_STATUS_OK;
409}