blob: 2c04b97f85a96bb080f150def0c12c72ec6cab08 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
26#include <linux/vmalloc.h>
27#include <linux/interrupt.h>
28#include <linux/pci.h>
29#include <linux/init.h>
30#include <linux/netdevice.h>
31#include <linux/etherdevice.h>
32#include <linux/skbuff.h>
33#include <linux/dma-mapping.h>
34#include <linux/bitops.h>
35#include <linux/irq.h>
36#include <linux/delay.h>
37#include <asm/byteorder.h>
38#include <linux/time.h>
39#include <linux/ethtool.h>
40#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080041#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020042#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070045#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020046#include <linux/workqueue.h>
47#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070048#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020049#include <linux/prefetch.h>
50#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020051#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000052#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020053
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000054#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020055#include "bnx2x.h"
56#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070057#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000058#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020059
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020060
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070061#include <linux/firmware.h>
62#include "bnx2x_fw_file_hdr.h"
63/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000064#define FW_FILE_VERSION \
65 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
66 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
67 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
68 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000069#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
70#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070071
/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

/* Banner printed once when the module is loaded */
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
/* Firmware blobs this driver requests; listed so packaging tools ship them */
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020085
/* Module parameters, settable at load time. */

/* multi-queue operation: 0 = single queue, 1 = one queue set per CPU */
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

/* explicit queue count for multi_mode=1; 0 means "number of CPUs" */
static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is as a number of CPUs)");

/* non-zero disables TPA (hardware LRO aggregation) */
static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

/* force interrupt mode instead of the default MSI-X */
static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
			   "(1 INT#x; 2 MSI)");

/* non-zero enables pause-on-exhausted-host-ring (dropless) flow control */
static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

/* debug aid: use polling instead of interrupts */
static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

/* debug aid: force PCIe Max Read Request Size; -1 leaves the HW default */
static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

/* initial netif msglevel bitmask */
static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

/* driver-private workqueue for slowpath task deferral */
static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200122
/* Board type indices; used as driver_data in the PCI device table and
 * as the index into board_info[] below - keep the two in sync.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};
128
/* indexed by board_type, above */
static struct {
	char *name;	/* human-readable board name for probe messages */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};
137
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700138
/* PCI IDs this driver binds to; driver_data carries the board_type index */
static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
147
148/****************************************************************************
149* General service functions
150****************************************************************************/
151
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000152static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
153 u32 addr, dma_addr_t mapping)
154{
155 REG_WR(bp, addr, U64_LO(mapping));
156 REG_WR(bp, addr + 4, U64_HI(mapping));
157}
158
159static inline void __storm_memset_fill(struct bnx2x *bp,
160 u32 addr, size_t size, u32 val)
161{
162 int i;
163 for (i = 0; i < size/4; i++)
164 REG_WR(bp, addr + (i * 4), val);
165}
166
167static inline void storm_memset_ustats_zero(struct bnx2x *bp,
168 u8 port, u16 stat_id)
169{
170 size_t size = sizeof(struct ustorm_per_client_stats);
171
172 u32 addr = BAR_USTRORM_INTMEM +
173 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
174
175 __storm_memset_fill(bp, addr, size, 0);
176}
177
178static inline void storm_memset_tstats_zero(struct bnx2x *bp,
179 u8 port, u16 stat_id)
180{
181 size_t size = sizeof(struct tstorm_per_client_stats);
182
183 u32 addr = BAR_TSTRORM_INTMEM +
184 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
185
186 __storm_memset_fill(bp, addr, size, 0);
187}
188
189static inline void storm_memset_xstats_zero(struct bnx2x *bp,
190 u8 port, u16 stat_id)
191{
192 size_t size = sizeof(struct xstorm_per_client_stats);
193
194 u32 addr = BAR_XSTRORM_INTMEM +
195 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
196
197 __storm_memset_fill(bp, addr, size, 0);
198}
199
200
201static inline void storm_memset_spq_addr(struct bnx2x *bp,
202 dma_addr_t mapping, u16 abs_fid)
203{
204 u32 addr = XSEM_REG_FAST_MEMORY +
205 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
206
207 __storm_memset_dma_mapping(bp, addr, mapping);
208}
209
210static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
211{
212 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
213}
214
215static inline void storm_memset_func_cfg(struct bnx2x *bp,
216 struct tstorm_eth_function_common_config *tcfg,
217 u16 abs_fid)
218{
219 size_t size = sizeof(struct tstorm_eth_function_common_config);
220
221 u32 addr = BAR_TSTRORM_INTMEM +
222 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
223
224 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
225}
226
227static inline void storm_memset_xstats_flags(struct bnx2x *bp,
228 struct stats_indication_flags *flags,
229 u16 abs_fid)
230{
231 size_t size = sizeof(struct stats_indication_flags);
232
233 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
234
235 __storm_memset_struct(bp, addr, size, (u32 *)flags);
236}
237
238static inline void storm_memset_tstats_flags(struct bnx2x *bp,
239 struct stats_indication_flags *flags,
240 u16 abs_fid)
241{
242 size_t size = sizeof(struct stats_indication_flags);
243
244 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
245
246 __storm_memset_struct(bp, addr, size, (u32 *)flags);
247}
248
249static inline void storm_memset_ustats_flags(struct bnx2x *bp,
250 struct stats_indication_flags *flags,
251 u16 abs_fid)
252{
253 size_t size = sizeof(struct stats_indication_flags);
254
255 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
256
257 __storm_memset_struct(bp, addr, size, (u32 *)flags);
258}
259
260static inline void storm_memset_cstats_flags(struct bnx2x *bp,
261 struct stats_indication_flags *flags,
262 u16 abs_fid)
263{
264 size_t size = sizeof(struct stats_indication_flags);
265
266 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
267
268 __storm_memset_struct(bp, addr, size, (u32 *)flags);
269}
270
271static inline void storm_memset_xstats_addr(struct bnx2x *bp,
272 dma_addr_t mapping, u16 abs_fid)
273{
274 u32 addr = BAR_XSTRORM_INTMEM +
275 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
276
277 __storm_memset_dma_mapping(bp, addr, mapping);
278}
279
280static inline void storm_memset_tstats_addr(struct bnx2x *bp,
281 dma_addr_t mapping, u16 abs_fid)
282{
283 u32 addr = BAR_TSTRORM_INTMEM +
284 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
285
286 __storm_memset_dma_mapping(bp, addr, mapping);
287}
288
289static inline void storm_memset_ustats_addr(struct bnx2x *bp,
290 dma_addr_t mapping, u16 abs_fid)
291{
292 u32 addr = BAR_USTRORM_INTMEM +
293 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
294
295 __storm_memset_dma_mapping(bp, addr, mapping);
296}
297
298static inline void storm_memset_cstats_addr(struct bnx2x *bp,
299 dma_addr_t mapping, u16 abs_fid)
300{
301 u32 addr = BAR_CSTRORM_INTMEM +
302 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
303
304 __storm_memset_dma_mapping(bp, addr, mapping);
305}
306
307static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
308 u16 pf_id)
309{
310 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
311 pf_id);
312 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
313 pf_id);
314 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
315 pf_id);
316 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
317 pf_id);
318}
319
320static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
321 u8 enable)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
324 enable);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
326 enable);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
328 enable);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
330 enable);
331}
332
333static inline void storm_memset_eq_data(struct bnx2x *bp,
334 struct event_ring_data *eq_data,
335 u16 pfid)
336{
337 size_t size = sizeof(struct event_ring_data);
338
339 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
340
341 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
342}
343
344static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
345 u16 pfid)
346{
347 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
348 REG_WR16(bp, addr, eq_prod);
349}
350
351static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
352 u16 fw_sb_id, u8 sb_index,
353 u8 ticks)
354{
355
356 int index_offset =
357 offsetof(struct hc_status_block_data_e1x, index_data);
358 u32 addr = BAR_CSTRORM_INTMEM +
359 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
360 index_offset +
361 sizeof(struct hc_index_data)*sb_index +
362 offsetof(struct hc_index_data, timeout);
363 REG_WR8(bp, addr, ticks);
364 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
365 port, fw_sb_id, sb_index, ticks);
366}
367static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
368 u16 fw_sb_id, u8 sb_index,
369 u8 disable)
370{
371 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
372 int index_offset =
373 offsetof(struct hc_status_block_data_e1x, index_data);
374 u32 addr = BAR_CSTRORM_INTMEM +
375 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
376 index_offset +
377 sizeof(struct hc_index_data)*sb_index +
378 offsetof(struct hc_index_data, flags);
379 u16 flags = REG_RD16(bp, addr);
380 /* clear and set */
381 flags &= ~HC_INDEX_DATA_HC_ENABLED;
382 flags |= enable_flag;
383 REG_WR16(bp, addr, flags);
384 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
385 port, fw_sb_id, sb_index, disable);
386}
387
/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	/* Indirect GRC write via PCI config space: select the target
	 * address, write the data, then re-point the address window at
	 * PCICFG_VENDOR_ID_OFFSET so stale pointers are not left behind.
	 */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
398
/* Indirect GRC read via PCI config space; counterpart of
 * bnx2x_reg_wr_ind() above.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	/* re-point the address window at a benign offset afterwards */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200410
/* DMAE "GO" registers, one per DMAE command channel; indexed by the
 * channel number passed to bnx2x_post_dmae().
 */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
417
418/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000419void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200420{
421 u32 cmd_offset;
422 int i;
423
424 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
425 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
426 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
427
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700428 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
429 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200430 }
431 REG_WR(bp, dmae_reg_go_c[idx], 1);
432}
433
/* DMA len32 dwords from host memory at dma_addr to device GRC address
 * dst_addr using the DMAE engine.  Serialized by bp->dmae_mutex, so it
 * may sleep.  Completion is detected by polling the wb_comp slowpath
 * word that the engine writes back when done.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget before declaring a timeout */

	/* DMAE not usable yet (e.g. early init): fall back to slow
	 * indirect register writes of the staged wb_data buffer.
	 */
	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* PCI (host) -> GRC (device), completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;	/* GRC address in dwords */
	dmae.dst_addr_hi = 0;
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_lock(&bp->dmae_mutex);

	/* clear the completion word before posting the command */
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the engine to write DMAE_COMP_VAL back */
	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
507
/* DMA len32 dwords from device GRC address src_addr into the slowpath
 * wb_data buffer using the DMAE engine.  Mirror image of
 * bnx2x_write_dmae(); serialized by bp->dmae_mutex, so it may sleep.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;	/* poll budget before declaring a timeout */

	/* DMAE not usable yet: fall back to indirect register reads */
	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	memset(&dmae, 0, sizeof(struct dmae_command));

	/* GRC (device) -> PCI (host), completion written back to PCI */
	dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
		       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae.src_addr_lo = src_addr >> 2;	/* GRC address in dwords */
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;
	dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae.comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
	   dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
	   dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

	mutex_lock(&bp->dmae_mutex);

	/* clear the destination buffer and completion word first */
	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	*wb_comp = 0;

	bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

	udelay(5);

	/* poll for the engine to write DMAE_COMP_VAL back */
	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200582
/* DMA an arbitrary-length buffer (len dwords) from host memory to the
 * device, splitting it into chunks of at most DMAE_LEN32_WR_MAX dwords
 * per bnx2x_write_dmae() call.
 */
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;	/* running byte offset into both buffers */

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;	/* offset is bytes */
		len -= dmae_wr_max;		/* len is dwords */
	}

	/* final (possibly short) chunk */
	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
598
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700599/* used only for slowpath so not inlined */
600static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
601{
602 u32 wb_write[2];
603
604 wb_write[0] = val_hi;
605 wb_write[1] = val_lo;
606 REG_WR_DMAE(bp, reg, wb_write, 2);
607}
608
#ifdef USE_WB_RD
/* 64-bit wide-bus read; counterpart of bnx2x_wb_wr(), currently
 * compiled out unless USE_WB_RD is defined.
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
619
/* Scan the firmware assert list of each storm processor (X, T, C, U)
 * and print every recorded entry.  Each entry is four dwords; scanning
 * stops at the first entry whose opcode is still the "invalid" marker.
 * Returns the total number of asserts found.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800740
/* Dump the MCP firmware's scratchpad trace buffer to the kernel log.
 * The "mark" word just below shmem marks the current write position in
 * the circular buffer, so the dump is printed in two passes: mark ->
 * end of buffer, then start of buffer -> mark.
 * NOTE(review): the scratchpad address arithmetic (the -0x0800 and
 * -0x08000000 offsets) is assumed from the shmem layout - confirm
 * against the MCP firmware documentation.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;

	/* no management CPU - nothing to dump */
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	addr = bp->common.shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate mark to a scratchpad GRC address, rounded up to 4 */
	mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* pass 1: from the mark to the end of the trace region,
	 * 8 dwords at a time; data[8] stays 0 as a string terminator
	 */
	for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	/* pass 2: from the start of the trace region up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
773
/* Dump the driver/HW state to the kernel log on a fatal error:
 * default/fastpath status block indices, per-queue producer/consumer
 * indices, the FW-side status block data read back from CSTORM, and
 * (when BNX2X_STOP_ON_ERROR) the contents of the Rx/Tx rings.
 * Finishes by dumping the MCP log and the storm assert lists.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* Freeze statistics so the dump is taken from a stable state */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* Read the slowpath status block data word-by-word from CSTORM */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	/* Per fastpath queue: driver-side indices plus FW status block */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size =
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = (u32 *)&sb_data_e1x;
		/* copy sb data in here (word-by-word read from CSTORM) */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
			"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
			sb_data_e1x.common.p_func.pf_id,
			sb_data_e1x.common.p_func.vf_id,
			sb_data_e1x.common.p_func.vf_valid,
			sb_data_e1x.common.p_func.vnic_id,
			sb_data_e1x.common.same_igu_sb_1b);

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx: dump a window around the consumer of each ring */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
971
/* Enable host-coalescing interrupts for this port according to the
 * active interrupt mode (MSI-X, MSI or INTx), then program the
 * leading/trailing attention edge registers on E1H chips.
 */
void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: disable single-ISR and the INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first write enables all sources (including the
		 * MSI/MSI-X bit), then that bit is cleared for the final
		 * write below — this double-write sequence is deliberate.
		 */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			/* in multi-function mode, enable only this VN's bit */
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1031
/* Mask every HC interrupt source for this port (single ISR, MSI/MSI-X,
 * INTx line and attention bits) and verify the write by reading back.
 */
void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/* clear all interrupt-enable bits in the HC config register */
	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable actually reached the HW */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1053
/* Disable interrupt handling and wait for all in-flight ISRs and the
 * slowpath work item to complete.  @disable_hw additionally masks the
 * interrupt sources at the HC so the HW stops generating them.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling (ISRs check intr_sem and bail out) */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* the CNIC vector sits between slowpath and the queues */
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1083
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001084/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001085
1086/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001087 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001088 */
1089
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001090/* Return true if succeeded to acquire the lock */
1091static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1092{
1093 u32 lock_status;
1094 u32 resource_bit = (1 << resource);
1095 int func = BP_FUNC(bp);
1096 u32 hw_lock_control_reg;
1097
1098 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1099
1100 /* Validating that the resource is within range */
1101 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1102 DP(NETIF_MSG_HW,
1103 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1104 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001105 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001106 }
1107
1108 if (func <= 5)
1109 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1110 else
1111 hw_lock_control_reg =
1112 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1113
1114 /* Try to acquire the lock */
1115 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1116 lock_status = REG_RD(bp, hw_lock_control_reg);
1117 if (lock_status & resource_bit)
1118 return true;
1119
1120 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1121 return false;
1122}
1123
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001124
Michael Chan993ac7b2009-10-10 13:46:56 +00001125#ifdef BCM_CNIC
1126static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1127#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001128
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001129void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001130 union eth_rx_cqe *rr_cqe)
1131{
1132 struct bnx2x *bp = fp->bp;
1133 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1134 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1135
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001136 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001137 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001138 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001139 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001140
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001141 switch (command | fp->state) {
1142 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1143 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1144 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001145 break;
1146
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001147 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1148 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001149 fp->state = BNX2X_FP_STATE_HALTED;
1150 break;
1151
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001152 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1153 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1154 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001155 break;
1156
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001157 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001158 BNX2X_ERR("unexpected MC reply (%d) "
1159 "fp[%d] state is %x\n",
1160 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001161 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001162 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001163
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001164 smp_mb__before_atomic_inc();
1165 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001166 /* push the change in fp->state and towards the memory */
1167 smp_wmb();
1168
1169 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001170}
1171
/* Top-level INTx/MSI interrupt handler.  Acks the HC to get the status
 * bitmap, schedules NAPI for each fastpath queue whose bit is set,
 * forwards the CNIC bit to the CNIC handler, and queues the slowpath
 * work item for bit 0.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* fastpath queues: one status bit each, starting at bit 1 + CNIC */
	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			/* clear the bit so leftovers can be detected below */
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* CNIC ops may be unregistered concurrently; use RCU */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0 is the slowpath event indication */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* any bits still set were not claimed by anyone above */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1240
1241/* end of fast path */
1242
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001243
1244/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001245
1246/*
1247 * General service functions
1248 */
1249
/* Acquire a HW resource lock, polling every 5ms for up to 5 seconds.
 * Returns 0 on success, -EINVAL for a bad resource number, -EEXIST if
 * this function already holds the lock, -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* each function owns its own lock-control register */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock: write the bit to +4, then read
		 * the control register back to see whether we own it */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1294
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001295int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001296{
1297 u32 lock_status;
1298 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001299 int func = BP_FUNC(bp);
1300 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001301
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001302 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1303
Eliezer Tamirf1410642008-02-28 11:51:50 -08001304 /* Validating that the resource is within range */
1305 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1306 DP(NETIF_MSG_HW,
1307 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1308 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1309 return -EINVAL;
1310 }
1311
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001312 if (func <= 5) {
1313 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1314 } else {
1315 hw_lock_control_reg =
1316 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1317 }
1318
Eliezer Tamirf1410642008-02-28 11:51:50 -08001319 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001320 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001321 if (!(lock_status & resource_bit)) {
1322 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1323 lock_status, resource_bit);
1324 return -EFAULT;
1325 }
1326
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001327 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001328 return 0;
1329}
1330
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001331
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001332int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1333{
1334 /* The GPIO should be swapped if swap register is set and active */
1335 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1336 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1337 int gpio_shift = gpio_num +
1338 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1339 u32 gpio_mask = (1 << gpio_shift);
1340 u32 gpio_reg;
1341 int value;
1342
1343 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1344 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1345 return -EINVAL;
1346 }
1347
1348 /* read GPIO value */
1349 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1350
1351 /* get the requested pin value */
1352 if ((gpio_reg & gpio_mask) == gpio_mask)
1353 value = 1;
1354 else
1355 value = 0;
1356
1357 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1358
1359 return value;
1360}
1361
/* Drive GPIO pin @gpio_num on @port to the requested @mode:
 * output low, output high, or high-impedance input.
 * Returns 0 on success, -EINVAL for an illegal pin number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* the GPIO register is shared between ports - take the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT (tri-state the pin) */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1414
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001415int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1416{
1417 /* The GPIO should be swapped if swap register is set and active */
1418 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1419 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1420 int gpio_shift = gpio_num +
1421 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1422 u32 gpio_mask = (1 << gpio_shift);
1423 u32 gpio_reg;
1424
1425 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1426 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1427 return -EINVAL;
1428 }
1429
1430 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1431 /* read GPIO int */
1432 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1433
1434 switch (mode) {
1435 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1436 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1437 "output low\n", gpio_num, gpio_shift);
1438 /* clear SET and set CLR */
1439 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1440 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1441 break;
1442
1443 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1444 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1445 "output high\n", gpio_num, gpio_shift);
1446 /* clear CLR and set SET */
1447 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1448 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1449 break;
1450
1451 default:
1452 break;
1453 }
1454
1455 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1456 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1457
1458 return 0;
1459}
1460
/*
 * bnx2x_set_spio - drive one of the shared SPIO pins.
 * @spio_num: SPIO index; only MISC_REGISTERS_SPIO_4..7 are accepted
 * @mode: output low, output high, or input (hi-Z / float)
 *
 * The SPIO register is shared between functions, so access is
 * serialized with the SPIO HW lock.
 *
 * Returns 0 on success, -EINVAL for an out-of-range pin number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	/* only SPIO 4..7 may be driven from the driver */
	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: leave the pin configuration untouched */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1506
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001507int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1508{
1509 u32 sel_phy_idx = 0;
1510 if (bp->link_vars.link_up) {
1511 sel_phy_idx = EXT_PHY1;
1512 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1513 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1514 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1515 sel_phy_idx = EXT_PHY2;
1516 } else {
1517
1518 switch (bnx2x_phy_selection(&bp->link_params)) {
1519 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1520 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1521 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1522 sel_phy_idx = EXT_PHY1;
1523 break;
1524 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1525 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1526 sel_phy_idx = EXT_PHY2;
1527 break;
1528 }
1529 }
1530 /*
1531 * The selected actived PHY is always after swapping (in case PHY
1532 * swapping is enabled). So when swapping is enabled, we need to reverse
1533 * the configuration
1534 */
1535
1536 if (bp->link_params.multi_phy_config &
1537 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1538 if (sel_phy_idx == EXT_PHY1)
1539 sel_phy_idx = EXT_PHY2;
1540 else if (sel_phy_idx == EXT_PHY2)
1541 sel_phy_idx = EXT_PHY1;
1542 }
1543 return LINK_CONFIG_IDX(sel_phy_idx);
1544}
1545
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001546void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001547{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001548 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001549 switch (bp->link_vars.ieee_fc &
1550 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001551 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001552 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001553 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001554 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001555
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001556 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001557 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001558 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001559 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001560
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001561 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001562 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001563 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001564
Eliezer Tamirf1410642008-02-28 11:51:50 -08001565 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001566 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001567 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001568 break;
1569 }
1570}
1571
Eilon Greenstein2691d512009-08-12 08:22:08 +00001572
/*
 * bnx2x_initial_phy_init - first-time link bring-up.
 * @load_mode: LOAD_* mode; LOAD_DIAG forces XGXS loopback at 10G.
 *
 * Returns the bnx2x_phy_init() status, or -EINVAL when there is no
 * bootcode (MCP) to manage the link.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* diagnostics run in 10G XGXS loopback */
		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested speed possibly overridden for diag */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1610
/*
 * bnx2x_link_set - re-initialize the link with the current parameters
 * (e.g. after a configuration change): reset, re-init, then recompute
 * the advertised flow control.  Requires bootcode (MCP).
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
1623
/*
 * bnx2x__link_reset - tear the link down under the PHY lock.
 * Requires bootcode (MCP).
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
1633
/*
 * bnx2x_link_test - run the PHY link self-test (used by ethtool).
 * @is_serdes: selects which link type to test.
 *
 * Returns bnx2x_test_link()'s result, or 0 when bootcode is missing.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001648
/*
 * bnx2x_init_port_minmax - initialize the per-port rate-shaping and
 * fairness variables in bp->cmng from the current line speed.
 *
 * NOTE(review): divides by line_speed, so assumes the link is up
 * (line_speed != 0) when called - confirm against callers.
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1683
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be
   counted as 1 (DEF_MIN_RATE).
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	/* walk all VNs (functions) sharing this port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1729
/*
 * bnx2x_init_vn_minmax - program the rate-shaping and fairness context
 * of function @func into the XSTORM internal memory.  Hidden functions
 * get min = max = 0.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = MF_CFG_RD(bp, func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}
	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory (word by word) */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001791static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1792{
1793 if (CHIP_REV_IS_SLOW(bp))
1794 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001795 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001796 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001797
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001798 return CMNG_FNS_NONE;
1799}
1800
/*
 * bnx2x_read_mf_cfg - re-read the multi-function configuration of the
 * VNs sharing this port from shmem.
 *
 * NOTE(review): bp->mf_config is a scalar here, so each iteration
 * overwrites the previous one and only the last VN's config is kept -
 * looks like this should be indexed per VN; verify against the users
 * of bp->mf_config.
 */
static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	/* nothing to read without bootcode/shmem */
	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = 2*vn + BP_PORT(bp);
		bp->mf_config =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}
1814
/*
 * bnx2x_cmng_fns_init - (re)initialize congestion management.
 * @read_cfg: re-read the MF configuration from shmem first
 * @cmng_type: CMNG_FNS_* as returned by bnx2x_get_cmng_fns_mode()
 *
 * For CMNG_FNS_MINMAX, sets up port-level rate shaping/fairness and
 * the per-VN min/max contexts; otherwise leaves both disabled.
 */
static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{

	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}
1851
/*
 * bnx2x_link_sync_notify - raise the link-sync general attention
 * towards the other driver instances (VNs) sharing this port, skipping
 * our own VN.
 */
static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001868
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001869/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001870static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001871{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001872 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001873 /* Make sure that we are synced with the current statistics */
1874 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1875
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001876 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001877
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001878 if (bp->link_vars.link_up) {
1879
Eilon Greenstein1c063282009-02-12 08:36:43 +00001880 /* dropless flow control */
Eilon Greensteina18f5122009-08-12 08:23:26 +00001881 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00001882 int port = BP_PORT(bp);
1883 u32 pause_enabled = 0;
1884
1885 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1886 pause_enabled = 1;
1887
1888 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07001889 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00001890 pause_enabled);
1891 }
1892
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001893 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1894 struct host_port_stats *pstats;
1895
1896 pstats = bnx2x_sp(bp, port_stats);
1897 /* reset old bmac stats */
1898 memset(&(pstats->mac_stx[0]), 0,
1899 sizeof(struct mac_stx));
1900 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07001901 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07001902 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1903 }
1904
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00001905 /* indicate link status only if link status actually changed */
1906 if (prev_link_status != bp->link_vars.link_status)
1907 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001908
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001909 if (IS_MF(bp)) {
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001910 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001911 int func;
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001912 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001913
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00001914 /* Set the attention towards other drivers on the same port */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001915 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1916 if (vn == BP_E1HVN(bp))
1917 continue;
1918
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001919 func = ((vn << 1) | port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001920 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1921 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1922 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001923
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001924 if (bp->link_vars.link_up) {
1925 int i;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001926
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001927 /* Init rate shaping and fairness contexts */
1928 bnx2x_init_port_minmax(bp);
1929
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001930 for (vn = VN_0; vn < E1HVN_MAX; vn++)
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001931 bnx2x_init_vn_minmax(bp, 2*vn + port);
1932
1933 /* Store it to internal memory */
1934 for (i = 0;
1935 i < sizeof(struct cmng_struct_per_port) / 4; i++)
1936 REG_WR(bp, BAR_XSTRORM_INTMEM +
1937 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1938 ((u32 *)(&bp->cmng))[i]);
1939 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001940 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001941}
1942
/*
 * bnx2x__link_status_update - refresh the cached link state from the
 * link module and report it.  Does nothing while the function is not
 * OPEN or is MF-disabled.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	/* (re)arm or stop statistics according to the new link state */
	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_calc_vn_weight_sum(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
1960
/*
 * bnx2x_pmf_update - take over the Port Management Function (PMF)
 * role: mark ourselves as PMF, enable NIG attention for our VN on both
 * HC edge registers, and notify the statistics state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
1976
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001977/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001978
1979/* slow path */
1980
1981/*
1982 * General service functions
1983 */
1984
/* send the MCP a request, block until there is a reply */
/*
 * Writes @param/@command (tagged with a fresh sequence number) to the
 * driver mailbox and polls - up to 500 iterations of 10ms (100ms each
 * on slow/emulation chips) - for a reply carrying the same sequence
 * number.  Serialized by bp->fw_mb_mutex.
 *
 * Returns the FW reply masked with FW_MSG_CODE_MASK, or 0 when the FW
 * failed to respond (a firmware dump is emitted in that case).
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do it's magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2024
/* must be called under rtnl_lock */
/*
 * bnx2x_rxq_set_mac_filters - translate the BNX2X_* filter flags of
 * client @cl_id into per-class drop-all / accept-all masks, then set
 * or clear this client's bit in each mask in bp->mac_filters.
 */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* for each class: set or clear this client's bit in the mask */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2092
/*
 * bnx2x_func_init - program the per-function configuration (TPA/RSS
 * flags, statistics mappings, SPQ address) described by @p into the
 * storm internal memories, and enable the function in the FW.
 */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (FUNC_CONFIG(p->func_flgs)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		/* tpa */
		if (p->func_flgs & FUNC_FLG_TPA)
			tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

		/* set rss flags */
		if (p->func_flgs & FUNC_FLG_RSS) {
			u16 rss_flgs = (p->rss->mode <<
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

			if (p->rss->cap & RSS_IPV4_CAP)
				rss_flgs |= RSS_IPV4_CAP_MASK;
			if (p->rss->cap & RSS_IPV4_TCP_CAP)
				rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_CAP)
				rss_flgs |= RSS_IPV6_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_TCP_CAP)
				rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

			tcfg.config_flags |= rss_flgs;
			tcfg.rss_result_mask = p->rss->result_mask;

		}

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: same flags and stats map for all four storms */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2154
2155static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2156 struct bnx2x_fastpath *fp)
2157{
2158 u16 flags = 0;
2159
2160 /* calculate queue flags */
2161 flags |= QUEUE_FLG_CACHE_ALIGN;
2162 flags |= QUEUE_FLG_HC;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002163 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002164
2165#ifdef BCM_VLAN
2166 flags |= QUEUE_FLG_VLAN;
2167 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2168#endif
2169
2170 if (!fp->disable_tpa)
2171 flags |= QUEUE_FLG_TPA;
2172
2173 flags |= QUEUE_FLG_STATS;
2174
2175 return flags;
2176}
2177
/*
 * bnx2x_pf_rx_cl_prep - fill the RX-queue pause thresholds and init
 * parameters for client @fp from its DMA mappings and the device MTU.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	/* TPA: SGE thresholds, aggregation size and SGE geometry */
	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* host-coalescing rate in interrupts/sec (0 = disabled) */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2239
2240static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2241 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2242{
2243 u16 flags = bnx2x_get_cl_flags(bp, fp);
2244
2245 txq_init->flags = flags;
2246 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2247 txq_init->dscr_map = fp->tx_desc_mapping;
2248 txq_init->stat_id = fp->cl_id;
2249 txq_init->cid = HW_CID(bp, fp->cid);
2250 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2251 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2252 txq_init->fw_sb_id = fp->fw_sb_id;
2253 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2254}
2255
/**
 * bnx2x_pf_init - per-PF initialization performed at load time.
 * @bp: driver handle
 *
 * Sets up function-level flags (stats, leading, SPQ, optional TPA/RSS),
 * issues the function init, resets congestion management, programs the
 * storm Rx mode (initially "no Rx" until link-up) and initializes the
 * event queue data in the storms.
 */
void bnx2x_pf_init(struct bnx2x *bp)
{
        struct bnx2x_func_init_params func_init = {0};
        struct bnx2x_rss_params rss = {0};
        struct event_ring_data eq_data = { {0} };
        u16 flags;

        /* pf specific setups */
        if (!CHIP_IS_E1(bp))
                storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

        /* function setup flags */
        flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

        flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;

        /*
         * Although RSS is meaningless when there is a single HW queue we
         * still need it enabled in order to have HW Rx hash generated.
         *
         * if (is_eth_multi(bp))
         *	flags |= FUNC_FLG_RSS;
         */

        /* function setup */
        if (flags & FUNC_FLG_RSS) {
                rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
                           RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
                rss.mode = bp->multi_mode;
                rss.result_mask = MULTI_MASK;
                func_init.rss = &rss;
        }

        func_init.func_flgs = flags;
        func_init.pf_id = BP_FUNC(bp);
        func_init.func_id = BP_FUNC(bp);
        func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
        func_init.spq_map = bp->spq_mapping;
        func_init.spq_prod = bp->spq_prod_idx;

        bnx2x_func_init(bp, &func_init);

        memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

        /*
         * Congestion management values depend on the link rate.
         * There is no active link so the initial link rate is set to 10 Gbps.
         * When the link comes up the congestion management values are
         * re-calculated according to the actual link rate.
         */
        bp->link_vars.line_speed = SPEED_10000;
        bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

        /* Only the PMF sets the HW */
        if (bp->port.pmf)
                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

        /* no rx until link is up */
        bp->rx_mode = BNX2X_RX_MODE_NONE;
        bnx2x_set_storm_rx_mode(bp);

        /* init Event Queue */
        eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
        eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
        eq_data.producer = bp->eq_prod;
        eq_data.index_id = HC_SP_INDEX_EQ_CONS;
        eq_data.sb_id = DEF_SB_ID;
        storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2325
2326
/*
 * Disable Ethernet traffic for this E1H function: stop the Tx queues,
 * turn off the per-port LLH function enable in the NIG (stops Rx
 * classification to this function), then drop the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);

        /* stop the stack from queueing new Tx packets first */
        netif_tx_disable(bp->dev);

        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

        netif_carrier_off(bp->dev);
}
2337
/*
 * Re-enable Ethernet traffic for this E1H function: turn the per-port
 * LLH function enable back on in the NIG and wake the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);

        REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

        /* Tx queue should be only reenabled */
        netif_tx_wake_all_queues(bp->dev);

        /*
         * Should not call netif_carrier_on since it will be called if the link
         * is up when checking for link state
         */
}
2352
/*
 * Handle a DCC (Dynamic Configuration Change) event from the MCP:
 * enable/disable of this PF and/or a bandwidth re-allocation.
 * Each handled event bit is cleared from @dcc_event; any bit left set
 * afterwards is unrecognized and reported to the MCP as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
        DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

        if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

                /*
                 * This is the only place besides the function initialization
                 * where the bp->flags can change so it is done without any
                 * locks
                 */
                if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
                        DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
                        bp->flags |= MF_FUNC_DIS;

                        bnx2x_e1h_disable(bp);
                } else {
                        DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
                        bp->flags &= ~MF_FUNC_DIS;

                        bnx2x_e1h_enable(bp);
                }
                dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
        }
        if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

                /* re-init min/max congestion management and tell the
                 * other driver instances, then program the storms */
                bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
                bnx2x_link_sync_notify(bp);
                storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
                dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
        }

        /* Report results to MCP */
        if (dcc_event)
                bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
        else
                bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2391
Michael Chan28912902009-10-10 13:46:53 +00002392/* must be called under the spq lock */
2393static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2394{
2395 struct eth_spe *next_spe = bp->spq_prod_bd;
2396
2397 if (bp->spq_prod_bd == bp->spq_last_bd) {
2398 bp->spq_prod_bd = bp->spq;
2399 bp->spq_prod_idx = 0;
2400 DP(NETIF_MSG_TIMER, "end of spq\n");
2401 } else {
2402 bp->spq_prod_bd++;
2403 bp->spq_prod_idx++;
2404 }
2405 return next_spe;
2406}
2407
/* must be called under the spq lock */
/* Publish the new SPQ producer index to the XSTORM so FW picks up the
 * newly posted slow-path elements. */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
        int func = BP_FUNC(bp);

        /* Make sure that BD data is updated before writing the producer */
        wmb();

        REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
                 bp->spq_prod_idx);
        mmiowb();
}
2420
/* the slow path queue is odd since completions arrive on the fastpath ring */
/**
 * bnx2x_sp_post - post a single slow-path (ramrod) element to the SPQ.
 * @bp:		driver handle
 * @command:	ramrod command id
 * @cid:	connection id the ramrod applies to
 * @data_hi:	high 32 bits of the ramrod data DMA address
 * @data_lo:	low 32 bits of the ramrod data DMA address
 * @common:	non-zero for "common" (non-ETH-connection) ramrods
 *
 * Returns 0 on success, -EIO when the driver already paniced,
 * -EBUSY (after panicking) when the SPQ ring is unexpectedly full.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
                  u32 data_hi, u32 data_lo, int common)
{
        struct eth_spe *spe;
        u16 type;

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return -EIO;
#endif

        spin_lock_bh(&bp->spq_lock);

        if (!atomic_read(&bp->spq_left)) {
                BNX2X_ERR("BUG! SPQ ring full!\n");
                spin_unlock_bh(&bp->spq_lock);
                bnx2x_panic();
                return -EBUSY;
        }

        spe = bnx2x_sp_get_next(bp);

        /* CID needs port number to be encoded in it */
        spe->hdr.conn_and_cmd_data =
                        cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
                                    HW_CID(bp, cid));

        if (common)
                /* Common ramrods:
                 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
                 * TRAFFIC_STOP, TRAFFIC_START
                 */
                type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
                        & SPE_HDR_CONN_TYPE;
        else
                /* ETH ramrods: SETUP, HALT */
                type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
                        & SPE_HDR_CONN_TYPE;

        type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
                 SPE_HDR_FUNCTION_ID);

        spe->hdr.type = cpu_to_le16(type);

        spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
        spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

        /* stats ramrod has its own slot on the spq */
        if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
                /* It's ok if the actual decrement is issued towards the memory
                 * somewhere between the spin_lock and spin_unlock. Thus no
                 * more explicit memory barrier is needed.
                 */
                atomic_dec(&bp->spq_left);

        DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
           "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
           "type(0x%x) left %x\n",
           bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
           (u32)(U64_LO(bp->spq_mapping) +
           (void *)bp->spq_prod_bd - (void *)bp->spq), command,
           HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

        bnx2x_sp_prod_update(bp);
        spin_unlock_bh(&bp->spq_lock);
        return 0;
}
2489
2490/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002491static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002492{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002493 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002494 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002495
2496 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002497 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002498 val = (1UL << 31);
2499 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2500 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2501 if (val & (1L << 31))
2502 break;
2503
2504 msleep(5);
2505 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002506 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002507 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002508 rc = -EBUSY;
2509 }
2510
2511 return rc;
2512}
2513
/* release split MCP access lock register */
/* Writing 0 clears the lock bit taken in bnx2x_acquire_alr(). */
static void bnx2x_release_alr(struct bnx2x *bp)
{
        REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2519
/* bits returned by bnx2x_update_dsb_idx(): which default-SB indices moved */
#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

/*
 * Sample the default status block written by the chip and latch any new
 * attention / slow-path indices into bp. Returns a mask of
 * BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX telling which index advanced.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
        struct host_sp_status_block *def_sb = bp->def_status_blk;
        u16 rc = 0;

        barrier(); /* status block is written to by the chip */
        if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
                bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
                rc |= BNX2X_DEF_SB_ATT_IDX;
        }

        if (bp->def_idx != def_sb->sp_sb.running_index) {
                bp->def_idx = def_sb->sp_sb.running_index;
                rc |= BNX2X_DEF_SB_IDX;
        }

        /* Do not reorder: indices reading should complete before handling */
        barrier();
        return rc;
}
2543
2544/*
2545 * slow path service functions
2546 */
2547
/*
 * Handle newly asserted attention bits: mask them in the AEU (under the
 * per-port HW lock), record them in bp->attn_state, service the
 * hard-wired attentions (NIG/link, SW timer, GPIOs, general attentions)
 * and finally acknowledge them towards the HC.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
        int port = BP_PORT(bp);
        u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
                       COMMAND_REG_ATTN_BITS_SET);
        u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
                              MISC_REG_AEU_MASK_ATTN_FUNC_0;
        u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
                                       NIG_REG_MASK_INTERRUPT_PORT0;
        u32 aeu_mask;
        u32 nig_mask = 0;

        /* a bit cannot be asserted twice without being deasserted first */
        if (bp->attn_state & asserted)
                BNX2X_ERR("IGU ERROR\n");

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
        aeu_mask = REG_RD(bp, aeu_addr);

        DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
           aeu_mask, asserted);
        /* mask the newly asserted lines (only the low 10 attn lines) */
        aeu_mask &= ~(asserted & 0x3ff);
        DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

        REG_WR(bp, aeu_addr, aeu_mask);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

        DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
        bp->attn_state |= asserted;
        DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

        if (asserted & ATTN_HARD_WIRED_MASK) {
                if (asserted & ATTN_NIG_FOR_FUNC) {

                        bnx2x_acquire_phy_lock(bp);

                        /* save nig interrupt mask */
                        nig_mask = REG_RD(bp, nig_int_mask_addr);
                        REG_WR(bp, nig_int_mask_addr, 0);

                        bnx2x_link_attn(bp);

                        /* handle unicore attn? */
                }
                if (asserted & ATTN_SW_TIMER_4_FUNC)
                        DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

                if (asserted & GPIO_2_FUNC)
                        DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

                if (asserted & GPIO_3_FUNC)
                        DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

                if (asserted & GPIO_4_FUNC)
                        DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

                /* general attentions 1-3 belong to port 0, 4-6 to port 1;
                 * clear each one that fired */
                if (port == 0) {
                        if (asserted & ATTN_GENERAL_ATTN_1) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
                        }
                        if (asserted & ATTN_GENERAL_ATTN_2) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
                        }
                        if (asserted & ATTN_GENERAL_ATTN_3) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
                        }
                } else {
                        if (asserted & ATTN_GENERAL_ATTN_4) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
                        }
                        if (asserted & ATTN_GENERAL_ATTN_5) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
                        }
                        if (asserted & ATTN_GENERAL_ATTN_6) {
                                DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
                                REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
                        }
                }

        } /* if hardwired */

        DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
           asserted, hc_addr);
        REG_WR(bp, hc_addr, asserted);

        /* now set back the mask */
        if (asserted & ATTN_NIG_FOR_FUNC) {
                REG_WR(bp, nig_int_mask_addr, nig_mask);
                bnx2x_release_phy_lock(bp);
        }
}
2643
/*
 * Record a fan failure: latch the FAILURE external-PHY type into shared
 * memory (so other driver instances / reloads see it) and log a fatal
 * message for the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 ext_phy_config;
        /* mark the failure */
        ext_phy_config =
                SHMEM_RD(bp,
                         dev_info.port_hw_config[port].external_phy_config);

        ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
        ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
        SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
                 ext_phy_config);

        /* log the failure */
        netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
               " the driver to shutdown the card to prevent permanent"
               " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002663
/*
 * Service deasserted attention group 0: SPIO5 (fan failure), GPIO3
 * (module detect) and the fatal HW-block attentions of set 0.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
        int port = BP_PORT(bp);
        int reg_offset;
        u32 val;

        reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
                             MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

        if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

                /* disable the SPIO5 attention line before handling it */
                val = REG_RD(bp, reg_offset);
                val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
                REG_WR(bp, reg_offset, val);

                BNX2X_ERR("SPIO5 hw attention\n");

                /* Fan failure attention */
                bnx2x_hw_reset_phy(&bp->link_params);
                bnx2x_fan_failure(bp);
        }

        if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
                    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_handle_module_detect_int(&bp->link_params);
                bnx2x_release_phy_lock(bp);
        }

        if (attn & HW_INTERRUT_ASSERT_SET_0) {

                /* fatal: disable the offending lines and crash the driver */
                val = REG_RD(bp, reg_offset);
                val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
                REG_WR(bp, reg_offset, val);

                BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
                          (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
                bnx2x_panic();
        }
}
2704
/*
 * Service deasserted attention group 1: doorbell queue (DORQ) errors and
 * the fatal HW-block attentions of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
        u32 val;

        if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

                /* reading the STS_CLR register also clears the interrupt */
                val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
                BNX2X_ERR("DB hw attention 0x%x\n", val);
                /* DORQ discard attention */
                if (val & 0x2)
                        BNX2X_ERR("FATAL error from DORQ\n");
        }

        if (attn & HW_INTERRUT_ASSERT_SET_1) {

                int port = BP_PORT(bp);
                int reg_offset;

                reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

                /* fatal: disable the offending lines and crash the driver */
                val = REG_RD(bp, reg_offset);
                val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
                REG_WR(bp, reg_offset, val);

                BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
                          (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
                bnx2x_panic();
        }
}
2735
/*
 * Service deasserted attention group 2: CFC and PXP errors and the fatal
 * HW-block attentions of set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
        u32 val;

        if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

                /* reading the STS_CLR register also clears the interrupt */
                val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
                BNX2X_ERR("CFC hw attention 0x%x\n", val);
                /* CFC error attention */
                if (val & 0x2)
                        BNX2X_ERR("FATAL error from CFC\n");
        }

        if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

                val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
                BNX2X_ERR("PXP hw attention 0x%x\n", val);
                /* RQ_USDMDP_FIFO_OVERFLOW */
                if (val & 0x18000)
                        BNX2X_ERR("FATAL error from PXP\n");
        }

        if (attn & HW_INTERRUT_ASSERT_SET_2) {

                int port = BP_PORT(bp);
                int reg_offset;

                reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
                                     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

                /* fatal: disable the offending lines and crash the driver */
                val = REG_RD(bp, reg_offset);
                val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
                REG_WR(bp, reg_offset, val);

                BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
                          (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
                bnx2x_panic();
        }
}
2775
/*
 * Service deasserted attention group 3: driver/MCP general attentions
 * (PMF link events, DCC events, MC/MCP asserts) and latched attentions
 * (GRC timeout/reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
        u32 val;

        if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

                if (attn & BNX2X_PMF_LINK_ASSERT) {
                        int func = BP_FUNC(bp);

                        /* clear the general attention and re-read the MF
                         * configuration / driver status from the MCP */
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
                        bp->mf_config =
                                MF_CFG_RD(bp, func_mf_config[func].config);
                        val = SHMEM_RD(bp, func_mb[func].drv_status);
                        if (val & DRV_STATUS_DCC_EVENT_MASK)
                                bnx2x_dcc_event(bp,
                                            (val & DRV_STATUS_DCC_EVENT_MASK));
                        bnx2x__link_status_update(bp);
                        if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
                                bnx2x_pmf_update(bp);

                } else if (attn & BNX2X_MC_ASSERT_BITS) {

                        BNX2X_ERR("MC assert!\n");
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
                        bnx2x_panic();

                } else if (attn & BNX2X_MCP_ASSERT) {

                        BNX2X_ERR("MCP assert!\n");
                        REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
                        bnx2x_fw_dump(bp);

                } else
                        BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
        }

        if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
                BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
                if (attn & BNX2X_GRC_TIMEOUT) {
                        /* GRC detail registers exist on E1H only */
                        val = CHIP_IS_E1H(bp) ?
                                REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
                        BNX2X_ERR("GRC time-out 0x%08x\n", val);
                }
                if (attn & BNX2X_GRC_RSV) {
                        val = CHIP_IS_E1H(bp) ?
                                REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
                        BNX2X_ERR("GRC reserved 0x%08x\n", val);
                }
                REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
        }
}
2830
/* The generic POR register is shared between driver instances: the low
 * LOAD_COUNTER_BITS hold the load counter, the bit above it flags a
 * global HW reset in progress. */
#define BNX2X_MISC_GEN_REG	MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS	16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK	(((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK	(~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT	LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
/*
 * Clear the "reset in progress" flag; preserves the load counter bits.
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
        val &= ~(1 << RESET_DONE_FLAG_SHIFT);
        REG_WR(bp, BNX2X_MISC_GEN_REG, val);
        barrier();
        mmiowb();
}
2848
2849/*
2850 * should be run under rtnl lock
2851 */
2852static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2853{
2854 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2855 val |= (1 << 16);
2856 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2857 barrier();
2858 mmiowb();
2859}
2860
2861/*
2862 * should be run under rtnl lock
2863 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002864bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002865{
2866 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2867 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2868 return (val & RESET_DONE_FLAG_MASK) ? false : true;
2869}
2870
2871/*
2872 * should be run under rtnl lock
2873 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002874inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002875{
2876 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2877
2878 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2879
2880 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2881 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2882 barrier();
2883 mmiowb();
2884}
2885
2886/*
2887 * should be run under rtnl lock
2888 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002889u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002890{
2891 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2892
2893 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2894
2895 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2896 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2897 barrier();
2898 mmiowb();
2899
2900 return val1;
2901}
2902
/*
 * Read the current shared load counter (low bits of the POR register).
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
        return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}
2910
/* Zero the shared load counter, preserving the reset-flag bits. */
static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
        u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
        REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
2916
/* Append a block name to the current console line, separating all but
 * the first entry with ", ". */
static inline void _print_next_block(int idx, const char *blk)
{
        pr_cont(idx ? ", %s" : "%s", blk);
}
2923
2924static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2925{
2926 int i = 0;
2927 u32 cur_bit = 0;
2928 for (i = 0; sig; i++) {
2929 cur_bit = ((u32)0x1 << i);
2930 if (sig & cur_bit) {
2931 switch (cur_bit) {
2932 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2933 _print_next_block(par_num++, "BRB");
2934 break;
2935 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2936 _print_next_block(par_num++, "PARSER");
2937 break;
2938 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2939 _print_next_block(par_num++, "TSDM");
2940 break;
2941 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2942 _print_next_block(par_num++, "SEARCHER");
2943 break;
2944 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2945 _print_next_block(par_num++, "TSEMI");
2946 break;
2947 }
2948
2949 /* Clear the bit */
2950 sig &= ~cur_bit;
2951 }
2952 }
2953
2954 return par_num;
2955}
2956
2957static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2958{
2959 int i = 0;
2960 u32 cur_bit = 0;
2961 for (i = 0; sig; i++) {
2962 cur_bit = ((u32)0x1 << i);
2963 if (sig & cur_bit) {
2964 switch (cur_bit) {
2965 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2966 _print_next_block(par_num++, "PBCLIENT");
2967 break;
2968 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2969 _print_next_block(par_num++, "QM");
2970 break;
2971 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2972 _print_next_block(par_num++, "XSDM");
2973 break;
2974 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2975 _print_next_block(par_num++, "XSEMI");
2976 break;
2977 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2978 _print_next_block(par_num++, "DOORBELLQ");
2979 break;
2980 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2981 _print_next_block(par_num++, "VAUX PCI CORE");
2982 break;
2983 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2984 _print_next_block(par_num++, "DEBUG");
2985 break;
2986 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2987 _print_next_block(par_num++, "USDM");
2988 break;
2989 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2990 _print_next_block(par_num++, "USEMI");
2991 break;
2992 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2993 _print_next_block(par_num++, "UPB");
2994 break;
2995 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2996 _print_next_block(par_num++, "CSDM");
2997 break;
2998 }
2999
3000 /* Clear the bit */
3001 sig &= ~cur_bit;
3002 }
3003 }
3004
3005 return par_num;
3006}
3007
3008static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3009{
3010 int i = 0;
3011 u32 cur_bit = 0;
3012 for (i = 0; sig; i++) {
3013 cur_bit = ((u32)0x1 << i);
3014 if (sig & cur_bit) {
3015 switch (cur_bit) {
3016 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3017 _print_next_block(par_num++, "CSEMI");
3018 break;
3019 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3020 _print_next_block(par_num++, "PXP");
3021 break;
3022 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3023 _print_next_block(par_num++,
3024 "PXPPCICLOCKCLIENT");
3025 break;
3026 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3027 _print_next_block(par_num++, "CFC");
3028 break;
3029 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3030 _print_next_block(par_num++, "CDU");
3031 break;
3032 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3033 _print_next_block(par_num++, "IGU");
3034 break;
3035 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3036 _print_next_block(par_num++, "MISC");
3037 break;
3038 }
3039
3040 /* Clear the bit */
3041 sig &= ~cur_bit;
3042 }
3043 }
3044
3045 return par_num;
3046}
3047
3048static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3049{
3050 int i = 0;
3051 u32 cur_bit = 0;
3052 for (i = 0; sig; i++) {
3053 cur_bit = ((u32)0x1 << i);
3054 if (sig & cur_bit) {
3055 switch (cur_bit) {
3056 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3057 _print_next_block(par_num++, "MCP ROM");
3058 break;
3059 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3060 _print_next_block(par_num++, "MCP UMP RX");
3061 break;
3062 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3063 _print_next_block(par_num++, "MCP UMP TX");
3064 break;
3065 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3066 _print_next_block(par_num++, "MCP SCPAD");
3067 break;
3068 }
3069
3070 /* Clear the bit */
3071 sig &= ~cur_bit;
3072 }
3073 }
3074
3075 return par_num;
3076}
3077
3078static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3079 u32 sig2, u32 sig3)
3080{
3081 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3082 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3083 int par_num = 0;
3084 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3085 "[0]:0x%08x [1]:0x%08x "
3086 "[2]:0x%08x [3]:0x%08x\n",
3087 sig0 & HW_PRTY_ASSERT_SET_0,
3088 sig1 & HW_PRTY_ASSERT_SET_1,
3089 sig2 & HW_PRTY_ASSERT_SET_2,
3090 sig3 & HW_PRTY_ASSERT_SET_3);
3091 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3092 bp->dev->name);
3093 par_num = bnx2x_print_blocks_with_parity0(
3094 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3095 par_num = bnx2x_print_blocks_with_parity1(
3096 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3097 par_num = bnx2x_print_blocks_with_parity2(
3098 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3099 par_num = bnx2x_print_blocks_with_parity3(
3100 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3101 printk("\n");
3102 return true;
3103 } else
3104 return false;
3105}
3106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003107bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003108{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003109 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003110 int port = BP_PORT(bp);
3111
3112 attn.sig[0] = REG_RD(bp,
3113 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3114 port*4);
3115 attn.sig[1] = REG_RD(bp,
3116 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3117 port*4);
3118 attn.sig[2] = REG_RD(bp,
3119 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3120 port*4);
3121 attn.sig[3] = REG_RD(bp,
3122 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3123 port*4);
3124
3125 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3126 attn.sig[3]);
3127}
3128
/* Handle newly deasserted attention bits: dispatch each deasserted
 * dynamic attention group to the per-register deassert handlers, clear
 * the bits in the HC, and re-enable them in the AEU mask.
 *
 * On a parity error the function instead kicks off the recovery flow
 * and returns without touching the attentions, so the other function
 * can still observe the parity state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* Start the recovery process and hand off to the reset task */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* Snapshot the after-invert attention state for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	/* For every deasserted group, mask the snapshot with the group's
	 * configured signals and let each register's handler act on it.
	 */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	/* Clear the handled attention bits in the HC */
	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	/* Bits we are deasserting should all have been recorded asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* Re-enable the deasserted lines in the AEU mask under the
	 * per-port attention-mask HW lock (shared with the MCP).
	 */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3211
3212static void bnx2x_attn_int(struct bnx2x *bp)
3213{
3214 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003215 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3216 attn_bits);
3217 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3218 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003219 u32 attn_state = bp->attn_state;
3220
3221 /* look for changed bits */
3222 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3223 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3224
3225 DP(NETIF_MSG_HW,
3226 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3227 attn_bits, attn_ack, asserted, deasserted);
3228
3229 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003230 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003231
3232 /* handle bits that were raised */
3233 if (asserted)
3234 bnx2x_attn_int_asserted(bp, asserted);
3235
3236 if (deasserted)
3237 bnx2x_attn_int_deasserted(bp, deasserted);
3238}
3239
/* Publish a new event-queue producer value to the STORM memory of this
 * function.  The mmiowb() keeps successive producer updates ordered.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3246
#ifdef BCM_CNIC
/* Route a CFC-delete completion to CNIC when @cid falls inside the CNIC
 * CID range.  Returns 0 when CNIC consumed the event, 1 when the CID is
 * an ethernet one and the caller must handle it itself.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below the CNIC range, or with CNIC not started, are not
	 * ours to handle.
	 */
	if (cid < bp->cnic_eth_dev.starting_cid ||
	    !bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	/* An errored delete completion indicates a FW/driver bug - dump
	 * the device state for analysis before completing anyway.
	 */
	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3266
/* Drain the event queue: walk the ring from the SW consumer to the HW
 * consumer, dispatch each element by opcode (and by opcode|state for
 * slowpath state transitions), then return the consumed credits to the
 * SPQ and publish the new producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page value we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		/* Remaining opcodes are state transitions, keyed by the
		 * combination of the opcode and the current driver state.
		 */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* Return the consumed entries to the slowpath queue credit */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3374
/* Slowpath work item: pick up the default status block index changes
 * and handle HW attentions and event-queue completions, acking the
 * status block segments as they are consumed.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* Bitmask of default-SB indices that moved since the last run */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		/* Ack the non-attention segment without re-enabling ints */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* Final ack re-enables the interrupt line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3417
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003418irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003419{
3420 struct net_device *dev = dev_instance;
3421 struct bnx2x *bp = netdev_priv(dev);
3422
3423 /* Return here if interrupt is disabled */
3424 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003425 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003426 return IRQ_HANDLED;
3427 }
3428
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003429 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3430 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003431
3432#ifdef BNX2X_STOP_ON_ERROR
3433 if (unlikely(bp->panic))
3434 return IRQ_HANDLED;
3435#endif
3436
Michael Chan993ac7b2009-10-10 13:46:56 +00003437#ifdef BCM_CNIC
3438 {
3439 struct cnic_ops *c_ops;
3440
3441 rcu_read_lock();
3442 c_ops = rcu_dereference(bp->cnic_ops);
3443 if (c_ops)
3444 c_ops->cnic_handler(bp->cnic_data, NULL);
3445 rcu_read_unlock();
3446 }
3447#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003448 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003449
3450 return IRQ_HANDLED;
3451}
3452
3453/* end of slow path */
3454
/* Periodic driver timer: in poll mode services ring 0, maintains the
 * driver<->MCP heartbeat pulse, and triggers a statistics update while
 * the device is up.  Re-arms itself at bp->current_interval.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	/* 'poll' is a module parameter: service ring 0 from the timer */
	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3503
3504/* end of Statistics */
3505
3506/* nic init */
3507
3508/*
3509 * nic init service functions
3510 */
3511
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003512static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003513{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003514 u32 i;
3515 if (!(len%4) && !(addr%4))
3516 for (i = 0; i < len; i += 4)
3517 REG_WR(bp, addr + i, fill);
3518 else
3519 for (i = 0; i < len; i++)
3520 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003521
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003522}
3523
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003524/* helper: writes FP SP data to FW - data_size in dwords */
3525static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3526 int fw_sb_id,
3527 u32 *sb_data_p,
3528 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003529{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003530 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003531 for (index = 0; index < data_size; index++)
3532 REG_WR(bp, BAR_CSTRORM_INTMEM +
3533 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3534 sizeof(u32)*index,
3535 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003536}
3537
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003538static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3539{
3540 u32 *sb_data_p;
3541 u32 data_size = 0;
3542 struct hc_status_block_data_e1x sb_data_e1x;
3543
3544 /* disable the function first */
3545 memset(&sb_data_e1x, 0,
3546 sizeof(struct hc_status_block_data_e1x));
3547 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3548 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3549 sb_data_e1x.common.p_func.vf_valid = false;
3550 sb_data_p = (u32 *)&sb_data_e1x;
3551 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3552
3553 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3554
3555 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3556 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3557 CSTORM_STATUS_BLOCK_SIZE);
3558 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3559 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3560 CSTORM_SYNC_BLOCK_SIZE);
3561}
3562
3563/* helper: writes SP SB data to FW */
3564static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3565 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003566{
3567 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003568 int i;
3569 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3570 REG_WR(bp, BAR_CSTRORM_INTMEM +
3571 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3572 i*sizeof(u32),
3573 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003574}
3575
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003576static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3577{
3578 int func = BP_FUNC(bp);
3579 struct hc_sp_status_block_data sp_sb_data;
3580 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3581
3582 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3583 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3584 sp_sb_data.p_func.vf_valid = false;
3585
3586 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3587
3588 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3589 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3590 CSTORM_SP_STATUS_BLOCK_SIZE);
3591 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3592 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3593 CSTORM_SP_SYNC_BLOCK_SIZE);
3594
3595}
3596
3597
3598static inline
3599void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3600 int igu_sb_id, int igu_seg_id)
3601{
3602 hc_sm->igu_sb_id = igu_sb_id;
3603 hc_sm->igu_seg_id = igu_seg_id;
3604 hc_sm->timer_value = 0xFF;
3605 hc_sm->time_to_expire = 0xFFFFFFFF;
3606}
3607
/* Initialize a fastpath status block: disable/wipe any previous state,
 * build the e1x SB data (host address, owning PF, state machines for
 * the Rx and Tx segments) and write it to the FW.
 *
 * NOTE(review): the vfid and vf_valid parameters are ignored here -
 * vf_id is hard-coded to 0xff and vf_valid to false; confirm against
 * callers whether VF support is expected through this path.
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm  *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	igu_seg_id = HC_SEG_ACCESS_NORM;

	/* Make sure the SB starts from a clean, disabled state */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	memset(&sb_data_e1x, 0,
	       sizeof(struct hc_status_block_data_e1x));
	sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
	sb_data_e1x.common.p_func.vf_id = 0xff;
	sb_data_e1x.common.p_func.vf_valid = false;
	sb_data_e1x.common.p_func.vnic_id = BP_E1HVN(bp);
	sb_data_e1x.common.same_igu_sb_1b = true;
	sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
	sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
	hc_sm_p = sb_data_e1x.common.state_machine;
	hc_index_p = sb_data_e1x.index_data;
	sb_data_p = (u32 *)&sb_data_e1x;
	data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);


	/* Both Rx and Tx state machines point at the same IGU SB */
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
3648
3649static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3650 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003651{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003652 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003653 u8 ticks = usec / BNX2X_BTR;
3654
3655 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3656
3657 disable = disable ? 1 : (usec ? 0 : 1);
3658 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3659}
3660
3661static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3662 u16 tx_usec, u16 rx_usec)
3663{
3664 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3665 false, rx_usec);
3666 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3667 false, tx_usec);
3668}
/* Initialize the default (slowpath) status block: program the attention
 * segment (group masks read from the AEU, attention message address in
 * the HC) and the slowpath segment (host address, IGU ids, owning PF),
 * then enable interrupts on it.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	igu_sp_sb_index = DEF_SB_IGU_ID;
	igu_seg_id = HC_SEG_ACCESS_DEF;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* Cache the per-group AEU enable masks for the deassert handler */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[3] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
	}

	/* Tell the HC where attention messages for this port go */
	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);
	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* Start from a clean, disabled slowpath SB */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_E1HVN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
3728
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003729void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003730{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003731 int i;
3732
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003733 for_each_queue(bp, i)
3734 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
3735 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003736}
3737
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003738static void bnx2x_init_sp_ring(struct bnx2x *bp)
3739{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003740 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003741 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003742
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003743 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003744 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3745 bp->spq_prod_bd = bp->spq;
3746 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003747}
3748
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003749static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003750{
3751 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003752 for (i = 1; i <= NUM_EQ_PAGES; i++) {
3753 union event_ring_elem *elem =
3754 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003755
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003756 elem->next_page.addr.hi =
3757 cpu_to_le32(U64_HI(bp->eq_mapping +
3758 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
3759 elem->next_page.addr.lo =
3760 cpu_to_le32(U64_LO(bp->eq_mapping +
3761 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003762 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003763 bp->eq_cons = 0;
3764 bp->eq_prod = NUM_EQ_DESC;
3765 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003766}
3767
3768static void bnx2x_init_ind_table(struct bnx2x *bp)
3769{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003770 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003771 int i;
3772
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003773 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003774 return;
3775
Eilon Greenstein555f6c72009-02-12 08:36:11 +00003776 DP(NETIF_MSG_IFUP,
3777 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003778 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003779 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08003780 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00003781 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003782}
3783
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003784void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003785{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003786 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003787 u16 cl_id;
3788
Eilon Greenstein581ce432009-07-29 00:20:04 +00003789 /* All but management unicast packets should pass to the host as well */
3790 u32 llh_mask =
3791 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3792 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3793 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3794 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003795
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003796 switch (mode) {
3797 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003798 cl_id = BP_L_ID(bp);
3799 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003800 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003801
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003802 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003803 cl_id = BP_L_ID(bp);
3804 bnx2x_rxq_set_mac_filters(bp, cl_id,
3805 BNX2X_ACCEPT_UNICAST |
3806 BNX2X_ACCEPT_BROADCAST |
3807 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003808 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003809
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003810 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003811 cl_id = BP_L_ID(bp);
3812 bnx2x_rxq_set_mac_filters(bp, cl_id,
3813 BNX2X_ACCEPT_UNICAST |
3814 BNX2X_ACCEPT_BROADCAST |
3815 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003816 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003817
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003818 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003819 cl_id = BP_L_ID(bp);
3820 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
3821
Eilon Greenstein581ce432009-07-29 00:20:04 +00003822 /* pass management unicast packets as well */
3823 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003824 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00003825
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003826 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003827 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3828 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003829 }
3830
Eilon Greenstein581ce432009-07-29 00:20:04 +00003831 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003832 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
3833 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00003834 llh_mask);
3835
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003836 DP(NETIF_MSG_IFUP, "rx mode %d\n"
3837 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
3838 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
3839 bp->mac_filters.ucast_drop_all,
3840 bp->mac_filters.mcast_drop_all,
3841 bp->mac_filters.bcast_drop_all,
3842 bp->mac_filters.ucast_accept_all,
3843 bp->mac_filters.mcast_accept_all,
3844 bp->mac_filters.bcast_accept_all
3845 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003846
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003847 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003848}
3849
Eilon Greenstein471de712008-08-13 15:49:35 -07003850static void bnx2x_init_internal_common(struct bnx2x *bp)
3851{
3852 int i;
3853
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003854 if (!CHIP_IS_E1(bp)) {
3855
3856 /* xstorm needs to know whether to add ovlan to packets or not,
3857 * in switch-independent we'll write 0 to here... */
3858 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00003859 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003860 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00003861 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003862 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00003863 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003864 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00003865 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003866 }
3867
Eilon Greenstein471de712008-08-13 15:49:35 -07003868 /* Zero this manually as its initialization is
3869 currently missing in the initTool */
3870 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3871 REG_WR(bp, BAR_USTRORM_INTMEM +
3872 USTORM_AGG_DATA_OFFSET + i * 4, 0);
3873}
3874
/* Per-port internal memory init.
 * Intentionally empty here: nothing port-scoped needs initialization in this
 * code path (see bnx2x_init_internal for where it is invoked).
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
3879
Eilon Greenstein471de712008-08-13 15:49:35 -07003880static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3881{
3882 switch (load_code) {
3883 case FW_MSG_CODE_DRV_LOAD_COMMON:
3884 bnx2x_init_internal_common(bp);
3885 /* no break */
3886
3887 case FW_MSG_CODE_DRV_LOAD_PORT:
3888 bnx2x_init_internal_port(bp);
3889 /* no break */
3890
3891 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003892 /* internal memory per function is
3893 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07003894 break;
3895
3896 default:
3897 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3898 break;
3899 }
3900}
3901
/* Initialize one fastpath queue's status block and derived ids.
 * Computes the client/FW/IGU status-block identifiers for queue fp_idx,
 * programs the HW status block, and reads back the initial SB indices.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	/* queue index doubles as the connection id for this fastpath */
	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	/* FW/IGU SB ids are offset by CNIC_CONTEXT_USE to leave room for
	   the CNIC status block(s) — see the BCM_CNIC block in the caller */
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(ETH_MAX_RX_CLIENTS_E1X);
	/* init shortcut: cached ustorm offset for RX producer updates */
	fp->ustorm_rx_prods_offset =
		USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indicies */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	/* refresh the driver's cached copy of the SB indices */
	bnx2x_update_fpsb_idx(fp);
}
3931
/* Top-level NIC init: status blocks, rings, internal memory and interrupts.
 * Must run after HW init; the ordering below is deliberate — interrupts are
 * enabled only once every ring/SB is ready and all writes are flushed.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	/* per-queue status blocks first */
	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	/* dedicated status block for the CNIC (iSCSI/FCoE) interface */
	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
3974
3975/* end of nic init */
3976
3977/*
3978 * gzip service functions
3979 */
3980
3981static int bnx2x_gunzip_init(struct bnx2x *bp)
3982{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00003983 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3984 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003985 if (bp->gunzip_buf == NULL)
3986 goto gunzip_nomem1;
3987
3988 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3989 if (bp->strm == NULL)
3990 goto gunzip_nomem2;
3991
3992 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3993 GFP_KERNEL);
3994 if (bp->strm->workspace == NULL)
3995 goto gunzip_nomem3;
3996
3997 return 0;
3998
3999gunzip_nomem3:
4000 kfree(bp->strm);
4001 bp->strm = NULL;
4002
4003gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004004 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4005 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004006 bp->gunzip_buf = NULL;
4007
4008gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004009 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4010 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004011 return -ENOMEM;
4012}
4013
4014static void bnx2x_gunzip_end(struct bnx2x *bp)
4015{
4016 kfree(bp->strm->workspace);
4017
4018 kfree(bp->strm);
4019 bp->strm = NULL;
4020
4021 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004022 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4023 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004024 bp->gunzip_buf = NULL;
4025 }
4026}
4027
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004028static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004029{
4030 int n, rc;
4031
4032 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004033 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4034 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004035 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004036 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004037
4038 n = 10;
4039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004040#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004041
4042 if (zbuf[3] & FNAME)
4043 while ((zbuf[n++] != 0) && (n < len));
4044
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004045 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004046 bp->strm->avail_in = len - n;
4047 bp->strm->next_out = bp->gunzip_buf;
4048 bp->strm->avail_out = FW_BUF_SIZE;
4049
4050 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4051 if (rc != Z_OK)
4052 return rc;
4053
4054 rc = zlib_inflate(bp->strm, Z_FINISH);
4055 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004056 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4057 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004058
4059 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4060 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004061 netdev_err(bp->dev, "Firmware decompression error:"
4062 " gunzip_outlen (%d) not aligned\n",
4063 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004064 bp->gunzip_outlen >>= 2;
4065
4066 zlib_inflateEnd(bp->strm);
4067
4068 if (rc == Z_STREAM_END)
4069 return 0;
4070
4071 return rc;
4072}
4073
4074/* nic load/unload */
4075
4076/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004077 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004078 */
4079
4080/* send a NIG loopback debug packet */
4081static void bnx2x_lb_pckt(struct bnx2x *bp)
4082{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004083 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004084
4085 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004086 wb_write[0] = 0x55555555;
4087 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004088 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004089 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004090
4091 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092 wb_write[0] = 0x09000000;
4093 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004094 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004095 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004096}
4097
4098/* some of the internal memories
4099 * are not directly readable from the driver
4100 * to test them we send debug packets
4101 */
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
/* Self-test of internal memories via NIG loopback packets.
 * Returns 0 on success or a negative step code (-1..-4) identifying the
 * stage that failed. Timeouts are scaled up on FPGA/emulation platforms.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* slow platforms need proportionally longer polling windows */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4247
/* Unmask attention interrupts in each HW block.
 * Writing 0 to a *_INT_MASK register unmasks all of that block's attention
 * bits; non-zero values leave specific bits masked (noted inline). Lines
 * kept as comments document masks that are intentionally NOT touched.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
	/* NOTE(review): the 0x580000/0x480000 masks look platform-specific
	   (extra bit masked on FPGA) — meaning of individual bits not
	   derivable from this file */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4286
/* Per-block parity mask table consumed by enable_blocks_parity():
 * each entry names a *_PRTY_MASK register and the value to program into it.
 * 0x0 unmasks all parity bits for that block; non-zero values keep the
 * commented bit positions masked; 0xffffffff leaves the block fully masked.
 */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
	{HC_REG_HC_PRTY_MASK, 0xffffffff},
	{MISC_REG_MISC_PRTY_MASK, 0xffffffff},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
4320
4321static void enable_blocks_parity(struct bnx2x *bp)
4322{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004323 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004324
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004325 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004326 REG_WR(bp, bnx2x_parity_mask[i].addr,
4327 bnx2x_parity_mask[i].mask);
4328}
4329
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004330
/* Assert the common-block reset via the MISC reset registers.
 * NOTE(review): the 0xd3ffff7f / 0x1403 values are bitmasks selecting which
 * blocks to put into reset; per-bit meaning is defined by the register file,
 * not derivable from this source.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4338
Eilon Greenstein573f2032009-08-12 08:24:14 +00004339static void bnx2x_init_pxp(struct bnx2x *bp)
4340{
4341 u16 devctl;
4342 int r_order, w_order;
4343
4344 pci_read_config_word(bp->pdev,
4345 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4346 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4347 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4348 if (bp->mrrs == -1)
4349 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4350 else {
4351 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4352 r_order = bp->mrrs;
4353 }
4354
4355 bnx2x_init_pxp_arb(bp, r_order, w_order);
4356}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004357
4358static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4359{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004360 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004361 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004362 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004363
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004364 if (BP_NOMCP(bp))
4365 return;
4366
4367 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004368 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4369 SHARED_HW_CFG_FAN_FAILURE_MASK;
4370
4371 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4372 is_required = 1;
4373
4374 /*
4375 * The fan failure mechanism is usually related to the PHY type since
4376 * the power consumption of the board is affected by the PHY. Currently,
4377 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4378 */
4379 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4380 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004381 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004382 bnx2x_fan_failure_det_req(
4383 bp,
4384 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004385 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004386 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004387 }
4388
4389 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4390
4391 if (is_required == 0)
4392 return;
4393
4394 /* Fan failure is indicated by SPIO 5 */
4395 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4396 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4397
4398 /* set to active low mode */
4399 val = REG_RD(bp, MISC_REG_SPIO_INT);
4400 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004401 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004402 REG_WR(bp, MISC_REG_SPIO_INT, val);
4403
4404 /* enable interrupt to signal the IGU */
4405 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4406 val |= (1 << MISC_REGISTERS_SPIO_5);
4407 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4408}
4409
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004410static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004411{
4412 u32 val, i;
4413
4414 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
4415
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004416 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004417 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4418 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4419
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004420 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004421 if (CHIP_IS_E1H(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004422 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004423
4424 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
4425 msleep(30);
4426 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
4427
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004428 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004429 if (CHIP_IS_E1(bp)) {
4430 /* enable HW interrupt from PXP on USDM overflow
4431 bit 16 on INT_MASK_0 */
4432 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004433 }
4434
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004435 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004436 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004437
4438#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004439 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4440 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4441 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4442 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4443 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004444 /* make sure this value is 0 */
4445 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004446
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004447/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4448 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4449 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4450 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4451 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004452#endif
4453
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004454 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4455
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004456
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004457 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4458 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004459
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004460 /* let the HW do it's magic ... */
4461 msleep(100);
4462 /* finish PXP init */
4463 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4464 if (val != 1) {
4465 BNX2X_ERR("PXP2 CFG failed\n");
4466 return -EBUSY;
4467 }
4468 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4469 if (val != 1) {
4470 BNX2X_ERR("PXP2 RD_INIT failed\n");
4471 return -EBUSY;
4472 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004473
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004474 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4475 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004476
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004477 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004478
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004479 /* clean the DMAE memory */
4480 bp->dmae_ready = 1;
4481 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004482
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004483 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4484 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4485 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4486 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004487
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004488 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4489 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4490 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4491 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4492
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004493 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004494
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004495 /* QM queues pointers table */
4496 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00004497
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004498 /* soft reset pulse */
4499 REG_WR(bp, QM_REG_SOFT_RESET, 1);
4500 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004501
Michael Chan37b091b2009-10-10 13:46:55 +00004502#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004503 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004504#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004505
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004506 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004507 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
4508
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004509 if (!CHIP_REV_IS_SLOW(bp)) {
4510 /* enable hw interrupt from doorbell Q */
4511 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4512 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004513
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004514 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4515 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004516 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00004517#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07004518 /* set NIC mode */
4519 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00004520#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004521 if (CHIP_IS_E1H(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004522 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004523
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004524 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4525 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4526 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4527 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004528
Eilon Greensteinca003922009-08-12 22:53:28 -07004529 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4530 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4531 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4532 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004533
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004534 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4535 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4536 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4537 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004538
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004539 /* sync semi rtc */
4540 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4541 0x80000000);
4542 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4543 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004544
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004545 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4546 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4547 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004548
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004549 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07004550 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4551 REG_WR(bp, i, random32());
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004552 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004553#ifdef BCM_CNIC
4554 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4555 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4556 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4557 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4558 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4559 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4560 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4561 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4562 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4563 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4564#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004565 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004566
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004567 if (sizeof(union cdu_context) != 1024)
4568 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004569 dev_alert(&bp->pdev->dev, "please adjust the size "
4570 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00004571 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004572
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004573 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004574 val = (4 << 24) + (0 << 12) + 1024;
4575 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004576
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004577 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004578 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08004579 /* enable context validation interrupt from CFC */
4580 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4581
4582 /* set the thresholds to prevent CFC/CDU race */
4583 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004584
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004585 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4586 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004587
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004588 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004589 /* Reset PCIE errors for debug */
4590 REG_WR(bp, 0x2814, 0xffffffff);
4591 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004592
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004593 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004594 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004595 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004596 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004597
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004598 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004599 if (CHIP_IS_E1H(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004600 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
4601 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004602 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004603
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004604 if (CHIP_REV_IS_SLOW(bp))
4605 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004606
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004607 /* finish CFC init */
4608 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4609 if (val != 1) {
4610 BNX2X_ERR("CFC LL_INIT failed\n");
4611 return -EBUSY;
4612 }
4613 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4614 if (val != 1) {
4615 BNX2X_ERR("CFC AC_INIT failed\n");
4616 return -EBUSY;
4617 }
4618 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4619 if (val != 1) {
4620 BNX2X_ERR("CFC CAM_INIT failed\n");
4621 return -EBUSY;
4622 }
4623 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004624
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004625 /* read NIG statistic
4626 to see if this is our first up since powerup */
4627 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4628 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004629
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004630 /* do internal memory self test */
4631 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4632 BNX2X_ERR("internal mem self test failed\n");
4633 return -EBUSY;
4634 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004635
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004636 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004637 bp->common.shmem_base,
4638 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08004639
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004640 bnx2x_setup_fan_failure_detection(bp);
4641
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004642 /* clear PXP2 attentions */
4643 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004644
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004645 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004646 if (CHIP_PARITY_SUPPORTED(bp))
4647 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004648
Yaniv Rosner6bbca912008-08-13 15:57:28 -07004649 if (!BP_NOMCP(bp)) {
4650 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004651 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4652 bp->common.shmem2_base);
Yaniv Rosner6bbca912008-08-13 15:57:28 -07004653 bnx2x_release_phy_lock(bp);
4654 } else
4655 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4656
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004657 return 0;
4658}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004659
/*
 * Per-port hardware initialization, run for the PORT stage of the load
 * sequence.  Initializes every HW block at PORT0_STAGE/PORT1_STAGE for
 * this port, programs BRB pause thresholds from the port mode and MTU,
 * sets up PBF arbitration/credits, unmasks the fan-failure attention
 * bit when required, and finally resets the link.  Always returns 0.
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	/* all per-port block inits below are keyed off this stage id */
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);

	/* mask NIG interrupts for this port while its blocks come up */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* TIMERS block is only needed for the iSCSI/CNIC connection scan */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
	if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
		/* no pause for emulation and FPGA */
		low = 0;
		high = 513;
	} else {
		/* pause thresholds depend on MF mode, single/dual-port
		 * package and MTU; units follow the /256 math below */
		if (IS_MF(bp))
			low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
		else if (bp->dev->mtu > 4096) {
			if (bp->flags & ONE_PORT_FLAG)
				low = 160;
			else {
				val = bp->dev->mtu;
				/* (24*1024 + val*4)/256 */
				low = 96 + (val/64) + ((val % 64) ? 1 : 0);
			}
		} else
			low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
		high = low + 56;	/* 14*1024/256 */
	}
	REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
	REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);


	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	/* configure PBF to work without PAUSE mtu 9000 */
	REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

	/* update threshold */
	REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
	/* update init credit */
	REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

	/* probe changes: pulse INIT_P0 so PBF latches the new settings */
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
	msleep(5);
	REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);

#ifdef BCM_CNIC
	/* searcher block is only used by the CNIC offload path */
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* E1 clears the HC edge registers here; E1H does it in the
		 * function stage instead */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (CHIP_IS_E1H(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		{
			/* disable link-level flow control, enable PAUSE */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	/* if the board reports fan failure via SPIO5, unmask that AEU bit */
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
4803
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004804static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4805{
4806 int reg;
4807
4808 if (CHIP_IS_E1H(bp))
4809 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4810 else /* E1 */
4811 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4812
4813 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4814}
4815
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004816static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004817{
4818 int port = BP_PORT(bp);
4819 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004820 struct bnx2x_ilt *ilt = BP_ILT(bp);
4821 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00004822 u32 addr, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004823 int i;
4824
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004825 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004826
Eilon Greenstein8badd272009-02-12 08:36:15 +00004827 /* set MSI reconfigure capability */
4828 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4829 val = REG_RD(bp, addr);
4830 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4831 REG_WR(bp, addr, val);
4832
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004833 ilt = BP_ILT(bp);
4834 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004835
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004836 for (i = 0; i < L2_ILT_LINES(bp); i++) {
4837 ilt->lines[cdu_ilt_start + i].page =
4838 bp->context.vcxt + (ILT_PAGE_CIDS * i);
4839 ilt->lines[cdu_ilt_start + i].page_mapping =
4840 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
4841 /* cdu ilt pages are allocated manually so there's no need to
4842 set the size */
4843 }
4844 bnx2x_ilt_init_op(bp, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00004845#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004846 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00004847
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004848 /* T1 hash bits value determines the T1 number of entries */
4849 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00004850#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004851
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004852#ifndef BCM_CNIC
4853 /* set NIC mode */
4854 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4855#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004856
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004857 bp->dmae_ready = 1;
4858
4859 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
4860
4861 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4862 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4863 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4864 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4865 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4866 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4867 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4868 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4869 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4870
4871 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
4872 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
4873 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
4874 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
4875 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
4876 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
4877 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
4878 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
4879 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
4880 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
4881 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
4882 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
4883 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
4884
4885 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
4886
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004887 if (IS_MF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004888 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004889 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004890 }
4891
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004892 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
4893
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004894 /* HC init per function */
4895 if (CHIP_IS_E1H(bp)) {
4896 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4897
4898 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4899 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4900 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004901 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004902
Eliezer Tamirc14423f2008-02-28 11:49:42 -08004903 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004904 REG_WR(bp, 0x2114, 0xffffffff);
4905 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004906
4907 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
4908 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
4909 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
4910 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
4911 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
4912 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
4913
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00004914 bnx2x_phy_probe(&bp->link_params);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004915 return 0;
4916}
4917
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004918int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004919{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004920 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004921
4922 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
4923 BP_FUNC(bp), load_code);
4924
4925 bp->dmae_ready = 0;
4926 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00004927 rc = bnx2x_gunzip_init(bp);
4928 if (rc)
4929 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004930
4931 switch (load_code) {
4932 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004933 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004934 if (rc)
4935 goto init_hw_err;
4936 /* no break */
4937
4938 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004939 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004940 if (rc)
4941 goto init_hw_err;
4942 /* no break */
4943
4944 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004945 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004946 if (rc)
4947 goto init_hw_err;
4948 break;
4949
4950 default:
4951 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4952 break;
4953 }
4954
4955 if (!BP_NOMCP(bp)) {
4956 int func = BP_FUNC(bp);
4957
4958 bp->fw_drv_pulse_wr_seq =
4959 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4960 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00004961 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4962 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004963
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004964init_hw_err:
4965 bnx2x_gunzip_end(bp);
4966
4967 return rc;
4968}
4969
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004970void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004971{
4972
4973#define BNX2X_PCI_FREE(x, y, size) \
4974 do { \
4975 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004976 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004977 x = NULL; \
4978 y = 0; \
4979 } \
4980 } while (0)
4981
4982#define BNX2X_FREE(x) \
4983 do { \
4984 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004985 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004986 x = NULL; \
4987 } \
4988 } while (0)
4989
4990 int i;
4991
4992 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004993 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004994 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004995 /* status blocks */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004996 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004997 bnx2x_fp(bp, i, status_blk_mapping),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004998 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004999 }
5000 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005001 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005002
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005003 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005004 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5005 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5006 bnx2x_fp(bp, i, rx_desc_mapping),
5007 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5008
5009 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5010 bnx2x_fp(bp, i, rx_comp_mapping),
5011 sizeof(struct eth_fast_path_rx_cqe) *
5012 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005013
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005014 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005015 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005016 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5017 bnx2x_fp(bp, i, rx_sge_mapping),
5018 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5019 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005020 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005021 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005022
5023 /* fastpath tx rings: tx_buf tx_desc */
5024 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5025 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5026 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005027 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005028 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005029 /* end of fastpath */
5030
5031 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005032 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005033
5034 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005035 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005036
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005037 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5038 bp->context.size);
5039
5040 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5041
5042 BNX2X_FREE(bp->ilt->lines);
Michael Chan37b091b2009-10-10 13:46:55 +00005043#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005044
5045 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5046 sizeof(struct host_hc_status_block_e1x));
5047 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005048#endif
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005049 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005050
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005051 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5052 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5053
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005054#undef BNX2X_PCI_FREE
5055#undef BNX2X_KFREE
5056}
5057
/*
 * Allocate all driver memory: per-queue status blocks and Rx/Tx/SGE
 * rings, CNIC structures when built in, the default status block,
 * slowpath buffer, CDU context memory, ILT lines, the slow path ring
 * (SPQ) and the event queue (EQ).  Both helper macros jump to
 * alloc_mem_err on failure, where everything allocated so far is
 * released via bnx2x_free_mem() and -ENOMEM is returned; on full
 * success returns 0.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{

/* DMA-coherent allocation; x = virtual address, y = dma handle out-param.
 * Zeroed explicitly and bails to alloc_mem_err on failure. */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* plain zeroed kernel allocation; bails to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;
	void *p;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* back-pointer from fastpath struct to the main bp */
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks: allocate, then derive the index-value and
		 * running-index shortcut pointers into the same block */
		BNX2X_PCI_ALLOC(p,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		bnx2x_fp(bp, i, status_blk.e1x_sb) =
			(struct host_hc_status_block_e1x *)p;

		bnx2x_fp(bp, i, sb_index_values) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.index_values);
		bnx2x_fp(bp, i, sb_running_index) = (__le16 *)
			(bnx2x_fp(bp, i, status_blk.e1x_sb)->sb.running_index);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
			sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* context memory size is one cdu_context per L2 connection id */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			 bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* bnx2x_free_mem() tolerates NULLs, so a partial allocation is ok */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
5170
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005171/*
5172 * Init service functions
5173 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005174int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005175{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005176 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005177
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005178 /* Wait for completion */
5179 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5180 WAIT_RAMROD_COMMON);
5181}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005182
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005183int bnx2x_func_stop(struct bnx2x *bp)
5184{
5185 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005186
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005187 /* Wait for completion */
5188 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
5189 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005190}
5191
Michael Chane665bfd2009-10-10 13:46:54 +00005192/**
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005193 * Sets a MAC in a CAM for a few L2 Clients for E1x chip
Michael Chane665bfd2009-10-10 13:46:54 +00005194 *
5195 * @param bp driver descriptor
5196 * @param set set or clear an entry (1 or 0)
5197 * @param mac pointer to a buffer containing a MAC
5198 * @param cl_bit_vec bit vector of clients to register a MAC for
5199 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005200 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00005201 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005202static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
5203 u32 cl_bit_vec, u8 cam_offset,
5204 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005205{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005206 struct mac_configuration_cmd *config =
5207 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
5208 int ramrod_flags = WAIT_RAMROD_COMMON;
5209
5210 bp->set_mac_pending = 1;
5211 smp_wmb();
5212
5213 config->hdr.length = 1 + (is_bcast ? 1 : 0);
5214 config->hdr.offset = cam_offset;
5215 config->hdr.client_id = 0xff;
5216 config->hdr.reserved1 = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005217
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005218 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00005219 config->hdr.offset = cam_offset;
5220 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005221 config->hdr.reserved1 = 0;
5222
5223 /* primary MAC */
5224 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005225 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005226 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005227 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005228 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00005229 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07005230 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00005231 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005232 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005233 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005234 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005235 SET_FLAG(config->config_table[0].flags,
5236 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5237 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005238 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005239 SET_FLAG(config->config_table[0].flags,
5240 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5241 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005242
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005243 if (is_bcast)
5244 SET_FLAG(config->config_table[0].flags,
5245 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
5246
5247 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07005248 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005249 config->config_table[0].msb_mac_addr,
5250 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005251 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005252
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005253 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005254 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005255 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
5256
5257 /* Wait for a completion */
5258 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005259}
5260
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005261
/**
 * Wait for *state_p to reach the requested state.
 *
 * @param bp	driver handle
 * @param state	value *state_p must reach for success
 * @param idx	fastpath index the reply may arrive on (0 = default queue)
 * @param state_p	pointer to the state variable; updated elsewhere
 *			when the ramrod completes (see the mb() comment)
 * @param flags	WAIT_RAMROD_POLL to service the EQ/RX rings by hand
 *		(used when interrupts are not available);
 *		WAIT_RAMROD_COMMON when the completion arrives over the
 *		event queue rather than a client RCQ
 *
 * @return 0 on success, -EIO if bp->panic was raised while waiting,
 *	   -EBUSY on timeout (~5000 iterations of msleep(1))
 */
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	/* msleep() below may sleep - must not be called in atomic context */
	might_sleep();
	while (cnt--) {
		if (poll) {
			/* no interrupts: drain the completion source by hand */
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the driver paniced meanwhile */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
5312
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005313u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00005314{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005315 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
Michael Chane665bfd2009-10-10 13:46:54 +00005316}
5317
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005318void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00005319{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005320 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
5321 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
5322
5323 /* networking MAC */
5324 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
5325 (1 << bp->fp->cl_id), cam_offset , 0);
5326
5327 if (CHIP_IS_E1(bp)) {
5328 /* broadcast MAC */
5329 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
5330 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
5331 }
5332}
/* Program the netdev's multicast list into the E1 CAM starting at
 * @offset.  Entries left over from a previous, longer list are
 * explicitly invalidated.  Posts the SET_MAC ramrod but does NOT wait
 * for its completion (the caller is responsible for synchronization).
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac - the HSI wants each 16-bit word byte-swapped */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* hdr.length is read before it is rewritten below, so it still
	 * holds the length of the previously programmed list; any surplus
	 * tail entries must be invalidated.
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* mark pending before posting; cleared by the completion handler */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
5392static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
5393{
5394 int i;
5395 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
5396 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
5397 int ramrod_flags = WAIT_RAMROD_COMMON;
5398
5399 bp->set_mac_pending = 1;
5400 smp_wmb();
5401
5402 for (i = 0; i < config_cmd->hdr.length; i++)
5403 SET_FLAG(config_cmd->config_table[i].flags,
5404 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
5405 T_ETH_MAC_COMMAND_INVALIDATE);
5406
5407 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
5408 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00005409
5410 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005411 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
5412 ramrod_flags);
5413
Michael Chane665bfd2009-10-10 13:46:54 +00005414}
5415
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005416
Michael Chan993ac7b2009-10-10 13:46:56 +00005417#ifdef BCM_CNIC
5418/**
5419 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
5420 * MAC(s). This function will wait until the ramdord completion
5421 * returns.
5422 *
5423 * @param bp driver handle
5424 * @param set set or clear the CAM entry
5425 *
5426 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
5427 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005428int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00005429{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005430 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
5431 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
5432 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
5433 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00005434
5435 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005436 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
5437 cam_offset, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00005438 return 0;
5439}
5440#endif
5441
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005442static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
5443 struct bnx2x_client_init_params *params,
5444 u8 activate,
5445 struct client_init_ramrod_data *data)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005446{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005447 /* Clear the buffer */
5448 memset(data, 0, sizeof(*data));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005449
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005450 /* general */
5451 data->general.client_id = params->rxq_params.cl_id;
5452 data->general.statistics_counter_id = params->rxq_params.stat_id;
5453 data->general.statistics_en_flg =
5454 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
5455 data->general.activate_flg = activate;
5456 data->general.sp_client_id = params->rxq_params.spcl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005457
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005458 /* Rx data */
5459 data->rx.tpa_en_flg =
5460 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
5461 data->rx.vmqueue_mode_en_flg = 0;
5462 data->rx.cache_line_alignment_log_size =
5463 params->rxq_params.cache_line_log;
5464 data->rx.enable_dynamic_hc =
5465 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
5466 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
5467 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
5468 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
5469
5470 /* We don't set drop flags */
5471 data->rx.drop_ip_cs_err_flg = 0;
5472 data->rx.drop_tcp_cs_err_flg = 0;
5473 data->rx.drop_ttl0_flg = 0;
5474 data->rx.drop_udp_cs_err_flg = 0;
5475
5476 data->rx.inner_vlan_removal_enable_flg =
5477 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
5478 data->rx.outer_vlan_removal_enable_flg =
5479 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
5480 data->rx.status_block_id = params->rxq_params.fw_sb_id;
5481 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
5482 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
5483 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
5484 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
5485 data->rx.bd_page_base.lo =
5486 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
5487 data->rx.bd_page_base.hi =
5488 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
5489 data->rx.sge_page_base.lo =
5490 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
5491 data->rx.sge_page_base.hi =
5492 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
5493 data->rx.cqe_page_base.lo =
5494 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
5495 data->rx.cqe_page_base.hi =
5496 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
5497 data->rx.is_leading_rss =
5498 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
5499 data->rx.is_approx_mcast = data->rx.is_leading_rss;
5500
5501 /* Tx data */
5502 data->tx.enforce_security_flg = 0; /* VF specific */
5503 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
5504 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
5505 data->tx.mtu = 0; /* VF specific */
5506 data->tx.tx_bd_page_base.lo =
5507 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
5508 data->tx.tx_bd_page_base.hi =
5509 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
5510
5511 /* flow control data */
5512 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
5513 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
5514 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
5515 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
5516 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
5517 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
5518 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
5519
5520 data->fc.safc_group_num = params->txq_params.cos;
5521 data->fc.safc_group_en_flg =
5522 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
5523 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
5524}
5525
5526static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
5527{
5528 /* ustorm cxt validation */
5529 cxt->ustorm_ag_context.cdu_usage =
5530 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
5531 ETH_CONNECTION_TYPE);
5532 /* xcontext validation */
5533 cxt->xstorm_ag_context.cdu_reserved =
5534 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
5535 ETH_CONNECTION_TYPE);
5536}
5537
/* Bring up an L2 client in the FW: program host-coalescing rates for
 * its Tx and Rx status-block indices, stamp the connection context,
 * zero its statistics, fill the CLIENT_SETUP ramrod data, post the
 * ramrod and wait for the state transition.
 *
 * @param bp	driver handle
 * @param params	queue/pause/ramrod parameters for the client
 * @param activate	activate_flg forwarded to the ramrod data
 * @param data	DMA-visible ramrod data buffer to fill and post
 * @param data_mapping	DMA address of @data
 *
 * @return result of bnx2x_wait_ramrod() (0 on success)
 */
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	/* hc_rate is in interrupts/sec; convert to a usec period (0 = off) */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	/* mark the fastpath as opening before the ramrod is posted */
	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
				 params->ramrod_params.index,
				 params->ramrod_params.pstate,
				 ramrod_flags);
	return rc;
}
5604
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005605void bnx2x_set_num_queues_msix(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005606{
Eilon Greensteinca003922009-08-12 22:53:28 -07005607
5608 switch (bp->multi_mode) {
5609 case ETH_RSS_MODE_DISABLED:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005610 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07005611 break;
5612
5613 case ETH_RSS_MODE_REGULAR:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005614 if (num_queues)
5615 bp->num_queues = min_t(u32, num_queues,
5616 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07005617 else
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005618 bp->num_queues = min_t(u32, num_online_cpus(),
5619 BNX2X_MAX_QUEUES(bp));
Eilon Greensteinca003922009-08-12 22:53:28 -07005620 break;
5621
5622
5623 default:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005624 bp->num_queues = 1;
Eilon Greensteinca003922009-08-12 22:53:28 -07005625 break;
5626 }
Eilon Greensteinca003922009-08-12 22:53:28 -07005627}
5628
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005629void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005630{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005631 struct ilt_client_info *ilt_client;
5632 struct bnx2x_ilt *ilt = BP_ILT(bp);
5633 u16 line = 0;
5634
5635 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
5636 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
5637
5638 /* CDU */
5639 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
5640 ilt_client->client_num = ILT_CLIENT_CDU;
5641 ilt_client->page_size = CDU_ILT_PAGE_SZ;
5642 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
5643 ilt_client->start = line;
5644 line += L2_ILT_LINES(bp);
5645#ifdef BCM_CNIC
5646 line += CNIC_ILT_LINES;
5647#endif
5648 ilt_client->end = line - 1;
5649
5650 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
5651 "flags 0x%x, hw psz %d\n",
5652 ilt_client->start,
5653 ilt_client->end,
5654 ilt_client->page_size,
5655 ilt_client->flags,
5656 ilog2(ilt_client->page_size >> 12));
5657
5658 /* QM */
5659 if (QM_INIT(bp->qm_cid_count)) {
5660 ilt_client = &ilt->clients[ILT_CLIENT_QM];
5661 ilt_client->client_num = ILT_CLIENT_QM;
5662 ilt_client->page_size = QM_ILT_PAGE_SZ;
5663 ilt_client->flags = 0;
5664 ilt_client->start = line;
5665
5666 /* 4 bytes for each cid */
5667 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
5668 QM_ILT_PAGE_SZ);
5669
5670 ilt_client->end = line - 1;
5671
5672 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
5673 "flags 0x%x, hw psz %d\n",
5674 ilt_client->start,
5675 ilt_client->end,
5676 ilt_client->page_size,
5677 ilt_client->flags,
5678 ilog2(ilt_client->page_size >> 12));
5679
5680 }
5681 /* SRC */
5682 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
5683#ifdef BCM_CNIC
5684 ilt_client->client_num = ILT_CLIENT_SRC;
5685 ilt_client->page_size = SRC_ILT_PAGE_SZ;
5686 ilt_client->flags = 0;
5687 ilt_client->start = line;
5688 line += SRC_ILT_LINES;
5689 ilt_client->end = line - 1;
5690
5691 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
5692 "flags 0x%x, hw psz %d\n",
5693 ilt_client->start,
5694 ilt_client->end,
5695 ilt_client->page_size,
5696 ilt_client->flags,
5697 ilog2(ilt_client->page_size >> 12));
5698
5699#else
5700 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5701#endif
5702
5703 /* TM */
5704 ilt_client = &ilt->clients[ILT_CLIENT_TM];
5705#ifdef BCM_CNIC
5706 ilt_client->client_num = ILT_CLIENT_TM;
5707 ilt_client->page_size = TM_ILT_PAGE_SZ;
5708 ilt_client->flags = 0;
5709 ilt_client->start = line;
5710 line += TM_ILT_LINES;
5711 ilt_client->end = line - 1;
5712
5713 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
5714 "flags 0x%x, hw psz %d\n",
5715 ilt_client->start,
5716 ilt_client->end,
5717 ilt_client->page_size,
5718 ilt_client->flags,
5719 ilog2(ilt_client->page_size >> 12));
5720
5721#else
5722 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
5723#endif
5724}
5725int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
5726 int is_leading)
5727{
5728 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005729 int rc;
5730
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005731 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
5732 IGU_INT_ENABLE, 0);
5733
5734 params.ramrod_params.pstate = &fp->state;
5735 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
5736 params.ramrod_params.index = fp->index;
5737 params.ramrod_params.cid = fp->cid;
5738
5739 if (is_leading)
5740 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
5741
5742 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
5743
5744 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
5745
5746 rc = bnx2x_setup_fw_client(bp, &params, 1,
5747 bnx2x_sp(bp, client_init_data),
5748 bnx2x_sp_mapping(bp, client_init_data));
5749 return rc;
5750}
5751
5752int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
5753{
5754 int rc;
5755
5756 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
5757
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005758 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005759 *p->pstate = BNX2X_FP_STATE_HALTING;
5760 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
5761 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005762
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005763 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005764 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
5765 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005766 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005767 return rc;
5768
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005769 *p->pstate = BNX2X_FP_STATE_TERMINATING;
5770 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
5771 p->cl_id, 0);
5772 /* Wait for completion */
5773 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
5774 p->pstate, poll_flag);
5775 if (rc) /* timeout */
5776 return rc;
5777
5778
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005779 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005780 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005781
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005782 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005783 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
5784 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005785 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005786}
5787
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005788static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005789{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005790 struct bnx2x_client_ramrod_params client_stop = {0};
5791 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005792
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005793 client_stop.index = index;
5794 client_stop.cid = fp->cid;
5795 client_stop.cl_id = fp->cl_id;
5796 client_stop.pstate = &(fp->state);
5797 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005798
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005799 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005800}
5801
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005802
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005803static void bnx2x_reset_func(struct bnx2x *bp)
5804{
5805 int port = BP_PORT(bp);
5806 int func = BP_FUNC(bp);
5807 int base, i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005808 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
5809 offsetof(struct hc_status_block_data_e1x, common);
5810 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
5811 int pfid_offset = offsetof(struct pci_entity, pf_id);
5812
5813 /* Disable the function in the FW */
5814 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
5815 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
5816 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
5817 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
5818
5819 /* FP SBs */
5820 for_each_queue(bp, i) {
5821 struct bnx2x_fastpath *fp = &bp->fp[i];
5822 REG_WR8(bp,
5823 BAR_CSTRORM_INTMEM +
5824 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
5825 + pfunc_offset_fp + pfid_offset,
5826 HC_FUNCTION_DISABLED);
5827 }
5828
5829 /* SP SB */
5830 REG_WR8(bp,
5831 BAR_CSTRORM_INTMEM +
5832 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
5833 pfunc_offset_sp + pfid_offset,
5834 HC_FUNCTION_DISABLED);
5835
5836
5837 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
5838 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
5839 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08005840
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005841 /* Configure IGU */
5842 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5843 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5844
Michael Chan37b091b2009-10-10 13:46:55 +00005845#ifdef BCM_CNIC
5846 /* Disable Timer scan */
5847 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5848 /*
5849 * Wait for at least 10ms and up to 2 second for the timers scan to
5850 * complete
5851 */
5852 for (i = 0; i < 200; i++) {
5853 msleep(10);
5854 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5855 break;
5856 }
5857#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005858 /* Clear ILT */
5859 base = FUNC_ILT_BASE(func);
5860 for (i = base; i < base + ILT_PER_FUNC; i++)
5861 bnx2x_ilt_wr(bp, i, 0);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005862
5863 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005864}
5865
/* Quiesce this NIG port: mask its interrupts, stop packet reception
 * into the BRB, mask the AEU attentions and verify the BRB has
 * drained (only a diagnostic message is emitted if it has not).
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* mask all NIG interrupts for this port */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
5891
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005892static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5893{
5894 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
5895 BP_FUNC(bp), reset_code);
5896
5897 switch (reset_code) {
5898 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5899 bnx2x_reset_port(bp);
5900 bnx2x_reset_func(bp);
5901 bnx2x_reset_common(bp);
5902 break;
5903
5904 case FW_MSG_CODE_DRV_UNLOAD_PORT:
5905 bnx2x_reset_port(bp);
5906 bnx2x_reset_func(bp);
5907 break;
5908
5909 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5910 bnx2x_reset_func(bp);
5911 break;
5912
5913 default:
5914 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5915 break;
5916 }
5917}
5918
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005919void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005920{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005921 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005922 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005923 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005924
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005925 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005926 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08005927 struct bnx2x_fastpath *fp = &bp->fp[i];
5928
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005929 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08005930 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005931
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005932 if (!cnt) {
5933 BNX2X_ERR("timeout waiting for queue[%d]\n",
5934 i);
5935#ifdef BNX2X_STOP_ON_ERROR
5936 bnx2x_panic();
5937 return -EBUSY;
5938#else
5939 break;
5940#endif
5941 }
5942 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005943 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005944 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08005945 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005946 /* Give HW time to discard old tx messages */
5947 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005948
Yitchak Gertner65abd742008-08-25 15:26:24 -07005949 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005950 /* invalidate mc list,
5951 * wait and poll (interrupts are off)
5952 */
5953 bnx2x_invlidate_e1_mc_list(bp);
5954 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005955
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005956 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07005957 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5958
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005959 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07005960
5961 for (i = 0; i < MC_HASH_SIZE; i++)
5962 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5963 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005964
Michael Chan993ac7b2009-10-10 13:46:56 +00005965#ifdef BCM_CNIC
5966 /* Clear iSCSI L2 MAC */
5967 mutex_lock(&bp->cnic_mutex);
5968 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5969 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5970 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5971 }
5972 mutex_unlock(&bp->cnic_mutex);
5973#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07005974
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005975 if (unload_mode == UNLOAD_NORMAL)
5976 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005977
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005978 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005979 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005980
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00005981 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005982 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005983 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005984 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005985 /* The mac address is written to entries 1-4 to
5986 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07005987 u8 entry = (BP_E1HVN(bp) + 1)*8;
5988
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005989 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005990 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005991
5992 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5993 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07005994 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005995
5996 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08005997
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005998 } else
5999 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6000
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006001 /* Close multi and leading connections
6002 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006003 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006004
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006005 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006006#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006007 return;
6008#else
6009 goto unload_error;
6010#endif
6011
6012 rc = bnx2x_func_stop(bp);
6013 if (rc) {
6014 BNX2X_ERR("Function stop failed!\n");
6015#ifdef BNX2X_STOP_ON_ERROR
6016 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006017#else
6018 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006019#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006020 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006021#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006022unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006023#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006024 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006025 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006026 else {
Eilon Greensteinf5372252009-02-12 08:38:30 +00006027 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006028 load_count[0], load_count[1], load_count[2]);
6029 load_count[0]--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006030 load_count[1 + port]--;
Eilon Greensteinf5372252009-02-12 08:38:30 +00006031 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006032 load_count[0], load_count[1], load_count[2]);
6033 if (load_count[0] == 0)
6034 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006035 else if (load_count[1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006036 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6037 else
6038 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6039 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006040
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006041 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6042 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6043 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006044
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006045 /* Disable HW interrupts, NAPI */
6046 bnx2x_netif_stop(bp, 1);
6047
6048 /* Release IRQs */
6049 bnx2x_free_irq(bp, false);
6050
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006051 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006052 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006053
6054 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006055 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006056 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006057
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006058}
6059
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006060void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006061{
6062 u32 val;
6063
6064 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6065
6066 if (CHIP_IS_E1(bp)) {
6067 int port = BP_PORT(bp);
6068 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6069 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6070
6071 val = REG_RD(bp, addr);
6072 val &= ~(0x300);
6073 REG_WR(bp, addr, val);
6074 } else if (CHIP_IS_E1H(bp)) {
6075 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6076 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6077 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6078 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6079 }
6080}
6081
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006082
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006083/* Close gates #2, #3 and #4: */
6084static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6085{
6086 u32 val, addr;
6087
6088 /* Gates #2 and #4a are closed/opened for "not E1" only */
6089 if (!CHIP_IS_E1(bp)) {
6090 /* #4 */
6091 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
6092 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
6093 close ? (val | 0x1) : (val & (~(u32)1)));
6094 /* #2 */
6095 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
6096 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
6097 close ? (val | 0x1) : (val & (~(u32)1)));
6098 }
6099
6100 /* #3 */
6101 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
6102 val = REG_RD(bp, addr);
6103 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
6104
6105 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
6106 close ? "closing" : "opening");
6107 mmiowb();
6108}
6109
6110#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6111
6112static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6113{
6114 /* Do some magic... */
6115 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6116 *magic_val = val & SHARED_MF_CLP_MAGIC;
6117 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6118}
6119
/* Restore the value of the `magic' bit.
 *
 * @param bp driver handle.
 * @param magic_val Old value of the `magic' bit.
 */
6125static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
6126{
6127 /* Restore the `magic' bit value... */
6128 /* u32 val = SHMEM_RD(bp, mf_cfg.shared_mf_config.clp_mb);
6129 SHMEM_WR(bp, mf_cfg.shared_mf_config.clp_mb,
6130 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val); */
6131 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6132 MF_CFG_WR(bp, shared_mf_config.clp_mb,
6133 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
6134}
6135
6136/* Prepares for MCP reset: takes care of CLP configurations.
6137 *
6138 * @param bp
6139 * @param magic_val Old value of 'magic' bit.
6140 */
6141static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
6142{
6143 u32 shmem;
6144 u32 validity_offset;
6145
6146 DP(NETIF_MSG_HW, "Starting\n");
6147
6148 /* Set `magic' bit in order to save MF config */
6149 if (!CHIP_IS_E1(bp))
6150 bnx2x_clp_reset_prep(bp, magic_val);
6151
6152 /* Get shmem offset */
6153 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
6154 validity_offset = offsetof(struct shmem_region, validity_map[0]);
6155
6156 /* Clear validity map flags */
6157 if (shmem > 0)
6158 REG_WR(bp, shmem + validity_offset, 0);
6159}
6160
6161#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
6162#define MCP_ONE_TIMEOUT 100 /* 100 ms */
6163
6164/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
6165 * depending on the HW type.
6166 *
6167 * @param bp
6168 */
6169static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
6170{
6171 /* special handling for emulation and FPGA,
6172 wait 10 times longer */
6173 if (CHIP_REV_IS_SLOW(bp))
6174 msleep(MCP_ONE_TIMEOUT*10);
6175 else
6176 msleep(MCP_ONE_TIMEOUT);
6177}
6178
/* Wait for the MCP to come back up after a reset and verify that the
 * shared-memory validity signature re-appears (i.e. the MCP is alive).
 * Always restores the `magic' bit saved by bnx2x_reset_mcp_prep().
 *
 * @param bp
 * @param magic_val old value of the `magic' bit to restore on exit
 *
 * @return 0 on success, -ENOTTY if shmem is missing or the MCP did not
 *	   re-validate it within MCP_TIMEOUT.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* Give the MCP a head start before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	/* val holds the last signature read in the loop above */
	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value (not-E1 only, mirroring prep) */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
6230
6231static void bnx2x_pxp_prep(struct bnx2x *bp)
6232{
6233 if (!CHIP_IS_E1(bp)) {
6234 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
6235 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
6236 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
6237 mmiowb();
6238 }
6239}
6240
6241/*
6242 * Reset the whole chip except for:
6243 * - PCIE core
6244 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
6245 * one reset bit)
6246 * - IGU
6247 * - MISC (including AEU)
6248 * - GRC
6249 * - RBCN, RBCP
6250 */
6251static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
6252{
6253 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
6254
6255 not_reset_mask1 =
6256 MISC_REGISTERS_RESET_REG_1_RST_HC |
6257 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
6258 MISC_REGISTERS_RESET_REG_1_RST_PXP;
6259
6260 not_reset_mask2 =
6261 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
6262 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
6263 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
6264 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
6265 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
6266 MISC_REGISTERS_RESET_REG_2_RST_GRC |
6267 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
6268 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
6269
6270 reset_mask1 = 0xffffffff;
6271
6272 if (CHIP_IS_E1(bp))
6273 reset_mask2 = 0xffff;
6274 else
6275 reset_mask2 = 0x1ffff;
6276
6277 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
6278 reset_mask1 & (~not_reset_mask1));
6279 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6280 reset_mask2 & (~not_reset_mask2));
6281
6282 barrier();
6283 mmiowb();
6284
6285 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
6286 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
6287 mmiowb();
6288}
6289
/* "Process kill" recovery flow: wait for outstanding PXP reads to
 * drain, close HW gates #2-#4, reset the chip (sparing PCIE/PXP/IGU/
 * MISC/GRC, see bnx2x_process_kill_chip_reset()), then bring the MCP
 * back up and re-open the gates.
 *
 * @return 0 on success, -EAGAIN if the Tetris buffer never drained or
 *	   the MCP failed to come back after the reset.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* NOTE(review): 0x7e/0xa0 are the expected idle counter
		 * values - presumably HW-defined; verify against chip docs */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
		   " are still"
		   " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
		   " port_is_idle_0=0x%08x,"
		   " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
		   sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
		   pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP - saves the `magic' bit state into val */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP - restores the `magic' bit from val */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
6371
6372static int bnx2x_leader_reset(struct bnx2x *bp)
6373{
6374 int rc = 0;
6375 /* Try to recover after the failure */
6376 if (bnx2x_process_kill(bp)) {
6377 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
6378 bp->dev->name);
6379 rc = -EAGAIN;
6380 goto exit_leader_reset;
6381 }
6382
6383 /* Clear "reset is in progress" bit and update the driver state */
6384 bnx2x_set_reset_done(bp);
6385 bp->recovery_state = BNX2X_RECOVERY_DONE;
6386
6387exit_leader_reset:
6388 bp->is_leader = 0;
6389 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
6390 smp_wmb();
6391 return rc;
6392}
6393
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006394/* Assumption: runs under rtnl lock. This together with the fact
6395 * that it's called only from bnx2x_reset_task() ensure that it
6396 * will never be called when netif_running(bp->dev) is false.
6397 */
/* Parity-error recovery state machine.  Drives bp->recovery_state from
 * INIT (unload this function, elect a leader) through WAIT (leader waits
 * for all other functions to unload, then performs the chip reset) to
 * DONE.  Re-schedules bp->reset_task and returns whenever it must wait;
 * the while(1)/break pattern restarts the switch after a state change.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
			/* every path above returns or breaks, so falling
			 * through into "default" is unreachable */
		default:
			return;
		}
	}
}
6497
6498/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
6499 * scheduled on a general queue in order to prevent a dead lock.
6500 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006501static void bnx2x_reset_task(struct work_struct *work)
6502{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006503 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006504
6505#ifdef BNX2X_STOP_ON_ERROR
6506 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
6507 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006508 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006509 return;
6510#endif
6511
6512 rtnl_lock();
6513
6514 if (!netif_running(bp->dev))
6515 goto reset_task_exit;
6516
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006517 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
6518 bnx2x_parity_recover(bp);
6519 else {
6520 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
6521 bnx2x_nic_load(bp, LOAD_NORMAL);
6522 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006523
6524reset_task_exit:
6525 rtnl_unlock();
6526}
6527
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006528/* end of nic load/unload */
6529
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006530/*
6531 * Init service functions
6532 */
6533
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006534static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
6535{
6536 switch (func) {
6537 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
6538 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
6539 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
6540 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
6541 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
6542 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
6543 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
6544 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
6545 default:
6546 BNX2X_ERR("Unsupported function index: %d\n", func);
6547 return (u32)(-1);
6548 }
6549}
6550
/* E1H: disable interrupts (via bnx2x_int_disable()) while using the PGL
 * "pretend" register to make our GRC accesses act as function 0, then
 * restore the original function.  BUG()s if the pretend register fails
 * to latch either value - the read-back after each write doubles as a
 * GRC flush.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}
6583
/* Disable chip interrupts; E1H needs the function-0 "pretend" dance,
 * other chips can disable directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (!CHIP_IS_E1H(bp))
		bnx2x_int_disable(bp);
	else
		bnx2x_undi_int_disable_e1h(bp, func);
}
6591
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006592static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006593{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006594 u32 val;
6595
6596 /* Check if there is any driver already loaded */
6597 val = REG_RD(bp, MISC_REG_UNPREPARED);
6598 if (val == 0x1) {
6599 /* Check if it is the UNDI driver
6600 * UNDI driver initializes CID offset for normal bell to 0x7
6601 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07006602 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006603 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
6604 if (val == 0x7) {
6605 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006606 /* save our func */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006607 int func = BP_FUNC(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006608 u32 swap_en;
6609 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006610
Eilon Greensteinb4661732009-01-14 06:43:56 +00006611 /* clear the UNDI indication */
6612 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
6613
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006614 BNX2X_DEV_INFO("UNDI is active! reset device\n");
6615
6616 /* try unload UNDI on port 0 */
6617 bp->func = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006618 bp->fw_seq =
6619 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6620 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006621 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006622
6623 /* if UNDI is loaded on the other port */
6624 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
6625
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006626 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006627 bnx2x_fw_command(bp,
6628 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006629
6630 /* unload UNDI on port 1 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006631 bp->func = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006632 bp->fw_seq =
6633 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6634 DRV_MSG_SEQ_NUMBER_MASK);
6635 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006636
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006637 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006638 }
6639
Eilon Greensteinb4661732009-01-14 06:43:56 +00006640 /* now it's safe to release the lock */
6641 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
6642
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00006643 bnx2x_undi_int_disable(bp, func);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006644
6645 /* close input traffic and wait for it */
6646 /* Do not rcv packets to BRB */
6647 REG_WR(bp,
6648 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
6649 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
6650 /* Do not direct rcv packets that are not for MCP to
6651 * the BRB */
6652 REG_WR(bp,
6653 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
6654 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6655 /* clear AEU */
6656 REG_WR(bp,
6657 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6658 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
6659 msleep(10);
6660
6661 /* save NIG port swap info */
6662 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6663 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006664 /* reset device */
6665 REG_WR(bp,
6666 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006667 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006668 REG_WR(bp,
6669 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
6670 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006671 /* take the NIG out of reset and restore swap values */
6672 REG_WR(bp,
6673 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
6674 MISC_REGISTERS_RESET_REG_1_RST_NIG);
6675 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
6676 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
6677
6678 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006679 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006680
6681 /* restore our func and fw_seq */
6682 bp->func = func;
6683 bp->fw_seq =
6684 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
6685 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00006686
6687 } else
6688 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006689 }
6690}
6691
/* Read chip-wide (port/function independent) HW information at probe
 * time: chip id, port mode, doorbell size, flash size, shmem base
 * addresses, bootcode version, WoL capability and part number.  Sets
 * NO_MCP_FLAG and returns early when the shmem base looks invalid
 * (MCP not running).
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;

	/* NOTE(review): 0x2874 is a raw GRC offset used to detect
	 * single-port devices - verify its meaning against the HW docs */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* A shmem base outside [0xA0000, 0xC0000) means no running MCP */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERROR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	/* Bootcode version and the features it implies */
	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
			    "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		/* WoL is available only if the PM capability advertises
		 * PME from D3cold */
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* Part number: four consecutive dwords from shared HW config */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
6806
6807static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6808 u32 switch_cfg)
6809{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006810 int cfg_size = 0, idx, port = BP_PORT(bp);
6811
6812 /* Aggregation of supported attributes of all external phys */
6813 bp->port.supported[0] = 0;
6814 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006815 switch (bp->link_params.num_phys) {
6816 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006817 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6818 cfg_size = 1;
6819 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006820 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006821 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6822 cfg_size = 1;
6823 break;
6824 case 3:
6825 if (bp->link_params.multi_phy_config &
6826 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6827 bp->port.supported[1] =
6828 bp->link_params.phy[EXT_PHY1].supported;
6829 bp->port.supported[0] =
6830 bp->link_params.phy[EXT_PHY2].supported;
6831 } else {
6832 bp->port.supported[0] =
6833 bp->link_params.phy[EXT_PHY1].supported;
6834 bp->port.supported[1] =
6835 bp->link_params.phy[EXT_PHY2].supported;
6836 }
6837 cfg_size = 2;
6838 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006839 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006840
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006841 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006842 BNX2X_ERR("NVRAM config error. BAD phy config."
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006843 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006844 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006845 dev_info.port_hw_config[port].external_phy_config),
6846 SHMEM_RD(bp,
6847 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006848 return;
6849 }
6850
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00006851 switch (switch_cfg) {
6852 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006853 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6854 port*0x10);
6855 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006856 break;
6857
6858 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006859 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6860 port*0x18);
6861 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006862
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006863 break;
6864
6865 default:
6866 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006867 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006868 return;
6869 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006870 /* mask what we support according to speed_cap_mask per configuration */
6871 for (idx = 0; idx < cfg_size; idx++) {
6872 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006873 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006874 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006875
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006876 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006877 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006878 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006879
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006880 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006881 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006882 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006883
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006884 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006885 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006886 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006887
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006888 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006889 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006890 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006891 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006892
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006893 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006894 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006895 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006896
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006897 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07006898 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006899 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006900
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006901 }
6902
6903 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6904 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905}
6906
/* Translate the NVRAM link_config[] for each link configuration into the
 * driver's requested link settings: req_line_speed, req_duplex,
 * req_flow_ctrl (in bp->link_params) and the ethtool advertising mask
 * (in bp->port).
 *
 * Each requested fixed speed is validated against the supported[] mask
 * built by bnx2x_link_settings_supported(); an unsupported request is an
 * NVRAM configuration error and aborts processing of that configuration.
 * Must be called after bnx2x_link_settings_supported().
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* one configuration per active link path; two only with dual
	 * external PHYs */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		/* full duplex unless a half-duplex speed is selected below */
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				/* flow control / info print skipped for the
				 * forced-10G fallback */
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] = SPEED_100;
				bp->link_params.req_duplex[idx] = DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* unknown speed selector: fall back to autoneg and
			 * advertise everything we support */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
			bp->port.advertising[idx] = bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		/* flow-control autoneg is meaningless without AN support */
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
7094
Michael Chane665bfd2009-10-10 13:46:54 +00007095static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
7096{
7097 mac_hi = cpu_to_be16(mac_hi);
7098 mac_lo = cpu_to_be32(mac_lo);
7099 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
7100 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
7101}
7102
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007103static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007104{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007105 int port = BP_PORT(bp);
7106 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00007107 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007108 u32 ext_phy_type, ext_phy_config;;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007109
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007110 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007111 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007112
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007113 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007114 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007115
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007116 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007117 SHMEM_RD(bp,
7118 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007119 bp->link_params.speed_cap_mask[1] =
7120 SHMEM_RD(bp,
7121 dev_info.port_hw_config[port].speed_capability_mask2);
7122 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007123 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7124
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007125 bp->port.link_config[1] =
7126 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007127
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007128 bp->link_params.multi_phy_config =
7129 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00007130 /* If the device is capable of WoL, set the default state according
7131 * to the HW
7132 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007133 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00007134 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
7135 (config & PORT_FEATURE_WOL_ENABLED));
7136
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007137 BNX2X_DEV_INFO("lane_config 0x%08x"
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007138 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007139 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007140 bp->link_params.speed_cap_mask[0],
7141 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007142
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007143 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007144 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007145 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007146 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007147
7148 bnx2x_link_settings_requested(bp);
7149
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007150 /*
7151 * If connected directly, work with the internal PHY, otherwise, work
7152 * with the external PHY
7153 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007154 ext_phy_config =
7155 SHMEM_RD(bp,
7156 dev_info.port_hw_config[port].external_phy_config);
7157 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007158 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007159 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007160
7161 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
7162 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
7163 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007164 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007165
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007166 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7167 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00007168 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007169 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7170 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00007171
7172#ifdef BCM_CNIC
7173 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
7174 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
7175 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
7176#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007177}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007178
/* Gather all hardware/NVRAM information needed to run the function:
 * common chip info, IGU/status-block layout, multi-function (MF) mode and
 * outer-VLAN tag, per-port settings and the function's MAC address.
 *
 * Returns 0 on success or -EPERM when the MF configuration in shmem is
 * inconsistent (no valid E1HOV tag in MF mode, or a non-zero VN in
 * single-function mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->common.int_block = INT_BLOCK_HC;

	bp->igu_dsb_id = DEF_SB_IGU_ID;
	bp->igu_base_sb = 0;
	bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);

	/* default: single-function, no outer VLAN */
	bp->mf_ov = 0;
	bp->mf_mode = 0;
	if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {

		/* MF config block sits right after the function mailboxes */
		bp->common.mf_cfg_base = bp->common.shmem_base +
			offsetof(struct shmem_region, func_mb) +
			E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config =
			MF_CFG_RD(bp, func_mf_config[func].config);

		/* function 0's E1HOV tag decides whether the chip runs in
		 * multi-function mode */
		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			/* in MF mode this function must have its own valid
			 * outer-VLAN tag */
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("E1HOV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid E1HOV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			/* single-function mode only runs on VN 0 */
			if (BP_E1HVN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF */
	if (IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		/* in MF mode the per-function MAC from the MF config
		 * overrides the port MAC read above (when programmed) */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
7274
/* Extract a vendor-specific firmware version string from the adapter's
 * PCI VPD (Vital Product Data) into bp->fw_ver.
 *
 * Walks the VPD read-only data block, and only if the MFR_ID keyword
 * matches the Dell vendor id (hex, either case) copies the VENDOR0
 * keyword payload into bp->fw_ver.  On a short VPD read or any parse
 * failure, bp->fw_ver is simply left zeroed.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* a short read means the VPD is unusable */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* locate the read-only (RO) large resource data tag */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* the RO block must fit entirely inside the buffer we read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy by fw_ver's size (32) and by the
			 * end of the VPD buffer; terminate with a space */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
7338
/* One-time software initialization of the bnx2x instance at probe time:
 * locks/work items, hardware info discovery, per-bp memory, undi unload,
 * module-parameter-driven feature flags (multi-queue, TPA, dropless FC),
 * coalescing defaults and the periodic timer.
 *
 * Returns 0 on success or a negative errno propagated from
 * bnx2x_get_hwinfo()/bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* NOTE(review): on failure rc is carried through and returned at the
	 * end; the remaining setup still runs */
	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);
	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		/* RSS requires MSI-X; fall back to single queue otherwise */
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
7423
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007424
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00007425/****************************************************************************
7426* General service functions
7427****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007428
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007429/* called with rtnl_lock */
/* net_device_ops .ndo_open - called with rtnl_lock held.
 *
 * Powers the device up and, if a parity/HW-error recovery is still
 * pending, attempts to complete it (possibly acting as the recovery
 * "leader") before loading the NIC.  Returns 0 on success, -EAGAIN when
 * recovery has not finished yet, or the error from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do/while(0) used only as a breakable scope */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery not possible from here - power back down
			 * and ask the user to retry */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
7474
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007475/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007476static int bnx2x_close(struct net_device *dev)
7477{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007478 struct bnx2x *bp = netdev_priv(dev);
7479
7480 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07007481 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00007482 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007483
7484 return 0;
7485}
7486
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode (normal/all-multi/promiscuous) and the
 * multicast filters from dev->flags and the device's multicast list.
 * Returns early (doing nothing) unless the device is fully OPEN.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	/* Promiscuous overrides everything else */
	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	/* all-multi requested, or too many groups for E1's MC table */
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* Build a hash filter: the top byte of each MAC's
			 * crc32c selects one bit in the MC_HASH registers.
			 */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;	/* 32 bits per register */
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}


	bp->rx_mode = rx_mode;
	/* Push the chosen mode down to the firmware/storms */
	bnx2x_set_storm_rx_mode(bp);
}
7552
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007553
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007554/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007555static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
7556 int devad, u16 addr)
7557{
7558 struct bnx2x *bp = netdev_priv(netdev);
7559 u16 value;
7560 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007561
7562 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
7563 prtad, devad, addr);
7564
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007565 /* The HW expects different devad if CL22 is used */
7566 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7567
7568 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00007569 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007570 bnx2x_release_phy_lock(bp);
7571 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
7572
7573 if (!rc)
7574 rc = value;
7575 return rc;
7576}
7577
7578/* called with rtnl_lock */
7579static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
7580 u16 addr, u16 value)
7581{
7582 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007583 int rc;
7584
7585 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
7586 " value 0x%x\n", prtad, devad, addr, value);
7587
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007588 /* The HW expects different devad if CL22 is used */
7589 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
7590
7591 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00007592 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00007593 bnx2x_release_phy_lock(bp);
7594 return rc;
7595}
7596
/* called with rtnl_lock */
/* net_device_ops .ndo_do_ioctl handler: delegates MII ioctls to the
 * generic mdio helper, which in turn calls bnx2x_mdio_read/write.
 * Returns -EAGAIN if the interface is not running.
 */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}
7611
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00007612#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll (.ndo_poll_controller) callback: service the device by
 * invoking its interrupt handler directly with the device IRQ masked,
 * for contexts where normal interrupt delivery is unavailable.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
7621#endif
7622
/* net_device callbacks wired into the networking stack for every bnx2x
 * device (set on the netdev in bnx2x_init_dev()).
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
7640
/* One-time PCI/netdev setup for a newly probed device: enable the PCI
 * function, claim its regions, set DMA masks, map BAR0 (registers) and
 * BAR2 (doorbells), and wire up netdev ops and feature flags.
 * On failure everything acquired so far is undone via the goto chain at
 * the bottom; returns 0 or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first function to come up claims the regions and
	 * enables bus mastering (enable_cnt is shared per device).
	 */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA; fall back to 32-bit, else fail */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	/* Map the register BAR */
	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map the doorbell BAR (clamped to its actual length) */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* Mirror the offload features for VLAN devices stacked on top */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

	/* Error unwind: undo in reverse order of acquisition */
err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
7810
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007811static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7812 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08007813{
7814 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7815
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007816 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7817
7818 /* return value of 1=2.5GHz 2=5GHz */
7819 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08007820}
7821
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00007822static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007823{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007824 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007825 struct bnx2x_fw_file_hdr *fw_hdr;
7826 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007827 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007828 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007829 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00007830 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007831
7832 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7833 return -EINVAL;
7834
7835 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7836 sections = (struct bnx2x_fw_file_section *)fw_hdr;
7837
7838 /* Make sure none of the offsets and sizes make us read beyond
7839 * the end of the firmware data */
7840 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7841 offset = be32_to_cpu(sections[i].offset);
7842 len = be32_to_cpu(sections[i].len);
7843 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007844 dev_err(&bp->pdev->dev,
7845 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007846 return -EINVAL;
7847 }
7848 }
7849
7850 /* Likewise for the init_ops offsets */
7851 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7852 ops_offsets = (u16 *)(firmware->data + offset);
7853 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7854
7855 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7856 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007857 dev_err(&bp->pdev->dev,
7858 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007859 return -EINVAL;
7860 }
7861 }
7862
7863 /* Check FW version */
7864 offset = be32_to_cpu(fw_hdr->fw_version.offset);
7865 fw_ver = firmware->data + offset;
7866 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7867 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7868 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7869 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007870 dev_err(&bp->pdev->dev,
7871 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007872 fw_ver[0], fw_ver[1], fw_ver[2],
7873 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7874 BCM_5710_FW_MINOR_VERSION,
7875 BCM_5710_FW_REVISION_VERSION,
7876 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007877 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007878 }
7879
7880 return 0;
7881}
7882
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007883static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007884{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007885 const __be32 *source = (const __be32 *)_source;
7886 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007887 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007888
7889 for (i = 0; i < n/4; i++)
7890 target[i] = be32_to_cpu(source[i]);
7891}
7892
7893/*
7894 Ops array is stored in the following format:
7895 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7896 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007897static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007898{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007899 const __be32 *source = (const __be32 *)_source;
7900 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007901 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007902
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007903 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007904 tmp = be32_to_cpu(source[j]);
7905 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007906 target[i].offset = tmp & 0xffffff;
7907 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007908 }
7909}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007910
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007911/**
7912 * IRO array is stored in the following format:
7913 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
7914 */
7915static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
7916{
7917 const __be32 *source = (const __be32 *)_source;
7918 struct iro *target = (struct iro *)_target;
7919 u32 i, j, tmp;
7920
7921 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
7922 target[i].base = be32_to_cpu(source[j]);
7923 j++;
7924 tmp = be32_to_cpu(source[j]);
7925 target[i].m1 = (tmp >> 16) & 0xffff;
7926 target[i].m2 = tmp & 0xffff;
7927 j++;
7928 tmp = be32_to_cpu(source[j]);
7929 target[i].m3 = (tmp >> 16) & 0xffff;
7930 target[i].size = tmp & 0xffff;
7931 j++;
7932 }
7933}
7934
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007935static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007936{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00007937 const __be16 *source = (const __be16 *)_source;
7938 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007939 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007940
7941 for (i = 0; i < n/2; i++)
7942 target[i] = be16_to_cpu(source[i]);
7943}
7944
/*
 * BNX2X_ALLOC_AND_SET - kmalloc bp->arr and fill it from the firmware blob.
 * @arr:  field of struct bnx2x to allocate; also names the matching
 *        fw_hdr section whose offset/len describe the source data
 * @lbl:  label to 'goto' on allocation failure (caller provides unwind)
 * @func: conversion routine taking (source, target, byte-count)
 *
 * Relies on 'bp' and 'fw_hdr' being in scope at the expansion site.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07007956
/* Request the firmware image matching the chip revision, validate it,
 * and populate bp's init arrays (blob, opcodes, offsets, IRO) plus the
 * STORM data pointers into the raw image.  Returns 0 or a negative
 * errno; on failure all partial allocations are released.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* Pick the firmware file for this chip revision */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	/* Sanity-check section bounds and the embedded FW version */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays.  Each
	 * BNX2X_ALLOC_AND_SET jumps to the given label on failure.
	 */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the raw image */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

	/* Error unwind: free in reverse order of allocation */
iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
8032
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008033static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8034{
8035 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008036
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008037#ifdef BCM_CNIC
8038 cid_count += CNIC_CID_MAX;
8039#endif
8040 return roundup(cid_count, QM_CID_ROUND);
8041}
/* PCI probe callback: allocate a multiqueue netdev, run the PCI/netdev
 * setup (bnx2x_init_dev) and driver-state setup (bnx2x_init_bp), then
 * register with the network stack and print a banner.
 * Returns 0 or a negative errno; on failure everything is unwound.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	cid_count = FP_SB_MAX_E1x + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() cleaned up after itself */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Announce the adapter: board, revision, PCIe link, IRQ, MAC */
	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

	/* Error unwind for failures after bnx2x_init_dev() succeeded */
init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
8112
/* PCI remove callback: unregister the netdev and release everything
 * acquired in bnx2x_init_one()/bnx2x_init_dev(), in reverse order.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8145
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008146static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
8147{
8148 int i;
8149
8150 bp->state = BNX2X_STATE_ERROR;
8151
8152 bp->rx_mode = BNX2X_RX_MODE_NONE;
8153
8154 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07008155 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008156
8157 del_timer_sync(&bp->timer);
8158 bp->stats_state = STATS_STATE_DISABLED;
8159 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
8160
8161 /* Release IRQs */
Vladislav Zolotarov6cbe5062010-02-17 02:03:27 +00008162 bnx2x_free_irq(bp, false);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008163
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008164 /* Free SKBs, SGEs, TPA pool and driver internals */
8165 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008166
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008167 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008168 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00008169 for_each_queue(bp, i)
Eilon Greenstein7cde1c82009-01-22 06:01:25 +00008170 netif_napi_del(&bnx2x_fp(bp, i, napi));
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008171 bnx2x_free_mem(bp);
8172
8173 bp->state = BNX2X_STATE_CLOSED;
8174
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008175 return 0;
8176}
8177
/*
 * bnx2x_eeh_recover - re-read MCP shared-memory state after a PCI reset.
 *
 * Re-discovers the shared memory base and checks the validity signature;
 * if the base is outside the expected window the MCP is assumed inactive
 * and NO_MCP_FLAG is set so the driver continues unmanaged.  Also
 * re-reads the firmware mailbox sequence number used for MCP commands.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* Valid shmem must lie in [0xA0000, 0xC0000); anything else
	 * means the MCP did not publish it */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
8207
Wendy Xiong493adb12008-06-23 20:36:22 -07008208/**
8209 * bnx2x_io_error_detected - called when PCI error is detected
8210 * @pdev: Pointer to PCI device
8211 * @state: The current pci connection state
8212 *
8213 * This function is called after a PCI bus error affecting
8214 * this device has been detected.
8215 */
8216static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
8217 pci_channel_state_t state)
8218{
8219 struct net_device *dev = pci_get_drvdata(pdev);
8220 struct bnx2x *bp = netdev_priv(dev);
8221
8222 rtnl_lock();
8223
8224 netif_device_detach(dev);
8225
Dean Nelson07ce50e2009-07-31 09:13:25 +00008226 if (state == pci_channel_io_perm_failure) {
8227 rtnl_unlock();
8228 return PCI_ERS_RESULT_DISCONNECT;
8229 }
8230
Wendy Xiong493adb12008-06-23 20:36:22 -07008231 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008232 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07008233
8234 pci_disable_device(pdev);
8235
8236 rtnl_unlock();
8237
8238 /* Request a slot reset */
8239 return PCI_ERS_RESULT_NEED_RESET;
8240}
8241
8242/**
8243 * bnx2x_io_slot_reset - called after the PCI bus has been reset
8244 * @pdev: Pointer to PCI device
8245 *
8246 * Restart the card from scratch, as if from a cold-boot.
8247 */
8248static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
8249{
8250 struct net_device *dev = pci_get_drvdata(pdev);
8251 struct bnx2x *bp = netdev_priv(dev);
8252
8253 rtnl_lock();
8254
8255 if (pci_enable_device(pdev)) {
8256 dev_err(&pdev->dev,
8257 "Cannot re-enable PCI device after reset\n");
8258 rtnl_unlock();
8259 return PCI_ERS_RESULT_DISCONNECT;
8260 }
8261
8262 pci_set_master(pdev);
8263 pci_restore_state(pdev);
8264
8265 if (netif_running(dev))
8266 bnx2x_set_power_state(bp, PCI_D0);
8267
8268 rtnl_unlock();
8269
8270 return PCI_ERS_RESULT_RECOVERED;
8271}
8272
8273/**
8274 * bnx2x_io_resume - called when traffic can start flowing again
8275 * @pdev: Pointer to PCI device
8276 *
8277 * This callback is called when the error recovery driver tells us that
8278 * its OK to resume normal operation.
8279 */
8280static void bnx2x_io_resume(struct pci_dev *pdev)
8281{
8282 struct net_device *dev = pci_get_drvdata(pdev);
8283 struct bnx2x *bp = netdev_priv(dev);
8284
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008285 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
8286 printk(KERN_ERR "Handling parity error recovery. Try again later\n");
8287 return;
8288 }
8289
Wendy Xiong493adb12008-06-23 20:36:22 -07008290 rtnl_lock();
8291
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008292 bnx2x_eeh_recover(bp);
8293
Wendy Xiong493adb12008-06-23 20:36:22 -07008294 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07008295 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07008296
8297 netif_device_attach(dev);
8298
8299 rtnl_unlock();
8300}
8301
/* PCI error recovery (EEH/AER) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
8307
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008308static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07008309 .name = DRV_MODULE_NAME,
8310 .id_table = bnx2x_pci_tbl,
8311 .probe = bnx2x_init_one,
8312 .remove = __devexit_p(bnx2x_remove_one),
8313 .suspend = bnx2x_suspend,
8314 .resume = bnx2x_resume,
8315 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008316};
8317
8318static int __init bnx2x_init(void)
8319{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008320 int ret;
8321
Joe Perches7995c642010-02-17 15:01:52 +00008322 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00008323
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008324 bnx2x_wq = create_singlethread_workqueue("bnx2x");
8325 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00008326 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008327 return -ENOMEM;
8328 }
8329
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008330 ret = pci_register_driver(&bnx2x_pci_driver);
8331 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00008332 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00008333 destroy_workqueue(bnx2x_wq);
8334 }
8335 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008336}
8337
/* Module exit point: unregister the PCI driver (tearing down all bound
 * devices), then destroy the slow-path workqueue */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}
8344
/* Hook module load/unload to the init/cleanup routines above */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
8347
Michael Chan993ac7b2009-10-10 13:46:56 +00008348#ifdef BCM_CNIC
8349
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	/* Each completion frees one SPQ slot CNIC was occupying */
	bp->cnic_spq_pending -= count;

	/* Move staged kwqes onto the SPQ while CNIC's quota allows and
	 * there is anything staged */
	for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
	     bp->cnic_spq_pending++) {

		if (!bp->cnic_kwq_pending)
			break;

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		bp->cnic_kwq_pending--;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the consumer, wrapping at the end of the ring */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* Publish the new SPQ producer to the chip */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
8385
/*
 * bnx2x_cnic_sp_queue - stage up to @count CNIC 16-byte kwqes for posting.
 *
 * Copies the kwqes into the cnic_kwq staging ring under spq_lock and, if
 * CNIC still has SPQ quota, posts them immediately via
 * bnx2x_cnic_sp_post().  Returns the number of kwqes accepted (may be
 * less than @count when the staging ring fills), or -EIO after a panic.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Staging ring full - accept only what fits */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the producer, wrapping at the end of the ring */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Kick the posting machinery if CNIC still has SPQ quota */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
8428
8429static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8430{
8431 struct cnic_ops *c_ops;
8432 int rc = 0;
8433
8434 mutex_lock(&bp->cnic_mutex);
8435 c_ops = bp->cnic_ops;
8436 if (c_ops)
8437 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8438 mutex_unlock(&bp->cnic_mutex);
8439
8440 return rc;
8441}
8442
8443static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
8444{
8445 struct cnic_ops *c_ops;
8446 int rc = 0;
8447
8448 rcu_read_lock();
8449 c_ops = rcu_dereference(bp->cnic_ops);
8450 if (c_ops)
8451 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
8452 rcu_read_unlock();
8453
8454 return rc;
8455}
8456
8457/*
8458 * for commands that have no data
8459 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008460int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00008461{
8462 struct cnic_ctl_info ctl = {0};
8463
8464 ctl.cmd = cmd;
8465
8466 return bnx2x_cnic_ctl_send(bp, &ctl);
8467}
8468
8469static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
8470{
8471 struct cnic_ctl_info ctl;
8472
8473 /* first we tell CNIC and only then we count this as a completion */
8474 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
8475 ctl.data.comp.cid = cid;
8476
8477 bnx2x_cnic_ctl_send_bh(bp, &ctl);
8478 bnx2x_cnic_sp_post(bp, 1);
8479}
8480
/*
 * bnx2x_drv_ctl - control entry point exported to the CNIC module.
 *
 * Dispatches on ctl->cmd: ILT context-table writes, SPQ completion
 * accounting, and start/stop of the iSCSI L2 ring (the ring commands
 * are documented by their callers as running under rtnl_lock).
 * Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_COMPLETION_CMD: {
		int count = ctl->data.comp.comp_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* Order the MAC write before enabling Rx filters below */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* Order the filter disable before removing the MAC */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
8550
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008551void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +00008552{
8553 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
8554
8555 if (bp->flags & USING_MSIX_FLAG) {
8556 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
8557 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
8558 cp->irq_arr[0].vector = bp->msix_table[1].vector;
8559 } else {
8560 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
8561 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
8562 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008563 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
Michael Chan993ac7b2009-10-10 13:46:56 +00008564 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008565 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00008566 cp->irq_arr[1].status_blk = bp->def_status_blk;
8567 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008568 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +00008569
8570 cp->num_irq = 2;
8571}
8572
/*
 * bnx2x_register_cnic - CNIC attach callback (cp->drv_register_cnic).
 *
 * Allocates the kwqe staging ring, initializes the CNIC status block
 * and iSCSI MAC, then publishes @ops via RCU so event delivery may
 * begin.  Returns 0 on success, -EINVAL for a NULL @ops, -EBUSY while
 * interrupts are disabled (intr_sem held), or -ENOMEM on allocation
 * failure.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	/* One page of staging slots, produced/consumed under spq_lock */
	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

	bnx2x_setup_cnic_irq_info(bp);
	bnx2x_set_iscsi_eth_mac_addr(bp, 1);
	bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
	/* Publish ops last so RCU readers only see fully set-up state */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
8613
/*
 * bnx2x_unregister_cnic - CNIC detach callback (cp->drv_unregister_cnic).
 *
 * Clears the iSCSI MAC if it was set, unpublishes cnic_ops under the
 * mutex, then waits for all RCU readers (bnx2x_cnic_ctl_send_bh) to
 * finish before freeing the kwqe staging ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Let in-flight RCU readers drain before the ring goes away */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
8633
/*
 * bnx2x_cnic_probe - export this device's CNIC attachment descriptor.
 *
 * Fills in the cnic_eth_dev structure (chip id, BAR mappings, context
 * table layout, kwqe limits and the driver callbacks) that the CNIC
 * module uses to drive offload on top of this netdev, and returns a
 * pointer to it (embedded in struct bnx2x; never NULL).
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC's context lines start right after the leading ETH line */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = BCM_CNIC_CID_START;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
8657
8658#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008659