/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070059#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h"
/* Firmware file names: the version string is built from the FW version
 * components at compile time, e.g. "bnx2x/bnx2x-e1-X.Y.Z.W.fw".
 */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020073
Andrew Morton53a10562008-02-09 23:16:41 -080074static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020076 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070078MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000079MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000083MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000085MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020086
Eilon Greenstein555f6c72009-02-12 08:36:11 +000087static int multi_mode = 1;
88module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070089MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
91
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000092int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000093module_param(num_queues, int, 0);
94MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000096
Eilon Greenstein19680c42008-08-13 15:47:33 -070097static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070098module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000099MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000100
101static int int_mode;
102module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000103MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
104 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000105
Eilon Greensteina18f5122009-08-12 08:23:26 +0000106static int dropless_fc;
107module_param(dropless_fc, int, 0);
108MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
109
Eilon Greenstein9898f862009-02-12 08:38:27 +0000110static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200111module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000112MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000113
114static int mrrs = -1;
115module_param(mrrs, int, 0);
116MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117
Eilon Greenstein9898f862009-02-12 08:38:27 +0000118static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200119module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000120MODULE_PARM_DESC(debug, " Default debug msglevel");
121
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800122static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200123
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +0000124#ifdef BCM_CNIC
125static u8 ALL_ENODE_MACS[] = {0x01, 0x10, 0x18, 0x01, 0x00, 0x01};
126#endif
127
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200128enum bnx2x_board_type {
129 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700130 BCM57711 = 1,
131 BCM57711E = 2,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000132 BCM57712 = 3,
133 BCM57712E = 4
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200134};
135
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700136/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800137static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200138 char *name;
139} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700140 { "Broadcom NetXtreme II BCM57710 XGb" },
141 { "Broadcom NetXtreme II BCM57711 XGb" },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000142 { "Broadcom NetXtreme II BCM57711E XGb" },
143 { "Broadcom NetXtreme II BCM57712 XGb" },
144 { "Broadcom NetXtreme II BCM57712E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200145};
146
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000147#ifndef PCI_DEVICE_ID_NX2_57712
148#define PCI_DEVICE_ID_NX2_57712 0x1662
149#endif
150#ifndef PCI_DEVICE_ID_NX2_57712E
151#define PCI_DEVICE_ID_NX2_57712E 0x1663
152#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700153
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000154static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
156 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
157 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000158 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
159 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200160 { 0 }
161};
162
163MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
164
165/****************************************************************************
166* General service functions
167****************************************************************************/
168
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000169static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
170 u32 addr, dma_addr_t mapping)
171{
172 REG_WR(bp, addr, U64_LO(mapping));
173 REG_WR(bp, addr + 4, U64_HI(mapping));
174}
175
176static inline void __storm_memset_fill(struct bnx2x *bp,
177 u32 addr, size_t size, u32 val)
178{
179 int i;
180 for (i = 0; i < size/4; i++)
181 REG_WR(bp, addr + (i * 4), val);
182}
183
184static inline void storm_memset_ustats_zero(struct bnx2x *bp,
185 u8 port, u16 stat_id)
186{
187 size_t size = sizeof(struct ustorm_per_client_stats);
188
189 u32 addr = BAR_USTRORM_INTMEM +
190 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
191
192 __storm_memset_fill(bp, addr, size, 0);
193}
194
195static inline void storm_memset_tstats_zero(struct bnx2x *bp,
196 u8 port, u16 stat_id)
197{
198 size_t size = sizeof(struct tstorm_per_client_stats);
199
200 u32 addr = BAR_TSTRORM_INTMEM +
201 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
202
203 __storm_memset_fill(bp, addr, size, 0);
204}
205
206static inline void storm_memset_xstats_zero(struct bnx2x *bp,
207 u8 port, u16 stat_id)
208{
209 size_t size = sizeof(struct xstorm_per_client_stats);
210
211 u32 addr = BAR_XSTRORM_INTMEM +
212 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
213
214 __storm_memset_fill(bp, addr, size, 0);
215}
216
217
218static inline void storm_memset_spq_addr(struct bnx2x *bp,
219 dma_addr_t mapping, u16 abs_fid)
220{
221 u32 addr = XSEM_REG_FAST_MEMORY +
222 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
223
224 __storm_memset_dma_mapping(bp, addr, mapping);
225}
226
227static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
228{
229 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
230}
231
232static inline void storm_memset_func_cfg(struct bnx2x *bp,
233 struct tstorm_eth_function_common_config *tcfg,
234 u16 abs_fid)
235{
236 size_t size = sizeof(struct tstorm_eth_function_common_config);
237
238 u32 addr = BAR_TSTRORM_INTMEM +
239 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
240
241 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
242}
243
244static inline void storm_memset_xstats_flags(struct bnx2x *bp,
245 struct stats_indication_flags *flags,
246 u16 abs_fid)
247{
248 size_t size = sizeof(struct stats_indication_flags);
249
250 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
251
252 __storm_memset_struct(bp, addr, size, (u32 *)flags);
253}
254
255static inline void storm_memset_tstats_flags(struct bnx2x *bp,
256 struct stats_indication_flags *flags,
257 u16 abs_fid)
258{
259 size_t size = sizeof(struct stats_indication_flags);
260
261 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
262
263 __storm_memset_struct(bp, addr, size, (u32 *)flags);
264}
265
266static inline void storm_memset_ustats_flags(struct bnx2x *bp,
267 struct stats_indication_flags *flags,
268 u16 abs_fid)
269{
270 size_t size = sizeof(struct stats_indication_flags);
271
272 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
273
274 __storm_memset_struct(bp, addr, size, (u32 *)flags);
275}
276
277static inline void storm_memset_cstats_flags(struct bnx2x *bp,
278 struct stats_indication_flags *flags,
279 u16 abs_fid)
280{
281 size_t size = sizeof(struct stats_indication_flags);
282
283 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
284
285 __storm_memset_struct(bp, addr, size, (u32 *)flags);
286}
287
288static inline void storm_memset_xstats_addr(struct bnx2x *bp,
289 dma_addr_t mapping, u16 abs_fid)
290{
291 u32 addr = BAR_XSTRORM_INTMEM +
292 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
293
294 __storm_memset_dma_mapping(bp, addr, mapping);
295}
296
297static inline void storm_memset_tstats_addr(struct bnx2x *bp,
298 dma_addr_t mapping, u16 abs_fid)
299{
300 u32 addr = BAR_TSTRORM_INTMEM +
301 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
302
303 __storm_memset_dma_mapping(bp, addr, mapping);
304}
305
306static inline void storm_memset_ustats_addr(struct bnx2x *bp,
307 dma_addr_t mapping, u16 abs_fid)
308{
309 u32 addr = BAR_USTRORM_INTMEM +
310 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
311
312 __storm_memset_dma_mapping(bp, addr, mapping);
313}
314
315static inline void storm_memset_cstats_addr(struct bnx2x *bp,
316 dma_addr_t mapping, u16 abs_fid)
317{
318 u32 addr = BAR_CSTRORM_INTMEM +
319 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
320
321 __storm_memset_dma_mapping(bp, addr, mapping);
322}
323
324static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
325 u16 pf_id)
326{
327 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
332 pf_id);
333 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
334 pf_id);
335}
336
337static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
338 u8 enable)
339{
340 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
345 enable);
346 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
347 enable);
348}
349
350static inline void storm_memset_eq_data(struct bnx2x *bp,
351 struct event_ring_data *eq_data,
352 u16 pfid)
353{
354 size_t size = sizeof(struct event_ring_data);
355
356 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
357
358 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
359}
360
361static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
362 u16 pfid)
363{
364 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
365 REG_WR16(bp, addr, eq_prod);
366}
367
368static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
369 u16 fw_sb_id, u8 sb_index,
370 u8 ticks)
371{
372
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000373 int index_offset = CHIP_IS_E2(bp) ?
374 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000375 offsetof(struct hc_status_block_data_e1x, index_data);
376 u32 addr = BAR_CSTRORM_INTMEM +
377 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
378 index_offset +
379 sizeof(struct hc_index_data)*sb_index +
380 offsetof(struct hc_index_data, timeout);
381 REG_WR8(bp, addr, ticks);
382 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
383 port, fw_sb_id, sb_index, ticks);
384}
385static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
386 u16 fw_sb_id, u8 sb_index,
387 u8 disable)
388{
389 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000390 int index_offset = CHIP_IS_E2(bp) ?
391 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000392 offsetof(struct hc_status_block_data_e1x, index_data);
393 u32 addr = BAR_CSTRORM_INTMEM +
394 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
395 index_offset +
396 sizeof(struct hc_index_data)*sb_index +
397 offsetof(struct hc_index_data, flags);
398 u16 flags = REG_RD16(bp, addr);
399 /* clear and set */
400 flags &= ~HC_INDEX_DATA_HC_ENABLED;
401 flags |= enable_flag;
402 REG_WR16(bp, addr, flags);
403 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
404 port, fw_sb_id, sb_index, disable);
405}
406
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200407/* used only at init
408 * locking is done by mcp
409 */
stephen hemminger8d962862010-10-21 07:50:56 +0000410static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200411{
412 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
413 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
414 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
415 PCICFG_VENDOR_ID_OFFSET);
416}
417
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200418static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
419{
420 u32 val;
421
422 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
423 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
424 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
425 PCICFG_VENDOR_ID_OFFSET);
426
427 return val;
428}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200429
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000430#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
431#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
432#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
433#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
434#define DMAE_DP_DST_NONE "dst_addr [none]"
435
stephen hemminger8d962862010-10-21 07:50:56 +0000436static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
437 int msglvl)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000438{
439 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
440
441 switch (dmae->opcode & DMAE_COMMAND_DST) {
442 case DMAE_CMD_DST_PCI:
443 if (src_type == DMAE_CMD_SRC_PCI)
444 DP(msglvl, "DMAE: opcode 0x%08x\n"
445 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
446 "comp_addr [%x:%08x], comp_val 0x%08x\n",
447 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
448 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
449 dmae->comp_addr_hi, dmae->comp_addr_lo,
450 dmae->comp_val);
451 else
452 DP(msglvl, "DMAE: opcode 0x%08x\n"
453 "src [%08x], len [%d*4], dst [%x:%08x]\n"
454 "comp_addr [%x:%08x], comp_val 0x%08x\n",
455 dmae->opcode, dmae->src_addr_lo >> 2,
456 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
457 dmae->comp_addr_hi, dmae->comp_addr_lo,
458 dmae->comp_val);
459 break;
460 case DMAE_CMD_DST_GRC:
461 if (src_type == DMAE_CMD_SRC_PCI)
462 DP(msglvl, "DMAE: opcode 0x%08x\n"
463 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
464 "comp_addr [%x:%08x], comp_val 0x%08x\n",
465 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
466 dmae->len, dmae->dst_addr_lo >> 2,
467 dmae->comp_addr_hi, dmae->comp_addr_lo,
468 dmae->comp_val);
469 else
470 DP(msglvl, "DMAE: opcode 0x%08x\n"
471 "src [%08x], len [%d*4], dst [%08x]\n"
472 "comp_addr [%x:%08x], comp_val 0x%08x\n",
473 dmae->opcode, dmae->src_addr_lo >> 2,
474 dmae->len, dmae->dst_addr_lo >> 2,
475 dmae->comp_addr_hi, dmae->comp_addr_lo,
476 dmae->comp_val);
477 break;
478 default:
479 if (src_type == DMAE_CMD_SRC_PCI)
480 DP(msglvl, "DMAE: opcode 0x%08x\n"
481 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
482 "dst_addr [none]\n"
483 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
484 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
485 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
486 dmae->comp_val);
487 else
488 DP(msglvl, "DMAE: opcode 0x%08x\n"
489 DP_LEVEL "src_addr [%08x] len [%d * 4] "
490 "dst_addr [none]\n"
491 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
492 dmae->opcode, dmae->src_addr_lo >> 2,
493 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
494 dmae->comp_val);
495 break;
496 }
497
498}
499
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000500const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200501 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
502 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
503 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
504 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
505};
506
507/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000508void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200509{
510 u32 cmd_offset;
511 int i;
512
513 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
514 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
515 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
516
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700517 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
518 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200519 }
520 REG_WR(bp, dmae_reg_go_c[idx], 1);
521}
522
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000523u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
524{
525 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
526 DMAE_CMD_C_ENABLE);
527}
528
529u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
530{
531 return opcode & ~DMAE_CMD_SRC_RESET;
532}
533
534u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
535 bool with_comp, u8 comp_type)
536{
537 u32 opcode = 0;
538
539 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
540 (dst_type << DMAE_COMMAND_DST_SHIFT));
541
542 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
543
544 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
545 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
546 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
547 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
548
549#ifdef __BIG_ENDIAN
550 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
551#else
552 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
553#endif
554 if (with_comp)
555 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
556 return opcode;
557}
558
stephen hemminger8d962862010-10-21 07:50:56 +0000559static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
560 struct dmae_command *dmae,
561 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000562{
563 memset(dmae, 0, sizeof(struct dmae_command));
564
565 /* set the opcode */
566 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
567 true, DMAE_COMP_PCI);
568
569 /* fill in the completion parameters */
570 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
571 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
572 dmae->comp_val = DMAE_COMP_VAL;
573}
574
575/* issue a dmae command over the init-channel and wailt for completion */
stephen hemminger8d962862010-10-21 07:50:56 +0000576static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
577 struct dmae_command *dmae)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000578{
579 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
580 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
581 int rc = 0;
582
583 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
584 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
585 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
586
587 /* lock the dmae channel */
588 mutex_lock(&bp->dmae_mutex);
589
590 /* reset completion */
591 *wb_comp = 0;
592
593 /* post the command on the channel used for initializations */
594 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
595
596 /* wait for completion */
597 udelay(5);
598 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
599 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
600
601 if (!cnt) {
602 BNX2X_ERR("DMAE timeout!\n");
603 rc = DMAE_TIMEOUT;
604 goto unlock;
605 }
606 cnt--;
607 udelay(50);
608 }
609 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
610 BNX2X_ERR("DMAE PCI error!\n");
611 rc = DMAE_PCI_ERROR;
612 }
613
614 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
615 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
616 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
617
618unlock:
619 mutex_unlock(&bp->dmae_mutex);
620 return rc;
621}
622
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700623void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
624 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200625{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000626 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700627
628 if (!bp->dmae_ready) {
629 u32 *data = bnx2x_sp(bp, wb_data[0]);
630
631 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
632 " using indirect\n", dst_addr, len32);
633 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
634 return;
635 }
636
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000637 /* set opcode and fixed command fields */
638 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200639
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000640 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000641 dmae.src_addr_lo = U64_LO(dma_addr);
642 dmae.src_addr_hi = U64_HI(dma_addr);
643 dmae.dst_addr_lo = dst_addr >> 2;
644 dmae.dst_addr_hi = 0;
645 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200646
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000647 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200648
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000649 /* issue the command and wait for completion */
650 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200651}
652
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700653void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200654{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000655 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700656
657 if (!bp->dmae_ready) {
658 u32 *data = bnx2x_sp(bp, wb_data[0]);
659 int i;
660
661 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
662 " using indirect\n", src_addr, len32);
663 for (i = 0; i < len32; i++)
664 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
665 return;
666 }
667
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000668 /* set opcode and fixed command fields */
669 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200670
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000671 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000672 dmae.src_addr_lo = src_addr >> 2;
673 dmae.src_addr_hi = 0;
674 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
675 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
676 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200677
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000678 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200679
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000680 /* issue the command and wait for completion */
681 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200682}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200683
stephen hemminger8d962862010-10-21 07:50:56 +0000684static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
685 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000686{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000687 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000688 int offset = 0;
689
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000690 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000691 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000692 addr + offset, dmae_wr_max);
693 offset += dmae_wr_max * 4;
694 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000695 }
696
697 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
698}
699
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700700/* used only for slowpath so not inlined */
701static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
702{
703 u32 wb_write[2];
704
705 wb_write[0] = val_hi;
706 wb_write[1] = val_lo;
707 REG_WR_DMAE(bp, reg, wb_write, 2);
708}
709
710#ifdef USE_WB_RD
711static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
712{
713 u32 wb_data[2];
714
715 REG_RD_DMAE(bp, reg, wb_data, 2);
716
717 return HILO_U64(wb_data[0], wb_data[1]);
718}
719#endif
720
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200721static int bnx2x_mc_assert(struct bnx2x *bp)
722{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200723 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700724 int i, rc = 0;
725 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200726
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700727 /* XSTORM */
728 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
729 XSTORM_ASSERT_LIST_INDEX_OFFSET);
730 if (last_idx)
731 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200732
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700733 /* print the asserts */
734 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200735
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700736 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
737 XSTORM_ASSERT_LIST_OFFSET(i));
738 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
739 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
740 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
741 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
742 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
743 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200744
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700745 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
746 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
747 " 0x%08x 0x%08x 0x%08x\n",
748 i, row3, row2, row1, row0);
749 rc++;
750 } else {
751 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200752 }
753 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700754
755 /* TSTORM */
756 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
757 TSTORM_ASSERT_LIST_INDEX_OFFSET);
758 if (last_idx)
759 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
760
761 /* print the asserts */
762 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
763
764 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
765 TSTORM_ASSERT_LIST_OFFSET(i));
766 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
767 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
768 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
769 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
770 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
771 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
772
773 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
774 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
775 " 0x%08x 0x%08x 0x%08x\n",
776 i, row3, row2, row1, row0);
777 rc++;
778 } else {
779 break;
780 }
781 }
782
783 /* CSTORM */
784 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
785 CSTORM_ASSERT_LIST_INDEX_OFFSET);
786 if (last_idx)
787 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
788
789 /* print the asserts */
790 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
791
792 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
793 CSTORM_ASSERT_LIST_OFFSET(i));
794 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
795 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
796 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
797 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
798 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
799 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
800
801 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
802 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
803 " 0x%08x 0x%08x 0x%08x\n",
804 i, row3, row2, row1, row0);
805 rc++;
806 } else {
807 break;
808 }
809 }
810
811 /* USTORM */
812 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
813 USTORM_ASSERT_LIST_INDEX_OFFSET);
814 if (last_idx)
815 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
816
817 /* print the asserts */
818 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
819
820 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
821 USTORM_ASSERT_LIST_OFFSET(i));
822 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
823 USTORM_ASSERT_LIST_OFFSET(i) + 4);
824 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
825 USTORM_ASSERT_LIST_OFFSET(i) + 8);
826 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
827 USTORM_ASSERT_LIST_OFFSET(i) + 12);
828
829 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
830 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
831 " 0x%08x 0x%08x 0x%08x\n",
832 i, row3, row2, row1, row0);
833 rc++;
834 } else {
835 break;
836 }
837 }
838
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200839 return rc;
840}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800841
/* Dump the management-CPU (MCP) firmware trace buffer to the kernel log.
 * Called from the panic dump path so the MCP's recent activity is captured
 * alongside driver state.  No-op when the board has no MCP.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];		/* 8 trace words + one zero word used as NUL */
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* Pick the shmem base for this path (E2 has one per path) */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	/* 'addr' holds the trace mark word just below the shmem region */
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* Translate the mark into a scratchpad address, rounded up to a
	 * 4-byte boundary (scratchpad register base differs on E1x/E2).
	 */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	/* The trace is printed in two passes (mark..end, then start..mark),
	 * which suggests a cyclic buffer with 'mark' as the oldest entry.
	 * Eight words are read per chunk; data[8] stays zero so each chunk
	 * can be emitted as a NUL-terminated string.
	 */
	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
879
/* Dump driver and hardware state to the kernel log after a fatal error:
 * default/slow-path status block data, per-queue indices and status block
 * data read back from CSTORM internal memory, optionally the Rx/Tx rings
 * (BNX2X_STOP_ON_ERROR builds only), plus the MCP firmware trace and the
 * STORM assert lists.  Statistics collection is disabled first so the
 * dumped state is not disturbed.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* Freeze statistics so the dump reflects a stable snapshot */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* Read the slow-path status block data word-by-word straight from
	 * CSTORM internal memory (the FW-side copy, not the host copy).
	 */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	/* Per-queue indices and FW status block data */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* E2 and E1x use different status block layouts; pick the
		 * matching state-machine / index arrays up front.
		 */
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

#ifdef BCM_CNIC
		/* FCoE queue has no regular status block data to dump */
		if (IS_FCOE_FP(fp))
			continue;
#endif
		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here (from CSTORM internal memory) */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Index data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings: dump a window of entries around the current consumer
	 * positions (the -10/+503 and -10/+245 windows below).
	 */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1100
/* Enable interrupts through the legacy Host Coalescing (HC) block for this
 * port, configuring it for whichever delivery mode is in use (MSI-X, MSI or
 * INTx) and opening the leading/trailing edge attention masks on non-E1
 * chips.  Counterpart of bnx2x_hc_int_disable().
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: per-vector delivery, no single-ISR / INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* INTx on non-E1: write the config once with the MSI/MSIX
		 * enable bit set, then clear that bit for the final write
		 * below.  NOTE(review): looks like a required two-step HW
		 * sequence — confirm against the chip documentation.
		 */
		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	/* On E1 open the HC interrupt mask register (bnx2x_hc_int_disable()
	 * writes 0 here to mask everything).
	 */
	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1165
/* Enable interrupts through the IGU block by programming the PF
 * configuration register for the active delivery mode (MSI-X, MSI or
 * INTx), then opening the leading/trailing edge attention latches.
 * Counterpart of bnx2x_igu_int_disable().
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: per-vector delivery, no INTx line / single ISR */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		/* INTx: line-based delivery, single ISR */
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	/* order the config write before the edge-latch writes below */
	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1216
1217void bnx2x_int_enable(struct bnx2x *bp)
1218{
1219 if (bp->common.int_block == INT_BLOCK_HC)
1220 bnx2x_hc_int_enable(bp);
1221 else
1222 bnx2x_igu_int_enable(bp);
1223}
1224
/* Disable interrupt delivery through the HC block for this port.
 * Counterpart of bnx2x_hc_int_enable(); the caller verifies the write
 * took effect because the HW may otherwise keep raising interrupts.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable the
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		/* non-E1: the MSI/MSIX enable bit may be cleared here too */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	/* read back to confirm the disable actually reached the HW */
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1262
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001263static void bnx2x_igu_int_disable(struct bnx2x *bp)
1264{
1265 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1266
1267 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1268 IGU_PF_CONF_INT_LINE_EN |
1269 IGU_PF_CONF_ATTN_BIT_EN);
1270
1271 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1272
1273 /* flush all outstanding writes */
1274 mmiowb();
1275
1276 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1277 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1278 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1279}
1280
stephen hemminger8d962862010-10-21 07:50:56 +00001281static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001282{
1283 if (bp->common.int_block == INT_BLOCK_HC)
1284 bnx2x_hc_int_disable(bp);
1285 else
1286 bnx2x_igu_int_disable(bp);
1287}
1288
/* Disable interrupt handling and wait until every in-flight ISR and the
 * slow-path work item have finished.
 *
 * @bp:		driver state
 * @disable_hw:	when non-zero, also disable interrupt generation at the
 *		HW level (HC/IGU) before synchronizing
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling: ISRs check intr_sem and bail out
	 * while it is non-zero
	 */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the default/slow-path SB */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		/* skip the CNIC vector as well */
		offset++;
#endif
		for_each_eth_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1318
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001319/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001320
1321/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001322 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001323 */
1324
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001325/* Return true if succeeded to acquire the lock */
1326static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1327{
1328 u32 lock_status;
1329 u32 resource_bit = (1 << resource);
1330 int func = BP_FUNC(bp);
1331 u32 hw_lock_control_reg;
1332
1333 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1334
1335 /* Validating that the resource is within range */
1336 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1337 DP(NETIF_MSG_HW,
1338 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1339 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001340 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001341 }
1342
1343 if (func <= 5)
1344 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1345 else
1346 hw_lock_control_reg =
1347 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1348
1349 /* Try to acquire the lock */
1350 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1351 lock_status = REG_RD(bp, hw_lock_control_reg);
1352 if (lock_status & resource_bit)
1353 return true;
1354
1355 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1356 return false;
1357}
1358
Michael Chan993ac7b2009-10-10 13:46:56 +00001359#ifdef BCM_CNIC
1360static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1361#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001362
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001363void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001364 union eth_rx_cqe *rr_cqe)
1365{
1366 struct bnx2x *bp = fp->bp;
1367 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1368 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1369
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001370 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001371 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001372 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001373 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001374
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001375 switch (command | fp->state) {
1376 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1377 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1378 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001379 break;
1380
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001381 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1382 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001383 fp->state = BNX2X_FP_STATE_HALTED;
1384 break;
1385
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001386 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1387 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1388 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001389 break;
1390
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001391 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001392 BNX2X_ERR("unexpected MC reply (%d) "
1393 "fp[%d] state is %x\n",
1394 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001395 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001396 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001397
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001398 smp_mb__before_atomic_inc();
1399 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001400 /* push the change in fp->state and towards the memory */
1401 smp_wmb();
1402
1403 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001404}
1405
/* Shared (single-vector) interrupt handler: acknowledge the interrupt,
 * then dispatch each asserted status bit — fastpath bits schedule NAPI,
 * the CNIC bit invokes the registered CNIC handler, and bit 0 queues the
 * slow-path work item.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	/* ack returns the pending status bits; 0 means not ours */
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled (see bnx2x_int_disable_sync) */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Fastpath bits: one status bit per ethernet queue */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			/* clear the bit so leftovers can be detected below */
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops is RCU-protected; may have been unregistered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* Bit 0: slow-path events, handled in process context */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* any bits still set were not claimed by a known source */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1474
1475/* end of fast path */
1476
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001477
1478/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001479
1480/*
1481 * General service functions
1482 */
1483
/*
 * bnx2x_acquire_hw_lock - take one bit of the driver HW resource lock.
 * @bp:		driver handle
 * @resource:	HW_LOCK_RESOURCE_* index; selects a single bit in the
 *		per-function DRIVER_CONTROL lock register
 *
 * Polls the MISC driver-control register until the HW grants the bit,
 * retrying every 5 ms for up to 5 seconds (1000 iterations).
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EEXIST if
 * this function already holds the bit, -EAGAIN on timeout.
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Functions 0-5 use DRIVER_CONTROL_1..6, functions 6-7 use
	 * DRIVER_CONTROL_7..8 (each control register pair is 8 bytes apart).
	 */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock: write the bit to the "set"
		 * register (base + 4), then read back to see if the HW
		 * granted it to us.
		 */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1528
/*
 * bnx2x_release_hw_lock - release one bit of the driver HW resource lock.
 * @bp:		driver handle
 * @resource:	HW_LOCK_RESOURCE_* index previously acquired via
 *		bnx2x_acquire_hw_lock()
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource, -EFAULT if
 * the bit was not actually held.
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Same per-function register selection as in the acquire path */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* Writing the bit to the base register clears (releases) it */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1564
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001565
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001566int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1567{
1568 /* The GPIO should be swapped if swap register is set and active */
1569 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1570 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1571 int gpio_shift = gpio_num +
1572 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1573 u32 gpio_mask = (1 << gpio_shift);
1574 u32 gpio_reg;
1575 int value;
1576
1577 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1578 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1579 return -EINVAL;
1580 }
1581
1582 /* read GPIO value */
1583 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1584
1585 /* get the requested pin value */
1586 if ((gpio_reg & gpio_mask) == gpio_mask)
1587 value = 1;
1588 else
1589 value = 0;
1590
1591 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1592
1593 return value;
1594}
1595
/*
 * bnx2x_set_gpio - drive a GPIO pin low/high or float it (hi-Z input).
 * @bp:		driver handle
 * @gpio_num:	GPIO index (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 * @port:	port the pin belongs to
 *
 * Serialized against other functions via the GPIO HW lock.
 * Returns 0 on success, -EINVAL for a bad GPIO index.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode - leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1648
/*
 * bnx2x_set_gpio_int - set or clear the interrupt mode of a GPIO pin.
 * @bp:		driver handle
 * @gpio_num:	GPIO index (0..MISC_REGISTERS_GPIO_3)
 * @mode:	MISC_REGISTERS_GPIO_INT_OUTPUT_CLR or _OUTPUT_SET
 * @port:	port the pin belongs to
 *
 * Serialized against other functions via the GPIO HW lock.
 * Returns 0 on success, -EINVAL for a bad GPIO index.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode - leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1694
/*
 * bnx2x_set_spio - drive a shared (per-chip) SPIO pin low/high or float it.
 * @bp:		driver handle
 * @spio_num:	SPIO index (MISC_REGISTERS_SPIO_4..MISC_REGISTERS_SPIO_7)
 * @mode:	MISC_REGISTERS_SPIO_OUTPUT_LOW/OUTPUT_HIGH/INPUT_HI_Z
 *
 * Serialized against other functions via the SPIO HW lock.
 * Returns 0 on success, -EINVAL for an out-of-range SPIO index.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	/* only SPIO 4..7 are software-controllable */
	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode - leave the register unchanged */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1740
/*
 * bnx2x_get_link_cfg_idx - resolve which external PHY's link configuration
 * is currently relevant.
 * @bp:	driver handle
 *
 * When the link is up, the active PHY is deduced from the link status;
 * otherwise the configured PHY-selection priority decides. The result is
 * then corrected for PHY swapping before being converted to a link-config
 * array index via LINK_CONFIG_IDX().
 */
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}
1779
/*
 * bnx2x_calc_fc_adv - translate the negotiated IEEE pause capability into
 * ethtool ADVERTISED_* flow-control advertisement bits for the active
 * link configuration.
 * @bp:	driver handle
 */
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		/* no pause advertised at all */
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		/* symmetric + asymmetric pause */
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		/* asymmetric pause only */
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}
1805
/*
 * bnx2x_initial_phy_init - bring up the PHY/link for the first time.
 * @bp:		driver handle
 * @load_mode:	LOAD_* mode; LOAD_DIAG forces XGXS loopback at 10G
 *
 * Requires a working bootcode (MCP); returns -EINVAL otherwise.
 * Returns the status of bnx2x_phy_init() on success paths.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* saved so the LOAD_DIAG override below can be undone */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			/* diagnostics: loop the XGXS back at 10G */
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		/* on emulation/FPGA report link immediately if it came up */
		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested speed possibly overridden above */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1843
/*
 * bnx2x_link_set - reset and re-initialize the link with the current
 * parameters (e.g. after a settings change), then recompute the
 * flow-control advertisement. No-op with an error log if bootcode
 * is missing.
 * @bp:	driver handle
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
1856
/*
 * bnx2x__link_reset - bring the link down under the PHY lock.
 * No-op with an error log if bootcode is missing.
 * @bp:	driver handle
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
1866
/*
 * bnx2x_link_test - run the link self-test (used by ethtool diagnostics).
 * @bp:		driver handle
 * @is_serdes:	non-zero to test the SERDES link
 *
 * Returns the bnx2x_test_link() result, or 0 (with an error log) when
 * bootcode is missing.
 */
u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001881
/*
 * bnx2x_init_port_minmax - initialize the per-port rate-shaping and
 * fairness timing parameters, derived from the current line speed.
 * @bp:	driver handle
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* line rate in bytes per usec (speed is in Mbps) */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1916
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
     In the latter case fairness algorithm should be deactivated.
     If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1960
/*
 * bnx2x_init_vn_minmax - program min/max rate-shaping and fairness
 * parameters of one virtual NIC (vn) into the XSTORM internal memory.
 * @bp:	driver handle
 * @vn:	virtual NIC index (VN_0..E1HVN_MAX-1)
 *
 * Rates are read from the MF configuration in units of 100 Mbps.
 * Must run after bnx2x_calc_vn_weight_sum() so bp->vn_weight_sum is valid.
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - copied as raw u32 words */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002024
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002025static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2026{
2027 if (CHIP_REV_IS_SLOW(bp))
2028 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002029 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002030 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002031
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002032 return CMNG_FNS_NONE;
2033}
2034
2035static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2036{
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002037 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002038
2039 if (BP_NOMCP(bp))
2040 return; /* what should be the default bvalue in this case */
2041
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002042 /* For 2 port configuration the absolute function number formula
2043 * is:
2044 * abs_func = 2 * vn + BP_PORT + BP_PATH
2045 *
2046 * and there are 4 functions per port
2047 *
2048 * For 4 port configuration it is
2049 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2050 *
2051 * and there are 2 functions per port
2052 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002053 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002054 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2055
2056 if (func >= E1H_FUNC_MAX)
2057 break;
2058
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002059 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002060 MF_CFG_RD(bp, func_mf_config[func].config);
2061 }
2062}
2063
2064static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2065{
2066
2067 if (cmng_type == CMNG_FNS_MINMAX) {
2068 int vn;
2069
2070 /* clear cmng_enables */
2071 bp->cmng.flags.cmng_enables = 0;
2072
2073 /* read mf conf from shmem */
2074 if (read_cfg)
2075 bnx2x_read_mf_cfg(bp);
2076
2077 /* Init rate shaping and fairness contexts */
2078 bnx2x_init_port_minmax(bp);
2079
2080 /* vn_weight_sum and enable fairness if not 0 */
2081 bnx2x_calc_vn_weight_sum(bp);
2082
2083 /* calculate and set min-max rate for each vn */
2084 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2085 bnx2x_init_vn_minmax(bp, vn);
2086
2087 /* always enable rate shaping and fairness */
2088 bp->cmng.flags.cmng_enables |=
2089 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2090 if (!bp->vn_weight_sum)
2091 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2092 " fairness will be disabled\n");
2093 return;
2094 }
2095
2096 /* rate shaping and fairness are disabled */
2097 DP(NETIF_MSG_IFUP,
2098 "rate shaping and fairness are disabled\n");
2099}
2100
2101static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2102{
2103 int port = BP_PORT(bp);
2104 int func;
2105 int vn;
2106
2107 /* Set the attention towards other drivers on the same port */
2108 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2109 if (vn == BP_E1HVN(bp))
2110 continue;
2111
2112 func = ((vn << 1) | port);
2113 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2114 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2115 }
2116}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002117
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* remembered so we only report when the status actually changed */
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control: tell the USTORM firmware whether
		 * TX pause is active (not supported on E1)
		 */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	/* in multi-function mode wake up the other vns on this port */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	/* reprogram congestion management for the new line speed */
	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}
2173
/*
 * bnx2x__link_status_update - refresh the cached link status and report it.
 * Does nothing unless the device is in the OPEN state with the MF function
 * enabled.
 * @bp:	driver handle
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2193
/*
 * bnx2x_pmf_update - mark this function as the Port Management Function
 * (PMF) and enable the NIG attention routing for it.
 * @bp:	driver handle
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention: base mask plus this vn's bit */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		/* HC-based interrupt block (E1/E1H) */
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		/* IGU-based interrupt block */
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2214
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002215/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002216
2217/* slow path */
2218
2219/*
2220 * General service functions
2221 */
2222
/* send the MCP a request, block until there is a reply.
 *
 * Writes @param and (@command | seq) into the function's shmem mailbox,
 * then polls the FW mailbox header until the sequence number echoes back
 * (up to ~5 seconds).  Returns the FW response code (FW_MSG_CODE_MASK
 * bits) on success, or 0 if the FW never answered.  Serialized by
 * bp->fw_mb_mutex; may sleep.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2263
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002264static u8 stat_counter_valid(struct bnx2x *bp, struct bnx2x_fastpath *fp)
2265{
2266#ifdef BCM_CNIC
2267 if (IS_FCOE_FP(fp) && IS_MF(bp))
2268 return false;
2269#endif
2270 return true;
2271}
2272
/* must be called under rtnl_lock.
 *
 * Translate the BNX2X_ACCEPT_* / BNX2X_PROMISCUOUS_MODE bits in @filters
 * into per-client drop/accept masks in bp->mac_filters, setting or
 * clearing this client's bit (1 << cl_id) in each mask.  Only updates
 * the in-memory shadow; the caller is responsible for pushing it to HW.
 */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
		unmatched_unicast = 1;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
		if (IS_MF_SI(bp)) {
			/*
			 * SI mode defines to accept in promiscuous mode
			 * only unmatched packets
			 */
			unmatched_unicast = 1;
			accp_all_ucast = 0;
		}
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
		if (IS_MF_SI(bp))
			/* since mcast addresses won't arrive with ovlan,
			 * fw needs to accept all of them in
			 * switch-independent mode */
			accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* fold each flag into this client's bit of the shared masks */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2356
/* Program the per-function FW configuration described by @p:
 * TPA/RSS common config, function enable, optional statistics
 * collection addresses and the slow-path queue (SPQ) base/producer.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: same stats buffer (fw_stat_map) is handed to all
	 * four storm processors */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2414
2415static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2416 struct bnx2x_fastpath *fp)
2417{
2418 u16 flags = 0;
2419
2420 /* calculate queue flags */
2421 flags |= QUEUE_FLG_CACHE_ALIGN;
2422 flags |= QUEUE_FLG_HC;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002423 flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002424
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002425 flags |= QUEUE_FLG_VLAN;
2426 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002427
2428 if (!fp->disable_tpa)
2429 flags |= QUEUE_FLG_TPA;
2430
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002431 flags = stat_counter_valid(bp, fp) ?
2432 (flags | QUEUE_FLG_STATS) : (flags & ~QUEUE_FLG_STATS);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002433
2434 return flags;
2435}
2436
/* Fill @pause thresholds and the @rxq_init descriptor for one Rx client
 * queue, including TPA aggregation sizing (SGE page math) when TPA is
 * enabled on the fastpath.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* cap aggregation at 8 frags / 0xffff bytes */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* SGE pages needed for an MTU-sized frame, rounded up to
		 * a whole SGE element */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
			0xffff);
	}

	/* pause - not for e1
	 * NOTE(review): this zeroes sge_th_hi/lo set above on non-E1
	 * chips with TPA enabled — confirm that is intentional */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	/* FCoE uses a dedicated status-block CQ index */
	if (IS_FCOE_FP(fp))
		rxq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_RX_CQ_CONS;
	else
		rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2501
2502static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2503 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2504{
2505 u16 flags = bnx2x_get_cl_flags(bp, fp);
2506
2507 txq_init->flags = flags;
2508 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2509 txq_init->dscr_map = fp->tx_desc_mapping;
2510 txq_init->stat_id = fp->cl_id;
2511 txq_init->cid = HW_CID(bp, fp->cid);
2512 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2513 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2514 txq_init->fw_sb_id = fp->fw_sb_id;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00002515
2516 if (IS_FCOE_FP(fp)) {
2517 txq_init->sb_cq_index = HC_SP_INDEX_ETH_FCOE_TX_CQ_CONS;
2518 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_FCOE;
2519 }
2520
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002521 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2522}
2523
/* One-time per-PF initialization: outer-VLAN, IGU statistics reset (E2),
 * function/RSS/SPQ FW setup via bnx2x_func_init(), congestion management
 * defaults, initial Rx mode (none until link up) and the event queue.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* E1x: TPA only if enabled by driver flags; E2: always */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	Congestion management values depend on the link rate.
	There is no active link so initial link rate is set to 10 Gbps.
	When the link comes up the congestion management values are
	re-calculated according to the actual link rate.
	*/
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2606
2607
/* Quiesce an E1H function that was disabled by MF configuration:
 * stop the Tx queues, disable our LLH port filter in the NIG and
 * drop the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2618
/* Re-enable an E1H function after MF configuration re-enabled it:
 * restore the LLH port filter in the NIG and wake the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2633
/* called due to MCP event (on pmf):
 * reread new bandwidth configuration
 * configure FW
 * notify other functions about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	/* push the (possibly recalculated) cmng values to the storms */
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2647
/* Apply a new MF bandwidth configuration and acknowledge it to the MCP. */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2653
/* Handle a DCC (Dynamic Configuration Change) event from the MCP:
 * enable/disable this PF and/or re-apply bandwidth allocation, then
 * report OK or FAILURE back to the MCP.  Any event bit left set in
 * @dcc_event after processing counts as a failure.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2689
Michael Chan28912902009-10-10 13:46:53 +00002690/* must be called under the spq lock */
2691static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2692{
2693 struct eth_spe *next_spe = bp->spq_prod_bd;
2694
2695 if (bp->spq_prod_bd == bp->spq_last_bd) {
2696 bp->spq_prod_bd = bp->spq;
2697 bp->spq_prod_idx = 0;
2698 DP(NETIF_MSG_TIMER, "end of spq\n");
2699 } else {
2700 bp->spq_prod_bd++;
2701 bp->spq_prod_idx++;
2702 }
2703 return next_spe;
2704}
2705
/* must be called under the spq lock.
 * Publish the SPQ producer index to the XSTORM so the FW picks up
 * newly queued slow-path elements.
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* order the producer write before any following MMIO (see mmiowb docs) */
	mmiowb();
}
2718
/* the slow path queue is odd since completions arrive on the fastpath ring.
 *
 * Queue one slow-path element (ramrod) for @command on connection @cid
 * with the given data address, and ring the SPQ producer.  @common
 * selects a NONE-connection (common ramrod) vs ETH-connection type.
 * Returns 0 on success, -EBUSY (after panicking) if the SPQ is full,
 * -EIO when already panicked under BNX2X_STOP_ON_ERROR.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 *	FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 *	TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2787
2788/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002789static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002790{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002791 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002792 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002793
2794 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002795 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002796 val = (1UL << 31);
2797 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2798 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2799 if (val & (1L << 31))
2800 break;
2801
2802 msleep(5);
2803 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002804 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002805 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002806 rc = -EBUSY;
2807 }
2808
2809 return rc;
2810}
2811
/* release split MCP access lock register (clears bit 31) */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2817
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002818#define BNX2X_DEF_SB_ATT_IDX 0x0001
2819#define BNX2X_DEF_SB_IDX 0x0002
2820
/* Sample the default status block and cache the new attention and
 * slow-path running indices.  Returns a bitmask of which indices
 * changed (BNX2X_DEF_SB_ATT_IDX and/or BNX2X_DEF_SB_IDX).
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2841
2842/*
2843 * slow path service functions
2844 */
2845
/* Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired sources (NIG/link,
 * GPIOs, general attentions), acknowledge them to the HC/IGU, and
 * finally restore the NIG interrupt mask.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* a bit can't be newly asserted if it is already recorded */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the asserted bits in the AEU under the HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1;
		 * clear each one that fired */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* acknowledge the attention bits to the interrupt block */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2946
/* Record a fan failure in shmem (so other drivers/FW see the PHY type
 * as FAILURE) and log a shutdown warning to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002966
/* Service deasserted attention group 0: SPIO5 (fan failure), GPIO3
 * (module detect) and fatal HW block attentions (panic).  Each handled
 * source is masked out of the per-function AEU enable register.
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 so it does not re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
3007
/* Handle deasserted attentions routed through AEU output line 1:
 * doorbell queue (DORQ) HW interrupt and the fatal HW-block
 * attentions of set 1.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal attentions that fired, then halt */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3038
/* Handle deasserted attentions routed through AEU output line 2:
 * CFC and PXP HW interrupts and the fatal HW-block attentions of
 * set 2.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		/* E2 chips have a second PXP interrupt status register */
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal attentions that fired, then halt */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3082
/* Handle deasserted general attentions routed through AEU output
 * line 3: PMF/link events signalled by the MCP, MC/MCP asserts and
 * latched GRC attentions.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* clear the general attention for this function */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* refresh the MF configuration - the MCP may have
			 * changed it (DCC / bandwidth events) */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			bnx2x__link_status_update(bp);
			/* take over PMF role if the MCP assigned it to us */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			/* acknowledge the management-controller asserts */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC timeout attention register exists only on
			 * chips newer than E1 */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3142
/* Inter-function "recovery" protocol state, kept in a generic chip
 * register that survives a function-level reset: the low 16 bits hold
 * a load counter (how many driver instances are up) and the bit above
 * them flags a reset/recovery in progress.
 */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))

/*
 * Clear the "reset in progress" flag in BNX2X_MISC_GEN_REG.
 * should be run under rtnl lock
 */
static inline void bnx2x_set_reset_done(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);

	val &= ~(1 << RESET_DONE_FLAG_SHIFT);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val);
	barrier();
	mmiowb();	/* make sure the write is ordered before we return */
}
3161
3162/*
3163 * should be run under rtnl lock
3164 */
3165static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3166{
3167 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3168 val |= (1 << 16);
3169 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3170 barrier();
3171 mmiowb();
3172}
3173
3174/*
3175 * should be run under rtnl lock
3176 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003177bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003178{
3179 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3180 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3181 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3182}
3183
3184/*
3185 * should be run under rtnl lock
3186 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003187inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003188{
3189 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3190
3191 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3192
3193 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3194 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3195 barrier();
3196 mmiowb();
3197}
3198
3199/*
3200 * should be run under rtnl lock
3201 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003202u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003203{
3204 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3205
3206 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3207
3208 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3209 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3210 barrier();
3211 mmiowb();
3212
3213 return val1;
3214}
3215
/*
 * Read the current load counter (low 16 bits of the generic register).
 * should be run under rtnl lock
 */
static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
{
	return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
}
3223
/* Zero the load counter while preserving the flag bits above it. */
static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
	REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
}
3229
/* Continuation-print one parity block name, preceded by a ", "
 * separator for every entry after the first (idx != 0).
 */
static inline void _print_next_block(int idx, const char *blk)
{
	pr_cont("%s%s", idx ? ", " : "", blk);
}
3236
3237static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3238{
3239 int i = 0;
3240 u32 cur_bit = 0;
3241 for (i = 0; sig; i++) {
3242 cur_bit = ((u32)0x1 << i);
3243 if (sig & cur_bit) {
3244 switch (cur_bit) {
3245 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3246 _print_next_block(par_num++, "BRB");
3247 break;
3248 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3249 _print_next_block(par_num++, "PARSER");
3250 break;
3251 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3252 _print_next_block(par_num++, "TSDM");
3253 break;
3254 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3255 _print_next_block(par_num++, "SEARCHER");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3258 _print_next_block(par_num++, "TSEMI");
3259 break;
3260 }
3261
3262 /* Clear the bit */
3263 sig &= ~cur_bit;
3264 }
3265 }
3266
3267 return par_num;
3268}
3269
3270static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3271{
3272 int i = 0;
3273 u32 cur_bit = 0;
3274 for (i = 0; sig; i++) {
3275 cur_bit = ((u32)0x1 << i);
3276 if (sig & cur_bit) {
3277 switch (cur_bit) {
3278 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3279 _print_next_block(par_num++, "PBCLIENT");
3280 break;
3281 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3282 _print_next_block(par_num++, "QM");
3283 break;
3284 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3285 _print_next_block(par_num++, "XSDM");
3286 break;
3287 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3288 _print_next_block(par_num++, "XSEMI");
3289 break;
3290 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3291 _print_next_block(par_num++, "DOORBELLQ");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3294 _print_next_block(par_num++, "VAUX PCI CORE");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3297 _print_next_block(par_num++, "DEBUG");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3300 _print_next_block(par_num++, "USDM");
3301 break;
3302 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3303 _print_next_block(par_num++, "USEMI");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3306 _print_next_block(par_num++, "UPB");
3307 break;
3308 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3309 _print_next_block(par_num++, "CSDM");
3310 break;
3311 }
3312
3313 /* Clear the bit */
3314 sig &= ~cur_bit;
3315 }
3316 }
3317
3318 return par_num;
3319}
3320
3321static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3322{
3323 int i = 0;
3324 u32 cur_bit = 0;
3325 for (i = 0; sig; i++) {
3326 cur_bit = ((u32)0x1 << i);
3327 if (sig & cur_bit) {
3328 switch (cur_bit) {
3329 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3330 _print_next_block(par_num++, "CSEMI");
3331 break;
3332 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3333 _print_next_block(par_num++, "PXP");
3334 break;
3335 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3336 _print_next_block(par_num++,
3337 "PXPPCICLOCKCLIENT");
3338 break;
3339 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3340 _print_next_block(par_num++, "CFC");
3341 break;
3342 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3343 _print_next_block(par_num++, "CDU");
3344 break;
3345 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3346 _print_next_block(par_num++, "IGU");
3347 break;
3348 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3349 _print_next_block(par_num++, "MISC");
3350 break;
3351 }
3352
3353 /* Clear the bit */
3354 sig &= ~cur_bit;
3355 }
3356 }
3357
3358 return par_num;
3359}
3360
3361static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3362{
3363 int i = 0;
3364 u32 cur_bit = 0;
3365 for (i = 0; sig; i++) {
3366 cur_bit = ((u32)0x1 << i);
3367 if (sig & cur_bit) {
3368 switch (cur_bit) {
3369 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3370 _print_next_block(par_num++, "MCP ROM");
3371 break;
3372 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3373 _print_next_block(par_num++, "MCP UMP RX");
3374 break;
3375 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3376 _print_next_block(par_num++, "MCP UMP TX");
3377 break;
3378 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3379 _print_next_block(par_num++, "MCP SCPAD");
3380 break;
3381 }
3382
3383 /* Clear the bit */
3384 sig &= ~cur_bit;
3385 }
3386 }
3387
3388 return par_num;
3389}
3390
/* Check the four attention signal words for parity errors and, if any
 * are present, print the list of affected HW blocks.  Returns true
 * when a parity error was detected (caller then starts recovery).
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			sig0 & HW_PRTY_ASSERT_SET_0,
			sig1 & HW_PRTY_ASSERT_SET_1,
			sig2 & HW_PRTY_ASSERT_SET_2,
			sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		/* par_num threads the ", " separator state through the
		 * four per-set printers (see _print_next_block()) */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
3419
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003420bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003421{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003422 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003423 int port = BP_PORT(bp);
3424
3425 attn.sig[0] = REG_RD(bp,
3426 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3427 port*4);
3428 attn.sig[1] = REG_RD(bp,
3429 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3430 port*4);
3431 attn.sig[2] = REG_RD(bp,
3432 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3433 port*4);
3434 attn.sig[3] = REG_RD(bp,
3435 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3436 port*4);
3437
3438 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3439 attn.sig[3]);
3440}
3441
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003442
/* Handle deasserted attentions routed through AEU output line 4
 * (E2-only sources): PGLUE_B and ATC HW interrupts, plus their
 * parity attentions.  Each status bit is decoded into its own
 * error message.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* reading the STS_CLR register also clears the interrupt */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3509
/* Main deasserted-attention handler.  Under the ALR (attention lock)
 * it first checks for parity errors (which trigger the recovery flow
 * and suppress normal handling), then reads the after-invert attention
 * words and dispatches each active dynamic attention group to the
 * per-line handlers.  Finally it unmasks the handled bits in the chip
 * and updates the driver's cached attention state.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* start the recovery flow: flag it globally, kick the
		 * reset task and stop taking interrupts */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	/* the fifth attention word exists only on E2 chips */
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			/* hand each attention word, masked by this
			 * group's routing, to its line handler */
			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits via HC or IGU, depending on
	 * which interrupt block this chip uses */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* a deasserted bit that was not in our cached state means the
	 * IGU and the driver disagree */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable (unmask) the handled attention lines in the AEU,
	 * under the per-port attention-mask HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3607
/* Top-level attention dispatcher: compare the attention bits/acks from
 * the default status block against the driver's cached attention state
 * to find newly asserted and newly deasserted bits, and route each set
 * to its handler.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* a bit may differ from the ack only while it also differs from
	 * our cached state; anything else is an inconsistency */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3635
/* Publish a new event-queue producer index to the chip's storm memory
 * for this function.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3642
#ifdef BCM_CNIC
/* Filter CFC-delete events that belong to CNIC-owned CIDs.
 * Returns 1 when @cid is below the CNIC CID range (caller handles it
 * as a regular Ethernet CID), 0 when the event was completed here on
 * behalf of CNIC.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below starting_cid (or when CNIC is not set up) are not
	 * CNIC's - let the caller process them */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3662
/* Drain the slow-path event queue (EQ): walk the ring from the cached
 * software consumer up to the hardware consumer taken from the status
 * block, handle each completion (CFC delete, function start/stop, set
 * MAC, statistics), then return the consumed credits to spq_left and
 * publish the new producer to the chip.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page element we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ:  hw_cons %u  sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
			if (cid == BNX2X_FCOE_ETH_CID)
				bnx2x_fcoe(bp, state) = BNX2X_FP_STATE_CLOSED;
			else
#endif
				/* mark the fastpath queue as closed */
				bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		/* the remaining opcodes are only valid in specific driver
		 * states, so dispatch on opcode | state */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return the consumed slow-path-queue credits */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3773
/* Slow-path work item: figure out from the default status block which
 * slow-path sources fired (HW attentions and/or SP events), handle
 * them, and re-arm the default status block towards the IGU.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status bitmask of the default-SB indices that advanced */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {
#ifdef BCM_CNIC
		struct bnx2x_fastpath *fp = bnx2x_fcoe_fp(bp);

		/* the FCoE L2 queue is serviced from the slow path */
		if ((!NO_FCOE(bp)) &&
			(bnx2x_has_rx_work(fp) || bnx2x_has_tx_work(fp)))
			napi_schedule(&bnx2x_fcoe(bp, napi));
#endif
		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack the attention index and re-enable the interrupt line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3822
/* MSI-X slowpath interrupt handler: disables the default SB interrupt,
 * gives the CNIC driver (if built in) a chance to inspect the event, and
 * defers the actual processing to bnx2x_sp_task() on the workqueue.
 *
 * @irq:          interrupt number (unused)
 * @dev_instance: the net_device this vector was registered with
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* mask the default SB line until sp_task has serviced it */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops is RCU-protected; call the CNIC handler if
		 * a CNIC driver is currently registered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3857
3858/* end of slow path */
3859
/* Periodic driver timer. In poll mode it services queue 0 directly;
 * otherwise it maintains the driver<->MCP heartbeat pulse and kicks the
 * statistics state machine, then re-arms itself.
 *
 * @data: the struct bnx2x pointer, cast through unsigned long
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts currently disabled - just re-arm and try later */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		/* poll mode: drive Tx/Rx of queue 0 from the timer */
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish the driver heartbeat sequence */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3908
3909/* end of Statistics */
3910
3911/* nic init */
3912
3913/*
3914 * nic init service functions
3915 */
3916
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003917static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003918{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003919 u32 i;
3920 if (!(len%4) && !(addr%4))
3921 for (i = 0; i < len; i += 4)
3922 REG_WR(bp, addr + i, fill);
3923 else
3924 for (i = 0; i < len; i++)
3925 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003926
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003927}
3928
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003929/* helper: writes FP SP data to FW - data_size in dwords */
3930static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3931 int fw_sb_id,
3932 u32 *sb_data_p,
3933 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003934{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003935 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003936 for (index = 0; index < data_size; index++)
3937 REG_WR(bp, BAR_CSTRORM_INTMEM +
3938 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3939 sizeof(u32)*index,
3940 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003941}
3942
/* Disable a fastpath status block in FW: writes a chip-specific SB data
 * image whose PF/VF entries are marked HC_FUNCTION_DISABLED, then zeroes
 * the SB itself and its sync block in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* wipe the status block and its sync lines */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3976
3977/* helper: writes SP SB data to FW */
3978static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3979 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003980{
3981 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003982 int i;
3983 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3984 REG_WR(bp, BAR_CSTRORM_INTMEM +
3985 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3986 i*sizeof(u32),
3987 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003988}
3989
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003990static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3991{
3992 int func = BP_FUNC(bp);
3993 struct hc_sp_status_block_data sp_sb_data;
3994 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3995
3996 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3997 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3998 sp_sb_data.p_func.vf_valid = false;
3999
4000 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
4001
4002 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4003 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
4004 CSTORM_SP_STATUS_BLOCK_SIZE);
4005 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
4006 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
4007 CSTORM_SP_SYNC_BLOCK_SIZE);
4008
4009}
4010
4011
4012static inline
4013void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
4014 int igu_sb_id, int igu_seg_id)
4015{
4016 hc_sm->igu_sb_id = igu_sb_id;
4017 hc_sm->igu_seg_id = igu_seg_id;
4018 hc_sm->timer_value = 0xFF;
4019 hc_sm->time_to_expire = 0xFFFFFFFF;
4020}
4021
/* Initialize one non-default (fastpath/CNIC) status block in FW.
 * Builds the chip-specific SB data image (E2 vs E1x), binds the Rx and Tx
 * state machines to the given IGU SB, and writes the image to CSTORM.
 *
 * @mapping:   DMA address of the host status block
 * @vfid:      VF id recorded in the SB data (used on E2 only)
 * @vf_valid:  whether @vfid is meaningful (E2 only)
 * @fw_sb_id:  FW status block id
 * @igu_sb_id: IGU status block id
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	/* NOTE(review): hc_index_p is assigned below but not used in this
	 * function - kept for symmetry with hc_sm_p; confirm it is not
	 * needed here */
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	/* backward-compatible (HC) interrupt mode uses the HC segment */
	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	/* start from a clean, disabled SB */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	/* bind both state machines to the same IGU SB */
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
4080
4081static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4082 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004083{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004084 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004085 u8 ticks = usec / BNX2X_BTR;
4086
4087 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4088
4089 disable = disable ? 1 : (usec ? 0 : 1);
4090 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4091}
4092
4093static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4094 u16 tx_usec, u16 rx_usec)
4095{
4096 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4097 false, rx_usec);
4098 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4099 false, tx_usec);
4100}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004101
/* Initialize the default (slowpath) status block: the attention part and
 * the SP part. Captures the per-group attention signal masks from the AEU
 * registers, programs the attention message address into HC or IGU, and
 * writes the SP SB data image to CSTORM.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* choose IGU index/segment by interrupt mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* snapshot the AEU enable masks for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the HW where to post attention messages */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	/* SP part of the default SB */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* enable the default SB interrupt line */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4183
/* Propagate the current coalescing settings to every ethernet queue SB.
 *
 * NOTE(review): bp->rx_ticks is passed into bnx2x_update_coalesce_sb()'s
 * 'tx_usec' parameter and bp->tx_ticks into 'rx_usec' - this looks
 * swapped relative to that function's signature; confirm intent.
 */
void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}
4192
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004193static void bnx2x_init_sp_ring(struct bnx2x *bp)
4194{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004195 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004196 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004197
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004198 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004199 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4200 bp->spq_prod_bd = bp->spq;
4201 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004202}
4203
/* Chain the event queue pages into a ring: the last element of each page
 * holds a next-page pointer to the following page, wrapping back to page
 * 0 after NUM_EQ_PAGES, then reset the consumer/producer bookkeeping.
 */
static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		/* last descriptor of page (i-1) is the link element */
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		/* i % NUM_EQ_PAGES wraps the final link back to page 0 */
		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
				   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
				   BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
4222
/* Program the TSTORM RSS indirection table for this function: table
 * entries map to ethernet client ids round-robin. Skipped entirely when
 * RSS is disabled.
 */
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			/* spread entries over the ethernet queues only */
			bp->fp->cl_id + (i % (bp->num_queues -
					      NONE_ETH_CONTEXT_USE)));
}
4239
/* Translate bp->rx_mode into storm MAC filtering flags and the NIG LLH
 * mask, configure the FCoE L2 client filters (when CNIC is built in), and
 * write the resulting filter set for this function to storm memory.
 */
void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	int port = BP_PORT(bp);
	u16 cl_id;
	u32 def_q_filters = 0;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		def_q_filters = BNX2X_ACCEPT_NONE;
#ifdef BCM_CNIC
		if (!NO_FCOE(bp)) {
			cl_id = bnx2x_fcoe(bp, cl_id);
			bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		}
#endif
		break;

	case BNX2X_RX_MODE_NORMAL:
		/* unicast + broadcast + exact-match multicast */
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_MULTICAST;
#ifdef BCM_CNIC
		cl_id = bnx2x_fcoe(bp, cl_id);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_MULTICAST);
#endif
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		/* as NORMAL, but accept every multicast frame */
		def_q_filters |= BNX2X_ACCEPT_UNICAST | BNX2X_ACCEPT_BROADCAST |
				BNX2X_ACCEPT_ALL_MULTICAST;
#ifdef BCM_CNIC
		cl_id = bnx2x_fcoe(bp, cl_id);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_MULTICAST);
#endif
		break;

	case BNX2X_RX_MODE_PROMISC:
		def_q_filters |= BNX2X_PROMISCUOUS_MODE;
#ifdef BCM_CNIC
		cl_id = bnx2x_fcoe(bp, cl_id);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_MULTICAST);
#endif
		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	/* apply the default-queue filters to the leading client */
	cl_id = BP_L_ID(bp);
	bnx2x_rxq_set_mac_filters(bp, cl_id, def_q_filters);

	REG_WR(bp,
	       (port ? NIG_REG_LLH1_BRB1_DRV_MASK :
		       NIG_REG_LLH0_BRB1_DRV_MASK), llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
		"drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
		"accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n"
		"unmatched_ucast 0x%x\n", mode,
		bp->mac_filters.ucast_drop_all,
		bp->mac_filters.mcast_drop_all,
		bp->mac_filters.bcast_drop_all,
		bp->mac_filters.ucast_accept_all,
		bp->mac_filters.mcast_accept_all,
		bp->mac_filters.bcast_accept_all,
		bp->mac_filters.unmatched_unicast
	);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
4323
/* Internal-memory init performed once per chip (COMMON load scope):
 * function-mode bytes for all four storms, the switch-independent
 * classification tweak, USTORM aggregation-data zeroing and, on E2,
 * the IGU mode byte.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		/* select IGU backward-compatible vs normal mode */
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4362
/* Internal-memory init performed once per port (PORT load scope).
 * Currently empty - nothing port-scoped to program here.
 */
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}
4367
/* Dispatch internal-memory init according to the load scope granted by
 * the MCP. The cases fall through deliberately: a COMMON load also runs
 * the PORT init; per-function memory is handled later in bnx2x_pf_init().
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4390
/* Initialize fastpath queue 'fp_idx': assign its client/FW-SB/IGU-SB ids,
 * compute the USTORM Rx producers shortcut offset, set the SB index
 * locations and program its status block via bnx2x_init_sb().
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4422
/* Top-level NIC init: bring up all status blocks (ethernet, FCoE and
 * CNIC when built in), Rx/Tx/SP/EQ rings, internal storm memory and
 * statistics, then enable interrupts and service any pending SPIO5
 * attention.
 *
 * @load_code: load scope granted by the MCP (COMMON/PORT/FUNCTION)
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_eth_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC
	if (!NO_FCOE(bp))
		bnx2x_init_fcoe_fp(bp);

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4467
4468/* end of nic init */
4469
4470/*
4471 * gzip service functions
4472 */
4473
4474static int bnx2x_gunzip_init(struct bnx2x *bp)
4475{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004476 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4477 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004478 if (bp->gunzip_buf == NULL)
4479 goto gunzip_nomem1;
4480
4481 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4482 if (bp->strm == NULL)
4483 goto gunzip_nomem2;
4484
4485 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4486 GFP_KERNEL);
4487 if (bp->strm->workspace == NULL)
4488 goto gunzip_nomem3;
4489
4490 return 0;
4491
4492gunzip_nomem3:
4493 kfree(bp->strm);
4494 bp->strm = NULL;
4495
4496gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004497 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4498 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004499 bp->gunzip_buf = NULL;
4500
4501gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004502 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4503 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004504 return -ENOMEM;
4505}
4506
4507static void bnx2x_gunzip_end(struct bnx2x *bp)
4508{
4509 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004510 kfree(bp->strm);
4511 bp->strm = NULL;
4512
4513 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004514 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4515 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004516 bp->gunzip_buf = NULL;
4517 }
4518}
4519
/* Decompress the gzip image @zbuf (@len bytes) into bp->gunzip_buf.
 *
 * Validates the gzip header, skips the optional embedded file name and
 * inflates the raw deflate payload with the pre-allocated zlib stream
 * (see bnx2x_gunzip_init()).  On success the output length in 32-bit
 * words is left in bp->gunzip_outlen.
 *
 * Returns 0 when the whole stream was consumed, -EINVAL on a bad gzip
 * header, or the zlib return code on a decompression failure.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header: magic 0x1f 0x8b, compression method deflate */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* the fixed part of a gzip header is 10 bytes */
	n = 10;

#define FNAME 0x8

	/* FNAME flag set: skip the NUL-terminated original file name */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	/* hand the raw deflate payload to zlib; negative window bits
	 * tell zlib there is no zlib wrapper around the data */
	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	/* firmware is consumed as 32-bit words, so the decompressed
	 * length must be 4-byte aligned */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
			   " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
4565
4566/* nic load/unload */
4567
4568/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004569 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004570 */
4571
/* send a NIG loopback debug packet
 *
 * Writes two 16-byte debug words into the NIG loopback FIFO via DMAE:
 * the first carries dummy (0x55...) Ethernet addresses plus the SOP
 * marker, the second a non-IP protocol word plus the EOP marker,
 * forming one minimal packet.  Used by bnx2x_int_mem_test() to push
 * traffic through internal memories that are not directly readable.
 */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4589
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 *
 * The test isolates the parser from its neighbor blocks, injects NIG
 * loopback packets (bnx2x_lb_pckt()) and verifies the NIG/PRS packet
 * counters advance as expected, resetting BRB/PRS between the two
 * phases.  Returns 0 on success or a negative phase-specific code
 * (-1..-4) identifying which check timed out.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* FPGA/emulation platforms run much slower than silicon -
	 * scale all polling timeouts accordingly */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks (again, the reset
	 * above re-enabled them) */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO (11 entries - one per injected packet) */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4739
/* Unmask attention interrupt sources in the HW blocks.
 *
 * Writing 0 to a *_INT_MASK register enables all of that block's
 * attention sources; non-zero values keep specific known-benign bits
 * masked (see the per-line comments).  The commented-out SEM/MISC
 * writes are intentionally left disabled.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	/* E2 keeps bit 6 of PXP mask 1 set (0x40) - per HW spec */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask depends on the platform/chip revision */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4796
/* Per-block parity mask table, programmed by enable_blocks_parity().
 *
 * For each block's *_PRTY_MASK register: a set bit masks (ignores) the
 * corresponding parity error source, 0x0 leaves every source enabled.
 * The non-zero entries mask sources documented as benign/expected for
 * that block (see the bit comments).
 */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
	{HC_REG_HC_PRTY_MASK,		0x7},
	{MISC_REG_MISC_PRTY_MASK,	0x1},
	{QM_REG_QM_PRTY_MASK,		0x0},
	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK,		0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK,		0x0},
	{CFC_REG_CFC_PRTY_MASK,		0x0},
	{DBG_REG_DBG_PRTY_MASK,		0x0},
	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
	{PRS_REG_PRS_PRTY_MASK,		(1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK,	0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK,	0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK,	0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK,	0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
};
4830
4831static void enable_blocks_parity(struct bnx2x *bp)
4832{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004833 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004834
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004835 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004836 REG_WR(bp, bnx2x_parity_mask[i].addr,
4837 bnx2x_parity_mask[i].mask);
4838}
4839
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004840
/* Reset the chip's common (port/function independent) blocks.
 *
 * NOTE(review): the masks 0xd3ffff7f and 0x1403 select which bits of
 * reset registers 1 and 2 are affected; the per-bit block mapping is
 * defined by the hardware spec - confirm against the register
 * documentation before changing either value.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4848
Eilon Greenstein573f2032009-08-12 08:24:14 +00004849static void bnx2x_init_pxp(struct bnx2x *bp)
4850{
4851 u16 devctl;
4852 int r_order, w_order;
4853
4854 pci_read_config_word(bp->pdev,
4855 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4856 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4857 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4858 if (bp->mrrs == -1)
4859 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4860 else {
4861 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4862 r_order = bp->mrrs;
4863 }
4864
4865 bnx2x_init_pxp_arb(bp, r_order, w_order);
4866}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004867
/* Enable fan-failure detection via SPIO 5 when the board requires it.
 *
 * Whether detection is required comes from the shared-memory HW config:
 * either it is explicitly enabled, or it depends on the PHY type of at
 * least one port (queried via bnx2x_fan_failure_det_req()).  If
 * required, SPIO 5 is configured as an active-low input whose event is
 * routed to the IGU.  No-op when there is no MCP.
 */
static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	/* shared memory is only valid when the MCP is present */
	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
		MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}
4919
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004920static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4921{
4922 u32 offset = 0;
4923
4924 if (CHIP_IS_E1(bp))
4925 return;
4926 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4927 return;
4928
4929 switch (BP_ABS_FUNC(bp)) {
4930 case 0:
4931 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4932 break;
4933 case 1:
4934 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4935 break;
4936 case 2:
4937 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4938 break;
4939 case 3:
4940 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4941 break;
4942 case 4:
4943 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4944 break;
4945 case 5:
4946 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4947 break;
4948 case 6:
4949 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4950 break;
4951 case 7:
4952 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4953 break;
4954 default:
4955 return;
4956 }
4957
4958 REG_WR(bp, offset, pretend_func_num);
4959 REG_RD(bp, offset);
4960 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4961}
4962
4963static void bnx2x_pf_disable(struct bnx2x *bp)
4964{
4965 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4966 val &= ~IGU_PF_CONF_FUNC_EN;
4967
4968 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4969 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4970 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4971}
4972
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004973static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004974{
4975 u32 val, i;
4976
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004977 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004978
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004979 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004980 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4981 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4982
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004983 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004984 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004985 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004986
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004987 if (CHIP_IS_E2(bp)) {
4988 u8 fid;
4989
4990 /**
4991 * 4-port mode or 2-port mode we need to turn of master-enable
4992 * for everyone, after that, turn it back on for self.
4993 * so, we disregard multi-function or not, and always disable
4994 * for all functions on the given path, this means 0,2,4,6 for
4995 * path 0 and 1,3,5,7 for path 1
4996 */
4997 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4998 if (fid == BP_ABS_FUNC(bp)) {
4999 REG_WR(bp,
5000 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
5001 1);
5002 continue;
5003 }
5004
5005 bnx2x_pretend_func(bp, fid);
5006 /* clear pf enable */
5007 bnx2x_pf_disable(bp);
5008 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5009 }
5010 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005011
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005012 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005013 if (CHIP_IS_E1(bp)) {
5014 /* enable HW interrupt from PXP on USDM overflow
5015 bit 16 on INT_MASK_0 */
5016 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005017 }
5018
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005019 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005020 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005021
5022#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005023 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5024 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5025 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5026 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5027 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00005028 /* make sure this value is 0 */
5029 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005030
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005031/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5032 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5033 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5034 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5035 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005036#endif
5037
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005038 bnx2x_ilt_init_page_size(bp, INITOP_SET);
5039
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005040 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5041 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005042
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005043 /* let the HW do it's magic ... */
5044 msleep(100);
5045 /* finish PXP init */
5046 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5047 if (val != 1) {
5048 BNX2X_ERR("PXP2 CFG failed\n");
5049 return -EBUSY;
5050 }
5051 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5052 if (val != 1) {
5053 BNX2X_ERR("PXP2 RD_INIT failed\n");
5054 return -EBUSY;
5055 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005056
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005057 /* Timers bug workaround E2 only. We need to set the entire ILT to
5058 * have entries with value "0" and valid bit on.
5059 * This needs to be done by the first PF that is loaded in a path
5060 * (i.e. common phase)
5061 */
5062 if (CHIP_IS_E2(bp)) {
5063 struct ilt_client_info ilt_cli;
5064 struct bnx2x_ilt ilt;
5065 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5066 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5067
5068		/* initialize dummy TM client */
5069 ilt_cli.start = 0;
5070 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5071 ilt_cli.client_num = ILT_CLIENT_TM;
5072
5073 /* Step 1: set zeroes to all ilt page entries with valid bit on
5074 * Step 2: set the timers first/last ilt entry to point
5075 * to the entire range to prevent ILT range error for 3rd/4th
5076 * vnic (this code assumes existance of the vnic)
5077	 * vnic (this code assumes existence of the vnic)
5078 * both steps performed by call to bnx2x_ilt_client_init_op()
5079 * with dummy TM client
5080 *
5081 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5082 * and his brother are split registers
5083 */
5084 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5085 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5086 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5087
5088 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5089 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5090 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5091 }
5092
5093
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005094 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5095 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005096
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005097 if (CHIP_IS_E2(bp)) {
5098 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5099 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5100 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5101
5102 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5103
5104 /* let the HW do it's magic ... */
5105 do {
5106 msleep(200);
5107 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5108 } while (factor-- && (val != 1));
5109
5110 if (val != 1) {
5111 BNX2X_ERR("ATC_INIT failed\n");
5112 return -EBUSY;
5113 }
5114 }
5115
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005116 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005117
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005118 /* clean the DMAE memory */
5119 bp->dmae_ready = 1;
5120 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005121
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005122 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5123 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5124 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5125 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005126
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005127 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5128 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5129 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5130 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5131
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005132 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005133
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005134 if (CHIP_MODE_IS_4_PORT(bp))
5135 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005136
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005137 /* QM queues pointers table */
5138 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005139
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005140 /* soft reset pulse */
5141 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5142 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005143
Michael Chan37b091b2009-10-10 13:46:55 +00005144#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005145 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005146#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005147
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005148 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005149 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5150
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005151 if (!CHIP_REV_IS_SLOW(bp)) {
5152 /* enable hw interrupt from doorbell Q */
5153 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5154 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005155
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005156 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005157 if (CHIP_MODE_IS_4_PORT(bp)) {
5158 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5159 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5160 }
5161
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005162 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005163 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005164#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005165 /* set NIC mode */
5166 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005167#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005168 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005169 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005170
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005171 if (CHIP_IS_E2(bp)) {
5172 /* Bit-map indicating which L2 hdrs may appear after the
5173 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005174 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005175 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5176 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5177 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005178
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005179 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5180 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5181 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5182 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005183
Eilon Greensteinca003922009-08-12 22:53:28 -07005184 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5185 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5186 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5187 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005188
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005189 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5190 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5191 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5192 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005193
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005194 if (CHIP_MODE_IS_4_PORT(bp))
5195 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5196
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005197 /* sync semi rtc */
5198 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5199 0x80000000);
5200 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5201 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005202
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005203 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5204 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5205 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005206
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005207 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005208 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005209 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5210 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5211 }
5212
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005213 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005214 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5215 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005216
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005217 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005218#ifdef BCM_CNIC
5219 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5220 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5221 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5222 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5223 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5224 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5225 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5226 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5227 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5228 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5229#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005230 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005231
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005232 if (sizeof(union cdu_context) != 1024)
5233 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005234 dev_alert(&bp->pdev->dev, "please adjust the size "
5235 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005236 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005237
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005238 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005239 val = (4 << 24) + (0 << 12) + 1024;
5240 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005241
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005242 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005243 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005244 /* enable context validation interrupt from CFC */
5245 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5246
5247 /* set the thresholds to prevent CFC/CDU race */
5248 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005249
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005250 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005251
5252 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5253 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5254
5255 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005256 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005257
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005258 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005259 /* Reset PCIE errors for debug */
5260 REG_WR(bp, 0x2814, 0xffffffff);
5261 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005262
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005263 if (CHIP_IS_E2(bp)) {
5264 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5265 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5266 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5267 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5268 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5269 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5270 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5271 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5272 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5273 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5274 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5275 }
5276
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005277 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005278 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005279 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005280 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005281
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005282 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005283 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005284 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005285 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005286 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005287 if (CHIP_IS_E2(bp)) {
5288 /* Bit-map indicating which L2 hdrs may appear after the
5289 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005290 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005291 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005292
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005293 if (CHIP_REV_IS_SLOW(bp))
5294 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005295
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005296 /* finish CFC init */
5297 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5298 if (val != 1) {
5299 BNX2X_ERR("CFC LL_INIT failed\n");
5300 return -EBUSY;
5301 }
5302 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5303 if (val != 1) {
5304 BNX2X_ERR("CFC AC_INIT failed\n");
5305 return -EBUSY;
5306 }
5307 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5308 if (val != 1) {
5309 BNX2X_ERR("CFC CAM_INIT failed\n");
5310 return -EBUSY;
5311 }
5312 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005313
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005314 if (CHIP_IS_E1(bp)) {
5315 /* read NIG statistic
5316 to see if this is our first up since powerup */
5317 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5318 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005319
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005320 /* do internal memory self test */
5321 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5322 BNX2X_ERR("internal mem self test failed\n");
5323 return -EBUSY;
5324 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005325 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005326
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005327 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005328 bp->common.shmem_base,
5329 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005330
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005331 bnx2x_setup_fan_failure_detection(bp);
5332
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005333 /* clear PXP2 attentions */
5334 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005335
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005336 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005337 if (CHIP_PARITY_SUPPORTED(bp))
5338 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005339
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005340 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005341 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5342 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5343 CHIP_IS_E1x(bp)) {
5344 u32 shmem_base[2], shmem2_base[2];
5345 shmem_base[0] = bp->common.shmem_base;
5346 shmem2_base[0] = bp->common.shmem2_base;
5347 if (CHIP_IS_E2(bp)) {
5348 shmem_base[1] =
5349 SHMEM2_RD(bp, other_shmem_base_addr);
5350 shmem2_base[1] =
5351 SHMEM2_RD(bp, other_shmem2_base_addr);
5352 }
5353 bnx2x_acquire_phy_lock(bp);
5354 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5355 bp->common.chip_id);
5356 bnx2x_release_phy_lock(bp);
5357 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005358 } else
5359 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5360
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005361 return 0;
5362}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005363
/**
 * bnx2x_init_hw_port - per-port hardware initialization.
 * @bp: driver handle
 *
 * Runs the PORT0/PORT1 init stage for each HW block and programs the
 * port-scoped registers: BRB pause thresholds, PBF arbiter credits,
 * NIG classification/LLH mode, AEU attention masks and fan-failure
 * attention.  Finishes with a link reset.  Returns 0 on success.
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupts while its blocks are initialized */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* TM (timers) scan parameters for the iSCSI/FCoE offload path */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* pause thresholds (in 256-byte BRB units) depend on
			 * MF mode, single/dual-port flavor and the MTU */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
						((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56; /* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* 4-port mode uses fixed XOFF/XON thresholds and a
		 * guaranteed MAC allotment per port */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				   BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes: pulse PBF init to latch the new values */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 * bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* LLH classification type: 0 - none, 1 - SD (vlan),
			 * 2 - SI multi-function mode */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			/* disable link-level flow control, enable PAUSE */
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* unmask the SPIO5 (fan failure) attention for this port */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5547
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005548static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5549{
5550 int reg;
5551
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005552 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005553 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005554 else
5555 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005556
5557 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5558}
5559
/* Clear a single IGU status block, acting in PF (not VF) capacity. */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5564
5565static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5566{
5567 u32 i, base = FUNC_ILT_BASE(func);
5568 for (i = base; i < base + ILT_PER_FUNC; i++)
5569 bnx2x_ilt_wr(bp, i, 0);
5570}
5571
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005572static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005573{
5574 int port = BP_PORT(bp);
5575 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005576 struct bnx2x_ilt *ilt = BP_ILT(bp);
5577 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00005578 u32 addr, val;
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005579 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5580 int i, main_mem_width;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005581
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005582 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005583
Eilon Greenstein8badd272009-02-12 08:36:15 +00005584 /* set MSI reconfigure capability */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005585 if (bp->common.int_block == INT_BLOCK_HC) {
5586 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5587 val = REG_RD(bp, addr);
5588 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5589 REG_WR(bp, addr, val);
5590 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00005591
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005592 ilt = BP_ILT(bp);
5593 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005594
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005595 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5596 ilt->lines[cdu_ilt_start + i].page =
5597 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5598 ilt->lines[cdu_ilt_start + i].page_mapping =
5599 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5600 /* cdu ilt pages are allocated manually so there's no need to
5601 set the size */
5602 }
5603 bnx2x_ilt_init_op(bp, INITOP_SET);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005604
Michael Chan37b091b2009-10-10 13:46:55 +00005605#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005606 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00005607
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005608 /* T1 hash bits value determines the T1 number of entries */
5609 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00005610#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005611
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005612#ifndef BCM_CNIC
5613 /* set NIC mode */
5614 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5615#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005616
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005617 if (CHIP_IS_E2(bp)) {
5618 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5619
5620 /* Turn on a single ISR mode in IGU if driver is going to use
5621 * INT#x or MSI
5622 */
5623 if (!(bp->flags & USING_MSIX_FLAG))
5624 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5625 /*
5626 * Timers workaround bug: function init part.
5627 * Need to wait 20msec after initializing ILT,
5628 * needed to make sure there are no requests in
5629 * one of the PXP internal queues with "old" ILT addresses
5630 */
5631 msleep(20);
5632 /*
5633 * Master enable - Due to WB DMAE writes performed before this
5634 * register is re-initialized as part of the regular function
5635 * init
5636 */
5637 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5638 /* Enable the function in IGU */
5639 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5640 }
5641
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005642 bp->dmae_ready = 1;
5643
5644 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5645
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005646 if (CHIP_IS_E2(bp))
5647 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5648
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005649 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5650 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5651 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5652 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5653 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5654 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5655 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5656 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5657 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5658
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005659 if (CHIP_IS_E2(bp)) {
5660 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5661 BP_PATH(bp));
5662 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5663 BP_PATH(bp));
5664 }
5665
5666 if (CHIP_MODE_IS_4_PORT(bp))
5667 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5668
5669 if (CHIP_IS_E2(bp))
5670 REG_WR(bp, QM_REG_PF_EN, 1);
5671
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005672 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005673
5674 if (CHIP_MODE_IS_4_PORT(bp))
5675 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5676
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005677 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5678 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5679 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5680 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5681 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5682 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5683 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5684 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5685 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5686 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5687 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005688 if (CHIP_IS_E2(bp))
5689 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5690
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005691 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5692
5693 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5694
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005695 if (CHIP_IS_E2(bp))
5696 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5697
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005698 if (IS_MF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005699 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005700 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005701 }
5702
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005703 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5704
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005705 /* HC init per function */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005706 if (bp->common.int_block == INT_BLOCK_HC) {
5707 if (CHIP_IS_E1H(bp)) {
5708 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5709
5710 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5711 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5712 }
5713 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5714
5715 } else {
5716 int num_segs, sb_idx, prod_offset;
5717
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005718 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5719
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005720 if (CHIP_IS_E2(bp)) {
5721 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5722 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5723 }
5724
5725 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5726
5727 if (CHIP_IS_E2(bp)) {
5728 int dsb_idx = 0;
5729 /**
5730 * Producer memory:
5731 * E2 mode: address 0-135 match to the mapping memory;
5732 * 136 - PF0 default prod; 137 - PF1 default prod;
5733 * 138 - PF2 default prod; 139 - PF3 default prod;
5734 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5735 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5736 * 144-147 reserved.
5737 *
5738 * E1.5 mode - In backward compatible mode;
5739 * for non default SB; each even line in the memory
5740 * holds the U producer and each odd line hold
5741 * the C producer. The first 128 producers are for
5742 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5743 * producers are for the DSB for each PF.
5744 * Each PF has five segments: (the order inside each
5745 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
5746 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5747 * 144-147 attn prods;
5748 */
5749 /* non-default-status-blocks */
5750 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5751 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5752 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5753 prod_offset = (bp->igu_base_sb + sb_idx) *
5754 num_segs;
5755
5756 for (i = 0; i < num_segs; i++) {
5757 addr = IGU_REG_PROD_CONS_MEMORY +
5758 (prod_offset + i) * 4;
5759 REG_WR(bp, addr, 0);
5760 }
5761 /* send consumer update with value 0 */
5762 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5763 USTORM_ID, 0, IGU_INT_NOP, 1);
5764 bnx2x_igu_clear_sb(bp,
5765 bp->igu_base_sb + sb_idx);
5766 }
5767
5768 /* default-status-blocks */
5769 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5770 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5771
5772 if (CHIP_MODE_IS_4_PORT(bp))
5773 dsb_idx = BP_FUNC(bp);
5774 else
5775 dsb_idx = BP_E1HVN(bp);
5776
5777 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5778 IGU_BC_BASE_DSB_PROD + dsb_idx :
5779 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5780
5781 for (i = 0; i < (num_segs * E1HVN_MAX);
5782 i += E1HVN_MAX) {
5783 addr = IGU_REG_PROD_CONS_MEMORY +
5784 (prod_offset + i)*4;
5785 REG_WR(bp, addr, 0);
5786 }
5787 /* send consumer update with 0 */
5788 if (CHIP_INT_MODE_IS_BC(bp)) {
5789 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5790 USTORM_ID, 0, IGU_INT_NOP, 1);
5791 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5792 CSTORM_ID, 0, IGU_INT_NOP, 1);
5793 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5794 XSTORM_ID, 0, IGU_INT_NOP, 1);
5795 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5796 TSTORM_ID, 0, IGU_INT_NOP, 1);
5797 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5798 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5799 } else {
5800 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5801 USTORM_ID, 0, IGU_INT_NOP, 1);
5802 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5803 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5804 }
5805 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5806
5807 /* !!! these should become driver const once
5808 rf-tool supports split-68 const */
5809 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5810 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5811 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5812 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5813 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5814 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5815 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005816 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005817
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005818 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005819 REG_WR(bp, 0x2114, 0xffffffff);
5820 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005821
5822 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5823 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5824 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5825 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5826 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5827 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5828
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005829 if (CHIP_IS_E1x(bp)) {
5830 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5831 main_mem_base = HC_REG_MAIN_MEMORY +
5832 BP_PORT(bp) * (main_mem_size * 4);
5833 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5834 main_mem_width = 8;
5835
5836 val = REG_RD(bp, main_mem_prty_clr);
5837 if (val)
5838 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5839 "block during "
5840 "function init (0x%x)!\n", val);
5841
5842 /* Clear "false" parity errors in MSI-X table */
5843 for (i = main_mem_base;
5844 i < main_mem_base + main_mem_size * 4;
5845 i += main_mem_width) {
5846 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5847 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5848 i, main_mem_width / 4);
5849 }
5850 /* Clear HC parity attention */
5851 REG_RD(bp, main_mem_prty_clr);
5852 }
5853
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005854 bnx2x_phy_probe(&bp->link_params);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005855
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005856 return 0;
5857}
5858
/**
 * bnx2x_init_hw - top-level hardware initialization dispatcher.
 * @bp:		driver handle
 * @load_code:	FW_MSG_CODE_DRV_LOAD_* response from the MCP, telling this
 *		function how much of the chip it is responsible for
 *
 * Depending on @load_code, runs common, port and function init stages.
 * The switch intentionally falls through: a COMMON load also performs
 * PORT and FUNCTION init; a PORT load also performs FUNCTION init.
 * Also refreshes the driver-pulse sequence from MCP shmem when a
 * bootcode is present.  Returns 0 on success or a negative error code.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* DMAE is unusable until the function stage flips this back on */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break: common load also runs the port stage */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break: port load also runs the function stage */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		/* seed the driver-pulse sequence from the MCP mailbox */
		bp->fw_drv_pulse_wr_seq =
			(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
			 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5911
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005912void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005913{
5914
5915#define BNX2X_PCI_FREE(x, y, size) \
5916 do { \
5917 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005918 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005919 x = NULL; \
5920 y = 0; \
5921 } \
5922 } while (0)
5923
5924#define BNX2X_FREE(x) \
5925 do { \
5926 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005927 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005928 x = NULL; \
5929 } \
5930 } while (0)
5931
5932 int i;
5933
5934 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005935 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005936 for_each_queue(bp, i) {
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005937#ifdef BCM_CNIC
5938 /* FCoE client uses default status block */
5939 if (IS_FCOE_IDX(i)) {
5940 union host_hc_status_block *sb =
5941 &bnx2x_fp(bp, i, status_blk);
5942 memset(sb, 0, sizeof(union host_hc_status_block));
5943 bnx2x_fp(bp, i, status_blk_mapping) = 0;
5944 } else {
5945#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005946 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005947 if (CHIP_IS_E2(bp))
5948 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5949 bnx2x_fp(bp, i, status_blk_mapping),
5950 sizeof(struct host_hc_status_block_e2));
5951 else
5952 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5953 bnx2x_fp(bp, i, status_blk_mapping),
5954 sizeof(struct host_hc_status_block_e1x));
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005955#ifdef BCM_CNIC
5956 }
5957#endif
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005958 }
5959 /* Rx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005960 for_each_rx_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005961
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005962 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005963 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5964 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5965 bnx2x_fp(bp, i, rx_desc_mapping),
5966 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5967
5968 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5969 bnx2x_fp(bp, i, rx_comp_mapping),
5970 sizeof(struct eth_fast_path_rx_cqe) *
5971 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005972
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005973 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005974 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005975 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5976 bnx2x_fp(bp, i, rx_sge_mapping),
5977 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5978 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005979 /* Tx */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00005980 for_each_tx_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005981
5982 /* fastpath tx rings: tx_buf tx_desc */
5983 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5984 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5985 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005986 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005987 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005988 /* end of fastpath */
5989
5990 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005991 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005992
5993 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005994 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005995
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005996 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5997 bp->context.size);
5998
5999 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
6000
6001 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006002
Michael Chan37b091b2009-10-10 13:46:55 +00006003#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006004 if (CHIP_IS_E2(bp))
6005 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
6006 sizeof(struct host_hc_status_block_e2));
6007 else
6008 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
6009 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006010
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006011 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006012#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006013
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006014 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006015
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006016 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
6017 BCM_PAGE_SIZE * NUM_EQ_PAGES);
6018
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006019#undef BNX2X_PCI_FREE
6020#undef BNX2X_KFREE
6021}
6022
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006023static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
6024{
6025 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
6026 if (CHIP_IS_E2(bp)) {
6027 bnx2x_fp(bp, index, sb_index_values) =
6028 (__le16 *)status_blk.e2_sb->sb.index_values;
6029 bnx2x_fp(bp, index, sb_running_index) =
6030 (__le16 *)status_blk.e2_sb->sb.running_index;
6031 } else {
6032 bnx2x_fp(bp, index, sb_index_values) =
6033 (__le16 *)status_blk.e1x_sb->sb.index_values;
6034 bnx2x_fp(bp, index, sb_running_index) =
6035 (__le16 *)status_blk.e1x_sb->sb.running_index;
6036 }
6037}
6038
/**
 * bnx2x_alloc_mem - allocate all host memory the device needs
 *
 * @bp:	driver handle
 *
 * Allocates per-queue status blocks and Rx/Tx rings, the CNIC status
 * block and searcher T2 table (when BCM_CNIC), the default/slowpath
 * status areas, CDU context memory, ILT lines, the slow-path (SPQ) ring
 * and the event queue (EQ) ring.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.  On failure
 * everything allocated so far is released via bnx2x_free_mem() (whose
 * helper macros tolerate NULL pointers), so no partial state is left.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
/* Allocate a zeroed DMA-coherent area; jump to the common error path on
 * failure.  NOTE: relies on the alloc_mem_err label below.
 */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Zeroed kernel allocation with the same goto-on-failure convention. */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
#ifdef BCM_CNIC
		/* FCoE client uses the default status block, so none is
		 * allocated for it here */
		if (!IS_FCOE_IDX(i)) {
#endif
			/* E2 and E1x chips use different SB layouts */
			if (CHIP_IS_E2(bp))
				BNX2X_PCI_ALLOC(sb->e2_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e2));
			else
				BNX2X_PCI_ALLOC(sb->e1x_sb,
				    &bnx2x_fp(bp, i, status_blk_mapping),
				    sizeof(struct host_hc_status_block_e1x));
#ifdef BCM_CNIC
		}
#endif
		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* one CDU context per L2 connection */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	/* roll back everything allocated so far */
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
6158
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006159/*
6160 * Init service functions
6161 */
stephen hemminger8d962862010-10-21 07:50:56 +00006162static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6163 int *state_p, int flags);
6164
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006165int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006166{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006167 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006168
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006169 /* Wait for completion */
6170 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6171 WAIT_RAMROD_COMMON);
6172}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006173
stephen hemminger8d962862010-10-21 07:50:56 +00006174static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006175{
6176 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006177
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006178 /* Wait for completion */
6179 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6180 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006181}
6182
/**
 * Sets (or clears) a single MAC in the CAM for a set of L2 clients,
 * using the SET_MAC slow-path ramrod, and waits for its completion.
 *
 * @param bp driver descriptor
 * @param set set or clear an entry (1 or 0)
 * @param mac pointer to a buffer containing a MAC (at least 6 bytes)
 * @param cl_bit_vec bit vector of clients to register a MAC for
 * @param cam_offset offset in a CAM to use
 * @param is_bcast is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* mark the ramrod pending before posting; cleared by the
	 * completion handler, polled by bnx2x_wait_ramrod() below */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* single-entry command */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC: 16-bit chunks are byte-swapped for the firmware */
	config->config_table[0].msb_mac_addr =
		swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
		swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
		swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
		cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	/* post the SET_MAC ramrod pointing at the slowpath mac_config area */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6246
/**
 * bnx2x_wait_ramrod - wait for a slow-path ramrod completion.
 *
 * @bp:		driver handle
 * @state:	value *state_p must reach for success
 * @idx:	fastpath queue index the reply may arrive on (0 = default)
 * @state_p:	pointer to the state variable updated by the completion
 * @flags:	WAIT_RAMROD_POLL to actively drain completions,
 *		WAIT_RAMROD_COMMON if the reply arrives on the event queue
 *
 * Sleeps up to ~5 seconds (5000 x 1ms).  Returns 0 when *state_p reaches
 * @state, -EIO if the driver paniced meanwhile, -EBUSY on timeout.
 * Must be called from sleepable context (might_sleep()).
 */
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			/* actively service completions instead of relying
			 * on the interrupt path */
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* give up immediately if the chip paniced */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6297
stephen hemminger8d962862010-10-21 07:50:56 +00006298static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006299{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006300 if (CHIP_IS_E1H(bp))
6301 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6302 else if (CHIP_MODE_IS_4_PORT(bp))
6303 return BP_FUNC(bp) * 32 + rel_offset;
6304 else
6305 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006306}
6307
/**
 * LLH CAM line allocations: currently only iSCSI and ETH macs are
 * relevant. In addition, current implementation is tuned for a
 * single ETH MAC.
 *
 * When multiple unicast ETH MACs PF configuration in switch
 * independent mode is required (NetQ, multiple netdev MACs,
 * etc.), consider better utilisation of 16 per function MAC
 * entries in the LLH memory.
 */
enum {
	LLH_CAM_ISCSI_ETH_LINE = 0,	/* iSCSI L2 MAC */
	LLH_CAM_ETH_LINE,		/* primary networking MAC */
	/* number of per-PF LLH lines, from the NIG register definition */
	LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
};
6323
/**
 * bnx2x_set_mac_in_nig - program/clear a MAC in the NIG LLH CAM.
 *
 * @bp:		driver handle
 * @set:	non-zero to write and enable the entry, 0 to disable it
 * @dev_addr:	6-byte MAC address
 * @index:	LLH CAM line (see the LLH_CAM_* enum above)
 *
 * Only effective in switch-independent multi-function mode (IS_MF_SI);
 * otherwise it returns without touching the hardware.
 * NOTE(review): the bound check uses "index > LLH_CAM_MAX_PF_LINE",
 * which admits index == LLH_CAM_MAX_PF_LINE — confirm whether the last
 * line is valid or ">=" was intended.
 */
static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
				 int set,
				 unsigned char *dev_addr,
				 int index)
{
	u32 wb_data[2];
	u32 mem_offset, ena_offset, mem_index;
	/**
	 * indexes mapping:
	 * 0..7 - goes to MEM
	 * 8..15 - goes to MEM2
	 */

	if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
		return;

	/* calculate memory start offset according to the mapping
	 * and index in the memory */
	if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
		/* first bank: per-port FUNC_MEM registers */
		mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
					   NIG_REG_LLH0_FUNC_MEM;
		ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
					   NIG_REG_LLH0_FUNC_MEM_ENABLE;
		mem_index = index;
	} else {
		/* second bank: FUNC_MEM2, index is rebased */
		mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
					   NIG_REG_P0_LLH_FUNC_MEM2;
		ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
					   NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
		mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
	}

	if (set) {
		/* LLH_FUNC_MEM is a u64 WB register */
		mem_offset += 8*mem_index;

		/* pack the MAC into two 32-bit words (low 4 bytes, then
		 * high 2 bytes) for the wide-bus DMAE write */
		wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
			      (dev_addr[4] << 8) | dev_addr[5]);
		wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);

		REG_WR_DMAE(bp, mem_offset, wb_data, 2);
	}

	/* enable/disable the entry */
	REG_WR(bp, ena_offset + 4*mem_index, set);

}
6371
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006372void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006373{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006374 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6375 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6376
6377 /* networking MAC */
6378 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6379 (1 << bp->fp->cl_id), cam_offset , 0);
6380
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006381 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6382
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006383 if (CHIP_IS_E1(bp)) {
6384 /* broadcast MAC */
6385 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6386 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6387 }
6388}
/**
 * bnx2x_set_e1_mc_list - program the E1 multicast MAC list into the CAM.
 *
 * @bp:		driver handle
 * @offset:	CAM offset at which the multicast entries start
 *
 * Copies every address on the netdev multicast list into the mcast_config
 * command table, invalidates leftover entries from a previously longer
 * list, and posts a SET_MAC ramrod.  Does NOT wait for completion (the
 * pending flag is cleared asynchronously by the completion handler).
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac, 16-bit chunks byte-swapped for the firmware */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* if the previous list was longer, invalidate its tail */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* pending flag must be visible before the ramrod is posted */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
/**
 * bnx2x_invlidate_e1_mc_list - invalidate all E1 multicast CAM entries.
 *
 * @bp:	driver handle
 *
 * Marks every entry of the current mcast_config table INVALIDATE, posts a
 * SET_MAC ramrod and waits for its completion.
 * NOTE(review): "invlidate" is a typo for "invalidate", kept because
 * callers elsewhere use this name.
 */
static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* pending flag must be visible before the ramrod is posted */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* flip every currently-programmed entry to INVALIDATE */
	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
6471
Michael Chan993ac7b2009-10-10 13:46:56 +00006472#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	/* E1: two lines past the per-port ETH window; otherwise use the
	 * generic per-function iSCSI line */
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID +
		BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);

	bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);

	return 0;
}
6499
/**
 * Set FCoE L2 MAC(s) at the next entries in the CAM after the
 * ETH MAC(s). This function will wait until the ramrod
 * completion returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
int bnx2x_set_fip_eth_mac_addr(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));
	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, bp->fip_mac,
		cl_bit_vec, bnx2x_e1h_cam_offset(bp, CAM_FIP_ETH_LINE), 0);

	return 0;
}
6525
/**
 * bnx2x_set_all_enode_macs - set/clear the FIP "all ENode" multicast MAC.
 *
 * @bp:	driver handle
 * @set:	set or clear the CAM entry
 *
 * Registers the well-known ALL_ENODE_MACS address for the FCoE client at
 * the per-function FIP multicast CAM line.  Always returns 0.
 */
int bnx2x_set_all_enode_macs(struct bnx2x *bp, int set)
{
	u32 cl_bit_vec = (1 << bnx2x_fcoe(bp, cl_id));

	/**
	 * CAM allocation for E1H
	 * eth unicasts: by func number
	 * iscsi: by func number
	 * fip unicast: by func number
	 * fip multicast: by func number
	 */
	bnx2x_set_mac_addr_gen(bp, set, ALL_ENODE_MACS, cl_bit_vec,
		bnx2x_e1h_cam_offset(bp, CAM_FIP_MCAST_LINE), 0);

	return 0;
}
6542#endif
6543
/**
 * bnx2x_fill_cl_init_data - build the CLIENT_SETUP ramrod data block.
 *
 * @bp:		driver handle
 * @params:	client init parameters (rx/tx queue params, pause, flags)
 * @activate:	whether the client should come up active
 * @data:	ramrod data buffer to fill (zeroed here first)
 *
 * Translates the driver-side client parameters into the firmware's
 * client_init_ramrod_data layout: general section, Rx ring/TPA setup,
 * Tx ring setup and flow-control thresholds.  Pure data marshalling;
 * no hardware access.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.is_fcoe_flg =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	/* DMA addresses are split into hi/lo 32-bit little-endian halves */
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type =
		(params->ramrod_params.flags & CLIENT_IS_FCOE) ?
		LLFC_TRAFFIC_TYPE_FCOE : LLFC_TRAFFIC_TYPE_NW;
}
6631
6632static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6633{
6634 /* ustorm cxt validation */
6635 cxt->ustorm_ag_context.cdu_usage =
6636 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6637 ETH_CONNECTION_TYPE);
6638 /* xcontext validation */
6639 cxt->xstorm_ag_context.cdu_reserved =
6640 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6641 ETH_CONNECTION_TYPE);
6642}
6643
/* bnx2x_setup_fw_client - bring up one L2 client in the firmware.
 *
 * Programs HC coalescing for the client's Tx and Rx status block
 * indices, stamps the Rx context validation words, zeroes the client's
 * storm statistics (when QUEUE_FLG_STATS is set), fills the
 * CLIENT_SETUP ramrod data and posts the ramrod, then busy-waits for
 * the firmware completion via bnx2x_wait_ramrod().
 *
 * @bp:           driver handle
 * @params:       client init parameters (rxq/txq/pause/ramrod sections)
 * @activate:     passed through to bnx2x_fill_cl_init_data()
 * @data:         CPU pointer to the ramrod data buffer to fill
 * @data_mapping: DMA address of @data, handed to the firmware
 *
 * Returns 0 on success, or the bnx2x_wait_ramrod() error on timeout.
 */
static int bnx2x_setup_fw_client(struct bnx2x *bp,
				 struct bnx2x_client_init_params *params,
				 u8 activate,
				 struct client_init_ramrod_data *data,
				 dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	/* hc_rate is in ints/sec; convert to a microseconds period.
	 * A zero rate disables coalescing for that index.
	 */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	/* Mark the fastpath as opening before posting the ramrod so the
	 * completion handler can transition it to OPEN.
	 */
	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}
6710
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006711/**
6712 * Configure interrupt mode according to current configuration.
6713 * In case of MSI-X it will also try to enable MSI-X.
6714 *
6715 * @param bp
6716 *
6717 * @return int
6718 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		/* Enable single-vector MSI, then share the INTx setup path */
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* Single interrupt: one ETH queue plus any non-ETH contexts */
		bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
				   bp->num_queues,
				   1 + NONE_ETH_CONTEXT_USE);
			/* Fall back to a single queue and try MSI unless
			 * the user disabled it
			 */
			bp->num_queues = 1 + NONE_ETH_CONTEXT_USE;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6763
/* must be called prior to any HW initializations */
/* Number of ILT lines required to map this function's L2 connection IDs */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}
6769
/* bnx2x_ilt_set_info - lay out this function's ILT line ranges.
 *
 * Assigns consecutive line ranges (starting at FUNC_ILT_BASE) to the
 * CDU, QM, SRC and TM ILT clients; `line' accumulates across clients,
 * so the per-client sections below must stay in this order.  SRC and
 * TM are only populated when CNIC support is compiled in; otherwise
 * they are marked to be skipped entirely.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006866
/* bnx2x_setup_client - open one fastpath client.
 *
 * Acks/enables the queue's IGU status block (except for the FCoE L2
 * queue), builds the client init parameters from the fastpath state,
 * and issues the CLIENT_SETUP ramrod via bnx2x_setup_fw_client().
 *
 * @bp:         driver handle
 * @fp:         fastpath to open
 * @is_leading: non-zero marks this client as the leading RSS client
 *
 * Returns the bnx2x_setup_fw_client() result (0 on success).
 */
int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
		       int is_leading)
{
	struct bnx2x_client_init_params params = { {0} };
	int rc;

	/* reset IGU state skip FCoE L2 queue */
	if (!IS_FCOE_FP(fp))
		bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
			     IGU_INT_ENABLE, 0);

	params.ramrod_params.pstate = &fp->state;
	params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
	params.ramrod_params.index = fp->index;
	params.ramrod_params.cid = fp->cid;

#ifdef BCM_CNIC
	if (IS_FCOE_FP(fp))
		params.ramrod_params.flags |= CLIENT_IS_FCOE;

#endif

	if (is_leading)
		params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;

	/* Prepare Rx (pause thresholds + queue params) and Tx sections */
	bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);

	bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);

	rc = bnx2x_setup_fw_client(bp, &params, 1,
				   bnx2x_sp(bp, client_init_data),
				   bnx2x_sp_mapping(bp, client_init_data));
	return rc;
}
6901
/* bnx2x_stop_fw_client - tear down one L2 client in the firmware.
 *
 * Runs the mandatory three-step shutdown sequence, waiting for each
 * ramrod to complete before posting the next:
 *   1. ETH_HALT       -> BNX2X_FP_STATE_HALTED
 *   2. ETH_TERMINATE  -> BNX2X_FP_STATE_TERMINATED
 *   3. COMMON_CFC_DEL -> BNX2X_FP_STATE_CLOSED
 *
 * Returns 0 on success or the bnx2x_wait_ramrod() timeout error; on a
 * timeout the remaining steps are skipped.
 */
static int bnx2x_stop_fw_client(struct bnx2x *bp,
				struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6938
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006939static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006940{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006941 struct bnx2x_client_ramrod_params client_stop = {0};
6942 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006943
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006944 client_stop.index = index;
6945 client_stop.cid = fp->cid;
6946 client_stop.cl_id = fp->cl_id;
6947 client_stop.pstate = &(fp->state);
6948 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006949
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006950 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006951}
6952
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006953
/* bnx2x_reset_func - quiesce and reset the per-function HW/FW state.
 *
 * Disables the function in all four storm FW areas, marks its fastpath
 * and slowpath status blocks as disabled, clears the SPQ data area and
 * IGU/HC edge registers, stops the CNIC timer scan (waiting for it to
 * finish), clears the function's ILT and applies the E2 vnic-3 timers
 * workaround.  Assumes bnx2x_reset_port() already ran (see below).
 */
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int i;
	/* p_func offset differs between the E2 and E1x status block
	 * data layouts
	 */
	int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
			(CHIP_IS_E2(bp) ?
			 offsetof(struct hc_status_block_data_e2, common) :
			 offsetof(struct hc_status_block_data_e1x, common));
	int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
	int pfid_offset = offsetof(struct pci_entity, pf_id);

	/* Disable the function in the FW */
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);

	/* FP SBs */
	for_each_eth_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		REG_WR8(bp,
			BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
			+ pfunc_offset_fp + pfid_offset,
			HC_FUNCTION_DISABLED);
	}

	/* SP SB */
	REG_WR8(bp,
		BAR_CSTRORM_INTMEM +
		CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
		pfunc_offset_sp + pfid_offset,
		HC_FUNCTION_DISABLED);


	/* NOTE(review): every iteration writes the same offset; verify
	 * whether "+ i*4" was intended to clear the whole SPQ data area.
	 */
	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
		       0);

	/* Configure IGU */
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	} else {
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
	}

#ifdef BCM_CNIC
	/* Disable Timer scan */
	REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
	/*
	 * Wait for at least 10ms and up to 2 second for the timers scan to
	 * complete
	 */
	for (i = 0; i < 200; i++) {
		msleep(10);
		if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
			break;
	}
#endif
	/* Clear ILT */
	bnx2x_clear_func_ilt(bp, func);

	/* Timers workaround bug for E2: if this is vnic-3,
	 * we need to set the entire ilt range for this timers.
	 */
	if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
		struct ilt_client_info ilt_cli;
		/* use dummy TM client */
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
	}

	/* this assumes that reset_port() called before reset_func()*/
	if (CHIP_IS_E2(bp))
		bnx2x_pf_disable(bp);

	/* DMAE is no longer usable after the function reset */
	bp->dmae_ready = 0;
}
7039
/* bnx2x_reset_port - quiesce the per-port HW path.
 *
 * Masks the port's NIG interrupt, blocks non-MCP Rx traffic into the
 * BRB, masks the port AEU attentions, then (after a 100ms drain delay)
 * warns if BRB blocks are still occupied.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
7065
/* bnx2x_reset_chip - reset HW at the scope dictated by the MCP.
 *
 * The reset scope nests: UNLOAD_COMMON resets port + function + common
 * blocks, UNLOAD_PORT resets port + function, UNLOAD_FUNCTION resets
 * only the function.  An unknown code is logged and ignored.
 */
static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_ABS_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
7092
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007093#ifdef BCM_CNIC
7094static inline void bnx2x_del_fcoe_eth_macs(struct bnx2x *bp)
7095{
7096 if (bp->flags & FCOE_MACS_SET) {
7097 if (!IS_MF_SD(bp))
7098 bnx2x_set_fip_eth_mac_addr(bp, 0);
7099
7100 bnx2x_set_all_enode_macs(bp, 0);
7101
7102 bp->flags &= ~FCOE_MACS_SET;
7103 }
7104}
7105#endif
7106
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007107void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007108{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007109 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007110 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007111 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007112
Eilon Greenstein555f6c72009-02-12 08:36:11 +00007113 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007114 for_each_tx_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08007115 struct bnx2x_fastpath *fp = &bp->fp[i];
7116
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007117 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08007118 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007119
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007120 if (!cnt) {
7121 BNX2X_ERR("timeout waiting for queue[%d]\n",
7122 i);
7123#ifdef BNX2X_STOP_ON_ERROR
7124 bnx2x_panic();
7125 return -EBUSY;
7126#else
7127 break;
7128#endif
7129 }
7130 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007131 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007132 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08007133 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007134 /* Give HW time to discard old tx messages */
7135 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007136
Yitchak Gertner65abd742008-08-25 15:26:24 -07007137 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007138 /* invalidate mc list,
7139 * wait and poll (interrupts are off)
7140 */
7141 bnx2x_invlidate_e1_mc_list(bp);
7142 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007143
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007144 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007145 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7146
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007147 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007148
7149 for (i = 0; i < MC_HASH_SIZE; i++)
7150 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7151 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007152
Michael Chan993ac7b2009-10-10 13:46:56 +00007153#ifdef BCM_CNIC
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +00007154 bnx2x_del_fcoe_eth_macs(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00007155#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007156
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007157 if (unload_mode == UNLOAD_NORMAL)
7158 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007159
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007160 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007161 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007162
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007163 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007164 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007165 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007166 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007167 /* The mac address is written to entries 1-4 to
7168 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007169 u8 entry = (BP_E1HVN(bp) + 1)*8;
7170
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007171 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007172 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007173
7174 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7175 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007176 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007177
7178 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007179
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007180 } else
7181 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7182
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007183 /* Close multi and leading connections
7184 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007185 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007186
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007187 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007188#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007189 return;
7190#else
7191 goto unload_error;
7192#endif
7193
7194 rc = bnx2x_func_stop(bp);
7195 if (rc) {
7196 BNX2X_ERR("Function stop failed!\n");
7197#ifdef BNX2X_STOP_ON_ERROR
7198 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007199#else
7200 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007201#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007202 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007203#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007204unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007205#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007206 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007207 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007208 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007209 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7210 "%d, %d, %d\n", BP_PATH(bp),
7211 load_count[BP_PATH(bp)][0],
7212 load_count[BP_PATH(bp)][1],
7213 load_count[BP_PATH(bp)][2]);
7214 load_count[BP_PATH(bp)][0]--;
7215 load_count[BP_PATH(bp)][1 + port]--;
7216 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7217 "%d, %d, %d\n", BP_PATH(bp),
7218 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7219 load_count[BP_PATH(bp)][2]);
7220 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007221 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007222 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007223 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7224 else
7225 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7226 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007227
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007228 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7229 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7230 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007231
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007232 /* Disable HW interrupts, NAPI */
7233 bnx2x_netif_stop(bp, 1);
7234
7235 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007236 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007237
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007238 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007239 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007240
7241 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007242 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007243 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007244
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007245}
7246
/* bnx2x_disable_close_the_gate - clear the "close the gates" AEU masks.
 *
 * On E1 this clears bits 8-9 of the port's AEU attention mask; on E1H
 * it clears the PXP/NIG close masks in the general AEU mask register.
 */
void bnx2x_disable_close_the_gate(struct bnx2x *bp)
{
	u32 val;

	DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");

	if (CHIP_IS_E1(bp)) {
		int port = BP_PORT(bp);
		u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			MISC_REG_AEU_MASK_ATTN_FUNC_0;

		val = REG_RD(bp, addr);
		val &= ~(0x300);
		REG_WR(bp, addr, val);
	} else if (CHIP_IS_E1H(bp)) {
		val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
		val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
			 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
		REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
	}
}
7268
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007269/* Close gates #2, #3 and #4: */
/* bnx2x_set_234_gates - open or close HW gates #2, #3 and #4.
 *
 * @close: true to close the gates, false to open them.
 * Note the polarity: for the discard registers (#2, #4) bit 0 set
 * means "closed", while for HC_REG_CONFIG (#3) bit 0 set means "open".
 */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	mmiowb();
}
7295
7296#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7297
/* Save the current CLP magic bit into *magic_val and set it in the
 * shared MF configuration mailbox (restored by bnx2x_clp_reset_done).
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
7305
/* Restore the value of the `magic' bit.
 *
 * @param bp	Driver handle.
 * @param magic_val	Old value of the `magic' bit.
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Read-modify-write: clear the magic bit and OR back the value
	 * saved by bnx2x_clp_reset_prep()
	 */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		  (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
7318
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007319/**
7320 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007321 *
7322 * @param bp
7323 * @param magic_val Old value of 'magic' bit.
7324 */
static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
{
	u32 shmem;
	u32 validity_offset;

	DP(NETIF_MSG_HW, "Starting\n");

	/* Set `magic' bit in order to save MF config */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_prep(bp, magic_val);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Clear validity map flags so bnx2x_reset_mcp_comp() can detect
	 * when the MCP republishes them after the reset
	 */
	if (shmem > 0)
		REG_WR(bp, shmem + validity_offset, 0);
}
7344
7345#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7346#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7347
7348/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7349 * depending on the HW type.
7350 *
7351 * @param bp
7352 */
7353static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7354{
7355 /* special handling for emulation and FPGA,
7356 wait 10 times longer */
7357 if (CHIP_REV_IS_SLOW(bp))
7358 msleep(MCP_ONE_TIMEOUT*10);
7359 else
7360 msleep(MCP_ONE_TIMEOUT);
7361}
7362
/* bnx2x_reset_mcp_comp - wait for the MCP to come back after a reset.
 *
 * Polls the shmem validity map (up to MCP_TIMEOUT) until both the
 * DEV_INFO and MB validity bits are set, then restores the CLP magic
 * bit saved earlier by bnx2x_reset_mcp_prep().
 *
 * Returns 0 when the MCP is up, -ENOTTY if shmem is unreadable or the
 * validity bits never appear.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
7414
/* Prepare the PXP block for reset (clears the read-start/RBC/CFG done
 * flags); nothing to do on E1.
 */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		mmiowb();
	}
}
7424
7425/*
7426 * Reset the whole chip except for:
7427 * - PCIE core
7428 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7429 * one reset bit)
7430 * - IGU
7431 * - MISC (including AEU)
7432 * - GRC
7433 * - RBCN, RBCP
7434 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* Blocks intentionally kept out of the reset (see the comment
	 * block above): HC, PXP/PXPV in mask 1 ...
	 */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* ... MDIO, EMAC hard cores, MISC, RBCN, GRC and MCP in mask 2 */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC  |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* Assert reset (CLEAR registers) for everything except the
	 * excluded blocks ...
	 */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* ... then release reset (SET registers) for all blocks */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
7473
7474static int bnx2x_process_kill(struct bnx2x *bp)
7475{
7476 int cnt = 1000;
7477 u32 val = 0;
7478 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7479
7480
7481 /* Empty the Tetris buffer, wait for 1s */
7482 do {
7483 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7484 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7485 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7486 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7487 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7488 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7489 ((port_is_idle_0 & 0x1) == 0x1) &&
7490 ((port_is_idle_1 & 0x1) == 0x1) &&
7491 (pgl_exp_rom2 == 0xffffffff))
7492 break;
7493 msleep(1);
7494 } while (cnt-- > 0);
7495
7496 if (cnt <= 0) {
7497 DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
7498 " are still"
7499 " outstanding read requests after 1s!\n");
7500 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7501 " port_is_idle_0=0x%08x,"
7502 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7503 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7504 pgl_exp_rom2);
7505 return -EAGAIN;
7506 }
7507
7508 barrier();
7509
7510 /* Close gates #2, #3 and #4 */
7511 bnx2x_set_234_gates(bp, true);
7512
7513 /* TBD: Indicate that "process kill" is in progress to MCP */
7514
7515 /* Clear "unprepared" bit */
7516 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7517 barrier();
7518
7519 /* Make sure all is written to the chip before the reset */
7520 mmiowb();
7521
7522 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7523 * PSWHST, GRC and PSWRD Tetris buffer.
7524 */
7525 msleep(1);
7526
7527 /* Prepare to chip reset: */
7528 /* MCP */
7529 bnx2x_reset_mcp_prep(bp, &val);
7530
7531 /* PXP */
7532 bnx2x_pxp_prep(bp);
7533 barrier();
7534
7535 /* reset the chip */
7536 bnx2x_process_kill_chip_reset(bp);
7537 barrier();
7538
7539 /* Recover after reset: */
7540 /* MCP */
7541 if (bnx2x_reset_mcp_comp(bp, val))
7542 return -EAGAIN;
7543
7544 /* PXP */
7545 bnx2x_pxp_prep(bp);
7546
7547 /* Open the gates #2, #3 and #4 */
7548 bnx2x_set_234_gates(bp, false);
7549
7550 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7551 * reset state, re-enable attentions. */
7552
7553 return 0;
7554}
7555
/* Executed by the recovery "leader" once all other functions on the
 * chip have unloaded: runs the "process kill" chip reset and, on
 * success, marks recovery as done.  Leadership (the RESERVED_08 HW
 * lock) is always released before returning, on both paths.
 *
 * Returns 0 on success, -EAGAIN if the chip reset failed.
 */
static int bnx2x_leader_reset(struct bnx2x *bp)
{
	int rc = 0;
	/* Try to recover after the failure */
	if (bnx2x_process_kill(bp)) {
		printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
		       bp->dev->name);
		rc = -EAGAIN;
		goto exit_leader_reset;
	}

	/* Clear "reset is in progress" bit and update the driver state */
	bnx2x_set_reset_done(bp);
	bp->recovery_state = BNX2X_RECOVERY_DONE;

exit_leader_reset:
	bp->is_leader = 0;
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
	/* ensure is_leader/recovery_state updates are visible to other CPUs */
	smp_wmb();
	return rc;
}
7577
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * Parity-recovery state machine.  Every affected function unloads
 * itself; the first one to grab the RESERVED_08 HW lock becomes the
 * "leader" and is responsible for resetting the chip once all other
 * functions are down.  Non-leaders simply wait for the reset to
 * complete (re-scheduling this work every 100ms) and then reload.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			/* fall back into the loop to handle RECOVERY_WAIT */
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			return;
		}
	}
}
7681
7682/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7683 * scheduled on a general queue in order to prevent a dead lock.
7684 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007685static void bnx2x_reset_task(struct work_struct *work)
7686{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007687 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007688
7689#ifdef BNX2X_STOP_ON_ERROR
7690 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7691 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007692 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007693 return;
7694#endif
7695
7696 rtnl_lock();
7697
7698 if (!netif_running(bp->dev))
7699 goto reset_task_exit;
7700
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007701 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7702 bnx2x_parity_recover(bp);
7703 else {
7704 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7705 bnx2x_nic_load(bp, LOAD_NORMAL);
7706 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007707
7708reset_task_exit:
7709 rtnl_unlock();
7710}
7711
/* end of nic load/unload */

/*
 * Init service functions
 */

stephen hemminger8d962862010-10-21 07:50:56 +00007718static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007719{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007720 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7721 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7722 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007723}
7724
/* Disable interrupts on E1H+ chips by temporarily "pretending" to be
 * function 0 (the mode the UNDI driver ran in), disabling interrupts,
 * and then restoring the original function id.  The REG_RD after each
 * pretend write flushes the GRC transaction so the pretend takes
 * effect before the following accesses.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}
7746
/* Disable interrupts during UNDI unload: newer chips must go through
 * the per-function "pretend" flow, while E1 can disable directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7754
/* Handle the case where a pre-boot UNDI driver left the device
 * initialized (detected via MISC_REG_UNPREPARED and the UNDI-specific
 * doorbell CID offset 0x7): request an unload from the MCP for both
 * ports, quiesce RX traffic, reset the chip while preserving the NIG
 * port-swap strapping, and restore this function's pf_num/fw_seq.
 * Probe-time only (__devinit).
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					      DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			/* let in-flight traffic drain before the reset */
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
			       DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7853
/* Read chip-common identification and configuration at probe time:
 * chip id/revision, port mode (E2), flash size, shmem base addresses
 * and boot-code (MCP) version/feature flags.  Sets NO_MCP_FLAG and
 * returns early when no MCP shared memory is present.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* port-mode strap: the override register's valid bit selects
		 * between the override value and the default strap */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* NOTE(review): 0x2874 is an undocumented strap register here -
	 * the mask/compare values below are taken as-is */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	/* shmem2 base register differs per PCI path on E2 */
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* optical-module verification support depends on boot-code version */
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number is stored as four consecutive 32-bit words in shmem */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7991
/* Field extractors for IGU CAM mapping-memory entries */
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

/* Discover which IGU status blocks belong to this function.  In
 * backward-compatible interrupt mode the layout is fixed by chip mode;
 * otherwise the IGU CAM is scanned for valid PF entries matching this
 * function.  Fills bp->igu_base_sb, bp->igu_sb_cnt and bp->igu_dsb_id.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		/* fixed layout: cap the SB count by what the L2 CIDs need */
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				/* first non-default entry found becomes the
				 * base; the rest just increase the count */
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
			       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
8043
/* Build the mask(s) of link modes supported on this port: aggregate the
 * capabilities of the configured PHYs (single internal PHY, single
 * external PHY, or a dual external-PHY setup, possibly swapped), read
 * the MDIO PHY address according to switch_cfg, then clear every mode
 * not permitted by the NVRAM speed_cap_mask per configuration.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		/* dual external PHYs: honour the NVRAM "swapped" strap when
		 * assigning the per-configuration supported masks */
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
8142
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008143static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008144{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008145 u32 link_config, idx, cfg_size = 0;
8146 bp->port.advertising[0] = 0;
8147 bp->port.advertising[1] = 0;
8148 switch (bp->link_params.num_phys) {
8149 case 1:
8150 case 2:
8151 cfg_size = 1;
8152 break;
8153 case 3:
8154 cfg_size = 2;
8155 break;
8156 }
8157 for (idx = 0; idx < cfg_size; idx++) {
8158 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
8159 link_config = bp->port.link_config[idx];
8160 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008161 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008162 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
8163 bp->link_params.req_line_speed[idx] =
8164 SPEED_AUTO_NEG;
8165 bp->port.advertising[idx] |=
8166 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008167 } else {
8168 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008169 bp->link_params.req_line_speed[idx] =
8170 SPEED_10000;
8171 bp->port.advertising[idx] |=
8172 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008173 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008174 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008175 }
8176 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008177
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008178 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008179 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
8180 bp->link_params.req_line_speed[idx] =
8181 SPEED_10;
8182 bp->port.advertising[idx] |=
8183 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008184 ADVERTISED_TP);
8185 } else {
8186 BNX2X_ERROR("NVRAM config error. "
8187 "Invalid link_config 0x%x"
8188 " speed_cap_mask 0x%x\n",
8189 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008190 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008191 return;
8192 }
8193 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008194
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008195 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008196 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
8197 bp->link_params.req_line_speed[idx] =
8198 SPEED_10;
8199 bp->link_params.req_duplex[idx] =
8200 DUPLEX_HALF;
8201 bp->port.advertising[idx] |=
8202 (ADVERTISED_10baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008203 ADVERTISED_TP);
8204 } else {
8205 BNX2X_ERROR("NVRAM config error. "
8206 "Invalid link_config 0x%x"
8207 " speed_cap_mask 0x%x\n",
8208 link_config,
8209 bp->link_params.speed_cap_mask[idx]);
8210 return;
8211 }
8212 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008213
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008214 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8215 if (bp->port.supported[idx] &
8216 SUPPORTED_100baseT_Full) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008217 bp->link_params.req_line_speed[idx] =
8218 SPEED_100;
8219 bp->port.advertising[idx] |=
8220 (ADVERTISED_100baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008221 ADVERTISED_TP);
8222 } else {
8223 BNX2X_ERROR("NVRAM config error. "
8224 "Invalid link_config 0x%x"
8225 " speed_cap_mask 0x%x\n",
8226 link_config,
8227 bp->link_params.speed_cap_mask[idx]);
8228 return;
8229 }
8230 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008231
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008232 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8233 if (bp->port.supported[idx] &
8234 SUPPORTED_100baseT_Half) {
8235 bp->link_params.req_line_speed[idx] =
8236 SPEED_100;
8237 bp->link_params.req_duplex[idx] =
8238 DUPLEX_HALF;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008239 bp->port.advertising[idx] |=
8240 (ADVERTISED_100baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008241 ADVERTISED_TP);
8242 } else {
8243 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008244 "Invalid link_config 0x%x"
8245 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008246 link_config,
8247 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008248 return;
8249 }
8250 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008251
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008252 case PORT_FEATURE_LINK_SPEED_1G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008253 if (bp->port.supported[idx] &
8254 SUPPORTED_1000baseT_Full) {
8255 bp->link_params.req_line_speed[idx] =
8256 SPEED_1000;
8257 bp->port.advertising[idx] |=
8258 (ADVERTISED_1000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008259 ADVERTISED_TP);
8260 } else {
8261 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008262 "Invalid link_config 0x%x"
8263 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008264 link_config,
8265 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008266 return;
8267 }
8268 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008269
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008270 case PORT_FEATURE_LINK_SPEED_2_5G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008271 if (bp->port.supported[idx] &
8272 SUPPORTED_2500baseX_Full) {
8273 bp->link_params.req_line_speed[idx] =
8274 SPEED_2500;
8275 bp->port.advertising[idx] |=
8276 (ADVERTISED_2500baseX_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008277 ADVERTISED_TP);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008278 } else {
8279 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008280 "Invalid link_config 0x%x"
8281 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008282 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008283 bp->link_params.speed_cap_mask[idx]);
8284 return;
8285 }
8286 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008287
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008288 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8289 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8290 case PORT_FEATURE_LINK_SPEED_10G_KR:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008291 if (bp->port.supported[idx] &
8292 SUPPORTED_10000baseT_Full) {
8293 bp->link_params.req_line_speed[idx] =
8294 SPEED_10000;
8295 bp->port.advertising[idx] |=
8296 (ADVERTISED_10000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008297 ADVERTISED_FIBRE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008298 } else {
8299 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008300 "Invalid link_config 0x%x"
8301 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008302 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008303 bp->link_params.speed_cap_mask[idx]);
8304 return;
8305 }
8306 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008307
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008308 default:
8309 BNX2X_ERROR("NVRAM config error. "
8310 "BAD link speed link_config 0x%x\n",
8311 link_config);
8312 bp->link_params.req_line_speed[idx] =
8313 SPEED_AUTO_NEG;
8314 bp->port.advertising[idx] =
8315 bp->port.supported[idx];
8316 break;
8317 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008318
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008319 bp->link_params.req_flow_ctrl[idx] = (link_config &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008320 PORT_FEATURE_FLOW_CONTROL_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008321 if ((bp->link_params.req_flow_ctrl[idx] ==
8322 BNX2X_FLOW_CTRL_AUTO) &&
8323 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8324 bp->link_params.req_flow_ctrl[idx] =
8325 BNX2X_FLOW_CTRL_NONE;
8326 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008327
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008328 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8329 " 0x%x advertising 0x%x\n",
8330 bp->link_params.req_line_speed[idx],
8331 bp->link_params.req_duplex[idx],
8332 bp->link_params.req_flow_ctrl[idx],
8333 bp->port.advertising[idx]);
8334 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008335}
8336
Michael Chane665bfd2009-10-10 13:46:54 +00008337static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8338{
8339 mac_hi = cpu_to_be16(mac_hi);
8340 mac_lo = cpu_to_be32(mac_lo);
8341 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8342 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8343}
8344
/* Read the per-port link/PHY configuration out of shared memory (shmem)
 * and populate bp->link_params and bp->port.  Runs at probe time; expects
 * a functional MCP/shmem (callers guard with !BP_NOMCP()).
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	/* Speed capabilities and requested link config for both possible
	 * PHYs on this port (array index 0/1).
	 */
	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	/* Probe the PHY(s) first, then derive the supported/requested link
	 * settings from them; order matters here.
	 */
	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		/* MDIO accesses target the external PHY's address */
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008407
/* Discover the device's MAC addresses.  In multi-function (MF) mode they
 * come from the per-function MF config; in single-function (SF) mode from
 * the per-port hw config.  With no MCP a random MAC is generated as a
 * debug workaround.  Also derives the iSCSI and FCoE MACs where CNIC
 * support is compiled in.
 */
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		/* val2 = upper 16 bits, val = lower 32 bits of the MAC;
		 * only use them if neither half is the "unset" default.
		 */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI NPAR MAC */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	/* Mirror the chosen address into link params and perm_addr */
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	/* Inform the upper layers about FCoE MAC */
	if (!CHIP_IS_E1x(bp)) {
		if (IS_MF_SD(bp))
			memcpy(bp->fip_mac, bp->dev->dev_addr,
			       sizeof(bp->fip_mac));
		else
			memcpy(bp->fip_mac, bp->iscsi_mac,
			       sizeof(bp->fip_mac));
	}
#endif
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008467
/* Top-level probe-time hardware discovery: interrupt block (HC vs IGU)
 * setup, multi-function (MF) mode detection from shmem/MF config, status
 * block count adjustment, and per-port + MAC info retrieval.
 *
 * Returns 0 on success or -EPERM when the MF configuration is invalid
 * (bad OV tag in SD mode, or a non-zero VN in single-function mode).
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int /*abs*/func = BP_ABS_FUNC(bp);
	int vn, port;
	u32 val = 0;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1/E1H use the HC interrupt block with fixed IGU layout */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       NUM_IGU_SB_REQUIRED(bp->l2_cid_count));
	} else {
		/* Newer chips use the IGU; detect backward-compat mode and
		 * read the CAM to learn this function's SB allocation.
		 */
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	port = BP_PORT(bp);

	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		DP(NETIF_MSG_PROBE,
		   "shmem2base 0x%x, size %d, mfcfg offset %d\n",
		   bp->common.shmem2_base, SHMEM2_RD(bp, size),
		   (u32)offsetof(struct shmem2_region, mf_cfg_addr));
		/* Locate the MF config: preferably via shmem2 pointer,
		 * otherwise at the legacy fixed offset past func_mb[].
		 */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		/*
		 * get mf configuration:
		 * 1. existence of MF configuration
		 * 2. MAC address must be legal (check only upper bytes)
		 *    for Switch-Independent mode;
		 *    OVLAN must be legal for Switch-Dependent mode
		 * 3. SF_MODE configures specific MF mode
		 */
		if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
			/* get mf configuration */
			val = SHMEM_RD(bp,
				       dev_info.shared_feature_config.config);
			val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;

			switch (val) {
			case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
				val = MF_CFG_RD(bp, func_mf_config[func].
						mac_upper);
				/* check for legal mac (upper bytes)*/
				if (val != 0xffff) {
					bp->mf_mode = MULTI_FUNCTION_SI;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						   func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal MAC "
							    "address for SI\n");
				break;
			case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
				/* get OV configuration; FUNC_0's tag is used
				 * here to detect whether SD mode is active
				 */
				val = MF_CFG_RD(bp,
					func_mf_config[FUNC_0].e1hov_tag);
				val &= FUNC_MF_CFG_E1HOV_TAG_MASK;

				if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
					bp->mf_mode = MULTI_FUNCTION_SD;
					bp->mf_config[vn] = MF_CFG_RD(bp,
						func_mf_config[func].config);
				} else
					DP(NETIF_MSG_PROBE, "illegal OV for "
							    "SD\n");
				break;
			default:
				/* Unknown configuration: reset mf_config */
				bp->mf_config[vn] = 0;
				DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
				   val);
			}
		}

		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		/* Validate the detected mode for THIS function */
		switch (bp->mf_mode) {
		case MULTI_FUNCTION_SD:
			val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
			      FUNC_MF_CFG_E1HOV_TAG_MASK;
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d"
					       " (0x%04x)\n", func,
					       bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERR("No valid MF OV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
			break;
		case MULTI_FUNCTION_SI:
			BNX2X_DEV_INFO("func %d is in MF "
				       "switch-independent mode\n", func);
			break;
		default:
			if (vn) {
				BNX2X_ERR("VN %d in single function mode,"
					  " aborting\n", vn);
				rc = -EPERM;
			}
			break;
		}

	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS 16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* Seed the driver<->MCP mailbox sequence number */
		bp->fw_seq =
		       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	/* Get MAC addresses */
	bnx2x_get_mac_hwinfo(bp);

	return rc;
}
8628
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00008629static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8630{
8631 int cnt, i, block_end, rodi;
8632 char vpd_data[BNX2X_VPD_LEN+1];
8633 char str_id_reg[VENDOR_ID_LEN+1];
8634 char str_id_cap[VENDOR_ID_LEN+1];
8635 u8 len;
8636
8637 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8638 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8639
8640 if (cnt < BNX2X_VPD_LEN)
8641 goto out_not_found;
8642
8643 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8644 PCI_VPD_LRDT_RO_DATA);
8645 if (i < 0)
8646 goto out_not_found;
8647
8648
8649 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8650 pci_vpd_lrdt_size(&vpd_data[i]);
8651
8652 i += PCI_VPD_LRDT_TAG_SIZE;
8653
8654 if (block_end > BNX2X_VPD_LEN)
8655 goto out_not_found;
8656
8657 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8658 PCI_VPD_RO_KEYWORD_MFR_ID);
8659 if (rodi < 0)
8660 goto out_not_found;
8661
8662 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8663
8664 if (len != VENDOR_ID_LEN)
8665 goto out_not_found;
8666
8667 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8668
8669 /* vendor specific info */
8670 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8671 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8672 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8673 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8674
8675 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8676 PCI_VPD_RO_KEYWORD_VENDOR0);
8677 if (rodi >= 0) {
8678 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8679
8680 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8681
8682 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8683 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8684 bp->fw_ver[len] = ' ';
8685 }
8686 }
8687 return;
8688 }
8689out_not_found:
8690 return;
8691}
8692
/* One-time driver-state initialization at probe: locks, work items, HW
 * discovery, module-parameter derived settings (multi-queue, TPA, timers).
 * Returns 0 or a negative error from HW discovery / memory allocation.
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode; RSS requires MSI-X, so fall back to
	 * disabled when the requested int_mode is INTx or MSI.
	 */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (TPA and LRO are enabled/disabled together) */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* periodic timer; the 'poll' module param overrides the default */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8780
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008781
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008782/****************************************************************************
8783* General service functions
8784****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008785
/* called with rtnl_lock */
/* net_device open (.ndo_open): power the device up, complete a pending
 * recovery flow if one is in progress, then load the NIC.
 * Returns 0, -EAGAIN when recovery is not yet finished, or the error
 * from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) so 'break' can skip the failure path
		 * once recovery succeeds
		 */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shall proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* Recovery could not be completed: power back down
			 * and ask the user to retry later
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8831
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008832/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008833static int bnx2x_close(struct net_device *dev)
8834{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008835 struct bnx2x *bp = netdev_priv(dev);
8836
8837 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008838 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008839 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008840
8841 return 0;
8842}
8843
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode (normal/allmulti/promisc) from the
 * netdev flags and multicast list.  For E1 the MC list is written via a
 * ramrod helper; for E1H the MC addresses are hashed (CRC32C, top 8 bits)
 * into a 256-bit approximate filter in the MC_HASH registers.
 * Must not sleep - can be invoked from non-sleepable context.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				/* top byte of the CRC selects one of 256
				 * filter bits: regidx = word, bit = position
				 */
				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
8906
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008907/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008908static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8909 int devad, u16 addr)
8910{
8911 struct bnx2x *bp = netdev_priv(netdev);
8912 u16 value;
8913 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008914
8915 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8916 prtad, devad, addr);
8917
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008918 /* The HW expects different devad if CL22 is used */
8919 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8920
8921 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008922 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008923 bnx2x_release_phy_lock(bp);
8924 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8925
8926 if (!rc)
8927 rc = value;
8928 return rc;
8929}
8930
8931/* called with rtnl_lock */
8932static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8933 u16 addr, u16 value)
8934{
8935 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008936 int rc;
8937
8938 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8939 " value 0x%x\n", prtad, devad, addr, value);
8940
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008941 /* The HW expects different devad if CL22 is used */
8942 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8943
8944 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008945 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008946 bnx2x_release_phy_lock(bp);
8947 return rc;
8948}
8949
8950/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008951static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8952{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008953 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008954 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008955
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008956 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8957 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008958
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008959 if (!netif_running(dev))
8960 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008961
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008962 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008963}
8964
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll entry point (netconsole etc.): run the interrupt handler by
 * hand with the device's real IRQ line masked, so it is safe to call
 * from contexts where interrupts cannot be taken normally.
 */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
8975
/* net_device callbacks for a bnx2x interface; installed on the netdev in
 * bnx2x_init_dev().  The poll controller entry is compiled in only when
 * CONFIG_NET_POLL_CONTROLLER is set (netconsole support).
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open = bnx2x_open,
	.ndo_stop = bnx2x_close,
	.ndo_start_xmit = bnx2x_start_xmit,
	.ndo_select_queue = bnx2x_select_queue,
	.ndo_set_multicast_list = bnx2x_set_rx_mode,
	.ndo_set_mac_address = bnx2x_change_mac_addr,
	.ndo_validate_addr = eth_validate_addr,
	.ndo_do_ioctl = bnx2x_ioctl,
	.ndo_change_mtu = bnx2x_change_mtu,
	.ndo_tx_timeout = bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = poll_bnx2x,
#endif
};
8991
/**
 * bnx2x_init_dev - PCI/MMIO bring-up for one bnx2x function
 * @pdev: PCI device being probed
 * @dev: pre-allocated net_device backing this function
 *
 * Enables the PCI device, claims its BARs (regions are requested only on
 * the first enable of a multi-function device), validates the PM and PCIe
 * capabilities, configures DMA masks, maps the register and doorbell BARs
 * and fills in the netdev feature flags and MDIO glue.  On any failure the
 * resources acquired so far are unwound through the err_out_* labels and a
 * negative errno is returned; on success returns 0.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	/* bp->pdev is set before the first dev_err() below uses it */
	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR 0 = register space, BAR 2 = doorbells; both must be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* Only the first function to enable the device claims the regions
	 * and configures bus mastering; later functions share them.
	 */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* Prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* Map no more of the doorbell BAR than the chip actually needs */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	/* Mirror the offload capabilities for VLAN devices on top of us */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	/* Release regions only if we were the function that claimed them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9158
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009159static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9160 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08009161{
9162 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9163
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009164 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9165
9166 /* return value of 1=2.5GHz 2=5GHz */
9167 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08009168}
9169
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009170static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009171{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009172 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009173 struct bnx2x_fw_file_hdr *fw_hdr;
9174 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009175 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009176 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009177 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009178 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009179
9180 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9181 return -EINVAL;
9182
9183 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9184 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9185
9186 /* Make sure none of the offsets and sizes make us read beyond
9187 * the end of the firmware data */
9188 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9189 offset = be32_to_cpu(sections[i].offset);
9190 len = be32_to_cpu(sections[i].len);
9191 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009192 dev_err(&bp->pdev->dev,
9193 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009194 return -EINVAL;
9195 }
9196 }
9197
9198 /* Likewise for the init_ops offsets */
9199 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9200 ops_offsets = (u16 *)(firmware->data + offset);
9201 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9202
9203 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9204 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009205 dev_err(&bp->pdev->dev,
9206 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009207 return -EINVAL;
9208 }
9209 }
9210
9211 /* Check FW version */
9212 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9213 fw_ver = firmware->data + offset;
9214 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9215 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9216 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9217 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009218 dev_err(&bp->pdev->dev,
9219 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009220 fw_ver[0], fw_ver[1], fw_ver[2],
9221 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9222 BCM_5710_FW_MINOR_VERSION,
9223 BCM_5710_FW_REVISION_VERSION,
9224 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009225 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009226 }
9227
9228 return 0;
9229}
9230
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009231static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009232{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009233 const __be32 *source = (const __be32 *)_source;
9234 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009235 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009236
9237 for (i = 0; i < n/4; i++)
9238 target[i] = be32_to_cpu(source[i]);
9239}
9240
9241/*
9242 Ops array is stored in the following format:
9243 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9244 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009245static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009246{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009247 const __be32 *source = (const __be32 *)_source;
9248 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009249 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009250
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009251 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009252 tmp = be32_to_cpu(source[j]);
9253 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009254 target[i].offset = tmp & 0xffffff;
9255 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009256 }
9257}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009258
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009259/**
9260 * IRO array is stored in the following format:
9261 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9262 */
9263static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9264{
9265 const __be32 *source = (const __be32 *)_source;
9266 struct iro *target = (struct iro *)_target;
9267 u32 i, j, tmp;
9268
9269 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9270 target[i].base = be32_to_cpu(source[j]);
9271 j++;
9272 tmp = be32_to_cpu(source[j]);
9273 target[i].m1 = (tmp >> 16) & 0xffff;
9274 target[i].m2 = tmp & 0xffff;
9275 j++;
9276 tmp = be32_to_cpu(source[j]);
9277 target[i].m3 = (tmp >> 16) & 0xffff;
9278 target[i].size = tmp & 0xffff;
9279 j++;
9280 }
9281}
9282
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009283static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009284{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009285 const __be16 *source = (const __be16 *)_source;
9286 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009287 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009288
9289 for (i = 0; i < n/2; i++)
9290 target[i] = be16_to_cpu(source[i]);
9291}
9292
/* Allocate bp->arr sized after the firmware section 'arr' and fill it by
 * running 'func' over the raw section bytes.  Relies on 'bp' and 'fw_hdr'
 * being in scope at the expansion site and jumps to the caller-supplied
 * label 'lbl' on allocation failure (used only in bnx2x_init_firmware()).
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009304
/**
 * bnx2x_init_firmware - load and unpack the chip firmware image
 * @bp: driver instance
 *
 * Picks the firmware file matching the chip revision (E1/E1H/E2),
 * requests it from userspace, validates it with bnx2x_check_firmware()
 * and unpacks the init data/ops/offsets/IRO arrays into freshly
 * allocated, byte-swapped buffers hung off @bp.  The STORM pointers are
 * aimed directly into the firmware blob, so bp->firmware stays held.
 * On any failure everything allocated so far is freed through the
 * cascading error labels and a negative errno is returned.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: pointers into the (still held) firmware blob */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

	/* Cascading unwind: each label frees what was allocated before it */
iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9382
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009383static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9384{
9385 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009386
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009387#ifdef BCM_CNIC
9388 cid_count += CNIC_CID_MAX;
9389#endif
9390 return roundup(cid_count, QM_CID_ROUND);
9391}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009392
/**
 * bnx2x_init_one - PCI probe entry point for a bnx2x device
 * @pdev: PCI device being probed
 * @ent: matching entry from bnx2x_pci_tbl (driver_data = board type)
 *
 * Allocates the multi-queue net_device, brings up PCI/MMIO resources via
 * bnx2x_init_dev(), initializes driver state, configures the interrupt
 * mode and NAPI contexts, and registers the netdev.  On failure after
 * bnx2x_init_dev() succeeded, the init_one_exit path unwinds the mappings
 * and PCI state.  Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* Fastpath status-block budget depends on the chip family */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
			   ent->driver_data);
		return -ENODEV;
	}

	cid_count += NONE_ETH_CONTEXT_USE + CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		/* bnx2x_init_dev() unwound its own resources already */
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

#ifdef BCM_CNIC
	/* disable FCOE L2 queue for E1x */
	if (CHIP_IS_E1x(bp))
		bp->flags |= NO_FCOE_FLAG;

#endif

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

#ifdef BCM_CNIC
	if (!NO_FCOE(bp)) {
		/* Add storage MAC address */
		rtnl_lock();
		dev_addr_add(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	/* Note: the speed encoding differs between E1x and E2 chips */
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9509
/**
 * bnx2x_remove_one - PCI remove entry point
 * @pdev: PCI device being removed
 *
 * Reverses bnx2x_init_one(): unregisters the netdev, tears down NAPI and
 * MSI/MSI-X, cancels outstanding reset work, unmaps the BARs and releases
 * PCI resources.  The order matters — the netdev must go first so no new
 * traffic or work arrives while the rest is dismantled.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

#ifdef BCM_CNIC
	/* Delete storage MAC address */
	if (!NO_FCOE(bp)) {
		rtnl_lock();
		dev_addr_del(bp->dev, bp->fip_mac, NETDEV_HW_ADDR_T_SAN);
		rtnl_unlock();
	}
#endif

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	/* Release regions only if this is the last function using them */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9557
/* Minimal NIC unload used on a PCI (EEH) error: the hardware may be
 * inaccessible, so only software state is torn down — stop the datapath,
 * silence the timer and statistics, release IRQs and free all buffers —
 * without attempting the usual firmware/hardware shutdown handshake.
 */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
9588
/* Re-read the shared-memory base and MCP state after a PCI (EEH) reset.
 * If the shmem base is missing or outside the expected window the MCP is
 * considered inactive and NO_MCP_FLAG is set; otherwise the validity
 * signature is checked and the firmware mailbox sequence re-synced.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9619
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Returns DISCONNECT when the failure
 * is permanent, otherwise detaches the interface, unloads the NIC
 * state and asks the PCI core for a slot reset.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	/* Permanent failure: no point requesting a reset */
	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
9653
9654/**
9655 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9656 * @pdev: Pointer to PCI device
9657 *
9658 * Restart the card from scratch, as if from a cold-boot.
9659 */
9660static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9661{
9662 struct net_device *dev = pci_get_drvdata(pdev);
9663 struct bnx2x *bp = netdev_priv(dev);
9664
9665 rtnl_lock();
9666
9667 if (pci_enable_device(pdev)) {
9668 dev_err(&pdev->dev,
9669 "Cannot re-enable PCI device after reset\n");
9670 rtnl_unlock();
9671 return PCI_ERS_RESULT_DISCONNECT;
9672 }
9673
9674 pci_set_master(pdev);
9675 pci_restore_state(pdev);
9676
9677 if (netif_running(dev))
9678 bnx2x_set_power_state(bp, PCI_D0);
9679
9680 rtnl_unlock();
9681
9682 return PCI_ERS_RESULT_RECOVERED;
9683}
9684
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Bails out if a parity-error
 * recovery is still in progress; otherwise re-reads MCP state and
 * reloads the NIC if the interface was up.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
9714
/* PCI error-recovery (AER/EEH) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset = bnx2x_io_slot_reset,
	.resume = bnx2x_io_resume,
};
9720
/* Top-level PCI driver glue: probe/remove, power management hooks and
 * the error-recovery handler table above.
 */
static struct pci_driver bnx2x_pci_driver = {
	.name = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe = bnx2x_init_one,
	.remove = __devexit_p(bnx2x_remove_one),
	.suspend = bnx2x_suspend,
	.resume = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
9730
9731static int __init bnx2x_init(void)
9732{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009733 int ret;
9734
Joe Perches7995c642010-02-17 15:01:52 +00009735 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009736
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009737 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9738 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009739 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009740 return -ENOMEM;
9741 }
9742
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009743 ret = pci_register_driver(&bnx2x_pci_driver);
9744 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009745 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009746 destroy_workqueue(bnx2x_wq);
9747 }
9748 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009749}
9750
9751static void __exit bnx2x_cleanup(void)
9752{
9753 pci_unregister_driver(&bnx2x_pci_driver);
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009754
9755 destroy_workqueue(bnx2x_wq);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009756}
9757
/* Register module load/unload entry points */
module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9760
Michael Chan993ac7b2009-10-10 13:46:56 +00009761#ifdef BCM_CNIC
9762
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);

	/* @count L5 SPEs have completed -- drop them from the in-flight
	 * accounting before trying to post more entries.
	 */
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	/* Drain the internal kwqe ring for as long as credits last */
	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
			   & SPE_HDR_CONN_TYPE) >>
			   SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
					vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			/* L2/COMMON entries consume a generic SPQ credit */
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if ((type == ISCSI_CONNECTION_TYPE) ||
			   (type == FCOE_CONNECTION_TYPE)) {
			/* L5 entries are bounded by the CNIC kwqe window */
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* Copy the kwqe into the next free SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* Advance the kwqe consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}

	/* Publish the updated SPQ producer to the hardware */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9833
/* cnic_eth_dev::drv_submit_kwqes_16 hook: copy up to @count 16-byte
 * kwqes into the driver-private cnic_kwq ring and kick the posting
 * loop if enough L5 credits remain. Returns the number of entries
 * accepted, which may be less than @count if the ring fills up.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* Ring full -- report the partial count to the caller */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* Advance the kwqe producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* Try posting right away if there is still room for L5 SPEs */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9876
9877static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9878{
9879 struct cnic_ops *c_ops;
9880 int rc = 0;
9881
9882 mutex_lock(&bp->cnic_mutex);
9883 c_ops = bp->cnic_ops;
9884 if (c_ops)
9885 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9886 mutex_unlock(&bp->cnic_mutex);
9887
9888 return rc;
9889}
9890
9891static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9892{
9893 struct cnic_ops *c_ops;
9894 int rc = 0;
9895
9896 rcu_read_lock();
9897 c_ops = rcu_dereference(bp->cnic_ops);
9898 if (c_ops)
9899 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9900 rcu_read_unlock();
9901
9902 return rc;
9903}
9904
9905/*
9906 * for commands that have no data
9907 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009908int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009909{
9910 struct cnic_ctl_info ctl = {0};
9911
9912 ctl.cmd = cmd;
9913
9914 return bnx2x_cnic_ctl_send(bp, &ctl);
9915}
9916
9917static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9918{
9919 struct cnic_ctl_info ctl;
9920
9921 /* first we tell CNIC and only then we count this as a completion */
9922 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9923 ctl.data.comp.cid = cid;
9924
9925 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009926 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009927}
9928
/* cnic_eth_dev::drv_ctl hook: services control requests issued by the
 * CNIC driver -- context-table writes, SPQ credit returns, and
 * starting/stopping the iSCSI L2 ring. Returns 0 on success or
 * -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		/* Write one ILT (context table) entry on CNIC's behalf */
		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* Return L5 SPQ credits and drain any pending kwqes */
		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Clear FCoE FIP and ALL ENODE MACs addresses first */
		bnx2x_del_fcoe_eth_macs(bp);

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* Return generic (L2/COMMON) SPQ credits atomically */
		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
10009
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000010010void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +000010011{
10012 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10013
10014 if (bp->flags & USING_MSIX_FLAG) {
10015 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
10016 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
10017 cp->irq_arr[0].vector = bp->msix_table[1].vector;
10018 } else {
10019 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
10020 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
10021 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000010022 if (CHIP_IS_E2(bp))
10023 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
10024 else
10025 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
10026
Michael Chan993ac7b2009-10-10 13:46:56 +000010027 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010028 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010029 cp->irq_arr[1].status_blk = bp->def_status_blk;
10030 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010031 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010032
10033 cp->num_irq = 2;
10034}
10035
/* cnic_eth_dev::drv_register_cnic hook: bind a CNIC driver (@ops,
 * @data) to this device. Allocates one page for the internal kwqe
 * ring, resets the ring accounting and finally publishes @ops via RCU
 * so BH-context readers only ever see a fully initialized state.
 *
 * Returns -EINVAL for NULL @ops, -EBUSY while intr_sem is held
 * non-zero, -ENOMEM if the kwqe ring cannot be allocated, 0 on
 * success.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* NOTE(review): intr_sem != 0 appears to mean interrupt handling
	 * is currently gated off -- confirm against the rest of the file.
	 */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	/* Empty ring: cons == prod; last marks the final valid entry */
	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* Publish last: rcu_assign_pointer orders all the stores above
	 * before readers can observe a non-NULL cnic_ops.
	 */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
10071
/* cnic_eth_dev::drv_unregister_cnic hook: detach the CNIC driver.
 * cnic_ops is cleared under the mutex, then synchronize_rcu() waits
 * for in-flight BH readers (bnx2x_cnic_ctl_send_bh) to finish before
 * the kwqe ring is freed. Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
10087
10088struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
10089{
10090 struct bnx2x *bp = netdev_priv(dev);
10091 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
10092
10093 cp->drv_owner = THIS_MODULE;
10094 cp->chip_id = CHIP_ID(bp);
10095 cp->pdev = bp->pdev;
10096 cp->io_base = bp->regview;
10097 cp->io_base2 = bp->doorbells;
10098 cp->max_kwqe_pending = 8;
Dmitry Kravkov523224a2010-10-06 03:23:26 +000010099 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010100 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
10101 bnx2x_cid_ilt_lines(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +000010102 cp->ctx_tbl_len = CNIC_ILT_LINES;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010103 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
Michael Chan993ac7b2009-10-10 13:46:56 +000010104 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
10105 cp->drv_ctl = bnx2x_drv_ctl;
10106 cp->drv_register_cnic = bnx2x_register_cnic;
10107 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
Vladislav Zolotarovec6ba942010-12-13 05:44:01 +000010108 cp->fcoe_init_cid = BNX2X_FCOE_ETH_CID;
10109 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID +
10110 BP_E1HVN(bp) * NONE_ETH_CONTEXT_USE;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010111 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
Michael Chan993ac7b2009-10-10 13:46:56 +000010112
Dmitry Kravkovc2bff632010-10-06 03:33:18 +000010113 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
10114 "starting cid %d\n",
10115 cp->ctx_blk_size,
10116 cp->ctx_tbl_offset,
10117 cp->ctx_tbl_len,
10118 cp->starting_cid);
Michael Chan993ac7b2009-10-10 13:46:56 +000010119 return cp;
10120}
10121EXPORT_SYMBOL(bnx2x_cnic_probe);
10122
10123#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070010124