blob: 1552fc3c1351d68741f40263990700b60eea9f06 [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070059#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h"
/* Firmware file names, built from the FW version the driver was compiled
 * against; requested via request_firmware() at probe time.
 */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020073
Andrew Morton53a10562008-02-09 23:16:41 -080074static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020076 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070078MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000079MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000083MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000085MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020086
Eilon Greenstein555f6c72009-02-12 08:36:11 +000087static int multi_mode = 1;
88module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070089MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
91
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000092int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000093module_param(num_queues, int, 0);
94MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000096
Eilon Greenstein19680c42008-08-13 15:47:33 -070097static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070098module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000099MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000100
101static int int_mode;
102module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000103MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
104 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000105
Eilon Greensteina18f5122009-08-12 08:23:26 +0000106static int dropless_fc;
107module_param(dropless_fc, int, 0);
108MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
109
Eilon Greenstein9898f862009-02-12 08:38:27 +0000110static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200111module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000112MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000113
114static int mrrs = -1;
115module_param(mrrs, int, 0);
116MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117
Eilon Greenstein9898f862009-02-12 08:38:27 +0000118static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200119module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000120MODULE_PARM_DESC(debug, " Default debug msglevel");
121
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800122static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200123
/* Board identifiers; the values double as indices into board_info[] and
 * as the driver_data of the PCI device table below.
 */
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};
131
/* Human-readable board names, indexed by enum bnx2x_board_type */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};
142
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000143#ifndef PCI_DEVICE_ID_NX2_57712
144#define PCI_DEVICE_ID_NX2_57712 0x1662
145#endif
146#ifndef PCI_DEVICE_ID_NX2_57712E
147#define PCI_DEVICE_ID_NX2_57712E 0x1663
148#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700149
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000150static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200156 { 0 }
157};
158
159MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
160
161/****************************************************************************
162* General service functions
163****************************************************************************/
164
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000165static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
166 u32 addr, dma_addr_t mapping)
167{
168 REG_WR(bp, addr, U64_LO(mapping));
169 REG_WR(bp, addr + 4, U64_HI(mapping));
170}
171
172static inline void __storm_memset_fill(struct bnx2x *bp,
173 u32 addr, size_t size, u32 val)
174{
175 int i;
176 for (i = 0; i < size/4; i++)
177 REG_WR(bp, addr + (i * 4), val);
178}
179
180static inline void storm_memset_ustats_zero(struct bnx2x *bp,
181 u8 port, u16 stat_id)
182{
183 size_t size = sizeof(struct ustorm_per_client_stats);
184
185 u32 addr = BAR_USTRORM_INTMEM +
186 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
187
188 __storm_memset_fill(bp, addr, size, 0);
189}
190
191static inline void storm_memset_tstats_zero(struct bnx2x *bp,
192 u8 port, u16 stat_id)
193{
194 size_t size = sizeof(struct tstorm_per_client_stats);
195
196 u32 addr = BAR_TSTRORM_INTMEM +
197 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
198
199 __storm_memset_fill(bp, addr, size, 0);
200}
201
202static inline void storm_memset_xstats_zero(struct bnx2x *bp,
203 u8 port, u16 stat_id)
204{
205 size_t size = sizeof(struct xstorm_per_client_stats);
206
207 u32 addr = BAR_XSTRORM_INTMEM +
208 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);
209
210 __storm_memset_fill(bp, addr, size, 0);
211}
212
213
214static inline void storm_memset_spq_addr(struct bnx2x *bp,
215 dma_addr_t mapping, u16 abs_fid)
216{
217 u32 addr = XSEM_REG_FAST_MEMORY +
218 XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);
219
220 __storm_memset_dma_mapping(bp, addr, mapping);
221}
222
223static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
224{
225 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
226}
227
228static inline void storm_memset_func_cfg(struct bnx2x *bp,
229 struct tstorm_eth_function_common_config *tcfg,
230 u16 abs_fid)
231{
232 size_t size = sizeof(struct tstorm_eth_function_common_config);
233
234 u32 addr = BAR_TSTRORM_INTMEM +
235 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);
236
237 __storm_memset_struct(bp, addr, size, (u32 *)tcfg);
238}
239
240static inline void storm_memset_xstats_flags(struct bnx2x *bp,
241 struct stats_indication_flags *flags,
242 u16 abs_fid)
243{
244 size_t size = sizeof(struct stats_indication_flags);
245
246 u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);
247
248 __storm_memset_struct(bp, addr, size, (u32 *)flags);
249}
250
251static inline void storm_memset_tstats_flags(struct bnx2x *bp,
252 struct stats_indication_flags *flags,
253 u16 abs_fid)
254{
255 size_t size = sizeof(struct stats_indication_flags);
256
257 u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);
258
259 __storm_memset_struct(bp, addr, size, (u32 *)flags);
260}
261
262static inline void storm_memset_ustats_flags(struct bnx2x *bp,
263 struct stats_indication_flags *flags,
264 u16 abs_fid)
265{
266 size_t size = sizeof(struct stats_indication_flags);
267
268 u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);
269
270 __storm_memset_struct(bp, addr, size, (u32 *)flags);
271}
272
273static inline void storm_memset_cstats_flags(struct bnx2x *bp,
274 struct stats_indication_flags *flags,
275 u16 abs_fid)
276{
277 size_t size = sizeof(struct stats_indication_flags);
278
279 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);
280
281 __storm_memset_struct(bp, addr, size, (u32 *)flags);
282}
283
284static inline void storm_memset_xstats_addr(struct bnx2x *bp,
285 dma_addr_t mapping, u16 abs_fid)
286{
287 u32 addr = BAR_XSTRORM_INTMEM +
288 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
289
290 __storm_memset_dma_mapping(bp, addr, mapping);
291}
292
293static inline void storm_memset_tstats_addr(struct bnx2x *bp,
294 dma_addr_t mapping, u16 abs_fid)
295{
296 u32 addr = BAR_TSTRORM_INTMEM +
297 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
298
299 __storm_memset_dma_mapping(bp, addr, mapping);
300}
301
302static inline void storm_memset_ustats_addr(struct bnx2x *bp,
303 dma_addr_t mapping, u16 abs_fid)
304{
305 u32 addr = BAR_USTRORM_INTMEM +
306 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
307
308 __storm_memset_dma_mapping(bp, addr, mapping);
309}
310
311static inline void storm_memset_cstats_addr(struct bnx2x *bp,
312 dma_addr_t mapping, u16 abs_fid)
313{
314 u32 addr = BAR_CSTRORM_INTMEM +
315 CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);
316
317 __storm_memset_dma_mapping(bp, addr, mapping);
318}
319
320static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
321 u16 pf_id)
322{
323 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
324 pf_id);
325 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
326 pf_id);
327 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
328 pf_id);
329 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
330 pf_id);
331}
332
333static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
334 u8 enable)
335{
336 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
337 enable);
338 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
339 enable);
340 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
341 enable);
342 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
343 enable);
344}
345
346static inline void storm_memset_eq_data(struct bnx2x *bp,
347 struct event_ring_data *eq_data,
348 u16 pfid)
349{
350 size_t size = sizeof(struct event_ring_data);
351
352 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);
353
354 __storm_memset_struct(bp, addr, size, (u32 *)eq_data);
355}
356
357static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
358 u16 pfid)
359{
360 u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
361 REG_WR16(bp, addr, eq_prod);
362}
363
364static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
365 u16 fw_sb_id, u8 sb_index,
366 u8 ticks)
367{
368
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000369 int index_offset = CHIP_IS_E2(bp) ?
370 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000371 offsetof(struct hc_status_block_data_e1x, index_data);
372 u32 addr = BAR_CSTRORM_INTMEM +
373 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
374 index_offset +
375 sizeof(struct hc_index_data)*sb_index +
376 offsetof(struct hc_index_data, timeout);
377 REG_WR8(bp, addr, ticks);
378 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
379 port, fw_sb_id, sb_index, ticks);
380}
381static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
382 u16 fw_sb_id, u8 sb_index,
383 u8 disable)
384{
385 u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000386 int index_offset = CHIP_IS_E2(bp) ?
387 offsetof(struct hc_status_block_data_e2, index_data) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +0000388 offsetof(struct hc_status_block_data_e1x, index_data);
389 u32 addr = BAR_CSTRORM_INTMEM +
390 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
391 index_offset +
392 sizeof(struct hc_index_data)*sb_index +
393 offsetof(struct hc_index_data, flags);
394 u16 flags = REG_RD16(bp, addr);
395 /* clear and set */
396 flags &= ~HC_INDEX_DATA_HC_ENABLED;
397 flags |= enable_flag;
398 REG_WR16(bp, addr, flags);
399 DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
400 port, fw_sb_id, sb_index, disable);
401}
402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200403/* used only at init
404 * locking is done by mcp
405 */
stephen hemminger8d962862010-10-21 07:50:56 +0000406static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200407{
408 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
409 pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
410 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
411 PCICFG_VENDOR_ID_OFFSET);
412}
413
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200414static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
415{
416 u32 val;
417
418 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
419 pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
420 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
421 PCICFG_VENDOR_ID_OFFSET);
422
423 return val;
424}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200425
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000426#define DMAE_DP_SRC_GRC "grc src_addr [%08x]"
427#define DMAE_DP_SRC_PCI "pci src_addr [%x:%08x]"
428#define DMAE_DP_DST_GRC "grc dst_addr [%08x]"
429#define DMAE_DP_DST_PCI "pci dst_addr [%x:%08x]"
430#define DMAE_DP_DST_NONE "dst_addr [none]"
431
stephen hemminger8d962862010-10-21 07:50:56 +0000432static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
433 int msglvl)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000434{
435 u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;
436
437 switch (dmae->opcode & DMAE_COMMAND_DST) {
438 case DMAE_CMD_DST_PCI:
439 if (src_type == DMAE_CMD_SRC_PCI)
440 DP(msglvl, "DMAE: opcode 0x%08x\n"
441 "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
442 "comp_addr [%x:%08x], comp_val 0x%08x\n",
443 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
444 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
445 dmae->comp_addr_hi, dmae->comp_addr_lo,
446 dmae->comp_val);
447 else
448 DP(msglvl, "DMAE: opcode 0x%08x\n"
449 "src [%08x], len [%d*4], dst [%x:%08x]\n"
450 "comp_addr [%x:%08x], comp_val 0x%08x\n",
451 dmae->opcode, dmae->src_addr_lo >> 2,
452 dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
453 dmae->comp_addr_hi, dmae->comp_addr_lo,
454 dmae->comp_val);
455 break;
456 case DMAE_CMD_DST_GRC:
457 if (src_type == DMAE_CMD_SRC_PCI)
458 DP(msglvl, "DMAE: opcode 0x%08x\n"
459 "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
460 "comp_addr [%x:%08x], comp_val 0x%08x\n",
461 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
462 dmae->len, dmae->dst_addr_lo >> 2,
463 dmae->comp_addr_hi, dmae->comp_addr_lo,
464 dmae->comp_val);
465 else
466 DP(msglvl, "DMAE: opcode 0x%08x\n"
467 "src [%08x], len [%d*4], dst [%08x]\n"
468 "comp_addr [%x:%08x], comp_val 0x%08x\n",
469 dmae->opcode, dmae->src_addr_lo >> 2,
470 dmae->len, dmae->dst_addr_lo >> 2,
471 dmae->comp_addr_hi, dmae->comp_addr_lo,
472 dmae->comp_val);
473 break;
474 default:
475 if (src_type == DMAE_CMD_SRC_PCI)
476 DP(msglvl, "DMAE: opcode 0x%08x\n"
477 DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
478 "dst_addr [none]\n"
479 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
480 dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
481 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
482 dmae->comp_val);
483 else
484 DP(msglvl, "DMAE: opcode 0x%08x\n"
485 DP_LEVEL "src_addr [%08x] len [%d * 4] "
486 "dst_addr [none]\n"
487 DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
488 dmae->opcode, dmae->src_addr_lo >> 2,
489 dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
490 dmae->comp_val);
491 break;
492 }
493
494}
495
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000496const u32 dmae_reg_go_c[] = {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200497 DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
498 DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
499 DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
500 DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
501};
502
503/* copy command into DMAE command memory and set DMAE command go */
Dmitry Kravkov6c719d02010-07-27 12:36:15 +0000504void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200505{
506 u32 cmd_offset;
507 int i;
508
509 cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
510 for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
511 REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));
512
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700513 DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
514 idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200515 }
516 REG_WR(bp, dmae_reg_go_c[idx], 1);
517}
518
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000519u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
520{
521 return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
522 DMAE_CMD_C_ENABLE);
523}
524
525u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
526{
527 return opcode & ~DMAE_CMD_SRC_RESET;
528}
529
530u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
531 bool with_comp, u8 comp_type)
532{
533 u32 opcode = 0;
534
535 opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
536 (dst_type << DMAE_COMMAND_DST_SHIFT));
537
538 opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);
539
540 opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
541 opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
542 (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
543 opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);
544
545#ifdef __BIG_ENDIAN
546 opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
547#else
548 opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
549#endif
550 if (with_comp)
551 opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
552 return opcode;
553}
554
stephen hemminger8d962862010-10-21 07:50:56 +0000555static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
556 struct dmae_command *dmae,
557 u8 src_type, u8 dst_type)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000558{
559 memset(dmae, 0, sizeof(struct dmae_command));
560
561 /* set the opcode */
562 dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
563 true, DMAE_COMP_PCI);
564
565 /* fill in the completion parameters */
566 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
567 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
568 dmae->comp_val = DMAE_COMP_VAL;
569}
570
571/* issue a dmae command over the init-channel and wailt for completion */
stephen hemminger8d962862010-10-21 07:50:56 +0000572static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
573 struct dmae_command *dmae)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000574{
575 u32 *wb_comp = bnx2x_sp(bp, wb_comp);
576 int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
577 int rc = 0;
578
579 DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
580 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
581 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
582
583 /* lock the dmae channel */
584 mutex_lock(&bp->dmae_mutex);
585
586 /* reset completion */
587 *wb_comp = 0;
588
589 /* post the command on the channel used for initializations */
590 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
591
592 /* wait for completion */
593 udelay(5);
594 while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
595 DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);
596
597 if (!cnt) {
598 BNX2X_ERR("DMAE timeout!\n");
599 rc = DMAE_TIMEOUT;
600 goto unlock;
601 }
602 cnt--;
603 udelay(50);
604 }
605 if (*wb_comp & DMAE_PCI_ERR_FLAG) {
606 BNX2X_ERR("DMAE PCI error!\n");
607 rc = DMAE_PCI_ERROR;
608 }
609
610 DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
611 bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
612 bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);
613
614unlock:
615 mutex_unlock(&bp->dmae_mutex);
616 return rc;
617}
618
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700619void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
620 u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200621{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000622 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700623
624 if (!bp->dmae_ready) {
625 u32 *data = bnx2x_sp(bp, wb_data[0]);
626
627 DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
628 " using indirect\n", dst_addr, len32);
629 bnx2x_init_ind_wr(bp, dst_addr, data, len32);
630 return;
631 }
632
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000633 /* set opcode and fixed command fields */
634 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200635
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000636 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000637 dmae.src_addr_lo = U64_LO(dma_addr);
638 dmae.src_addr_hi = U64_HI(dma_addr);
639 dmae.dst_addr_lo = dst_addr >> 2;
640 dmae.dst_addr_hi = 0;
641 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200642
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000643 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200644
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000645 /* issue the command and wait for completion */
646 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200647}
648
Yaniv Rosnerc18487e2008-06-23 20:27:52 -0700649void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200650{
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000651 struct dmae_command dmae;
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700652
653 if (!bp->dmae_ready) {
654 u32 *data = bnx2x_sp(bp, wb_data[0]);
655 int i;
656
657 DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
658 " using indirect\n", src_addr, len32);
659 for (i = 0; i < len32; i++)
660 data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
661 return;
662 }
663
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000664 /* set opcode and fixed command fields */
665 bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200666
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000667 /* fill in addresses and len */
Eilon Greenstein5ff7b6d2009-08-12 08:23:44 +0000668 dmae.src_addr_lo = src_addr >> 2;
669 dmae.src_addr_hi = 0;
670 dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
671 dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
672 dmae.len = len32;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200673
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000674 bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200675
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000676 /* issue the command and wait for completion */
677 bnx2x_issue_dmae_with_comp(bp, &dmae);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200678}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200679
stephen hemminger8d962862010-10-21 07:50:56 +0000680static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
681 u32 addr, u32 len)
Eilon Greenstein573f2032009-08-12 08:24:14 +0000682{
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000683 int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
Eilon Greenstein573f2032009-08-12 08:24:14 +0000684 int offset = 0;
685
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000686 while (len > dmae_wr_max) {
Eilon Greenstein573f2032009-08-12 08:24:14 +0000687 bnx2x_write_dmae(bp, phys_addr + offset,
Vladislav Zolotarov02e3c6c2010-04-19 01:13:33 +0000688 addr + offset, dmae_wr_max);
689 offset += dmae_wr_max * 4;
690 len -= dmae_wr_max;
Eilon Greenstein573f2032009-08-12 08:24:14 +0000691 }
692
693 bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
694}
695
Eilon Greensteinad8d3942008-06-23 20:29:02 -0700696/* used only for slowpath so not inlined */
697static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
698{
699 u32 wb_write[2];
700
701 wb_write[0] = val_hi;
702 wb_write[1] = val_lo;
703 REG_WR_DMAE(bp, reg, wb_write, 2);
704}
705
#ifdef USE_WB_RD
/* Wide-bus (64-bit) register read; compiled only when USE_WB_RD is set */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
716
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200717static int bnx2x_mc_assert(struct bnx2x *bp)
718{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200719 char last_idx;
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700720 int i, rc = 0;
721 u32 row0, row1, row2, row3;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200722
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700723 /* XSTORM */
724 last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
725 XSTORM_ASSERT_LIST_INDEX_OFFSET);
726 if (last_idx)
727 BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200728
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700729 /* print the asserts */
730 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200731
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700732 row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
733 XSTORM_ASSERT_LIST_OFFSET(i));
734 row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
735 XSTORM_ASSERT_LIST_OFFSET(i) + 4);
736 row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
737 XSTORM_ASSERT_LIST_OFFSET(i) + 8);
738 row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
739 XSTORM_ASSERT_LIST_OFFSET(i) + 12);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200740
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700741 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
742 BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
743 " 0x%08x 0x%08x 0x%08x\n",
744 i, row3, row2, row1, row0);
745 rc++;
746 } else {
747 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200748 }
749 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700750
751 /* TSTORM */
752 last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
753 TSTORM_ASSERT_LIST_INDEX_OFFSET);
754 if (last_idx)
755 BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
756
757 /* print the asserts */
758 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
759
760 row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
761 TSTORM_ASSERT_LIST_OFFSET(i));
762 row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
763 TSTORM_ASSERT_LIST_OFFSET(i) + 4);
764 row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
765 TSTORM_ASSERT_LIST_OFFSET(i) + 8);
766 row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
767 TSTORM_ASSERT_LIST_OFFSET(i) + 12);
768
769 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
770 BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
771 " 0x%08x 0x%08x 0x%08x\n",
772 i, row3, row2, row1, row0);
773 rc++;
774 } else {
775 break;
776 }
777 }
778
779 /* CSTORM */
780 last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
781 CSTORM_ASSERT_LIST_INDEX_OFFSET);
782 if (last_idx)
783 BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
784
785 /* print the asserts */
786 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
787
788 row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
789 CSTORM_ASSERT_LIST_OFFSET(i));
790 row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
791 CSTORM_ASSERT_LIST_OFFSET(i) + 4);
792 row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
793 CSTORM_ASSERT_LIST_OFFSET(i) + 8);
794 row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
795 CSTORM_ASSERT_LIST_OFFSET(i) + 12);
796
797 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
798 BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
799 " 0x%08x 0x%08x 0x%08x\n",
800 i, row3, row2, row1, row0);
801 rc++;
802 } else {
803 break;
804 }
805 }
806
807 /* USTORM */
808 last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
809 USTORM_ASSERT_LIST_INDEX_OFFSET);
810 if (last_idx)
811 BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);
812
813 /* print the asserts */
814 for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {
815
816 row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
817 USTORM_ASSERT_LIST_OFFSET(i));
818 row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
819 USTORM_ASSERT_LIST_OFFSET(i) + 4);
820 row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
821 USTORM_ASSERT_LIST_OFFSET(i) + 8);
822 row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
823 USTORM_ASSERT_LIST_OFFSET(i) + 12);
824
825 if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
826 BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
827 " 0x%08x 0x%08x 0x%08x\n",
828 i, row3, row2, row1, row0);
829 rc++;
830 } else {
831 break;
832 }
833 }
834
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200835 return rc;
836}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800837
/* Dump the MCP firmware trace buffer to the kernel log.
 *
 * The trace control word lives just below the shmem base; 'mark' is the
 * current wrap position inside the scratchpad, so the buffer is printed in
 * two passes: from 'mark' to the end, then from the start up to 'mark'.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];		/* 8 trace words + zero terminator for %s */
	int word;
	u32 trace_shmem_base;
	/* no management CPU -> no firmware trace to read */
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* each path (E2) has its own shmem; path 1 reads it via shmem2 */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* translate the mark into a scratchpad register offset, rounded up
	 * to a 4-byte boundary (0x08000000 is the scratchpad base bias) */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* first half: from the wrap mark to the end of the trace area */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			/* htonl keeps the bytes in memory order for %s */
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	/* second half: from the start of the trace area up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
875
/* Dump driver and device state to the kernel log after a fatal error:
 * default/per-queue status block indices, status block data read back from
 * CSTORM internal memory, and (when built with BNX2X_STOP_ON_ERROR) the
 * Rx/Tx ring contents around the current consumer positions.  Finishes with
 * the firmware trace and the STORM assert lists.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	/* freeze statistics so the dump reflects a stable snapshot */
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
		       bp->def_status_blk->sp_sb.index_values[i],
		       (i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* read the slow-path status block data back from CSTORM memory,
	 * one u32 at a time */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
	       sp_sb_data.igu_sb_id,
	       sp_sb_data.igu_seg_id,
	       sp_sb_data.p_func.pf_id,
	       sp_sb_data.p_func.vnic_id,
	       sp_sb_data.p_func.vf_id,
	       sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		/* E2 and E1x lay out status block data differently; the
		 * pointers below select the matching local buffer, which is
		 * filled from CSTORM memory further down */
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
			       fp->sb_running_index[j],
			       (j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
			       fp->sb_index_values[j],
			       (j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
			       "igu_sb_id (0x%x) igu_seg_id(0x%x) "
			       "time_to_expire (0x%x) "
			       "timer_value(0x%x)\n", j,
			       hc_sm_p[j].__flags,
			       hc_sm_p[j].igu_sb_id,
			       hc_sm_p[j].igu_seg_id,
			       hc_sm_p[j].time_to_expire,
			       hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
					 "timeout (0x%x)\n", j,
			       hc_index_p[j].flags,
			       hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window of BDs around the current consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1092
/* Enable interrupts through the HC (host coalescing) block for this port.
 *
 * Programs HC_REG_CONFIG_x according to the active interrupt mode
 * (MSI-X / MSI / INTx) and sets up the leading/trailing edge registers on
 * non-E1 chips.  Register write ordering is enforced with mmiowb().
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		/* MSI-X: per-vector delivery, no single-ISR / INTx line */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: enable everything first ... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			/* ... then on non-E1 drop the MSI/MSIX enable with a
			 * second write below (intermediate write here) */
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	/* E1 only: open the HC interrupt mask register (its counterpart is
	 * written with 0 in bnx2x_hc_int_disable) */
	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1157
/* Enable interrupts through the IGU block (E2-style interrupt unit).
 *
 * Programs IGU_REG_PF_CONFIGURATION for the active interrupt mode
 * (MSI-X / MSI / INTx) and sets up the leading/trailing edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: per-vector delivery, no INTx line / single ISR */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		/* MSI: single ISR, no INTx line */
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		/* INTx: single ISR over the interrupt line, no MSI/MSI-X */
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	/* order the configuration write before the edge latch writes */
	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1208
1209void bnx2x_int_enable(struct bnx2x *bp)
1210{
1211 if (bp->common.int_block == INT_BLOCK_HC)
1212 bnx2x_hc_int_enable(bp);
1213 else
1214 bnx2x_igu_int_enable(bp);
1215}
1216
/* Disable interrupts through the HC block for this port.
 * Masks interrupt delivery in HC_REG_CONFIG_x and, on E1, via the HC
 * interrupt mask register; verifies the write with a read-back.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable
	 * MSI/MSIX capability
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		/* non-E1: safe to clear the MSI/MSIX enable bit as well */
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1254
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001255static void bnx2x_igu_int_disable(struct bnx2x *bp)
1256{
1257 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1258
1259 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1260 IGU_PF_CONF_INT_LINE_EN |
1261 IGU_PF_CONF_ATTN_BIT_EN);
1262
1263 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1264
1265 /* flush all outstanding writes */
1266 mmiowb();
1267
1268 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1269 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1270 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1271}
1272
stephen hemminger8d962862010-10-21 07:50:56 +00001273static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001274{
1275 if (bp->common.int_block == INT_BLOCK_HC)
1276 bnx2x_hc_int_disable(bp);
1277 else
1278 bnx2x_igu_int_disable(bp);
1279}
1280
/* Disable interrupt handling and wait until all ISRs and the slow-path
 * task have finished.
 *
 * @bp:         driver handle
 * @disable_hw: when non-zero, also mask interrupts at the hardware level
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		/* vector 0 is the slow-path vector; fastpath vectors follow
		 * it (shifted by one more when CNIC owns a vector) */
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1310
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001311/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001312
1313/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001314 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001315 */
1316
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001317/* Return true if succeeded to acquire the lock */
1318static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
1319{
1320 u32 lock_status;
1321 u32 resource_bit = (1 << resource);
1322 int func = BP_FUNC(bp);
1323 u32 hw_lock_control_reg;
1324
1325 DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);
1326
1327 /* Validating that the resource is within range */
1328 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1329 DP(NETIF_MSG_HW,
1330 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1331 resource, HW_LOCK_MAX_RESOURCE_VALUE);
Eric Dumazet0fdf4d02010-08-26 22:03:53 -07001332 return false;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001333 }
1334
1335 if (func <= 5)
1336 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1337 else
1338 hw_lock_control_reg =
1339 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1340
1341 /* Try to acquire the lock */
1342 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1343 lock_status = REG_RD(bp, hw_lock_control_reg);
1344 if (lock_status & resource_bit)
1345 return true;
1346
1347 DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
1348 return false;
1349}
1350
Michael Chan993ac7b2009-10-10 13:46:56 +00001351#ifdef BCM_CNIC
1352static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1353#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001354
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001356 union eth_rx_cqe *rr_cqe)
1357{
1358 struct bnx2x *bp = fp->bp;
1359 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1360 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1361
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001362 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001363 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001364 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001365 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001366
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001367 switch (command | fp->state) {
1368 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1369 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1370 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001371 break;
1372
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001373 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1374 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001375 fp->state = BNX2X_FP_STATE_HALTED;
1376 break;
1377
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001378 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1379 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1380 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001381 break;
1382
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001383 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001384 BNX2X_ERR("unexpected MC reply (%d) "
1385 "fp[%d] state is %x\n",
1386 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001387 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001388 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001389
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001390 smp_mb__before_atomic_inc();
1391 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001392 /* push the change in fp->state and towards the memory */
1393 smp_wmb();
1394
1395 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001396}
1397
/* Legacy INTx / MSI interrupt handler (MSI-X mode registers per-vector
 * handlers instead).
 *
 * Acks and reads the interrupt status, schedules NAPI for every fastpath
 * whose status bit is set, forwards CNIC bits to the registered cnic
 * handler, and queues the slow-path task for status bit 0.
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each queue's status-block bit, offset past the CNIC bits */
		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* cnic_ops is RCU-protected; handler may be NULL if CNIC
		 * is not registered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	/* bit 0 is the slow-path (default status block) indication */
	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	/* any bits still set belong to no known source */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1466
1467/* end of fast path */
1468
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001469
1470/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001471
1472/*
1473 * General service functions
1474 */
1475
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001476int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001477{
Eliezer Tamirf1410642008-02-28 11:51:50 -08001478 u32 lock_status;
1479 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001480 int func = BP_FUNC(bp);
1481 u32 hw_lock_control_reg;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001482 int cnt;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001483
1484 /* Validating that the resource is within range */
1485 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1486 DP(NETIF_MSG_HW,
1487 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1488 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1489 return -EINVAL;
1490 }
1491
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001492 if (func <= 5) {
1493 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1494 } else {
1495 hw_lock_control_reg =
1496 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1497 }
1498
Eliezer Tamirf1410642008-02-28 11:51:50 -08001499 /* Validating that the resource is not already taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001500 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001501 if (lock_status & resource_bit) {
1502 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1503 lock_status, resource_bit);
1504 return -EEXIST;
1505 }
1506
Eilon Greenstein46230476b2008-08-25 15:23:30 -07001507 /* Try for 5 second every 5ms */
1508 for (cnt = 0; cnt < 1000; cnt++) {
Eliezer Tamirf1410642008-02-28 11:51:50 -08001509 /* Try to acquire the lock */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001510 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1511 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001512 if (lock_status & resource_bit)
1513 return 0;
1514
1515 msleep(5);
1516 }
1517 DP(NETIF_MSG_HW, "Timeout\n");
1518 return -EAGAIN;
1519}
1520
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001521int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001522{
1523 u32 lock_status;
1524 u32 resource_bit = (1 << resource);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001525 int func = BP_FUNC(bp);
1526 u32 hw_lock_control_reg;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001527
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00001528 DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);
1529
Eliezer Tamirf1410642008-02-28 11:51:50 -08001530 /* Validating that the resource is within range */
1531 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1532 DP(NETIF_MSG_HW,
1533 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1534 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1535 return -EINVAL;
1536 }
1537
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001538 if (func <= 5) {
1539 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1540 } else {
1541 hw_lock_control_reg =
1542 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1543 }
1544
Eliezer Tamirf1410642008-02-28 11:51:50 -08001545 /* Validating that the resource is currently taken */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001546 lock_status = REG_RD(bp, hw_lock_control_reg);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001547 if (!(lock_status & resource_bit)) {
1548 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1549 lock_status, resource_bit);
1550 return -EFAULT;
1551 }
1552
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001553 REG_WR(bp, hw_lock_control_reg, resource_bit);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001554 return 0;
1555}
1556
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001557
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001558int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1559{
1560 /* The GPIO should be swapped if swap register is set and active */
1561 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1562 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1563 int gpio_shift = gpio_num +
1564 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1565 u32 gpio_mask = (1 << gpio_shift);
1566 u32 gpio_reg;
1567 int value;
1568
1569 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1570 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1571 return -EINVAL;
1572 }
1573
1574 /* read GPIO value */
1575 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1576
1577 /* get the requested pin value */
1578 if ((gpio_reg & gpio_mask) == gpio_mask)
1579 value = 1;
1580 else
1581 value = 0;
1582
1583 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1584
1585 return value;
1586}
1587
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001588int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
Eliezer Tamirf1410642008-02-28 11:51:50 -08001589{
1590 /* The GPIO should be swapped if swap register is set and active */
1591 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001592 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001593 int gpio_shift = gpio_num +
1594 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1595 u32 gpio_mask = (1 << gpio_shift);
1596 u32 gpio_reg;
1597
1598 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1599 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1600 return -EINVAL;
1601 }
1602
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001603 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001604 /* read GPIO and mask except the float bits */
1605 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
1606
1607 switch (mode) {
1608 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1609 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1610 gpio_num, gpio_shift);
1611 /* clear FLOAT and set CLR */
1612 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1613 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1614 break;
1615
1616 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1617 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1618 gpio_num, gpio_shift);
1619 /* clear FLOAT and set SET */
1620 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1621 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1622 break;
1623
Eilon Greenstein17de50b2008-08-13 15:56:59 -07001624 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001625 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1626 gpio_num, gpio_shift);
1627 /* set FLOAT */
1628 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1629 break;
1630
1631 default:
1632 break;
1633 }
1634
1635 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001636 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001637
1638 return 0;
1639}
1640
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001641int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
1642{
1643 /* The GPIO should be swapped if swap register is set and active */
1644 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1645 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1646 int gpio_shift = gpio_num +
1647 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1648 u32 gpio_mask = (1 << gpio_shift);
1649 u32 gpio_reg;
1650
1651 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1652 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1653 return -EINVAL;
1654 }
1655
1656 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1657 /* read GPIO int */
1658 gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);
1659
1660 switch (mode) {
1661 case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
1662 DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
1663 "output low\n", gpio_num, gpio_shift);
1664 /* clear SET and set CLR */
1665 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1666 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1667 break;
1668
1669 case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
1670 DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
1671 "output high\n", gpio_num, gpio_shift);
1672 /* clear CLR and set SET */
1673 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
1674 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
1675 break;
1676
1677 default:
1678 break;
1679 }
1680
1681 REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
1682 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
1683
1684 return 0;
1685}
1686
Eliezer Tamirf1410642008-02-28 11:51:50 -08001687static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
1688{
1689 u32 spio_mask = (1 << spio_num);
1690 u32 spio_reg;
1691
1692 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1693 (spio_num > MISC_REGISTERS_SPIO_7)) {
1694 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1695 return -EINVAL;
1696 }
1697
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001698 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001699 /* read SPIO and mask except the float bits */
1700 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
1701
1702 switch (mode) {
Eilon Greenstein6378c022008-08-13 15:59:25 -07001703 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001704 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1705 /* clear FLOAT and set CLR */
1706 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1707 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1708 break;
1709
Eilon Greenstein6378c022008-08-13 15:59:25 -07001710 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
Eliezer Tamirf1410642008-02-28 11:51:50 -08001711 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1712 /* clear FLOAT and set SET */
1713 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1714 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1715 break;
1716
1717 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1718 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1719 /* set FLOAT */
1720 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1721 break;
1722
1723 default:
1724 break;
1725 }
1726
1727 REG_WR(bp, MISC_REG_SPIO, spio_reg);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001728 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001729
1730 return 0;
1731}
1732
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001733int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1734{
1735 u32 sel_phy_idx = 0;
1736 if (bp->link_vars.link_up) {
1737 sel_phy_idx = EXT_PHY1;
1738 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1739 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1740 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1741 sel_phy_idx = EXT_PHY2;
1742 } else {
1743
1744 switch (bnx2x_phy_selection(&bp->link_params)) {
1745 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1746 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1747 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1748 sel_phy_idx = EXT_PHY1;
1749 break;
1750 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1751 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1752 sel_phy_idx = EXT_PHY2;
1753 break;
1754 }
1755 }
1756 /*
1757 * The selected actived PHY is always after swapping (in case PHY
1758 * swapping is enabled). So when swapping is enabled, we need to reverse
1759 * the configuration
1760 */
1761
1762 if (bp->link_params.multi_phy_config &
1763 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1764 if (sel_phy_idx == EXT_PHY1)
1765 sel_phy_idx = EXT_PHY2;
1766 else if (sel_phy_idx == EXT_PHY2)
1767 sel_phy_idx = EXT_PHY1;
1768 }
1769 return LINK_CONFIG_IDX(sel_phy_idx);
1770}
1771
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001772void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001773{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001774 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001775 switch (bp->link_vars.ieee_fc &
1776 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001777 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001778 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001779 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001780 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001781
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001782 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001783 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001784 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001785 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001786
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001787 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001788 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001789 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001790
Eliezer Tamirf1410642008-02-28 11:51:50 -08001791 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001792 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001793 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001794 break;
1795 }
1796}
1797
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001798u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001799{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001800 if (!BP_NOMCP(bp)) {
1801 u8 rc;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001802 int cfx_idx = bnx2x_get_link_cfg_idx(bp);
1803 u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
Eilon Greenstein19680c42008-08-13 15:47:33 -07001804 /* Initialize link parameters structure variables */
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001805 /* It is recommended to turn off RX FC for jumbo frames
1806 for better performance */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001807 if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
David S. Millerc0700f92008-12-16 23:53:20 -08001808 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
Yaniv Rosner8c99e7b2008-08-13 15:56:17 -07001809 else
David S. Millerc0700f92008-12-16 23:53:20 -08001810 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001811
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001812 bnx2x_acquire_phy_lock(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001813
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001814 if (load_mode == LOAD_DIAG) {
Yaniv Rosnerde6eae12010-09-07 11:41:13 +00001815 bp->link_params.loopback_mode = LOOPBACK_XGXS;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001816 bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
1817 }
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001818
Eilon Greenstein19680c42008-08-13 15:47:33 -07001819 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001820
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001821 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001822
Eilon Greenstein3c96c682009-01-14 21:25:31 -08001823 bnx2x_calc_fc_adv(bp);
1824
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001825 if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
1826 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001827 bnx2x_link_report(bp);
Eilon Greensteinb5bf9062009-02-12 08:38:08 +00001828 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001829 bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
Eilon Greenstein19680c42008-08-13 15:47:33 -07001830 return rc;
1831 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001832 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
Eilon Greenstein19680c42008-08-13 15:47:33 -07001833 return -EINVAL;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001834}
1835
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001836void bnx2x_link_set(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001837{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001838 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001839 bnx2x_acquire_phy_lock(bp);
Yaniv Rosner54c2fb72010-09-01 09:51:23 +00001840 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001841 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001842 bnx2x_release_phy_lock(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001843
Eilon Greenstein19680c42008-08-13 15:47:33 -07001844 bnx2x_calc_fc_adv(bp);
1845 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001846 BNX2X_ERR("Bootcode is missing - can not set link\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001847}
1848
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001849static void bnx2x__link_reset(struct bnx2x *bp)
1850{
Eilon Greenstein19680c42008-08-13 15:47:33 -07001851 if (!BP_NOMCP(bp)) {
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001852 bnx2x_acquire_phy_lock(bp);
Eilon Greenstein589abe32009-02-12 08:36:55 +00001853 bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07001854 bnx2x_release_phy_lock(bp);
Eilon Greenstein19680c42008-08-13 15:47:33 -07001855 } else
Eilon Greensteinf5372252009-02-12 08:38:30 +00001856 BNX2X_ERR("Bootcode is missing - can not reset link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001857}
1858
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001859u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001860{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001861 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001862
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001863 if (!BP_NOMCP(bp)) {
1864 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001865 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1866 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001867 bnx2x_release_phy_lock(bp);
1868 } else
1869 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001870
1871 return rc;
1872}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001873
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001874static void bnx2x_init_port_minmax(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001875{
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001876 u32 r_param = bp->link_vars.line_speed / 8;
1877 u32 fair_periodic_timeout_usec;
1878 u32 t_fair;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001879
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001880 memset(&(bp->cmng.rs_vars), 0,
1881 sizeof(struct rate_shaping_vars_per_port));
1882 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001883
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001884 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
1885 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001886
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001887 /* this is the threshold below which no timer arming will occur
1888 1.25 coefficient is for the threshold to be a little bigger
1889 than the real time, to compensate for timer in-accuracy */
1890 bp->cmng.rs_vars.rs_threshold =
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001891 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
1892
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001893 /* resolution of fairness timer */
1894 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
1895 /* for 10G it is 1000usec. for 1G it is 10000usec. */
1896 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001897
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001898 /* this is the threshold below which we won't arm the timer anymore */
1899 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001900
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001901 /* we multiply by 1e3/8 to get bytes/msec.
1902 We don't want the credits to pass a credit
1903 of the t_fair*FAIR_MEM (algorithm resolution) */
1904 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
1905 /* since each tick is 4 usec */
1906 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001907}
1908
Eilon Greenstein2691d512009-08-12 08:22:08 +00001909/* Calculates the sum of vn_min_rates.
1910 It's needed for further normalizing of the min_rates.
1911 Returns:
1912 sum of vn_min_rates.
1913 or
1914 0 - if all the min_rates are 0.
1915 In the later case fainess algorithm should be deactivated.
1916 If not all min_rates are zero then those that are zeroes will be set to 1.
1917 */
1918static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1919{
1920 int all_zero = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001921 int vn;
1922
1923 bp->vn_weight_sum = 0;
1924 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001925 u32 vn_cfg = bp->mf_config[vn];
Eilon Greenstein2691d512009-08-12 08:22:08 +00001926 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1927 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1928
1929 /* Skip hidden vns */
1930 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1931 continue;
1932
1933 /* If min rate is zero - set it to 1 */
1934 if (!vn_min_rate)
1935 vn_min_rate = DEF_MIN_RATE;
1936 else
1937 all_zero = 0;
1938
1939 bp->vn_weight_sum += vn_min_rate;
1940 }
1941
1942 /* ... only if all min rates are zeros - disable fairness */
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001943 if (all_zero) {
1944 bp->cmng.flags.cmng_enables &=
1945 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1946 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
1947 " fairness will be disabled\n");
1948 } else
1949 bp->cmng.flags.cmng_enables |=
1950 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
Eilon Greenstein2691d512009-08-12 08:22:08 +00001951}
1952
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001953static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001954{
1955 struct rate_shaping_vars_per_vn m_rs_vn;
1956 struct fairness_vars_per_vn m_fair_vn;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001957 u32 vn_cfg = bp->mf_config[vn];
1958 int func = 2*vn + BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001959 u16 vn_min_rate, vn_max_rate;
1960 int i;
1961
1962 /* If function is hidden - set min and max to zeroes */
1963 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1964 vn_min_rate = 0;
1965 vn_max_rate = 0;
1966
1967 } else {
1968 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1969 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001970 /* If min rate is zero - set it to 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001971 if (bp->vn_weight_sum && (vn_min_rate == 0))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001972 vn_min_rate = DEF_MIN_RATE;
1973 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1974 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1975 }
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001976
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001977 DP(NETIF_MSG_IFUP,
Eilon Greensteinb015e3d2009-10-15 00:17:20 -07001978 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001979 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001980
1981 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1982 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1983
1984 /* global vn counter - maximal Mbps for this vn */
1985 m_rs_vn.vn_counter.rate = vn_max_rate;
1986
1987 /* quota - number of bytes transmitted in this period */
1988 m_rs_vn.vn_counter.quota =
1989 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1990
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001991 if (bp->vn_weight_sum) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001992 /* credit for each period of the fairness algorithm:
1993 number of bytes in T_FAIR (the vn share the port rate).
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00001994 vn_weight_sum should not be larger than 10000, thus
1995 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1996 than zero */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001997 m_fair_vn.vn_credit_delta =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001998 max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1999 (8 * bp->vn_weight_sum))),
2000 (bp->cmng.fair_vars.fair_threshold * 2));
2001 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002002 m_fair_vn.vn_credit_delta);
2003 }
2004
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002005 /* Store it to internal memory */
2006 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2007 REG_WR(bp, BAR_XSTRORM_INTMEM +
2008 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2009 ((u32 *)(&m_rs_vn))[i]);
2010
2011 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2012 REG_WR(bp, BAR_XSTRORM_INTMEM +
2013 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2014 ((u32 *)(&m_fair_vn))[i]);
2015}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002016
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002017static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
2018{
2019 if (CHIP_REV_IS_SLOW(bp))
2020 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002021 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002022 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002023
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002024 return CMNG_FNS_NONE;
2025}
2026
2027static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2028{
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002029 int vn, n = (CHIP_MODE_IS_4_PORT(bp) ? 2 : 1);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002030
2031 if (BP_NOMCP(bp))
2032 return; /* what should be the default bvalue in this case */
2033
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002034 /* For 2 port configuration the absolute function number formula
2035 * is:
2036 * abs_func = 2 * vn + BP_PORT + BP_PATH
2037 *
2038 * and there are 4 functions per port
2039 *
2040 * For 4 port configuration it is
2041 * abs_func = 4 * vn + 2 * BP_PORT + BP_PATH
2042 *
2043 * and there are 2 functions per port
2044 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002045 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08002046 int /*abs*/func = n * (2 * vn + BP_PORT(bp)) + BP_PATH(bp);
2047
2048 if (func >= E1H_FUNC_MAX)
2049 break;
2050
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002051 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002052 MF_CFG_RD(bp, func_mf_config[func].config);
2053 }
2054}
2055
2056static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2057{
2058
2059 if (cmng_type == CMNG_FNS_MINMAX) {
2060 int vn;
2061
2062 /* clear cmng_enables */
2063 bp->cmng.flags.cmng_enables = 0;
2064
2065 /* read mf conf from shmem */
2066 if (read_cfg)
2067 bnx2x_read_mf_cfg(bp);
2068
2069 /* Init rate shaping and fairness contexts */
2070 bnx2x_init_port_minmax(bp);
2071
2072 /* vn_weight_sum and enable fairness if not 0 */
2073 bnx2x_calc_vn_weight_sum(bp);
2074
2075 /* calculate and set min-max rate for each vn */
2076 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2077 bnx2x_init_vn_minmax(bp, vn);
2078
2079 /* always enable rate shaping and fairness */
2080 bp->cmng.flags.cmng_enables |=
2081 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2082 if (!bp->vn_weight_sum)
2083 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2084 " fairness will be disabled\n");
2085 return;
2086 }
2087
2088 /* rate shaping and fairness are disabled */
2089 DP(NETIF_MSG_IFUP,
2090 "rate shaping and fairness are disabled\n");
2091}
2092
2093static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2094{
2095 int port = BP_PORT(bp);
2096 int func;
2097 int vn;
2098
2099 /* Set the attention towards other drivers on the same port */
2100 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2101 if (vn == BP_E1HVN(bp))
2102 continue;
2103
2104 func = ((vn << 1) | port);
2105 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2106 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2107 }
2108}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002110/* This function is called upon link interrupt */
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002111static void bnx2x_link_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002112{
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002113 u32 prev_link_status = bp->link_vars.link_status;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002114 /* Make sure that we are synced with the current statistics */
2115 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2116
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002117 bnx2x_link_update(&bp->link_params, &bp->link_vars);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002118
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002119 if (bp->link_vars.link_up) {
2120
Eilon Greenstein1c063282009-02-12 08:36:43 +00002121 /* dropless flow control */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002122 if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
Eilon Greenstein1c063282009-02-12 08:36:43 +00002123 int port = BP_PORT(bp);
2124 u32 pause_enabled = 0;
2125
2126 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2127 pause_enabled = 1;
2128
2129 REG_WR(bp, BAR_USTRORM_INTMEM +
Eilon Greensteinca003922009-08-12 22:53:28 -07002130 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
Eilon Greenstein1c063282009-02-12 08:36:43 +00002131 pause_enabled);
2132 }
2133
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002134 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2135 struct host_port_stats *pstats;
2136
2137 pstats = bnx2x_sp(bp, port_stats);
2138 /* reset old bmac stats */
2139 memset(&(pstats->mac_stx[0]), 0,
2140 sizeof(struct mac_stx));
2141 }
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002142 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002143 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2144 }
2145
Vladislav Zolotarovd9e8b182010-04-19 01:15:08 +00002146 /* indicate link status only if link status actually changed */
2147 if (prev_link_status != bp->link_vars.link_status)
2148 bnx2x_link_report(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002149
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002150 if (IS_MF(bp))
2151 bnx2x_link_sync_notify(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002152
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002153 if (bp->link_vars.link_up && bp->link_vars.line_speed) {
2154 int cmng_fns = bnx2x_get_cmng_fns_mode(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002155
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002156 if (cmng_fns != CMNG_FNS_NONE) {
2157 bnx2x_cmng_fns_init(bp, false, cmng_fns);
2158 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2159 } else
2160 /* rate shaping and fairness are disabled */
2161 DP(NETIF_MSG_IFUP,
2162 "single function mode without fairness\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002163 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002164}
2165
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002166void bnx2x__link_status_update(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002167{
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002168 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002169 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002170
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002171 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2172
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002173 if (bp->link_vars.link_up)
2174 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2175 else
2176 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2177
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002178 /* the link status update could be the result of a DCC event
2179 hence re-read the shmem mf configuration */
2180 bnx2x_read_mf_cfg(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002181
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002182 /* indicate link status */
2183 bnx2x_link_report(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002184}
2185
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002186static void bnx2x_pmf_update(struct bnx2x *bp)
2187{
2188 int port = BP_PORT(bp);
2189 u32 val;
2190
2191 bp->port.pmf = 1;
2192 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2193
2194 /* enable nig attention */
2195 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002196 if (bp->common.int_block == INT_BLOCK_HC) {
2197 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2198 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2199 } else if (CHIP_IS_E2(bp)) {
2200 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
2201 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
2202 }
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07002203
2204 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002205}
2206
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002207/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002208
2209/* slow path */
2210
2211/*
2212 * General service functions
2213 */
2214
/* send the MCP a request, block until there is a reply */
/*
 * Writes @param and (@command | sequence number) into this function's
 * shared-memory mailbox, then polls the firmware's reply header until the
 * returned sequence number matches (up to ~5 seconds).  Returns the reply
 * code masked by FW_MSG_CODE_MASK, or 0 on firmware timeout.
 * Serialized by bp->fw_mb_mutex; sleeps, so must not be called in atomic
 * context.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	/* slow (emulation/FPGA) chips get a longer per-poll delay */
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	/* param must be in place before the header that triggers the FW */
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2255
/* must be called under rtnl_lock */
/*
 * Translate the BNX2X_* filter flag bitmask into per-client bits in
 * bp->mac_filters: for unicast/multicast/broadcast each client (@cl_id
 * selects the bit) gets a "drop all" and an "accept all" mask, plus an
 * "unmatched unicast" mask.  Only updates the in-memory structure; the
 * caller is responsible for pushing it to the chip/firmware.
 */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_ACCEPT_UNMATCHED_UCAST)
		unmatched_unicast = 1;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
		if (IS_MF_SI(bp)) {
			/*
			 * SI mode defines to accept in promiscuous mode
			 * only unmatched packets
			 */
			unmatched_unicast = 1;
			accp_all_ucast = 0;
		}
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
		if (IS_MF_SI(bp))
			/* since mcast addresses won't arrive with ovlan,
			 * fw needs to accept all of them in
			 * switch-independent mode */
			accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* set or clear this client's bit in each mask per the flags above */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2339
/*
 * bnx2x_func_init - program per-function configuration into STORM memories.
 *
 * From @p, writes the TSTORM common config (TPA enable, RSS mode/caps/
 * result mask), enables the function in firmware, and — per the flags in
 * p->func_flgs — sets up the statistics collection addresses in all four
 * STORMs and the slow-path queue (SPQ) address/producer.
 */
static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		/* same flags/address programmed into X/T/U/C STORMs */
		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2397
/*
 * bnx2x_get_cl_flags - build the QUEUE_FLG_* mask for a client queue.
 *
 * Always sets cache-align, host-coalescing, VLAN and statistics flags;
 * adds outer-VLAN handling in multi-function SD mode and TPA when the
 * fastpath has it enabled.
 */
static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
	struct bnx2x_fastpath *fp)
{
	u16 flags = 0;

	/* calculate queue flags */
	flags |= QUEUE_FLG_CACHE_ALIGN;
	flags |= QUEUE_FLG_HC;
	flags |= IS_MF_SD(bp) ? QUEUE_FLG_OV : 0;

	flags |= QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	if (!fp->disable_tpa)
		flags |= QUEUE_FLG_TPA;

	flags |= QUEUE_FLG_STATS;

	return flags;
}
2418
/*
 * bnx2x_pf_rx_cl_prep - fill Rx-queue and pause-threshold init parameters
 * for one fastpath client.
 *
 * Computes TPA aggregation/SGE sizing when TPA is enabled, sets flow
 * control thresholds (non-E1 chips only), and copies DMA mappings, client
 * ids, MTU/buffer sizes and the host-coalescing rate from @bp/@fp into
 * @pause and @rxq_init.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* aggregation limited to min(8, MAX_SKB_FRAGS) SGE pages,
		 * capped at 0xffff bytes */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		/* round up to a whole number of SGE pages */
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* interrupt coalescing rate in us -> events/sec (0 = disabled) */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2480
/*
 * bnx2x_pf_tx_cl_prep - fill Tx-queue init parameters for one fastpath
 * client: queue flags, context/descriptor mappings, client/connection ids
 * and the host-coalescing rate.
 */
static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
{
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	txq_init->flags = flags;
	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	txq_init->dscr_map = fp->tx_desc_mapping;
	txq_init->stat_id = fp->cl_id;
	txq_init->cid = HW_CID(bp, fp->cid);
	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;
	/* interrupt coalescing rate in us -> events/sec (0 = disabled) */
	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}
2496
/*
 * bnx2x_pf_init - per-PF initialization performed at load time.
 *
 * Programs the outer VLAN tag (non-E1), clears IGU statistics counters on
 * E2, builds and applies the function init parameters (stats, SPQ, RSS,
 * TPA), initializes congestion management with a nominal 10G link rate,
 * disables Rx until the link is up, and sets up the event queue data.
 */
static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* E1x: TPA only if the driver flag requests it; E2: always on */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	Congestion management values depend on the link rate
	There is no active link so initial link rate is set to 10 Gbps.
	When the link comes up The congestion management values are
	re-calculated according to the actual link rate.
	*/
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2579
2580
/*
 * bnx2x_e1h_disable - quiesce a disabled multi-function E1H function:
 * stop the Tx queues, turn off this port's LLH function-enable bit in
 * the NIG, and drop the carrier.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2591
/*
 * bnx2x_e1h_enable - re-enable a previously disabled multi-function E1H
 * function: turn this port's LLH function-enable bit back on and wake
 * the Tx queues.
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2606
/* called due to MCP event (on pmf):
 *	reread new bandwidth configuration
 *	configure FW
 *	notify others function about the change
 */
static inline void bnx2x_config_mf_bw(struct bnx2x *bp)
{
	/* only recompute min/max rates when there is a live link rate */
	if (bp->link_vars.link_up) {
		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
	}
	/* push the (possibly unchanged) cmng values to storm memory */
	storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
}
2620
/*
 * bnx2x_set_mf_bw - apply a new multi-function bandwidth configuration
 * and acknowledge the SET_MF_BW request to the MCP.
 */
static inline void bnx2x_set_mf_bw(struct bnx2x *bp)
{
	bnx2x_config_mf_bw(bp);
	bnx2x_fw_command(bp, DRV_MSG_CODE_SET_MF_BW_ACK, 0);
}
2626
/*
 * bnx2x_dcc_event - handle a Dynamic Control Command event from the MCP.
 *
 * Processes function disable/enable and bandwidth-allocation requests,
 * clearing each handled bit from @dcc_event.  Finally reports back to the
 * MCP: DCC_OK if everything was handled, DCC_FAILURE if any bit remains.
 */
static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change so it is done without any
		 * locks
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
		bnx2x_config_mf_bw(bp);
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}
2662
/* must be called under the spq lock */
/*
 * Returns the current SPQ producer BD and advances the producer,
 * wrapping both the BD pointer and the producer index back to the start
 * of the ring when the last BD is reached.
 */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		/* wrap around to the beginning of the ring */
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}
2678
/* must be called under the spq lock */
/*
 * Publish the new SPQ producer index to the XSTORM so the firmware
 * picks up the newly queued slow-path element(s).
 */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* flush the posted write before anything else happens */
	mmiowb();
}
2691
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * bnx2x_sp_post - queue one slow-path (ramrod) element on the SPQ.
 *
 * @command:  ramrod command id
 * @cid:      connection id the ramrod applies to
 * @data_hi/@data_lo: DMA address (or data) carried by the element
 * @common:   non-zero for common (NONE-connection) ramrods, zero for ETH
 *
 * Returns 0 on success, -EBUSY (after panicking) when the ring is full,
 * -EIO when the driver already panicked (BNX2X_STOP_ON_ERROR builds).
 * Serialized by bp->spq_lock.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 *	FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 *	TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2760
/* acquire split MCP access lock register */
/*
 * Polls the MCP access-lock GRC register (bit 31) for up to ~5 seconds
 * (1000 * 5ms).  Returns 0 when the lock is acquired, -EBUSY on timeout.
 * May sleep.
 */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 j, val;
	int rc = 0;

	might_sleep();
	for (j = 0; j < 1000; j++) {
		/* request the lock, then read back to see if we got it */
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
2784
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* clearing the register (incl. bit 31) releases the lock */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2790
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002791#define BNX2X_DEF_SB_ATT_IDX 0x0001
2792#define BNX2X_DEF_SB_IDX 0x0002
2793
/*
 * bnx2x_update_dsb_idx - snapshot the default status block indices.
 *
 * Compares the chip-written attention and slow-path running indices with
 * the driver's cached copies, updates the cache, and returns a bitmask of
 * BNX2X_DEF_SB_ATT_IDX / BNX2X_DEF_SB_IDX telling the caller which parts
 * need handling.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2814
2815/*
2816 * slow path service functions
2817 */
2818
/*
 * bnx2x_attn_int_asserted - handle newly asserted attention bits.
 *
 * Masks the asserted bits in the AEU (under the per-port HW lock),
 * records them in bp->attn_state, services the hard-wired attentions
 * (NIG/link, SW timers, GPIOs, general attentions), acknowledges the
 * bits to the HC or IGU, and finally restores the saved NIG mask.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* a bit should never assert twice without deasserting in between */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions 1-3 belong to port 0, 4-6 to port 1 */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* ack the asserted bits to the interrupt block (HC or IGU) */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2919
/*
 * bnx2x_fan_failure - record a fan failure in shared memory.
 *
 * Replaces the external PHY type in the port's shared-memory hardware
 * config with the FAILURE marker and logs the event.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage. Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002939
/*
 * bnx2x_attn_int_deasserted0 - service group-0 deasserted attentions:
 * SPIO5 (fan failure -> PHY reset + shmem marking), GPIO3 (SFP module
 * detect), and fatal HW block attentions (masked then panic).
 */
static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		/* mask SPIO5 so it cannot re-fire */
		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		/* Fan failure attention */
		bnx2x_hw_reset_phy(&bp->link_params);
		bnx2x_fan_failure(bp);
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
		    AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_handle_module_detect_int(&bp->link_params);
		bnx2x_release_phy_lock(bp);
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		/* mask the fatal bits, then bring the driver down */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}
2980
/* Handle the de-asserted attention bits from AEU group 1: doorbell queue
 * (DORQ) HW interrupt and the fatal HW-block interrupt set.
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* Reading the INT_STS_CLR register also clears the status */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* Mask the offending bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
3011
/* Handle the de-asserted attention bits from AEU group 2: CFC and PXP
 * HW interrupts and the fatal HW-block interrupt set.
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		/* Reading INT_STS_CLR clears the CFC interrupt status */
		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		if (CHIP_IS_E2(bp)) {
			/* E2 has a second PXP interrupt status register */
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* Mask the offending bits before panicking */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
3055
/* Handle the de-asserted attention bits from AEU group 3: general
 * attentions (PMF link assert, MC/MCP asserts) and latched attentions
 * (GRC timeout / reserved).
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			/* Ack the general attention for this function */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* Refresh the MF config and driver status from
			 * shared memory before acting on the event */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));

			if (val & DRV_STATUS_SET_MF_BW)
				bnx2x_set_mf_bw(bp);

			bnx2x__link_status_update(bp);
			/* Take over PMF role if MCP designated us and we
			 * are not PMF yet */
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			/* Microcode assert - ack all four attn regs */
			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			/* Management CPU assert - dump its firmware state */
			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* GRC timeout attention register exists on E1H+ */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		/* Clear all latched attention signals */
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3115
/* Generic register shared between driver instances for reset/recovery
 * coordination: the low LOAD_COUNTER_BITS hold a load counter, the
 * remaining upper bits form the reset-in-progress flag mask.
 */
#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003122
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003123/*
3124 * should be run under rtnl lock
3125 */
3126static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3127{
3128 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3129 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3130 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3131 barrier();
3132 mmiowb();
3133}
3134
3135/*
3136 * should be run under rtnl lock
3137 */
3138static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3139{
3140 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3141 val |= (1 << 16);
3142 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3143 barrier();
3144 mmiowb();
3145}
3146
3147/*
3148 * should be run under rtnl lock
3149 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003150bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003151{
3152 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3153 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3154 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3155}
3156
3157/*
3158 * should be run under rtnl lock
3159 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003160inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003161{
3162 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3163
3164 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3165
3166 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3167 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3168 barrier();
3169 mmiowb();
3170}
3171
3172/*
3173 * should be run under rtnl lock
3174 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003175u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003176{
3177 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3178
3179 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3180
3181 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3182 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3183 barrier();
3184 mmiowb();
3185
3186 return val1;
3187}
3188
3189/*
3190 * should be run under rtnl lock
3191 */
3192static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3193{
3194 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3195}
3196
3197static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3198{
3199 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3200 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3201}
3202
/* Continuation-print one block name in a comma-separated list:
 * all but the first entry (idx == 0) are prefixed with ", ".
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx)
		pr_cont(", ");
	pr_cont("%s", blk);
}
3209
3210static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3211{
3212 int i = 0;
3213 u32 cur_bit = 0;
3214 for (i = 0; sig; i++) {
3215 cur_bit = ((u32)0x1 << i);
3216 if (sig & cur_bit) {
3217 switch (cur_bit) {
3218 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3219 _print_next_block(par_num++, "BRB");
3220 break;
3221 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3222 _print_next_block(par_num++, "PARSER");
3223 break;
3224 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3225 _print_next_block(par_num++, "TSDM");
3226 break;
3227 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3228 _print_next_block(par_num++, "SEARCHER");
3229 break;
3230 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3231 _print_next_block(par_num++, "TSEMI");
3232 break;
3233 }
3234
3235 /* Clear the bit */
3236 sig &= ~cur_bit;
3237 }
3238 }
3239
3240 return par_num;
3241}
3242
3243static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3244{
3245 int i = 0;
3246 u32 cur_bit = 0;
3247 for (i = 0; sig; i++) {
3248 cur_bit = ((u32)0x1 << i);
3249 if (sig & cur_bit) {
3250 switch (cur_bit) {
3251 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3252 _print_next_block(par_num++, "PBCLIENT");
3253 break;
3254 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3255 _print_next_block(par_num++, "QM");
3256 break;
3257 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3258 _print_next_block(par_num++, "XSDM");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3261 _print_next_block(par_num++, "XSEMI");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3264 _print_next_block(par_num++, "DOORBELLQ");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3267 _print_next_block(par_num++, "VAUX PCI CORE");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3270 _print_next_block(par_num++, "DEBUG");
3271 break;
3272 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3273 _print_next_block(par_num++, "USDM");
3274 break;
3275 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3276 _print_next_block(par_num++, "USEMI");
3277 break;
3278 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3279 _print_next_block(par_num++, "UPB");
3280 break;
3281 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3282 _print_next_block(par_num++, "CSDM");
3283 break;
3284 }
3285
3286 /* Clear the bit */
3287 sig &= ~cur_bit;
3288 }
3289 }
3290
3291 return par_num;
3292}
3293
3294static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3295{
3296 int i = 0;
3297 u32 cur_bit = 0;
3298 for (i = 0; sig; i++) {
3299 cur_bit = ((u32)0x1 << i);
3300 if (sig & cur_bit) {
3301 switch (cur_bit) {
3302 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3303 _print_next_block(par_num++, "CSEMI");
3304 break;
3305 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3306 _print_next_block(par_num++, "PXP");
3307 break;
3308 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3309 _print_next_block(par_num++,
3310 "PXPPCICLOCKCLIENT");
3311 break;
3312 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3313 _print_next_block(par_num++, "CFC");
3314 break;
3315 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3316 _print_next_block(par_num++, "CDU");
3317 break;
3318 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3319 _print_next_block(par_num++, "IGU");
3320 break;
3321 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3322 _print_next_block(par_num++, "MISC");
3323 break;
3324 }
3325
3326 /* Clear the bit */
3327 sig &= ~cur_bit;
3328 }
3329 }
3330
3331 return par_num;
3332}
3333
3334static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3335{
3336 int i = 0;
3337 u32 cur_bit = 0;
3338 for (i = 0; sig; i++) {
3339 cur_bit = ((u32)0x1 << i);
3340 if (sig & cur_bit) {
3341 switch (cur_bit) {
3342 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3343 _print_next_block(par_num++, "MCP ROM");
3344 break;
3345 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3346 _print_next_block(par_num++, "MCP UMP RX");
3347 break;
3348 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3349 _print_next_block(par_num++, "MCP UMP TX");
3350 break;
3351 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3352 _print_next_block(par_num++, "MCP SCPAD");
3353 break;
3354 }
3355
3356 /* Clear the bit */
3357 sig &= ~cur_bit;
3358 }
3359 }
3360
3361 return par_num;
3362}
3363
3364static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3365 u32 sig2, u32 sig3)
3366{
3367 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3368 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3369 int par_num = 0;
3370 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3371 "[0]:0x%08x [1]:0x%08x "
3372 "[2]:0x%08x [3]:0x%08x\n",
3373 sig0 & HW_PRTY_ASSERT_SET_0,
3374 sig1 & HW_PRTY_ASSERT_SET_1,
3375 sig2 & HW_PRTY_ASSERT_SET_2,
3376 sig3 & HW_PRTY_ASSERT_SET_3);
3377 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3378 bp->dev->name);
3379 par_num = bnx2x_print_blocks_with_parity0(
3380 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3381 par_num = bnx2x_print_blocks_with_parity1(
3382 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3383 par_num = bnx2x_print_blocks_with_parity2(
3384 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3385 par_num = bnx2x_print_blocks_with_parity3(
3386 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3387 printk("\n");
3388 return true;
3389 } else
3390 return false;
3391}
3392
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003393bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003394{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003395 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003396 int port = BP_PORT(bp);
3397
3398 attn.sig[0] = REG_RD(bp,
3399 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3400 port*4);
3401 attn.sig[1] = REG_RD(bp,
3402 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3403 port*4);
3404 attn.sig[2] = REG_RD(bp,
3405 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3406 port*4);
3407 attn.sig[3] = REG_RD(bp,
3408 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3409 port*4);
3410
3411 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3412 attn.sig[3]);
3413}
3414
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003415
/* Handle the de-asserted attention bits from AEU group 4 (E2 only):
 * decode and log PGLUE and ATC HW interrupt status bits, and report
 * PGLUE/ATC parity attentions.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		/* Reading INT_STS_CLR also clears the PGLUE status */
		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		/* Reading INT_STS_CLR also clears the ATC status */
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		    (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
				  AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3482
/* Main de-assertion handler: on parity errors kick off recovery and bail
 * out; otherwise dispatch each de-asserted group to the per-group
 * handlers, then ack the bits towards HC/IGU and re-enable them in the
 * AEU mask.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* Parity detected - enter recovery via the reset task */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* Read the current (after-invert) attention signals for this port */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	if (CHIP_IS_E2(bp))
		/* E2 adds a fifth attention signal word */
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	/* Dispatch each de-asserted dynamic group, masked by the group's
	 * configured signal mask */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* Ack the de-asserted bits towards the interrupt controller,
	 * HC or IGU depending on the chip's interrupt block */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* Sanity: every de-asserted bit should have been tracked as set */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* Re-enable the de-asserted attention lines in the AEU mask under
	 * the per-port HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	/* Update the SW attention state to reflect the de-assertion */
	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3580
/* Compare the attention bits reported in the default status block with
 * their acknowledgement and the driver's tracked state, then dispatch
 * newly asserted and newly de-asserted bits to their handlers.
 */
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits: asserted = set but not yet acked/tracked,
	 * deasserted = cleared but still acked/tracked */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	/* A bit may not be simultaneously "newly asserted" per bits/ack and
	 * already tracked (or vice versa) - that indicates lost sync */
	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
3608
/* Publish the new event-queue producer value to the storm memory for this
 * function; mmiowb keeps producer updates ordered towards the device.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3615
#ifdef BCM_CNIC
/* Route a CFC-delete event to CNIC if the CID belongs to the CNIC range.
 * Returns 0 when the event was consumed here (CNIC CID), 1 when the
 * caller should handle it as a regular L2 CID.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* Not a CNIC CID: CNIC range not configured, or below its start */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		/* Firmware reported an error on the delete ramrod */
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
#endif
3635
/* Process the event queue (EQ): consume every completed element between
 * the SW consumer and the HW consumer, updating driver state per opcode,
 * then return the consumed credits to the SPQ and publish the new
 * producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * When we get the next-page value we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			/* CNIC CIDs are completed by the CNIC handler */
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		/* Remaining opcodes are state-transition events; match on
		 * opcode combined with the current driver state */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* Return the consumed EQ elements as SPQ credits */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3743
/* Slow-path work handler: refresh the default status block indices, then
 * handle HW attentions and slow-path (EQ) events as indicated, finally
 * re-enabling attention interrupts via the IGU ack.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status bits tell which default-SB indices changed */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		/* Ack the non-attention index without re-enabling ints */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	/* Any bit still set here was not handled above */
	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* Ack the attention index and re-enable interrupts */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3786
/* MSI-X slow-path interrupt handler: disable further slow-path interrupts
 * via the IGU ack, notify CNIC if registered, and defer the real work to
 * the sp_task workqueue item.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* Mask slow-path interrupts until the work item re-enables them */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* cnic_ops may be unregistered concurrently - RCU protected */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3821
3822/* end of slow path */
3823
/* Periodic driver timer.
 *
 * In poll mode it services queue 0 TX/RX directly.  Otherwise it only
 * maintains the driver<->MCP heartbeat (pulse sequence numbers exchanged
 * through shared memory) and triggers a statistics update while the
 * device is open, then re-arms itself.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are disabled - skip the work but keep the timer armed */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance our pulse sequence and publish it to the MCP */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3872
3873/* end of Statistics */
3874
3875/* nic init */
3876
3877/*
3878 * nic init service functions
3879 */
3880
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003881static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003882{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003883 u32 i;
3884 if (!(len%4) && !(addr%4))
3885 for (i = 0; i < len; i += 4)
3886 REG_WR(bp, addr + i, fill);
3887 else
3888 for (i = 0; i < len; i++)
3889 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003890
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003891}
3892
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003893/* helper: writes FP SP data to FW - data_size in dwords */
3894static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3895 int fw_sb_id,
3896 u32 *sb_data_p,
3897 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003898{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003899 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003900 for (index = 0; index < data_size; index++)
3901 REG_WR(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3903 sizeof(u32)*index,
3904 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003905}
3906
/* Disable a fastpath status block in FW: mark its owning function as
 * disabled (chip-generation specific layout), then zero the SB and its
 * sync-block areas in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* wipe the SB itself and its sync-block area */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3940
3941/* helper: writes SP SB data to FW */
3942static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3943 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003944{
3945 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003946 int i;
3947 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3948 REG_WR(bp, BAR_CSTRORM_INTMEM +
3949 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3950 i*sizeof(u32),
3951 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003952}
3953
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003954static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3955{
3956 int func = BP_FUNC(bp);
3957 struct hc_sp_status_block_data sp_sb_data;
3958 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3959
3960 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3961 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3962 sp_sb_data.p_func.vf_valid = false;
3963
3964 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3965
3966 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3967 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3968 CSTORM_SP_STATUS_BLOCK_SIZE);
3969 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3970 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3971 CSTORM_SP_SYNC_BLOCK_SIZE);
3972
3973}
3974
3975
3976static inline
3977void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3978 int igu_sb_id, int igu_seg_id)
3979{
3980 hc_sm->igu_sb_id = igu_sb_id;
3981 hc_sm->igu_seg_id = igu_seg_id;
3982 hc_sm->timer_value = 0xFF;
3983 hc_sm->time_to_expire = 0xFFFFFFFF;
3984}
3985
/* Initialize one non-default (fastpath or CNIC) status block in FW.
 *
 * Builds the chip-generation-specific SB data structure (E2 vs E1x) with
 * the host DMA address of the SB, the owning PF/VF/vnic identifiers and
 * the RX/TX state machines, and writes it to CSTORM internal memory.
 */
static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	struct hc_index_data *hc_index_p;	/* assigned but not used below */
	int data_size;
	u32 *sb_data_p;

	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	/* start from a clean, disabled SB */
	bnx2x_zero_fp_sb(bp, fw_sb_id);

	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
4044
4045static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
4046 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004047{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004048 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004049 u8 ticks = usec / BNX2X_BTR;
4050
4051 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4052
4053 disable = disable ? 1 : (usec ? 0 : 1);
4054 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4055}
4056
4057static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4058 u16 tx_usec, u16 rx_usec)
4059{
4060 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4061 false, rx_usec);
4062 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4063 false, tx_usec);
4064}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004065
/* Initialize the default (slowpath) status block.
 *
 * Sets up the attention block (AEU signal groups and the HC/IGU attention
 * message address), then the slowpath SB data in CSTORM, and finally
 * enables the default SB interrupt via an IGU ack.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* backward-compatible (HC) vs IGU interrupt mode use different
	 * default-SB ids and access segments */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* cache the AEU enable registers for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
				REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the interrupt block where the attention SB lives in host
	 * memory */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* disable and clear the SP SB before re-programming it */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* enable the default SB interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4147
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004148void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004149{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004150 int i;
4151
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004152 for_each_queue(bp, i)
4153 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4154 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004155}
4156
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004157static void bnx2x_init_sp_ring(struct bnx2x *bp)
4158{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004159 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004160 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004161
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004162 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004163 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4164 bp->spq_prod_bd = bp->spq;
4165 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004166}
4167
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004168static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004169{
4170 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004171 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4172 union event_ring_elem *elem =
4173 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004174
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004175 elem->next_page.addr.hi =
4176 cpu_to_le32(U64_HI(bp->eq_mapping +
4177 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4178 elem->next_page.addr.lo =
4179 cpu_to_le32(U64_LO(bp->eq_mapping +
4180 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004181 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004182 bp->eq_cons = 0;
4183 bp->eq_prod = NUM_EQ_DESC;
4184 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004185}
4186
4187static void bnx2x_init_ind_table(struct bnx2x *bp)
4188{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004189 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004190 int i;
4191
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004192 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004193 return;
4194
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004195 DP(NETIF_MSG_IFUP,
4196 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004197 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004198 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004199 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004200 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004201}
4202
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004203void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004204{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004205 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004206 u16 cl_id;
4207
Eilon Greenstein581ce432009-07-29 00:20:04 +00004208 /* All but management unicast packets should pass to the host as well */
4209 u32 llh_mask =
4210 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4211 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4212 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4213 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004214
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004215 switch (mode) {
4216 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004217 cl_id = BP_L_ID(bp);
4218 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004219 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004220
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004221 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004222 cl_id = BP_L_ID(bp);
4223 bnx2x_rxq_set_mac_filters(bp, cl_id,
4224 BNX2X_ACCEPT_UNICAST |
4225 BNX2X_ACCEPT_BROADCAST |
4226 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004227 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004228
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004229 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004230 cl_id = BP_L_ID(bp);
4231 bnx2x_rxq_set_mac_filters(bp, cl_id,
4232 BNX2X_ACCEPT_UNICAST |
4233 BNX2X_ACCEPT_BROADCAST |
4234 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004235 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004236
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004237 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004238 cl_id = BP_L_ID(bp);
4239 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4240
Eilon Greenstein581ce432009-07-29 00:20:04 +00004241 /* pass management unicast packets as well */
4242 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004243 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004244
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004245 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004246 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4247 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004248 }
4249
Eilon Greenstein581ce432009-07-29 00:20:04 +00004250 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004251 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4252 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00004253 llh_mask);
4254
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004255 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4256 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4257 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4258 bp->mac_filters.ucast_drop_all,
4259 bp->mac_filters.mcast_drop_all,
4260 bp->mac_filters.bcast_drop_all,
4261 bp->mac_filters.ucast_accept_all,
4262 bp->mac_filters.mcast_accept_all,
4263 bp->mac_filters.bcast_accept_all
4264 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004265
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004266 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004267}
4268
/* One-time (per chip) internal memory init, run only by the function
 * that received a COMMON load code: publishes the MF mode to all four
 * storms, configures switch-independent classification behavior, zeroes
 * the USTORM aggregation data area and selects the IGU mode on E2.
 */
static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or not,
		 * in switch-independent we'll write 0 to here... */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	if (IS_MF_SI(bp))
		/*
		 * In switch independent mode, the TSTORM needs to accept
		 * packets that failed classification, since approximate match
		 * mac addresses aren't written to NIG LLH
		 */
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			    TSTORM_ACCEPT_CLASSIFY_FAILED_OFFSET, 2);

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			    HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}
4307
4308static void bnx2x_init_internal_port(struct bnx2x *bp)
4309{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004310 /* port */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004311}
4312
/* Run the internal-memory init stages implied by the MCP load response.
 *
 * The switch falls through on purpose: a COMMON load also performs the
 * PORT stage, and every load code reaches the FUNCTION stage (which is
 * itself a no-op here - per-function init happens in bnx2x_pf_init).
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4335
/* Initialize one fastpath queue's status block: derive its client,
 * FW SB, IGU SB and qZone identifiers, program the SB in FW and read
 * back the initial SB indices.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut: cached offset of the USTORM RX producers area */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
			   "cl_id %d fw_sb %d igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4367
/* Top-level NIC init: bring up all status blocks, rings and internal
 * memories, then enable interrupts.  @load_code (from the MCP) selects
 * how much of the shared common/port init this function instance owns.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	/* the CNIC status block has no fastpath struct of its own */
	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4410
4411/* end of nic init */
4412
4413/*
4414 * gzip service functions
4415 */
4416
4417static int bnx2x_gunzip_init(struct bnx2x *bp)
4418{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004419 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4420 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004421 if (bp->gunzip_buf == NULL)
4422 goto gunzip_nomem1;
4423
4424 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4425 if (bp->strm == NULL)
4426 goto gunzip_nomem2;
4427
4428 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4429 GFP_KERNEL);
4430 if (bp->strm->workspace == NULL)
4431 goto gunzip_nomem3;
4432
4433 return 0;
4434
4435gunzip_nomem3:
4436 kfree(bp->strm);
4437 bp->strm = NULL;
4438
4439gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004440 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4441 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004442 bp->gunzip_buf = NULL;
4443
4444gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004445 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4446 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004447 return -ENOMEM;
4448}
4449
4450static void bnx2x_gunzip_end(struct bnx2x *bp)
4451{
4452 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004453 kfree(bp->strm);
4454 bp->strm = NULL;
4455
4456 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004457 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4458 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004459 bp->gunzip_buf = NULL;
4460 }
4461}
4462
/* Inflate a gzip-compressed firmware blob @zbuf (@len bytes) into
 * bp->gunzip_buf.
 *
 * Returns 0 on success (bp->gunzip_outlen then holds the inflated
 * length in dwords), -EINVAL on a bad gzip header, or a zlib error code.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* the fixed gzip header is 10 bytes long */
	n = 10;

#define FNAME				0x8

	/* skip the optional NUL-terminated original file name, if present */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative windowBits: raw deflate stream (header already skipped) */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	/* convert the output length to dwords; it is expected to be
	 * dword-aligned */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
4508
4509/* nic load/unload */
4510
4511/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004512 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004513 */
4514
4515/* send a NIG loopback debug packet */
4516static void bnx2x_lb_pckt(struct bnx2x *bp)
4517{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004518 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004519
4520 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004521 wb_write[0] = 0x55555555;
4522 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004523 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004524 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004525
4526 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004527 wb_write[0] = 0x09000000;
4528 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004529 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004530 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004531}
4532
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 *
 * The test injects loopback debug packets (see bnx2x_lb_pckt()) while the
 * parser's neighbor blocks are held off, then checks NIG/PRS packet
 * counters at each stage.  Returns 0 on success or a distinct negative
 * value (-1..-4) identifying which stage timed out.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	/* FPGA/emulation platforms run much slower than silicon, so
	 * scale every poll budget and delay by a platform factor */
	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4682
/* Unmask attention interrupts of the HW blocks by clearing (most of)
 * their INT_MASK registers.  A value of 0 unmasks all sources of the
 * block; the non-zero writes keep specific noisy-but-legal error bits
 * masked (see inline comments).  The commented-out SEM writes appear
 * intentionally disabled - left as-is.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	/* E2 keeps bit 6 (0x40) of PXP INT_MASK_1 masked */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 mask is revision-dependent; FPGA and E2 keep additional
	 * PGL error sources masked */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			(PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4739
/* Table of per-block parity mask registers and the value written to each
 * by enable_blocks_parity().  A mask of 0x0 unmasks every parity source
 * in that block; non-zero values keep the commented bits masked.
 */
static const struct {
	u32 addr;	/* GRC address of the block's PRTY_MASK register */
	u32 mask;	/* mask value to program (bits set = kept masked) */
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
	{HC_REG_HC_PRTY_MASK, 0x7},
	{MISC_REG_MISC_PRTY_MASK, 0x1},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
4773
4774static void enable_blocks_parity(struct bnx2x *bp)
4775{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004776 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004777
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004778 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004779 REG_WR(bp, bnx2x_parity_mask[i].addr,
4780 bnx2x_parity_mask[i].mask);
4781}
4782
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004783
/* Put the chip's common (port-shared) blocks into reset by writing the
 * block bitmaps to the MISC reset-register CLEAR ports.
 * NOTE(review): the exact set of blocks selected by 0xd3ffff7f / 0x1403
 * is defined by the reset-register bit layout - confirm against the
 * register spec before changing.
 */
static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}
4791
Eilon Greenstein573f2032009-08-12 08:24:14 +00004792static void bnx2x_init_pxp(struct bnx2x *bp)
4793{
4794 u16 devctl;
4795 int r_order, w_order;
4796
4797 pci_read_config_word(bp->pdev,
4798 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4799 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4800 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4801 if (bp->mrrs == -1)
4802 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4803 else {
4804 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4805 r_order = bp->mrrs;
4806 }
4807
4808 bnx2x_init_pxp_arb(bp, r_order, w_order);
4809}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004810
4811static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4812{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004813 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004814 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004815 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004816
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004817 if (BP_NOMCP(bp))
4818 return;
4819
4820 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004821 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4822 SHARED_HW_CFG_FAN_FAILURE_MASK;
4823
4824 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4825 is_required = 1;
4826
4827 /*
4828 * The fan failure mechanism is usually related to the PHY type since
4829 * the power consumption of the board is affected by the PHY. Currently,
4830 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4831 */
4832 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4833 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004834 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004835 bnx2x_fan_failure_det_req(
4836 bp,
4837 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004838 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004839 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004840 }
4841
4842 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4843
4844 if (is_required == 0)
4845 return;
4846
4847 /* Fan failure is indicated by SPIO 5 */
4848 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4849 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4850
4851 /* set to active low mode */
4852 val = REG_RD(bp, MISC_REG_SPIO_INT);
4853 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004854 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004855 REG_WR(bp, MISC_REG_SPIO_INT, val);
4856
4857 /* enable interrupt to signal the IGU */
4858 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4859 val |= (1 << MISC_REGISTERS_SPIO_5);
4860 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4861}
4862
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004863static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4864{
4865 u32 offset = 0;
4866
4867 if (CHIP_IS_E1(bp))
4868 return;
4869 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4870 return;
4871
4872 switch (BP_ABS_FUNC(bp)) {
4873 case 0:
4874 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4875 break;
4876 case 1:
4877 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4878 break;
4879 case 2:
4880 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4881 break;
4882 case 3:
4883 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4884 break;
4885 case 4:
4886 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4887 break;
4888 case 5:
4889 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4890 break;
4891 case 6:
4892 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4893 break;
4894 case 7:
4895 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4896 break;
4897 default:
4898 return;
4899 }
4900
4901 REG_WR(bp, offset, pretend_func_num);
4902 REG_RD(bp, offset);
4903 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4904}
4905
/* Disable this physical function: clear its enable bit in the IGU
 * configuration, then disable it as a PCI master in the PGLUE and
 * drop its CFC weak enable.
 */
static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}
4915
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004916static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004917{
4918 u32 val, i;
4919
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004920 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004921
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004922 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004923 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4924 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4925
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004926 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004927 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004928 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004929
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004930 if (CHIP_IS_E2(bp)) {
4931 u8 fid;
4932
4933 /**
4934 * 4-port mode or 2-port mode we need to turn of master-enable
4935 * for everyone, after that, turn it back on for self.
4936 * so, we disregard multi-function or not, and always disable
4937 * for all functions on the given path, this means 0,2,4,6 for
4938 * path 0 and 1,3,5,7 for path 1
4939 */
4940 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4941 if (fid == BP_ABS_FUNC(bp)) {
4942 REG_WR(bp,
4943 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4944 1);
4945 continue;
4946 }
4947
4948 bnx2x_pretend_func(bp, fid);
4949 /* clear pf enable */
4950 bnx2x_pf_disable(bp);
4951 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4952 }
4953 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004954
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004955 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004956 if (CHIP_IS_E1(bp)) {
4957 /* enable HW interrupt from PXP on USDM overflow
4958 bit 16 on INT_MASK_0 */
4959 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004960 }
4961
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004962 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004963 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004964
4965#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004966 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4967 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4968 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4969 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4970 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004971 /* make sure this value is 0 */
4972 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004973
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004974/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4975 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4976 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4977 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4978 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004979#endif
4980
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004981 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4982
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004983 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4984 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004985
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004986 /* let the HW do it's magic ... */
4987 msleep(100);
4988 /* finish PXP init */
4989 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4990 if (val != 1) {
4991 BNX2X_ERR("PXP2 CFG failed\n");
4992 return -EBUSY;
4993 }
4994 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4995 if (val != 1) {
4996 BNX2X_ERR("PXP2 RD_INIT failed\n");
4997 return -EBUSY;
4998 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004999
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005000 /* Timers bug workaround E2 only. We need to set the entire ILT to
5001 * have entries with value "0" and valid bit on.
5002 * This needs to be done by the first PF that is loaded in a path
5003 * (i.e. common phase)
5004 */
5005 if (CHIP_IS_E2(bp)) {
5006 struct ilt_client_info ilt_cli;
5007 struct bnx2x_ilt ilt;
5008 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
5009 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
5010
5011 /* initalize dummy TM client */
5012 ilt_cli.start = 0;
5013 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
5014 ilt_cli.client_num = ILT_CLIENT_TM;
5015
5016 /* Step 1: set zeroes to all ilt page entries with valid bit on
5017 * Step 2: set the timers first/last ilt entry to point
5018 * to the entire range to prevent ILT range error for 3rd/4th
5019 * vnic (this code assumes existance of the vnic)
5020 *
5021 * both steps performed by call to bnx2x_ilt_client_init_op()
5022 * with dummy TM client
5023 *
5024 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
5025 * and his brother are split registers
5026 */
5027 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
5028 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
5029 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
5030
5031 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
5032 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
5033 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
5034 }
5035
5036
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005037 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5038 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005039
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005040 if (CHIP_IS_E2(bp)) {
5041 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
5042 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
5043 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
5044
5045 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
5046
5047 /* let the HW do it's magic ... */
5048 do {
5049 msleep(200);
5050 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
5051 } while (factor-- && (val != 1));
5052
5053 if (val != 1) {
5054 BNX2X_ERR("ATC_INIT failed\n");
5055 return -EBUSY;
5056 }
5057 }
5058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005059 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005060
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005061 /* clean the DMAE memory */
5062 bp->dmae_ready = 1;
5063 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005064
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005065 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5066 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5067 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5068 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005069
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005070 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5071 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5072 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5073 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5074
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005075 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005076
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005077 if (CHIP_MODE_IS_4_PORT(bp))
5078 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005079
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005080 /* QM queues pointers table */
5081 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005082
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005083 /* soft reset pulse */
5084 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5085 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005086
Michael Chan37b091b2009-10-10 13:46:55 +00005087#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005088 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005089#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005090
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005091 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005092 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5093
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005094 if (!CHIP_REV_IS_SLOW(bp)) {
5095 /* enable hw interrupt from doorbell Q */
5096 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5097 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005098
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005099 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005100 if (CHIP_MODE_IS_4_PORT(bp)) {
5101 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5102 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5103 }
5104
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005105 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005106 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005107#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005108 /* set NIC mode */
5109 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005110#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005111 if (!CHIP_IS_E1(bp))
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005112 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF_SD(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005113
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005114 if (CHIP_IS_E2(bp)) {
5115 /* Bit-map indicating which L2 hdrs may appear after the
5116 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005117 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005118 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5119 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5120 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005121
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005122 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5123 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5124 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5125 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005126
Eilon Greensteinca003922009-08-12 22:53:28 -07005127 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5128 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5129 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5130 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005131
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005132 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5133 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5134 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5135 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005136
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005137 if (CHIP_MODE_IS_4_PORT(bp))
5138 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5139
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005140 /* sync semi rtc */
5141 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5142 0x80000000);
5143 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5144 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005145
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005146 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5147 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5148 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005149
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005150 if (CHIP_IS_E2(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005151 int has_ovlan = IS_MF_SD(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005152 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5153 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5154 }
5155
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005156 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005157 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5158 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005159
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005160 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005161#ifdef BCM_CNIC
5162 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5163 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5164 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5165 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5166 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5167 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5168 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5169 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5170 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5171 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5172#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005173 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005174
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005175 if (sizeof(union cdu_context) != 1024)
5176 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005177 dev_alert(&bp->pdev->dev, "please adjust the size "
5178 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005179 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005180
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005181 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005182 val = (4 << 24) + (0 << 12) + 1024;
5183 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005184
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005185 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005186 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005187 /* enable context validation interrupt from CFC */
5188 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5189
5190 /* set the thresholds to prevent CFC/CDU race */
5191 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005192
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005193 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005194
5195 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5196 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5197
5198 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005199 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005200
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005201 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005202 /* Reset PCIE errors for debug */
5203 REG_WR(bp, 0x2814, 0xffffffff);
5204 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005205
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005206 if (CHIP_IS_E2(bp)) {
5207 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5208 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5209 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5210 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5211 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5212 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5213 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5214 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5215 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5216 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5217 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5218 }
5219
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005220 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005221 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005222 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005223 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005224
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005225 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005226 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005227 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005228 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF_SD(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005229 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005230 if (CHIP_IS_E2(bp)) {
5231 /* Bit-map indicating which L2 hdrs may appear after the
5232 basic Ethernet header */
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08005233 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF_SD(bp) ? 7 : 6));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005234 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005235
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005236 if (CHIP_REV_IS_SLOW(bp))
5237 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005238
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005239 /* finish CFC init */
5240 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5241 if (val != 1) {
5242 BNX2X_ERR("CFC LL_INIT failed\n");
5243 return -EBUSY;
5244 }
5245 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5246 if (val != 1) {
5247 BNX2X_ERR("CFC AC_INIT failed\n");
5248 return -EBUSY;
5249 }
5250 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5251 if (val != 1) {
5252 BNX2X_ERR("CFC CAM_INIT failed\n");
5253 return -EBUSY;
5254 }
5255 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005256
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005257 if (CHIP_IS_E1(bp)) {
5258 /* read NIG statistic
5259 to see if this is our first up since powerup */
5260 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5261 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005262
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005263 /* do internal memory self test */
5264 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5265 BNX2X_ERR("internal mem self test failed\n");
5266 return -EBUSY;
5267 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005268 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005269
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005270 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005271 bp->common.shmem_base,
5272 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005273
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005274 bnx2x_setup_fan_failure_detection(bp);
5275
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005276 /* clear PXP2 attentions */
5277 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005278
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005279 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005280 if (CHIP_PARITY_SUPPORTED(bp))
5281 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005282
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005283 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005284 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5285 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5286 CHIP_IS_E1x(bp)) {
5287 u32 shmem_base[2], shmem2_base[2];
5288 shmem_base[0] = bp->common.shmem_base;
5289 shmem2_base[0] = bp->common.shmem2_base;
5290 if (CHIP_IS_E2(bp)) {
5291 shmem_base[1] =
5292 SHMEM2_RD(bp, other_shmem_base_addr);
5293 shmem2_base[1] =
5294 SHMEM2_RD(bp, other_shmem2_base_addr);
5295 }
5296 bnx2x_acquire_phy_lock(bp);
5297 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5298 bp->common.chip_id);
5299 bnx2x_release_phy_lock(bp);
5300 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005301 } else
5302 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5303
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005304 return 0;
5305}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005306
/* bnx2x_init_hw_port - run the per-port stage of hardware initialization.
 *
 * Initializes every HW block with the PORT0/PORT1 init-stage values for
 * the port this function instance drives, programs the BRB pause
 * thresholds, PBF credits and NIG classification for that port, and
 * finishes by resetting the link.  Invoked from bnx2x_init_hw() when the
 * MCP load code includes the PORT stage.
 *
 * NOTE: the order of register writes below follows the HW init spec and
 * must not be rearranged.
 *
 * Returns 0 (int return kept for symmetry with the other init stages).
 */
static int bnx2x_init_hw_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	/* selects the per-port init-values table used by bnx2x_init_block() */
	int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
	u32 low, high;
	u32 val;

	DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);

	/* mask this port's NIG interrupts while its blocks are set up */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	bnx2x_init_block(bp, PXP_BLOCK, init_stage);
	bnx2x_init_block(bp, PXP2_BLOCK, init_stage);

	/* Timers bug workaround: disables the pf_master bit in pglue at
	 * common phase, we need to enable it here before any dmae access are
	 * attempted. Therefore we manually added the enable-master to the
	 * port phase (it also happens in the function phase)
	 */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);

	bnx2x_init_block(bp, TCM_BLOCK, init_stage);
	bnx2x_init_block(bp, UCM_BLOCK, init_stage);
	bnx2x_init_block(bp, CCM_BLOCK, init_stage);
	bnx2x_init_block(bp, XCM_BLOCK, init_stage);

	/* QM cid (connection) count */
	bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);

#ifdef BCM_CNIC
	/* timers block is only needed for iSCSI/FCoE offload connections */
	bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
	REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
	REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, init_stage);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);

	if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
		bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
		if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
			/* no pause for emulation and FPGA */
			low = 0;
			high = 513;
		} else {
			/* BRB pause thresholds, in 256-byte units; they
			 * scale with MTU and single-vs-dual port mode */
			if (IS_MF(bp))
				low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
			else if (bp->dev->mtu > 4096) {
				if (bp->flags & ONE_PORT_FLAG)
					low = 160;
				else {
					val = bp->dev->mtu;
					/* (24*1024 + val*4)/256 */
					low = 96 + (val/64) +
					      ((val % 64) ? 1 : 0);
				}
			} else
				low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
			high = low + 56; /* 14*1024/256 */
		}
		REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
		REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
	}

	if (CHIP_MODE_IS_4_PORT(bp)) {
		/* 4-port mode: per-port XOFF/XON thresholds plus the
		 * guaranteed MAC credit (register name sic in HW spec) */
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
				    BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		/* E1 keeps its HC edge registers per port */
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
	 *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
	 *             bits 4-7 are used for "per vn group attention" */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF_SD(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			/* LLH classification type: 1 = SD multi-function,
			 * 2 = SI multi-function, 0 = single function */
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						NIG_REG_LLH0_CLS_TYPE), val);
		}
		/* NOTE(review): leftover scope braces - kept to avoid
		 * touching non-comment tokens; harmless */
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	/* some PHYs require the MDIO bus to be arbitrated via a HW lock */
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		/* enable the SPIO5 attention bit used for fan failure */
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}
5490
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005491static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5492{
5493 int reg;
5494
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005495 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005496 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005497 else
5498 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005499
5500 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5501}
5502
/* bnx2x_igu_clear_sb - clear a single IGU status block for the PF.
 *
 * Thin wrapper around bnx2x_igu_clear_sb_gen() with the PF (as opposed
 * to VF) variant hard-wired.
 */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5507
5508static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5509{
5510 u32 i, base = FUNC_ILT_BASE(func);
5511 for (i = base; i < base + ILT_PER_FUNC; i++)
5512 bnx2x_ilt_wr(bp, i, 0);
5513}
5514
/* bnx2x_init_hw_func - run the per-PCI-function stage of HW initialization.
 *
 * Sets up the CDU ILT lines for this function's L2 contexts, enables the
 * function in the IGU (E2), runs the FUNC init stage on every HW block,
 * initializes the status-block producer memory for IGU mode, clears PCIE
 * error latches and finally probes the PHY.  The 20ms sleep and the
 * master-enable write are part of a documented timers-HW workaround and
 * must keep their position relative to the ILT init.
 *
 * Returns 0 (int return kept for symmetry with the other init stages).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	/* NOTE(review): redundant - ilt was already initialized to
	 * BP_ILT(bp) at declaration above */
	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	/* point this function's CDU ILT lines at the preallocated
	 * context memory (one ILT page per ILT_PAGE_CIDS contexts) */
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* DMAE may now be used for the block init below */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* tell the storms which path (E2 dual-path) we are on */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		/* multi-function: enable LLH and program the outer VLAN */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU mode (E2 or E1.5 backward-compatible) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* zero all producer segments for this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
					  "block during "
					  "function init (0x%x)!\n", val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			/* read-modify-write each entry through DMAE to
			 * refresh its parity */
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5801
/* bnx2x_init_hw - top-level hardware init dispatcher.
 *
 * @load_code: stage granted by the MCP (management CPU) bootcode.
 *
 * The switch below deliberately falls through: a COMMON load runs the
 * common, port AND function stages; a PORT load runs port and function;
 * a FUNCTION load runs only the function stage.  Do not add breaks.
 *
 * Returns 0 on success or a negative errno from the failed stage;
 * gunzip resources are released on every path via init_hw_err.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* DMAE must not be used until the init stages re-enable it */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		/* seed the driver-pulse sequence from the shmem mailbox so
		 * the heartbeat to the MCP starts in sync */
		bp->fw_drv_pulse_wr_seq =
		   (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
		    DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5854
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005855void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005856{
5857
5858#define BNX2X_PCI_FREE(x, y, size) \
5859 do { \
5860 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005861 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005862 x = NULL; \
5863 y = 0; \
5864 } \
5865 } while (0)
5866
5867#define BNX2X_FREE(x) \
5868 do { \
5869 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005870 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005871 x = NULL; \
5872 } \
5873 } while (0)
5874
5875 int i;
5876
5877 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005878 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005879 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005880 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005881 if (CHIP_IS_E2(bp))
5882 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5883 bnx2x_fp(bp, i, status_blk_mapping),
5884 sizeof(struct host_hc_status_block_e2));
5885 else
5886 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5887 bnx2x_fp(bp, i, status_blk_mapping),
5888 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005889 }
5890 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005891 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005892
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005893 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005894 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5895 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5896 bnx2x_fp(bp, i, rx_desc_mapping),
5897 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5898
5899 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5900 bnx2x_fp(bp, i, rx_comp_mapping),
5901 sizeof(struct eth_fast_path_rx_cqe) *
5902 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005903
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005904 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005905 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005906 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5907 bnx2x_fp(bp, i, rx_sge_mapping),
5908 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5909 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005910 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005911 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005912
5913 /* fastpath tx rings: tx_buf tx_desc */
5914 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5915 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5916 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005917 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005918 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005919 /* end of fastpath */
5920
5921 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005922 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005923
5924 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005925 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005926
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005927 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5928 bp->context.size);
5929
5930 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5931
5932 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005933
Michael Chan37b091b2009-10-10 13:46:55 +00005934#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005935 if (CHIP_IS_E2(bp))
5936 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5940 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005941
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005942 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005943#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005944
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005945 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005946
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005947 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5948 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5949
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005950#undef BNX2X_PCI_FREE
5951#undef BNX2X_KFREE
5952}
5953
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005954static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5955{
5956 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5957 if (CHIP_IS_E2(bp)) {
5958 bnx2x_fp(bp, index, sb_index_values) =
5959 (__le16 *)status_blk.e2_sb->sb.index_values;
5960 bnx2x_fp(bp, index, sb_running_index) =
5961 (__le16 *)status_blk.e2_sb->sb.running_index;
5962 } else {
5963 bnx2x_fp(bp, index, sb_index_values) =
5964 (__le16 *)status_blk.e1x_sb->sb.index_values;
5965 bnx2x_fp(bp, index, sb_running_index) =
5966 (__le16 *)status_blk.e1x_sb->sb.running_index;
5967 }
5968}
5969
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005970int bnx2x_alloc_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005971{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005972#define BNX2X_PCI_ALLOC(x, y, size) \
5973 do { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005974 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005975 if (x == NULL) \
5976 goto alloc_mem_err; \
5977 memset(x, 0, size); \
5978 } while (0)
5979
5980#define BNX2X_ALLOC(x, size) \
5981 do { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005982 x = kzalloc(size, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005983 if (x == NULL) \
5984 goto alloc_mem_err; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005985 } while (0)
5986
5987 int i;
5988
5989 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005990 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005991 for_each_queue(bp, i) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005992 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005993 bnx2x_fp(bp, i, bp) = bp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005994 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005995 if (CHIP_IS_E2(bp))
5996 BNX2X_PCI_ALLOC(sb->e2_sb,
5997 &bnx2x_fp(bp, i, status_blk_mapping),
5998 sizeof(struct host_hc_status_block_e2));
5999 else
6000 BNX2X_PCI_ALLOC(sb->e1x_sb,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006001 &bnx2x_fp(bp, i, status_blk_mapping),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006002 sizeof(struct host_hc_status_block_e1x));
6003
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006004 set_sb_shortcuts(bp, i);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006005 }
6006 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006007 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006008
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006009 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006010 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6011 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6012 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6013 &bnx2x_fp(bp, i, rx_desc_mapping),
6014 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6015
6016 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6017 &bnx2x_fp(bp, i, rx_comp_mapping),
6018 sizeof(struct eth_fast_path_rx_cqe) *
6019 NUM_RCQ_BD);
6020
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07006021 /* SGE ring */
6022 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6023 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6024 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6025 &bnx2x_fp(bp, i, rx_sge_mapping),
6026 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006027 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006028 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006029 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006030
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006031 /* fastpath tx rings: tx_buf tx_desc */
6032 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6033 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6034 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6035 &bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07006036 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006037 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006038 /* end of fastpath */
6039
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006040#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006041 if (CHIP_IS_E2(bp))
6042 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
6043 sizeof(struct host_hc_status_block_e2));
6044 else
6045 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
6046 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006047
6048 /* allocate searcher T2 table */
6049 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
6050#endif
6051
6052
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006053 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006054 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006055
6056 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6057 sizeof(struct bnx2x_slowpath));
6058
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006059 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006060
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006061 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6062 bp->context.size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006063
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006064 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006065
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006066 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6067 goto alloc_mem_err;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006068
6069 /* Slow path ring */
6070 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6071
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006072 /* EQ */
6073 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6074 BCM_PAGE_SIZE * NUM_EQ_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006075 return 0;
6076
6077alloc_mem_err:
6078 bnx2x_free_mem(bp);
6079 return -ENOMEM;
6080
6081#undef BNX2X_PCI_ALLOC
6082#undef BNX2X_ALLOC
6083}
6084
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006085/*
6086 * Init service functions
6087 */
stephen hemminger8d962862010-10-21 07:50:56 +00006088static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6089 int *state_p, int flags);
6090
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006091int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006092{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006093 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006094
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006095 /* Wait for completion */
6096 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6097 WAIT_RAMROD_COMMON);
6098}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006099
stephen hemminger8d962862010-10-21 07:50:56 +00006100static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006101{
6102 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006103
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006104 /* Wait for completion */
6105 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6106 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006107}
6108
Michael Chane665bfd2009-10-10 13:46:54 +00006109/**
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006110 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
Michael Chane665bfd2009-10-10 13:46:54 +00006111 *
6112 * @param bp driver descriptor
6113 * @param set set or clear an entry (1 or 0)
6114 * @param mac pointer to a buffer containing a MAC
6115 * @param cl_bit_vec bit vector of clients to register a MAC for
6116 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006117 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00006118 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006119static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006120 u32 cl_bit_vec, u8 cam_offset,
6121 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006122{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006123 struct mac_configuration_cmd *config =
6124 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6125 int ramrod_flags = WAIT_RAMROD_COMMON;
6126
6127 bp->set_mac_pending = 1;
6128 smp_wmb();
6129
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006130 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006131 config->hdr.offset = cam_offset;
6132 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006133 config->hdr.reserved1 = 0;
6134
6135 /* primary MAC */
6136 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006137 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006138 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006139 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006140 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006141 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07006142 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00006143 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006144 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006145 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006146 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006147 SET_FLAG(config->config_table[0].flags,
6148 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6149 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006150 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006151 SET_FLAG(config->config_table[0].flags,
6152 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6153 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006154
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006155 if (is_bcast)
6156 SET_FLAG(config->config_table[0].flags,
6157 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6158
6159 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006160 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006161 config->config_table[0].msb_mac_addr,
6162 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006163 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006164
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006165 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006166 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006167 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6168
6169 /* Wait for a completion */
6170 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006171}
6172
stephen hemminger8d962862010-10-21 07:50:56 +00006173static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6174 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006175{
6176 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006177 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006178 u8 poll = flags & WAIT_RAMROD_POLL;
6179 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006180
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006181 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6182 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006183
6184 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006185 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006186 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006187 if (common)
6188 bnx2x_eq_int(bp);
6189 else {
6190 bnx2x_rx_int(bp->fp, 10);
6191 /* if index is different from 0
6192 * the reply for some commands will
6193 * be on the non default queue
6194 */
6195 if (idx)
6196 bnx2x_rx_int(&bp->fp[idx], 10);
6197 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006198 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006199
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006200 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006201 if (*state_p == state) {
6202#ifdef BNX2X_STOP_ON_ERROR
6203 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6204#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006205 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006206 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006207
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006208 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00006209
6210 if (bp->panic)
6211 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006212 }
6213
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006214 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08006215 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6216 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006217#ifdef BNX2X_STOP_ON_ERROR
6218 bnx2x_panic();
6219#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006220
Eliezer Tamir49d66772008-02-28 11:53:13 -08006221 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006222}
6223
stephen hemminger8d962862010-10-21 07:50:56 +00006224static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006225{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006226 if (CHIP_IS_E1H(bp))
6227 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6228 else if (CHIP_MODE_IS_4_PORT(bp))
6229 return BP_FUNC(bp) * 32 + rel_offset;
6230 else
6231 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006232}
6233
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006234/**
6235 * LLH CAM line allocations: currently only iSCSI and ETH macs are
6236 * relevant. In addition, current implementation is tuned for a
6237 * single ETH MAC.
6238 *
6239 * When multiple unicast ETH MACs PF configuration in switch
6240 * independent mode is required (NetQ, multiple netdev MACs,
6241 * etc.), consider better utilisation of 16 per function MAC
6242 * entries in the LLH memory.
6243 */
6244enum {
6245 LLH_CAM_ISCSI_ETH_LINE = 0,
6246 LLH_CAM_ETH_LINE,
6247 LLH_CAM_MAX_PF_LINE = NIG_REG_LLH1_FUNC_MEM_SIZE
6248};
6249
6250static void bnx2x_set_mac_in_nig(struct bnx2x *bp,
6251 int set,
6252 unsigned char *dev_addr,
6253 int index)
6254{
6255 u32 wb_data[2];
6256 u32 mem_offset, ena_offset, mem_index;
6257 /**
6258 * indexes mapping:
6259 * 0..7 - goes to MEM
6260 * 8..15 - goes to MEM2
6261 */
6262
6263 if (!IS_MF_SI(bp) || index > LLH_CAM_MAX_PF_LINE)
6264 return;
6265
6266 /* calculate memory start offset according to the mapping
6267 * and index in the memory */
6268 if (index < NIG_LLH_FUNC_MEM_MAX_OFFSET) {
6269 mem_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM :
6270 NIG_REG_LLH0_FUNC_MEM;
6271 ena_offset = BP_PORT(bp) ? NIG_REG_LLH1_FUNC_MEM_ENABLE :
6272 NIG_REG_LLH0_FUNC_MEM_ENABLE;
6273 mem_index = index;
6274 } else {
6275 mem_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2 :
6276 NIG_REG_P0_LLH_FUNC_MEM2;
6277 ena_offset = BP_PORT(bp) ? NIG_REG_P1_LLH_FUNC_MEM2_ENABLE :
6278 NIG_REG_P0_LLH_FUNC_MEM2_ENABLE;
6279 mem_index = index - NIG_LLH_FUNC_MEM_MAX_OFFSET;
6280 }
6281
6282 if (set) {
6283 /* LLH_FUNC_MEM is a u64 WB register */
6284 mem_offset += 8*mem_index;
6285
6286 wb_data[0] = ((dev_addr[2] << 24) | (dev_addr[3] << 16) |
6287 (dev_addr[4] << 8) | dev_addr[5]);
6288 wb_data[1] = ((dev_addr[0] << 8) | dev_addr[1]);
6289
6290 REG_WR_DMAE(bp, mem_offset, wb_data, 2);
6291 }
6292
6293 /* enable/disable the entry */
6294 REG_WR(bp, ena_offset + 4*mem_index, set);
6295
6296}
6297
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006298void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006299{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006300 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6301 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6302
6303 /* networking MAC */
6304 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6305 (1 << bp->fp->cl_id), cam_offset , 0);
6306
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006307 bnx2x_set_mac_in_nig(bp, set, bp->dev->dev_addr, LLH_CAM_ETH_LINE);
6308
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006309 if (CHIP_IS_E1(bp)) {
6310 /* broadcast MAC */
6311 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6312 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6313 }
6314}
6315static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6316{
6317 int i = 0, old;
6318 struct net_device *dev = bp->dev;
6319 struct netdev_hw_addr *ha;
6320 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6321 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6322
6323 netdev_for_each_mc_addr(ha, dev) {
6324 /* copy mac */
6325 config_cmd->config_table[i].msb_mac_addr =
6326 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6327 config_cmd->config_table[i].middle_mac_addr =
6328 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6329 config_cmd->config_table[i].lsb_mac_addr =
6330 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6331
6332 config_cmd->config_table[i].vlan_id = 0;
6333 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6334 config_cmd->config_table[i].clients_bit_vector =
6335 cpu_to_le32(1 << BP_L_ID(bp));
6336
6337 SET_FLAG(config_cmd->config_table[i].flags,
6338 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6339 T_ETH_MAC_COMMAND_SET);
6340
6341 DP(NETIF_MSG_IFUP,
6342 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6343 config_cmd->config_table[i].msb_mac_addr,
6344 config_cmd->config_table[i].middle_mac_addr,
6345 config_cmd->config_table[i].lsb_mac_addr);
6346 i++;
6347 }
6348 old = config_cmd->hdr.length;
6349 if (old > i) {
6350 for (; i < old; i++) {
6351 if (CAM_IS_INVALID(config_cmd->
6352 config_table[i])) {
6353 /* already invalidated */
6354 break;
6355 }
6356 /* invalidate */
6357 SET_FLAG(config_cmd->config_table[i].flags,
6358 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6359 T_ETH_MAC_COMMAND_INVALIDATE);
6360 }
6361 }
6362
6363 config_cmd->hdr.length = i;
6364 config_cmd->hdr.offset = offset;
6365 config_cmd->hdr.client_id = 0xff;
6366 config_cmd->hdr.reserved1 = 0;
6367
6368 bp->set_mac_pending = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006369 smp_wmb();
6370
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006371 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6372 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6373}
6374static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6375{
6376 int i;
6377 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6378 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6379 int ramrod_flags = WAIT_RAMROD_COMMON;
6380
6381 bp->set_mac_pending = 1;
6382 smp_wmb();
6383
6384 for (i = 0; i < config_cmd->hdr.length; i++)
6385 SET_FLAG(config_cmd->config_table[i].flags,
6386 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6387 T_ETH_MAC_COMMAND_INVALIDATE);
6388
6389 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6390 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00006391
6392 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006393 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6394 ramrod_flags);
6395
Michael Chane665bfd2009-10-10 13:46:54 +00006396}
6397
Michael Chan993ac7b2009-10-10 13:46:56 +00006398#ifdef BCM_CNIC
6399/**
6400 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
6401 * MAC(s). This function will wait until the ramdord completion
6402 * returns.
6403 *
6404 * @param bp driver handle
6405 * @param set set or clear the CAM entry
6406 *
6407 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
6408 */
stephen hemminger8d962862010-10-21 07:50:56 +00006409static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00006410{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006411 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6412 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6413 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6414 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00006415
6416 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006417 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6418 cam_offset, 0);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08006419
6420 bnx2x_set_mac_in_nig(bp, set, bp->iscsi_mac, LLH_CAM_ISCSI_ETH_LINE);
Michael Chan993ac7b2009-10-10 13:46:56 +00006421 return 0;
6422}
6423#endif
6424
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006425static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6426 struct bnx2x_client_init_params *params,
6427 u8 activate,
6428 struct client_init_ramrod_data *data)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006429{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006430 /* Clear the buffer */
6431 memset(data, 0, sizeof(*data));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006432
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006433 /* general */
6434 data->general.client_id = params->rxq_params.cl_id;
6435 data->general.statistics_counter_id = params->rxq_params.stat_id;
6436 data->general.statistics_en_flg =
6437 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6438 data->general.activate_flg = activate;
6439 data->general.sp_client_id = params->rxq_params.spcl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006440
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006441 /* Rx data */
6442 data->rx.tpa_en_flg =
6443 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6444 data->rx.vmqueue_mode_en_flg = 0;
6445 data->rx.cache_line_alignment_log_size =
6446 params->rxq_params.cache_line_log;
6447 data->rx.enable_dynamic_hc =
6448 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6449 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6450 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6451 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6452
6453 /* We don't set drop flags */
6454 data->rx.drop_ip_cs_err_flg = 0;
6455 data->rx.drop_tcp_cs_err_flg = 0;
6456 data->rx.drop_ttl0_flg = 0;
6457 data->rx.drop_udp_cs_err_flg = 0;
6458
6459 data->rx.inner_vlan_removal_enable_flg =
6460 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6461 data->rx.outer_vlan_removal_enable_flg =
6462 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6463 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6464 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6465 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6466 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6467 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6468 data->rx.bd_page_base.lo =
6469 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6470 data->rx.bd_page_base.hi =
6471 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6472 data->rx.sge_page_base.lo =
6473 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6474 data->rx.sge_page_base.hi =
6475 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6476 data->rx.cqe_page_base.lo =
6477 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6478 data->rx.cqe_page_base.hi =
6479 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6480 data->rx.is_leading_rss =
6481 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6482 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6483
6484 /* Tx data */
6485 data->tx.enforce_security_flg = 0; /* VF specific */
6486 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6487 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6488 data->tx.mtu = 0; /* VF specific */
6489 data->tx.tx_bd_page_base.lo =
6490 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6491 data->tx.tx_bd_page_base.hi =
6492 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6493
6494 /* flow control data */
6495 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6496 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6497 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6498 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6499 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6500 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6501 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6502
6503 data->fc.safc_group_num = params->txq_params.cos;
6504 data->fc.safc_group_en_flg =
6505 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6506 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6507}
6508
6509static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6510{
6511 /* ustorm cxt validation */
6512 cxt->ustorm_ag_context.cdu_usage =
6513 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6514 ETH_CONNECTION_TYPE);
6515 /* xcontext validation */
6516 cxt->xstorm_ag_context.cdu_reserved =
6517 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6518 ETH_CONNECTION_TYPE);
6519}
6520
stephen hemminger8d962862010-10-21 07:50:56 +00006521static int bnx2x_setup_fw_client(struct bnx2x *bp,
6522 struct bnx2x_client_init_params *params,
6523 u8 activate,
6524 struct client_init_ramrod_data *data,
6525 dma_addr_t data_mapping)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006526{
6527 u16 hc_usec;
6528 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6529 int ramrod_flags = 0, rc;
6530
6531 /* HC and context validation values */
6532 hc_usec = params->txq_params.hc_rate ?
6533 1000000 / params->txq_params.hc_rate : 0;
6534 bnx2x_update_coalesce_sb_index(bp,
6535 params->txq_params.fw_sb_id,
6536 params->txq_params.sb_cq_index,
6537 !(params->txq_params.flags & QUEUE_FLG_HC),
6538 hc_usec);
6539
6540 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6541
6542 hc_usec = params->rxq_params.hc_rate ?
6543 1000000 / params->rxq_params.hc_rate : 0;
6544 bnx2x_update_coalesce_sb_index(bp,
6545 params->rxq_params.fw_sb_id,
6546 params->rxq_params.sb_cq_index,
6547 !(params->rxq_params.flags & QUEUE_FLG_HC),
6548 hc_usec);
6549
6550 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6551 params->rxq_params.cid);
6552
6553 /* zero stats */
6554 if (params->txq_params.flags & QUEUE_FLG_STATS)
6555 storm_memset_xstats_zero(bp, BP_PORT(bp),
6556 params->txq_params.stat_id);
6557
6558 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6559 storm_memset_ustats_zero(bp, BP_PORT(bp),
6560 params->rxq_params.stat_id);
6561 storm_memset_tstats_zero(bp, BP_PORT(bp),
6562 params->rxq_params.stat_id);
6563 }
6564
6565 /* Fill the ramrod data */
6566 bnx2x_fill_cl_init_data(bp, params, activate, data);
6567
6568 /* SETUP ramrod.
6569 *
6570 * bnx2x_sp_post() takes a spin_lock thus no other explict memory
6571 * barrier except from mmiowb() is needed to impose a
6572 * proper ordering of memory operations.
6573 */
6574 mmiowb();
6575
6576
6577 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6578 U64_HI(data_mapping), U64_LO(data_mapping), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006579
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006580 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006581 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6582 params->ramrod_params.index,
6583 params->ramrod_params.pstate,
6584 ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006585 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006586}
6587
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006588/**
6589 * Configure interrupt mode according to current configuration.
6590 * In case of MSI-X it will also try to enable MSI-X.
6591 *
6592 * @param bp
6593 *
6594 * @return int
6595 */
6596static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006597{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006598 int rc = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07006599
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006600 switch (bp->int_mode) {
6601 case INT_MODE_MSI:
6602 bnx2x_enable_msi(bp);
6603 /* falling through... */
6604 case INT_MODE_INTx:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006605 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006606 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greensteinca003922009-08-12 22:53:28 -07006607 break;
Eilon Greensteinca003922009-08-12 22:53:28 -07006608 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006609 /* Set number of queues according to bp->multi_mode value */
6610 bnx2x_set_num_queues(bp);
6611
6612 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6613 bp->num_queues);
6614
6615 /* if we can't use MSI-X we only need one fp,
6616 * so try to enable MSI-X with the requested number of fp's
6617 * and fallback to MSI or legacy INTx with one fp
6618 */
6619 rc = bnx2x_enable_msix(bp);
6620 if (rc) {
6621 /* failed to enable MSI-X */
6622 if (bp->multi_mode)
6623 DP(NETIF_MSG_IFUP,
6624 "Multi requested but failed to "
6625 "enable MSI-X (%d), "
6626 "set number of queues to %d\n",
6627 bp->num_queues,
6628 1);
6629 bp->num_queues = 1;
6630
6631 if (!(bp->flags & DISABLE_MSI_FLAG))
6632 bnx2x_enable_msi(bp);
6633 }
6634
Eilon Greensteinca003922009-08-12 22:53:28 -07006635 break;
6636 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006637
6638 return rc;
Eilon Greensteinca003922009-08-12 22:53:28 -07006639}
6640
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00006641/* must be called prioir to any HW initializations */
6642static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6643{
6644 return L2_ILT_LINES(bp);
6645}
6646
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006647void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006648{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006649 struct ilt_client_info *ilt_client;
6650 struct bnx2x_ilt *ilt = BP_ILT(bp);
6651 u16 line = 0;
6652
6653 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6654 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6655
6656 /* CDU */
6657 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6658 ilt_client->client_num = ILT_CLIENT_CDU;
6659 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6660 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6661 ilt_client->start = line;
6662 line += L2_ILT_LINES(bp);
6663#ifdef BCM_CNIC
6664 line += CNIC_ILT_LINES;
6665#endif
6666 ilt_client->end = line - 1;
6667
6668 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6669 "flags 0x%x, hw psz %d\n",
6670 ilt_client->start,
6671 ilt_client->end,
6672 ilt_client->page_size,
6673 ilt_client->flags,
6674 ilog2(ilt_client->page_size >> 12));
6675
6676 /* QM */
6677 if (QM_INIT(bp->qm_cid_count)) {
6678 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6679 ilt_client->client_num = ILT_CLIENT_QM;
6680 ilt_client->page_size = QM_ILT_PAGE_SZ;
6681 ilt_client->flags = 0;
6682 ilt_client->start = line;
6683
6684 /* 4 bytes for each cid */
6685 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6686 QM_ILT_PAGE_SZ);
6687
6688 ilt_client->end = line - 1;
6689
6690 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6691 "flags 0x%x, hw psz %d\n",
6692 ilt_client->start,
6693 ilt_client->end,
6694 ilt_client->page_size,
6695 ilt_client->flags,
6696 ilog2(ilt_client->page_size >> 12));
6697
6698 }
6699 /* SRC */
6700 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6701#ifdef BCM_CNIC
6702 ilt_client->client_num = ILT_CLIENT_SRC;
6703 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6704 ilt_client->flags = 0;
6705 ilt_client->start = line;
6706 line += SRC_ILT_LINES;
6707 ilt_client->end = line - 1;
6708
6709 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6710 "flags 0x%x, hw psz %d\n",
6711 ilt_client->start,
6712 ilt_client->end,
6713 ilt_client->page_size,
6714 ilt_client->flags,
6715 ilog2(ilt_client->page_size >> 12));
6716
6717#else
6718 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6719#endif
6720
6721 /* TM */
6722 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6723#ifdef BCM_CNIC
6724 ilt_client->client_num = ILT_CLIENT_TM;
6725 ilt_client->page_size = TM_ILT_PAGE_SZ;
6726 ilt_client->flags = 0;
6727 ilt_client->start = line;
6728 line += TM_ILT_LINES;
6729 ilt_client->end = line - 1;
6730
6731 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6732 "flags 0x%x, hw psz %d\n",
6733 ilt_client->start,
6734 ilt_client->end,
6735 ilt_client->page_size,
6736 ilt_client->flags,
6737 ilog2(ilt_client->page_size >> 12));
6738
6739#else
6740 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6741#endif
6742}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006743
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006744int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6745 int is_leading)
6746{
6747 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006748 int rc;
6749
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006750 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6751 IGU_INT_ENABLE, 0);
6752
6753 params.ramrod_params.pstate = &fp->state;
6754 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6755 params.ramrod_params.index = fp->index;
6756 params.ramrod_params.cid = fp->cid;
6757
6758 if (is_leading)
6759 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6760
6761 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6762
6763 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6764
6765 rc = bnx2x_setup_fw_client(bp, &params, 1,
6766 bnx2x_sp(bp, client_init_data),
6767 bnx2x_sp_mapping(bp, client_init_data));
6768 return rc;
6769}
6770
stephen hemminger8d962862010-10-21 07:50:56 +00006771static int bnx2x_stop_fw_client(struct bnx2x *bp,
6772 struct bnx2x_client_ramrod_params *p)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006773{
6774 int rc;
6775
6776 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6777
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006778 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006779 *p->pstate = BNX2X_FP_STATE_HALTING;
6780 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6781 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006782
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006783 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006784 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6785 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006786 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006787 return rc;
6788
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006789 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6790 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6791 p->cl_id, 0);
6792 /* Wait for completion */
6793 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6794 p->pstate, poll_flag);
6795 if (rc) /* timeout */
6796 return rc;
6797
6798
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006799 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006800 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006801
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006802 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006803 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6804 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006805 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006806}
6807
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006808static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006809{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006810 struct bnx2x_client_ramrod_params client_stop = {0};
6811 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006812
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006813 client_stop.index = index;
6814 client_stop.cid = fp->cid;
6815 client_stop.cl_id = fp->cl_id;
6816 client_stop.pstate = &(fp->state);
6817 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006818
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006819 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006820}
6821
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006822
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006823static void bnx2x_reset_func(struct bnx2x *bp)
6824{
6825 int port = BP_PORT(bp);
6826 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006827 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006828 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006829 (CHIP_IS_E2(bp) ?
6830 offsetof(struct hc_status_block_data_e2, common) :
6831 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006832 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6833 int pfid_offset = offsetof(struct pci_entity, pf_id);
6834
6835 /* Disable the function in the FW */
6836 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6837 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6838 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6839 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6840
6841 /* FP SBs */
6842 for_each_queue(bp, i) {
6843 struct bnx2x_fastpath *fp = &bp->fp[i];
6844 REG_WR8(bp,
6845 BAR_CSTRORM_INTMEM +
6846 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6847 + pfunc_offset_fp + pfid_offset,
6848 HC_FUNCTION_DISABLED);
6849 }
6850
6851 /* SP SB */
6852 REG_WR8(bp,
6853 BAR_CSTRORM_INTMEM +
6854 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6855 pfunc_offset_sp + pfid_offset,
6856 HC_FUNCTION_DISABLED);
6857
6858
6859 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6860 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6861 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006862
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006863 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006864 if (bp->common.int_block == INT_BLOCK_HC) {
6865 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6866 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6867 } else {
6868 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6869 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6870 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006871
Michael Chan37b091b2009-10-10 13:46:55 +00006872#ifdef BCM_CNIC
6873 /* Disable Timer scan */
6874 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6875 /*
6876 * Wait for at least 10ms and up to 2 second for the timers scan to
6877 * complete
6878 */
6879 for (i = 0; i < 200; i++) {
6880 msleep(10);
6881 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6882 break;
6883 }
6884#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006885 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006886 bnx2x_clear_func_ilt(bp, func);
6887
6888 /* Timers workaround bug for E2: if this is vnic-3,
6889 * we need to set the entire ilt range for this timers.
6890 */
6891 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6892 struct ilt_client_info ilt_cli;
6893 /* use dummy TM client */
6894 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6895 ilt_cli.start = 0;
6896 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6897 ilt_cli.client_num = ILT_CLIENT_TM;
6898
6899 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6900 }
6901
6902 /* this assumes that reset_port() called before reset_func()*/
6903 if (CHIP_IS_E2(bp))
6904 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006905
6906 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006907}
6908
/* Stop traffic into this port's BRB and mask its attentions; warns if
 * the BRB still holds packets after a 100ms grace period.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* mask the port's NIG interrupts */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
6934
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006935static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6936{
6937 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006938 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006939
6940 switch (reset_code) {
6941 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6942 bnx2x_reset_port(bp);
6943 bnx2x_reset_func(bp);
6944 bnx2x_reset_common(bp);
6945 break;
6946
6947 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6948 bnx2x_reset_port(bp);
6949 bnx2x_reset_func(bp);
6950 break;
6951
6952 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6953 bnx2x_reset_func(bp);
6954 break;
6955
6956 default:
6957 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6958 break;
6959 }
6960}
6961
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006962void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006963{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006964 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006965 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006966 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006967
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006968 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006969 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006970 struct bnx2x_fastpath *fp = &bp->fp[i];
6971
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006972 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006973 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006974
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006975 if (!cnt) {
6976 BNX2X_ERR("timeout waiting for queue[%d]\n",
6977 i);
6978#ifdef BNX2X_STOP_ON_ERROR
6979 bnx2x_panic();
6980 return -EBUSY;
6981#else
6982 break;
6983#endif
6984 }
6985 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006986 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006987 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006988 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006989 /* Give HW time to discard old tx messages */
6990 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006991
Yitchak Gertner65abd742008-08-25 15:26:24 -07006992 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006993 /* invalidate mc list,
6994 * wait and poll (interrupts are off)
6995 */
6996 bnx2x_invlidate_e1_mc_list(bp);
6997 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006998
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006999 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07007000 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7001
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007002 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07007003
7004 for (i = 0; i < MC_HASH_SIZE; i++)
7005 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7006 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007007
Michael Chan993ac7b2009-10-10 13:46:56 +00007008#ifdef BCM_CNIC
7009 /* Clear iSCSI L2 MAC */
7010 mutex_lock(&bp->cnic_mutex);
7011 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7012 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7013 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7014 }
7015 mutex_unlock(&bp->cnic_mutex);
7016#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07007017
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007018 if (unload_mode == UNLOAD_NORMAL)
7019 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007020
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007021 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007022 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007023
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00007024 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007025 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007026 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007027 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007028 /* The mac address is written to entries 1-4 to
7029 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007030 u8 entry = (BP_E1HVN(bp) + 1)*8;
7031
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007032 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007033 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007034
7035 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7036 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07007037 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007038
7039 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08007040
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007041 } else
7042 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7043
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007044 /* Close multi and leading connections
7045 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007046 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007047
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007048 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007049#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007050 return;
7051#else
7052 goto unload_error;
7053#endif
7054
7055 rc = bnx2x_func_stop(bp);
7056 if (rc) {
7057 BNX2X_ERR("Function stop failed!\n");
7058#ifdef BNX2X_STOP_ON_ERROR
7059 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007060#else
7061 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007062#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08007063 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007064#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08007065unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007066#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007067 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007068 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007069 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007070 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
7071 "%d, %d, %d\n", BP_PATH(bp),
7072 load_count[BP_PATH(bp)][0],
7073 load_count[BP_PATH(bp)][1],
7074 load_count[BP_PATH(bp)][2]);
7075 load_count[BP_PATH(bp)][0]--;
7076 load_count[BP_PATH(bp)][1 + port]--;
7077 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
7078 "%d, %d, %d\n", BP_PATH(bp),
7079 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
7080 load_count[BP_PATH(bp)][2]);
7081 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007082 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007083 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007084 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7085 else
7086 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7087 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007088
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007089 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7090 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7091 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007092
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007093 /* Disable HW interrupts, NAPI */
7094 bnx2x_netif_stop(bp, 1);
7095
7096 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00007097 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007098
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007099 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08007100 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007101
7102 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007103 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007104 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00007105
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007106}
7107
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00007108void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007109{
7110 u32 val;
7111
7112 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
7113
7114 if (CHIP_IS_E1(bp)) {
7115 int port = BP_PORT(bp);
7116 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7117 MISC_REG_AEU_MASK_ATTN_FUNC_0;
7118
7119 val = REG_RD(bp, addr);
7120 val &= ~(0x300);
7121 REG_WR(bp, addr, val);
7122 } else if (CHIP_IS_E1H(bp)) {
7123 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
7124 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
7125 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
7126 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
7127 }
7128}
7129
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007130/* Close gates #2, #3 and #4: */
7131static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7132{
7133 u32 val, addr;
7134
7135 /* Gates #2 and #4a are closed/opened for "not E1" only */
7136 if (!CHIP_IS_E1(bp)) {
7137 /* #4 */
7138 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7139 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7140 close ? (val | 0x1) : (val & (~(u32)1)));
7141 /* #2 */
7142 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7143 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7144 close ? (val | 0x1) : (val & (~(u32)1)));
7145 }
7146
7147 /* #3 */
7148 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7149 val = REG_RD(bp, addr);
7150 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7151
7152 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7153 close ? "closing" : "opening");
7154 mmiowb();
7155}
7156
7157#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7158
/* Set the `magic' bit in the shared-MF CLP mailbox so the MF config
 * survives the MCP reset; the previous bit value is saved in
 * *magic_val for bnx2x_clp_reset_done() to restore.
 */
static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
{
	/* Do some magic... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	*magic_val = val & SHARED_MF_CLP_MAGIC;
	MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
}
7166
/* Restore the value of the `magic' bit.
 *
 * @param bp        Driver handle.
 * @param magic_val Old value of the `magic' bit, as saved by
 *                  bnx2x_clp_reset_prep().
 */
static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
{
	/* Restore the `magic' bit value... */
	u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
	MF_CFG_WR(bp, shared_mf_config.clp_mb,
		(val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
}
7179
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007180/**
7181 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007182 *
7183 * @param bp
7184 * @param magic_val Old value of 'magic' bit.
7185 */
7186static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7187{
7188 u32 shmem;
7189 u32 validity_offset;
7190
7191 DP(NETIF_MSG_HW, "Starting\n");
7192
7193 /* Set `magic' bit in order to save MF config */
7194 if (!CHIP_IS_E1(bp))
7195 bnx2x_clp_reset_prep(bp, magic_val);
7196
7197 /* Get shmem offset */
7198 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7199 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7200
7201 /* Clear validity map flags */
7202 if (shmem > 0)
7203 REG_WR(bp, shmem + validity_offset, 0);
7204}
7205
7206#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7207#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7208
7209/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7210 * depending on the HW type.
7211 *
7212 * @param bp
7213 */
7214static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7215{
7216 /* special handling for emulation and FPGA,
7217 wait 10 times longer */
7218 if (CHIP_REV_IS_SLOW(bp))
7219 msleep(MCP_ONE_TIMEOUT*10);
7220 else
7221 msleep(MCP_ONE_TIMEOUT);
7222}
7223
/* Wait for the MCP to come back up after a reset by polling the shared
 * memory validity signature, then restore the CLP `magic' bit saved by
 * bnx2x_reset_mcp_prep().
 *
 * Returns 0 when the MCP is up, -ENOTTY otherwise.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a chance to start booting before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
7275
7276static void bnx2x_pxp_prep(struct bnx2x *bp)
7277{
7278 if (!CHIP_IS_E1(bp)) {
7279 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7280 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7281 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7282 mmiowb();
7283 }
7284}
7285
7286/*
7287 * Reset the whole chip except for:
7288 * - PCIE core
7289 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7290 * one reset bit)
7291 * - IGU
7292 * - MISC (including AEU)
7293 * - GRC
7294 * - RBCN, RBCP
7295 */
7296static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7297{
7298 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7299
7300 not_reset_mask1 =
7301 MISC_REGISTERS_RESET_REG_1_RST_HC |
7302 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7303 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7304
7305 not_reset_mask2 =
7306 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7307 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7308 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7309 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7310 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7311 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7312 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7313 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7314
7315 reset_mask1 = 0xffffffff;
7316
7317 if (CHIP_IS_E1(bp))
7318 reset_mask2 = 0xffff;
7319 else
7320 reset_mask2 = 0x1ffff;
7321
7322 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7323 reset_mask1 & (~not_reset_mask1));
7324 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7325 reset_mask2 & (~not_reset_mask2));
7326
7327 barrier();
7328 mmiowb();
7329
7330 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7332 mmiowb();
7333}
7334
/* "Process kill": full chip-level recovery reset.  Waits for the PXP
 * Tetris buffer to drain, closes the host gates, resets the chip
 * (including the MCP), then waits for the MCP to come back and
 * re-opens the gates.
 *
 * Returns 0 on success, -EAGAIN when the drain or the MCP recovery
 * times out.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* magic values below are the HW's "fully drained" state */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
7416
7417static int bnx2x_leader_reset(struct bnx2x *bp)
7418{
7419 int rc = 0;
7420 /* Try to recover after the failure */
7421 if (bnx2x_process_kill(bp)) {
7422 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7423 bp->dev->name);
7424 rc = -EAGAIN;
7425 goto exit_leader_reset;
7426 }
7427
7428 /* Clear "reset is in progress" bit and update the driver state */
7429 bnx2x_set_reset_done(bp);
7430 bp->recovery_state = BNX2X_RECOVERY_DONE;
7431
7432exit_leader_reset:
7433 bp->is_leader = 0;
7434 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7435 smp_wmb();
7436 return rc;
7437}
7438
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007439/* Assumption: runs under rtnl lock. This together with the fact
7440 * that it's called only from bnx2x_reset_task() ensure that it
7441 * will never be called when netif_running(bp->dev) is false.
7442 */
7443static void bnx2x_parity_recover(struct bnx2x *bp)
7444{
7445 DP(NETIF_MSG_HW, "Handling parity\n");
7446 while (1) {
7447 switch (bp->recovery_state) {
7448 case BNX2X_RECOVERY_INIT:
7449 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7450 /* Try to get a LEADER_LOCK HW lock */
7451 if (bnx2x_trylock_hw_lock(bp,
7452 HW_LOCK_RESOURCE_RESERVED_08))
7453 bp->is_leader = 1;
7454
7455 /* Stop the driver */
7456 /* If interface has been removed - break */
7457 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7458 return;
7459
7460 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7461 /* Ensure "is_leader" and "recovery_state"
7462 * update values are seen on other CPUs
7463 */
7464 smp_wmb();
7465 break;
7466
7467 case BNX2X_RECOVERY_WAIT:
7468 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7469 if (bp->is_leader) {
7470 u32 load_counter = bnx2x_get_load_cnt(bp);
7471 if (load_counter) {
7472 /* Wait until all other functions get
7473 * down.
7474 */
7475 schedule_delayed_work(&bp->reset_task,
7476 HZ/10);
7477 return;
7478 } else {
7479 /* If all other functions got down -
7480 * try to bring the chip back to
7481 * normal. In any case it's an exit
7482 * point for a leader.
7483 */
7484 if (bnx2x_leader_reset(bp) ||
7485 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7486 printk(KERN_ERR"%s: Recovery "
7487 "has failed. Power cycle is "
7488 "needed.\n", bp->dev->name);
7489 /* Disconnect this device */
7490 netif_device_detach(bp->dev);
7491 /* Block ifup for all function
7492 * of this ASIC until
7493 * "process kill" or power
7494 * cycle.
7495 */
7496 bnx2x_set_reset_in_progress(bp);
7497 /* Shut down the power */
7498 bnx2x_set_power_state(bp,
7499 PCI_D3hot);
7500 return;
7501 }
7502
7503 return;
7504 }
7505 } else { /* non-leader */
7506 if (!bnx2x_reset_is_done(bp)) {
7507 /* Try to get a LEADER_LOCK HW lock as
7508 * long as a former leader may have
7509 * been unloaded by the user or
7510 * released a leadership by another
7511 * reason.
7512 */
7513 if (bnx2x_trylock_hw_lock(bp,
7514 HW_LOCK_RESOURCE_RESERVED_08)) {
7515 /* I'm a leader now! Restart a
7516 * switch case.
7517 */
7518 bp->is_leader = 1;
7519 break;
7520 }
7521
7522 schedule_delayed_work(&bp->reset_task,
7523 HZ/10);
7524 return;
7525
7526 } else { /* A leader has completed
7527 * the "process kill". It's an exit
7528 * point for a non-leader.
7529 */
7530 bnx2x_nic_load(bp, LOAD_NORMAL);
7531 bp->recovery_state =
7532 BNX2X_RECOVERY_DONE;
7533 smp_wmb();
7534 return;
7535 }
7536 }
7537 default:
7538 return;
7539 }
7540 }
7541}
7542
7543/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7544 * scheduled on a general queue in order to prevent a dead lock.
7545 */
/* Delayed-work handler that resets the NIC.
 *
 * Runs under rtnl_lock so it cannot race with ifup/ifdown.  If the device
 * is in parity-recovery state it delegates to bnx2x_parity_recover();
 * otherwise it performs a plain unload/load cycle to reset the chip.
 * With BNX2X_STOP_ON_ERROR the reset is deliberately skipped so the chip
 * state is preserved for a debug dump.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	/* Nothing to do if the interface was brought down meanwhile */
	if (!netif_running(bp->dev))
		goto reset_task_exit;

	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
7572
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007573/* end of nic load/unload */
7574
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007575/*
7576 * Init service functions
7577 */
7578
stephen hemminger8d962862010-10-21 07:50:56 +00007579static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007580{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007581 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7582 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7583 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007584}
7585
/* Disable HW interrupts on an E1H-or-later chip while a pre-boot (UNDI)
 * driver may have been active.
 *
 * The interrupt-disable registers are accessed through the "pretend"
 * mechanism: the PF temporarily pretends to be function 0, disables
 * interrupts as if it were an E1 device, and then restores its real
 * function number.  The mmiowb()/REG_RD pairs flush posted writes so the
 * pretend/unpretend take effect in the chip before the next access.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);
}
7607
/* Disable HW interrupts in the UNDI-unload path, dispatching on chip
 * revision: E1H and later must go through the function-0 "pretend"
 * sequence, while E1 can disable interrupts directly.
 */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7615
/* Detect a leftover pre-boot (UNDI) driver and, if present, unload it and
 * reset the device so the OS driver starts from a clean chip.
 *
 * Detection: MISC_REG_UNPREPARED == 1 means some driver initialized the
 * chip; DORQ_REG_NORM_CID_OFST == 0x7 identifies it as UNDI (UNDI sets
 * the normal-doorbell CID offset to 7).  The whole probe runs under the
 * UNDI HW lock to serialize against the other functions of the ASIC.
 *
 * The unload itself issues firmware UNLOAD_REQ/UNLOAD_DONE handshakes on
 * behalf of port 0 (and port 1 if needed), blocks incoming traffic to the
 * BRB, resets the device, and restores the NIG port-swap straps and this
 * function's pf_num/fw_seq.  The exact register/firmware ordering below
 * is significant — do not reorder.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					       DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
				NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7714
/* Read the port-independent ("common") hardware information from chip
 * registers and shared memory (shmem) into bp->common and related fields:
 * chip id/revision, port mode and pfid, base FW status-block id, flash
 * size, shmem base addresses, bootcode version, feature flags and WoL
 * capability.  Called once at probe time, before any port-specific setup.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* E2 may be strapped into 2- or 4-port mode; the override
		 * register (bit 0 = valid, bit 1 = value) wins over the
		 * strap register.
		 */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* Undocumented register 0x2874 — used together with chip id to
	 * detect single-port bond options; NOTE(review): magic values kept
	 * as-is from the original, meaning not derivable from SOURCE.
	 */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	/* shmem2 base register differs per PCIe path on E2 */
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	/* No shmem -> no management firmware; run in NO_MCP mode */
	if (!bp->common.shmem_base) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* Bootcode-version-gated link feature flags */
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7852
/* Field extractors for IGU CAM mapping-memory entries */
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

/* Discover which IGU status blocks belong to this PF.
 *
 * In backward-compatible interrupt mode the mapping is fixed and computed
 * arithmetically.  In normal mode the IGU CAM is scanned: each valid
 * PF-encoded entry matching our pfid contributes either the default
 * status block (vector 0) or one fast-path status block.  Results go to
 * bp->igu_base_sb, bp->igu_sb_cnt and bp->igu_dsb_id, with igu_sb_cnt
 * clamped to the number of L2 CIDs in use.
 */
static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
{
	int pfid = BP_FUNC(bp);
	int vn = BP_E1HVN(bp);
	int igu_sb_id;
	u32 val;
	u8 fid;

	/* 0xff marks "no fast-path SB found yet" */
	bp->igu_base_sb = 0xff;
	bp->igu_sb_cnt = 0;
	if (CHIP_INT_MODE_IS_BC(bp)) {
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
				       bp->l2_cid_count);

		bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
			FP_SB_MAX_E1x;

		bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
			(CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);

		return;
	}

	/* IGU in normal mode - read CAM */
	for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
	     igu_sb_id++) {
		val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
		if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
			continue;
		fid = IGU_FID(val);
		if ((fid & IGU_FID_ENCODE_IS_PF)) {
			if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
				continue;
			if (IGU_VEC(val) == 0)
				/* default status block */
				bp->igu_dsb_id = igu_sb_id;
			else {
				if (bp->igu_base_sb == 0xff)
					bp->igu_base_sb = igu_sb_id;
				bp->igu_sb_cnt++;
			}
		}
	}
	bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
	if (bp->igu_sb_cnt == 0)
		BNX2X_ERR("CAM configuration error\n");
}
7903
/* Build bp->port.supported[0..1] — the ethtool SUPPORTED_* capability
 * masks per link configuration — from the probed PHYs, then read the PHY
 * MDIO address for the given switch type and mask out speeds that the
 * NVRAM speed_cap_mask does not allow.
 *
 * With one or two PHYs there is a single configuration (index 0); with
 * three (dual external PHY) there are two, whose order depends on the
 * PHY-swap NVRAM option.  Logs and returns early on inconsistent NVRAM.
 */
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int cfg_size = 0, idx, port = BP_PORT(bp);

	/* Aggregation of supported attributes of all external phys */
	bp->port.supported[0] = 0;
	bp->port.supported[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
		bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
		cfg_size = 1;
		break;
	case 2:
		bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
		cfg_size = 1;
		break;
	case 3:
		if (bp->link_params.multi_phy_config &
		    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY2].supported;
		} else {
			bp->port.supported[0] =
				bp->link_params.phy[EXT_PHY1].supported;
			bp->port.supported[1] =
				bp->link_params.phy[EXT_PHY2].supported;
		}
		cfg_size = 2;
		break;
	}

	if (!(bp->port.supported[0] || bp->port.supported[1])) {
		BNX2X_ERR("NVRAM config error. BAD phy config."
			  "PHY1 config 0x%x, PHY2 config 0x%x\n",
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config),
			   SHMEM_RD(bp,
			   dev_info.port_hw_config[port].external_phy_config2));
		return;
	}

	/* Read the PHY MDIO address; register layout differs per switch */
	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config[0]);
		return;
	}
	/* mask what we support according to speed_cap_mask per configuration */
	for (idx = 0; idx < cfg_size; idx++) {
		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;

		if (!(bp->link_params.speed_cap_mask[idx] &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
			bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
			bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
						     SUPPORTED_1000baseT_Full);

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
			bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;

		if (!(bp->link_params.speed_cap_mask[idx] &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
			bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;

	}

	BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
		       bp->port.supported[1]);
}
8002
/* Translate the NVRAM-requested link settings into driver link parameters.
 *
 * For each link configuration (one for single/dual-PHY setups, two for
 * triple-PHY) the requested speed field of bp->port.link_config[idx] is
 * decoded into req_line_speed / req_duplex and an ethtool advertising
 * mask, after validating it against the supported[] mask built by
 * bnx2x_link_settings_supported().  An unsupported forced speed is an
 * NVRAM error: it is logged and the function returns early.  Finally the
 * requested flow control is extracted; AUTO flow control without autoneg
 * support degrades to NONE.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
				    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* Unknown speed request: fall back to autoneg over
			 * everything the PHY supports.
			 */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
8196
Michael Chane665bfd2009-10-10 13:46:54 +00008197static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8198{
8199 mac_hi = cpu_to_be16(mac_hi);
8200 mac_lo = cpu_to_be32(mac_lo);
8201 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8202 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8203}
8204
/* Read per-port hardware configuration from shared memory and initialize
 * the link parameters for this port: lane config, speed capability masks,
 * link configs, multi-PHY config and WoL default.  Probes the PHYs, then
 * derives supported/requested link settings, and finally selects the MDIO
 * PHY address (internal PHY for direct connections, external PHY address
 * otherwise).
 */
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  "
		       "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	/* Must probe PHYs before computing supported/requested settings */
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);
}
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008267
/* Retrieve the device MAC address (and, with BCM_CNIC, the iSCSI MAC):
 * from per-function MF config in multi-function mode, from the port
 * configuration in single-function mode, or a random MAC when no MCP
 * firmware is present.
 */
static void __devinit bnx2x_get_mac_hwinfo(struct bnx2x *bp)
{
	u32 val, val2;
	int func = BP_ABS_FUNC(bp);
	int port = BP_PORT(bp);

	if (BP_NOMCP(bp)) {
		/* No management firmware to query - fall back to random MAC */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
	} else if (IS_MF(bp)) {
		/* Multi-function: per-function MAC from MF configuration;
		 * only accept it if both halves differ from the defaults.
		 */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT))
			bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		/* iSCSI NPAR MAC */
		if (IS_MF_SI(bp)) {
			u32 cfg = MF_CFG_RD(bp, func_ext_config[func].func_cfg);
			if (cfg & MACP_FUNC_CFG_FLAGS_ISCSI_OFFLOAD) {
				val2 = MF_CFG_RD(bp, func_ext_config[func].
						 iscsi_mac_addr_upper);
				val = MF_CFG_RD(bp, func_ext_config[func].
						iscsi_mac_addr_lower);
				bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
			}
		}
#endif
	} else {
		/* in SF read MACs from port configuration */
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
		bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);

#ifdef BCM_CNIC
		val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].
				iscsi_mac_upper);
		val = SHMEM_RD(bp, dev_info.port_hw_config[port].
			       iscsi_mac_lower);
		bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
	}

	/* Mirror the chosen MAC into link params and the permanent address */
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008316
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008317static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8318{
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008319 int /*abs*/func = BP_ABS_FUNC(bp);
8320 int vn, port;
8321 u32 val = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008322 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008323
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008324 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008325
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008326 if (CHIP_IS_E1x(bp)) {
8327 bp->common.int_block = INT_BLOCK_HC;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008328
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008329 bp->igu_dsb_id = DEF_SB_IGU_ID;
8330 bp->igu_base_sb = 0;
8331 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8332 } else {
8333 bp->common.int_block = INT_BLOCK_IGU;
8334 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8335 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8336 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8337 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8338 } else
8339 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8340
8341 bnx2x_get_igu_cam_info(bp);
8342
8343 }
8344 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8345 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8346
8347 /*
8348 * Initialize MF configuration
8349 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008350
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008351 bp->mf_ov = 0;
8352 bp->mf_mode = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008353 vn = BP_E1HVN(bp);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008354 port = BP_PORT(bp);
8355
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008356 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008357 DP(NETIF_MSG_PROBE,
8358 "shmem2base 0x%x, size %d, mfcfg offset %d\n",
8359 bp->common.shmem2_base, SHMEM2_RD(bp, size),
8360 (u32)offsetof(struct shmem2_region, mf_cfg_addr));
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008361 if (SHMEM2_HAS(bp, mf_cfg_addr))
8362 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8363 else
8364 bp->common.mf_cfg_base = bp->common.shmem_base +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008365 offsetof(struct shmem_region, func_mb) +
8366 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008367 /*
8368 * get mf configuration:
8369 * 1. existance of MF configuration
8370 * 2. MAC address must be legal (check only upper bytes)
8371 * for Switch-Independent mode;
8372 * OVLAN must be legal for Switch-Dependent mode
8373 * 3. SF_MODE configures specific MF mode
8374 */
8375 if (bp->common.mf_cfg_base != SHMEM_MF_CFG_ADDR_NONE) {
8376 /* get mf configuration */
8377 val = SHMEM_RD(bp,
8378 dev_info.shared_feature_config.config);
8379 val &= SHARED_FEAT_CFG_FORCE_SF_MODE_MASK;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008380
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008381 switch (val) {
8382 case SHARED_FEAT_CFG_FORCE_SF_MODE_SWITCH_INDEPT:
8383 val = MF_CFG_RD(bp, func_mf_config[func].
8384 mac_upper);
8385 /* check for legal mac (upper bytes)*/
8386 if (val != 0xffff) {
8387 bp->mf_mode = MULTI_FUNCTION_SI;
8388 bp->mf_config[vn] = MF_CFG_RD(bp,
8389 func_mf_config[func].config);
8390 } else
8391 DP(NETIF_MSG_PROBE, "illegal MAC "
8392 "address for SI\n");
8393 break;
8394 case SHARED_FEAT_CFG_FORCE_SF_MODE_MF_ALLOWED:
8395 /* get OV configuration */
8396 val = MF_CFG_RD(bp,
8397 func_mf_config[FUNC_0].e1hov_tag);
8398 val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
8399
8400 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8401 bp->mf_mode = MULTI_FUNCTION_SD;
8402 bp->mf_config[vn] = MF_CFG_RD(bp,
8403 func_mf_config[func].config);
8404 } else
8405 DP(NETIF_MSG_PROBE, "illegal OV for "
8406 "SD\n");
8407 break;
8408 default:
8409 /* Unknown configuration: reset mf_config */
8410 bp->mf_config[vn] = 0;
8411 DP(NETIF_MSG_PROBE, "Unkown MF mode 0x%x\n",
8412 val);
8413 }
8414 }
8415
Eilon Greenstein2691d512009-08-12 08:22:08 +00008416 BNX2X_DEV_INFO("%s function mode\n",
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008417 IS_MF(bp) ? "multi" : "single");
Eilon Greenstein2691d512009-08-12 08:22:08 +00008418
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008419 switch (bp->mf_mode) {
8420 case MULTI_FUNCTION_SD:
8421 val = MF_CFG_RD(bp, func_mf_config[func].e1hov_tag) &
8422 FUNC_MF_CFG_E1HOV_TAG_MASK;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008423 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008424 bp->mf_ov = val;
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008425 BNX2X_DEV_INFO("MF OV for func %d is %d"
8426 " (0x%04x)\n", func,
8427 bp->mf_ov, bp->mf_ov);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008428 } else {
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008429 BNX2X_ERR("No valid MF OV for func %d,"
8430 " aborting\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008431 rc = -EPERM;
8432 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008433 break;
8434 case MULTI_FUNCTION_SI:
8435 BNX2X_DEV_INFO("func %d is in MF "
8436 "switch-independent mode\n", func);
8437 break;
8438 default:
8439 if (vn) {
8440 BNX2X_ERR("VN %d in single function mode,"
8441 " aborting\n", vn);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008442 rc = -EPERM;
8443 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008444 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008445 }
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008446
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008447 }
8448
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008449 /* adjust igu_sb_cnt to MF for E1x */
8450 if (CHIP_IS_E1x(bp) && IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008451 bp->igu_sb_cnt /= E1HVN_MAX;
8452
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008453 /*
8454 * adjust E2 sb count: to be removed when FW will support
8455 * more then 16 L2 clients
8456 */
8457#define MAX_L2_CLIENTS 16
8458 if (CHIP_IS_E2(bp))
8459 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8460 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8461
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008462 if (!BP_NOMCP(bp)) {
8463 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008464
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008465 bp->fw_seq =
8466 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8467 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008468 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8469 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008470
Dmitry Kravkov0793f83f2010-12-01 12:39:28 -08008471 /* Get MAC addresses */
8472 bnx2x_get_mac_hwinfo(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008473
8474 return rc;
8475}
8476
/* Parse the PCI Vital Product Data (VPD) of the adapter and, for
 * Dell-branded boards, copy the vendor-specific firmware version string
 * into bp->fw_ver. On any parse failure bp->fw_ver is left zeroed.
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* Short read - VPD unavailable or truncated */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* Locate the large-resource read-only data tag */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* RO block must lie entirely within the data we read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy; trailing bytes remain zeroed by
			 * the memset above, fw_ver[len] becomes a separator
			 */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
8540
/* One-time initialization of the bnx2x private structure: locks, work
 * items, HW/FW info, module-parameter derived settings (multi-queue,
 * TPA, interrupt mode) and the periodic timer.
 *
 * Returns 0 on success or a negative error from bnx2x_get_hwinfo()/
 * bnx2x_alloc_mem_bp().
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode - RSS requires MSI-X interrupts */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags (hardware LRO) per the disable_tpa module param */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* periodic timer; the poll module param overrides the interval */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8628
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008629
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008630/****************************************************************************
8631* General service functions
8632****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008633
/* net_device_ops .ndo_open - called with rtnl_lock.
 * Powers the device up and loads the NIC. If a previous fatal error left
 * the "reset in progress" state set, attempts the recovery ("process
 * kill") flow first; returns -EAGAIN if recovery has not completed.
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		/* do { } while (0) used only to allow "break" out of the
		 * recovery attempt below
		 */
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery failed - power back down and bail out */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8679
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008680/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008681static int bnx2x_close(struct net_device *dev)
8682{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008683 struct bnx2x *bp = netdev_priv(dev);
8684
8685 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008686 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008687 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008688
8689 return 0;
8690}
8691
/* called with netif_tx_lock from dev_mcast.c */
/* Program the RX filtering mode (normal/allmulti/promisc) and, for the
 * "some multicasts" case, the multicast filters: an exact-match list on
 * E1, a 256-bit CRC32C hash filter on E1H and later.
 */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	/* filters can only be programmed while the device is up */
	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		/* E1 exact-match table overflow falls back to allmulti */
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			/* hash each mc address into one bit of the filter:
			 * top CRC byte selects bit, bits 7:5 select register
			 */
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
8754
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008755/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008756static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8757 int devad, u16 addr)
8758{
8759 struct bnx2x *bp = netdev_priv(netdev);
8760 u16 value;
8761 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008762
8763 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8764 prtad, devad, addr);
8765
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008766 /* The HW expects different devad if CL22 is used */
8767 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8768
8769 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008770 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008771 bnx2x_release_phy_lock(bp);
8772 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8773
8774 if (!rc)
8775 rc = value;
8776 return rc;
8777}
8778
8779/* called with rtnl_lock */
8780static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8781 u16 addr, u16 value)
8782{
8783 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008784 int rc;
8785
8786 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8787 " value 0x%x\n", prtad, devad, addr, value);
8788
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008789 /* The HW expects different devad if CL22 is used */
8790 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8791
8792 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008793 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008794 bnx2x_release_phy_lock(bp);
8795 return rc;
8796}
8797
8798/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008799static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8800{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008801 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008802 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008803
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008804 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8805 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008806
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008807 if (!netif_running(dev))
8808 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008809
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008810 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008811}
8812
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00008813#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008814static void poll_bnx2x(struct net_device *dev)
8815{
8816 struct bnx2x *bp = netdev_priv(dev);
8817
8818 disable_irq(bp->pdev->irq);
8819 bnx2x_interrupt(bp->pdev->irq, dev);
8820 enable_irq(bp->pdev->irq);
8821}
8822#endif
8823
/* net_device_ops vector wired into every bnx2x netdev in
 * bnx2x_init_dev(); poll_controller is only built with netpoll support.
 */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
8838
/* PCI-level device initialization: enable the device, validate and map
 * its BARs (registers in BAR0, doorbells in BAR2), configure DMA
 * masking, and wire up the netdev ops, feature flags and MDIO callbacks.
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * PCI resources are released via the goto-cleanup ladder at the end.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 (registers) and BAR2 (doorbells) must both be memory BARs */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* only the first function to enable the device claims the regions */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA (sets USING_DAC_FLAG), fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map the doorbell BAR, capped at the actual BAR length */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);

	/* mirror the offload capabilities for VLAN devices */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
9005
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009006static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
9007 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08009008{
9009 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
9010
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009011 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
9012
9013 /* return value of 1=2.5GHz 2=5GHz */
9014 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08009015}
9016
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00009017static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009018{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009019 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009020 struct bnx2x_fw_file_hdr *fw_hdr;
9021 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009022 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009023 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009024 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009025 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009026
9027 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
9028 return -EINVAL;
9029
9030 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
9031 sections = (struct bnx2x_fw_file_section *)fw_hdr;
9032
9033 /* Make sure none of the offsets and sizes make us read beyond
9034 * the end of the firmware data */
9035 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
9036 offset = be32_to_cpu(sections[i].offset);
9037 len = be32_to_cpu(sections[i].len);
9038 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009039 dev_err(&bp->pdev->dev,
9040 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009041 return -EINVAL;
9042 }
9043 }
9044
9045 /* Likewise for the init_ops offsets */
9046 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
9047 ops_offsets = (u16 *)(firmware->data + offset);
9048 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
9049
9050 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
9051 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009052 dev_err(&bp->pdev->dev,
9053 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009054 return -EINVAL;
9055 }
9056 }
9057
9058 /* Check FW version */
9059 offset = be32_to_cpu(fw_hdr->fw_version.offset);
9060 fw_ver = firmware->data + offset;
9061 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
9062 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
9063 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
9064 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009065 dev_err(&bp->pdev->dev,
9066 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009067 fw_ver[0], fw_ver[1], fw_ver[2],
9068 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
9069 BCM_5710_FW_MINOR_VERSION,
9070 BCM_5710_FW_REVISION_VERSION,
9071 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009072 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009073 }
9074
9075 return 0;
9076}
9077
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009078static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009079{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009080 const __be32 *source = (const __be32 *)_source;
9081 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009082 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009083
9084 for (i = 0; i < n/4; i++)
9085 target[i] = be32_to_cpu(source[i]);
9086}
9087
9088/*
9089 Ops array is stored in the following format:
9090 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
9091 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009092static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009093{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009094 const __be32 *source = (const __be32 *)_source;
9095 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009096 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009097
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009098 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009099 tmp = be32_to_cpu(source[j]);
9100 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009101 target[i].offset = tmp & 0xffffff;
9102 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009103 }
9104}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009105
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009106/**
9107 * IRO array is stored in the following format:
9108 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
9109 */
9110static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
9111{
9112 const __be32 *source = (const __be32 *)_source;
9113 struct iro *target = (struct iro *)_target;
9114 u32 i, j, tmp;
9115
9116 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
9117 target[i].base = be32_to_cpu(source[j]);
9118 j++;
9119 tmp = be32_to_cpu(source[j]);
9120 target[i].m1 = (tmp >> 16) & 0xffff;
9121 target[i].m2 = tmp & 0xffff;
9122 j++;
9123 tmp = be32_to_cpu(source[j]);
9124 target[i].m3 = (tmp >> 16) & 0xffff;
9125 target[i].size = tmp & 0xffff;
9126 j++;
9127 }
9128}
9129
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009130static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009131{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009132 const __be16 *source = (const __be16 *)_source;
9133 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009134 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009135
9136 for (i = 0; i < n/2; i++)
9137 target[i] = be16_to_cpu(source[i]);
9138}
9139
/* Allocate bp->arr and fill it from the loaded firmware image.
 *
 * 'arr' names both a section descriptor (offset/len, big endian) in the
 * local variable 'fw_hdr' and the destination pointer member in 'bp';
 * 'func' is the endianness/layout conversion routine applied to the raw
 * section bytes.  On allocation failure this jumps to the caller-provided
 * cleanup label 'lbl' - usable only inside bnx2x_init_firmware()-style
 * functions that define 'bp' and 'fw_hdr' and the label.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009151
/* Load and validate the chip firmware, then build the driver's init
 * arrays (blob, opcodes, offsets, IRO) from it and point the per-STORM
 * init data at the relevant sections of the raw image.
 *
 * The cleanup labels unwind the BNX2X_ALLOC_AND_SET() allocations in
 * reverse order - each label frees everything allocated before the
 * failing step, ending with release_firmware().
 *
 * Returns 0 on success or a negative errno.
 */
int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	/* pick the firmware file matching the chip revision */
	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	/* sanity-check section bounds and FW version before trusting it */
	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware: these point directly into the raw image, so
	 * bp->firmware must stay held for as long as they are in use */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

	/* error unwinding: free in reverse allocation order */
iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}
9229
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009230static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9231{
9232 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009233
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009234#ifdef BCM_CNIC
9235 cid_count += CNIC_CID_MAX;
9236#endif
9237 return roundup(cid_count, QM_CID_ROUND);
9238}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009239
/* PCI probe callback: allocate the net_device, map the device, initialize
 * the driver state, choose the interrupt mode and register with the
 * network stack.  On any failure after bnx2x_init_dev() succeeded, the
 * init_one_exit path unmaps BARs and releases all PCI resources.
 *
 * Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

	/* the number of fastpath status blocks (and hence CIDs) depends
	 * on the chip family encoded in the PCI ID table entry */
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
			   ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	/* map BARs, set DMA masks, wire up netdev ops */
	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	/* the Gen2 decode differs per chip family: E2 reports speed code 1
	 * for 5GHz while E1x reports code 2 */
	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       pcie_width,
	       ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
	       "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	/* release regions only if this is the sole enable of the device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}
9340
/* PCI remove callback: unregister from the network stack, then tear down
 * interrupts, pending work, BAR mappings and PCI resources in the reverse
 * order of bnx2x_init_one(). */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	/* stop the stack from using the device before freeing anything */
	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	/* release regions only if this is the sole enable of the device */
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9379
/* Minimal NIC unload for the PCI error-recovery (EEH) path: the device
 * may be inaccessible, so only software state is torn down - no ramrods
 * or register writes beyond stopping the datapath.  Always returns 0. */
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	/* stop NAPI/interrupt processing and mark the link down */
	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}
9410
/* Re-discover the management firmware (MCP) state after a PCI reset:
 * locate the shared memory region, validate its signature and re-read
 * the firmware mailbox sequence number.  If shared memory looks invalid
 * the device is marked as running without an MCP (NO_MCP_FLAG). */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* shmem must fall inside the expected window, otherwise assume
	 * the MCP is not running */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resync the driver<->MCP mailbox sequence number */
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9441
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.  Detaches the netdev, unloads the NIC
 * if it was running, and asks the PCI core for a slot reset - unless the
 * failure is permanent, in which case recovery is declined.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	/* stop new stack activity before touching the device */
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		/* device is gone for good - no reset will help */
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}
9475
/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.  Re-enables the
 * device, restores bus mastering and saved config space; full traffic
 * restart happens later in bnx2x_io_resume().
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	/* restore the config space saved before the error */
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}
9506
/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.  Re-discovers the MCP state, reloads
 * the NIC if it was running and re-attaches the netdev.  Deferred while a
 * parity-error recovery is still in progress.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	/* do not race with the driver's own parity-recovery flow */
	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}
9536
/* PCI error-recovery (AER/EEH) callbacks: detect -> slot reset -> resume */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9542
/* PCI driver descriptor binding the probe/remove/PM/error-recovery
 * entry points to the device IDs in bnx2x_pci_tbl */
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};
9552
/* Module entry point: create the driver-private workqueue (used e.g. by
 * the slowpath/reset tasks) and register the PCI driver.  The workqueue
 * is destroyed again if PCI registration fails. */
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}
9572
/* Module exit point: unregister the PCI driver (which triggers remove for
 * all bound devices) before destroying the shared workqueue. */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9582
Michael Chan993ac7b2009-10-10 13:46:56 +00009583#ifdef BCM_CNIC
9584
/* count denotes the number of new completions we have seen */
/* Drain queued CNIC kernel work-queue entries (kwq) onto the slowpath
 * queue (SPQ), honoring the per-type in-flight credits: at most 8 L2/
 * COMMON SPEs (tracked by bp->spq_left) and at most max_kwqe_pending L5
 * (iSCSI) SPEs may be outstanding.  Runs under bp->spq_lock. */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	/* 'count' completions were consumed by the chip */
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 *  ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					     hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;	/* no L2/COMMON credit left */
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;	/* no L5 credit left */
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* copy the element into the next free SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the kwq consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	/* publish the new SPQ producer to the chip */
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9654
/* CNIC callback: enqueue up to 'count' 16-byte kernel work-queue entries
 * onto the driver's kwq ring, then kick bnx2x_cnic_sp_post() to move them
 * to the slowpath queue if there is credit.  Returns the number of
 * entries actually queued (may be less than 'count' if the ring fills). */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* stop once the kwq ring is full */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* push to the SPQ now if there may be credit available */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9697
9698static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9699{
9700 struct cnic_ops *c_ops;
9701 int rc = 0;
9702
9703 mutex_lock(&bp->cnic_mutex);
9704 c_ops = bp->cnic_ops;
9705 if (c_ops)
9706 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9707 mutex_unlock(&bp->cnic_mutex);
9708
9709 return rc;
9710}
9711
9712static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
9713{
9714 struct cnic_ops *c_ops;
9715 int rc = 0;
9716
9717 rcu_read_lock();
9718 c_ops = rcu_dereference(bp->cnic_ops);
9719 if (c_ops)
9720 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
9721 rcu_read_unlock();
9722
9723 return rc;
9724}
9725
9726/*
9727 * for commands that have no data
9728 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009729int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009730{
9731 struct cnic_ctl_info ctl = {0};
9732
9733 ctl.cmd = cmd;
9734
9735 return bnx2x_cnic_ctl_send(bp, &ctl);
9736}
9737
9738static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9739{
9740 struct cnic_ctl_info ctl;
9741
9742 /* first we tell CNIC and only then we count this as a completion */
9743 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9744 ctl.data.comp.cid = cid;
9745
9746 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009747 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009748}
9749
/* CNIC -> bnx2x control dispatcher: handles context-table writes, SPQ
 * credit returns and iSCSI L2 ring start/stop requests.  Returns 0 on
 * success or -EINVAL for an unknown command. */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		/* write one ILT (context table) entry on CNIC's behalf */
		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* CNIC returns L5 SPQ credit; try to drain the kwq */
		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* make sure the MAC is programmed before opening the
		 * filters below */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* close the filters before removing the MAC */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		/* return L2/COMMON SPQ credit with full memory ordering */
		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
9827
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009828void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +00009829{
9830 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9831
9832 if (bp->flags & USING_MSIX_FLAG) {
9833 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9834 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9835 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9836 } else {
9837 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9838 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9839 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009840 if (CHIP_IS_E2(bp))
9841 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9842 else
9843 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9844
Michael Chan993ac7b2009-10-10 13:46:56 +00009845 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009846 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00009847 cp->irq_arr[1].status_blk = bp->def_status_blk;
9848 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009849 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +00009850
9851 cp->num_irq = 2;
9852}
9853
9854static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
9855 void *data)
9856{
9857 struct bnx2x *bp = netdev_priv(dev);
9858 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9859
9860 if (ops == NULL)
9861 return -EINVAL;
9862
9863 if (atomic_read(&bp->intr_sem) != 0)
9864 return -EBUSY;
9865
9866 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
9867 if (!bp->cnic_kwq)
9868 return -ENOMEM;
9869
9870 bp->cnic_kwq_cons = bp->cnic_kwq;
9871 bp->cnic_kwq_prod = bp->cnic_kwq;
9872 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
9873
9874 bp->cnic_spq_pending = 0;
9875 bp->cnic_kwq_pending = 0;
9876
9877 bp->cnic_data = data;
9878
9879 cp->num_irq = 0;
9880 cp->drv_state = CNIC_DRV_STATE_REGD;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009881 cp->iro_arr = bp->iro_arr;
Michael Chan993ac7b2009-10-10 13:46:56 +00009882
Michael Chan993ac7b2009-10-10 13:46:56 +00009883 bnx2x_setup_cnic_irq_info(bp);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009884
Michael Chan993ac7b2009-10-10 13:46:56 +00009885 rcu_assign_pointer(bp->cnic_ops, ops);
9886
9887 return 0;
9888}
9889
/* Detach the CNIC driver: drop the iSCSI MAC if it was configured,
 * clear the registered state and the ops pointer under cnic_mutex,
 * then wait out RCU readers before freeing the kwqe ring.
 * Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* Wait for any in-flight rcu_dereference(bp->cnic_ops) users to
	 * finish before tearing down state they may still be using.
	 */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	/* Clear the stale pointer to guard against use-after-free */
	bp->cnic_kwq = NULL;

	return 0;
}
9909
9910struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
9911{
9912 struct bnx2x *bp = netdev_priv(dev);
9913 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9914
9915 cp->drv_owner = THIS_MODULE;
9916 cp->chip_id = CHIP_ID(bp);
9917 cp->pdev = bp->pdev;
9918 cp->io_base = bp->regview;
9919 cp->io_base2 = bp->doorbells;
9920 cp->max_kwqe_pending = 8;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009921 cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009922 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
9923 bnx2x_cid_ilt_lines(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00009924 cp->ctx_tbl_len = CNIC_ILT_LINES;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009925 cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
Michael Chan993ac7b2009-10-10 13:46:56 +00009926 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
9927 cp->drv_ctl = bnx2x_drv_ctl;
9928 cp->drv_register_cnic = bnx2x_register_cnic;
9929 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009930 cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
9931 cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;
Michael Chan993ac7b2009-10-10 13:46:56 +00009932
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009933 DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
9934 "starting cid %d\n",
9935 cp->ctx_blk_size,
9936 cp->ctx_tbl_offset,
9937 cp->ctx_tbl_len,
9938 cp->starting_cid);
Michael Chan993ac7b2009-10-10 13:46:56 +00009939 return cp;
9940}
9941EXPORT_SYMBOL(bnx2x_cnic_probe);
9942
9943#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009944