/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

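/*
 * Helpers that write driver state into the STORM processors' internal
 * memory: each one writes a plain value, a DMA address (split into
 * low/high 32-bit halves) or a whole structure into a BAR_*STRORM_INTMEM
 * window at a firmware-defined offset.
 */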
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

static void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			  int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}
}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

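/*
 * DMAE opcode builders: bnx2x_dmae_opcode() composes a command opcode from
 * the source/destination types, the port and VN of this function, the host
 * endianness and, optionally, the completion type.
 */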
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

static void bnx2x_prep_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae,
				      u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a DMAE command over the init channel and wait for completion */
static int bnx2x_issue_dmae_with_comp(struct bnx2x *bp,
				      struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

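/* write a buffer from host memory (dma_addr) to device memory (dst_addr)
 * via the DMAE block; falls back to indirect register writes while DMAE
 * is not yet ready
 */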
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

static void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
				      u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

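/* scan the assert lists of all four STORM processors and print any
 * firmware asserts found; returns the number of asserts
 */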
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

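/* dump the MCP trace buffer (located via shmem and the MCP scratchpad)
 * to the kernel log
 */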
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;

	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
			+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

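/* dump driver and firmware state for debugging: status block indices,
 * per-queue Rx/Tx ring pointers and (under BNX2X_STOP_ON_ERROR) the ring
 * contents, followed by the firmware trace and the STORM asserts
 */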
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

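/*
 * Interrupt enable/disable comes in two flavors, depending on whether the
 * chip routes interrupts through the HC or the IGU block;
 * bnx2x_int_enable() and bnx2x_int_disable() dispatch on
 * bp->common.int_block.
 */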
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * in E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN is still always on,
		 * use the mask register to prevent the HC from sending
		 * interrupts after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

1254
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001255static void bnx2x_igu_int_disable(struct bnx2x *bp)
1256{
1257 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
1258
1259 val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
1260 IGU_PF_CONF_INT_LINE_EN |
1261 IGU_PF_CONF_ATTN_BIT_EN);
1262
1263 DP(NETIF_MSG_INTR, "write %x to IGU\n", val);
1264
1265 /* flush all outstanding writes */
1266 mmiowb();
1267
1268 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
1269 if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
1270 BNX2X_ERR("BUG! proper val not read from IGU!\n");
1271}
1272
stephen hemminger8d962862010-10-21 07:50:56 +00001273static void bnx2x_int_disable(struct bnx2x *bp)
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001274{
1275 if (bp->common.int_block == INT_BLOCK_HC)
1276 bnx2x_hc_int_disable(bp);
1277 else
1278 bnx2x_igu_int_disable(bp);
1279}
1280
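/* disable interrupts and synchronize: optionally mask the HW, wait for
 * all ISRs to finish and make sure the slow-path task is not running
 */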
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

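/* handle a slow-path (ramrod) completion CQE for the given fastpath:
 * advance fp->state according to the completed command and release a
 * slow-path queue credit (bp->spq_left)
 */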
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001355void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001356 union eth_rx_cqe *rr_cqe)
1357{
1358 struct bnx2x *bp = fp->bp;
1359 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1360 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1361
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001362 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001363 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001364 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001365 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001366
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001367 switch (command | fp->state) {
1368 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1369 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1370 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001371 break;
1372
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001373 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1374 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001375 fp->state = BNX2X_FP_STATE_HALTED;
1376 break;
1377
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001378 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1379 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1380 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001381 break;
1382
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001383 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001384 BNX2X_ERR("unexpected MC reply (%d) "
1385 "fp[%d] state is %x\n",
1386 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001387 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001388 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001389
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001390 smp_mb__before_atomic_inc();
1391 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001392 /* push the change in fp->state and towards the memory */
1393 smp_wmb();
1394
1395 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001396}

irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, once every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;

	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {
		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */
	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);

	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];

		/* Initialize link parameters structure variables.
		   It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - cannot test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;

	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec. We don't want the credits
	   to pass a credit of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;

	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
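
/*
 * Illustrative arithmetic (assuming RS_PERIODIC_TIMEOUT_USEC == 100,
 * consistent with the "100 usec in SDM ticks" comment above): on a
 * 10 Gbps link, r_param = 10000/8 = 1250 bytes/usec, so
 *	rs_periodic_timeout = 100/4          = 25 SDM ticks
 *	rs_threshold        = (100*1250*5)/4 = 156250
 * i.e. 1.25 times the 125000 bytes the port can move in one period.
 */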

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
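
/*
 * Hypothetical configuration, to illustrate: three visible vns with min
 * rates 0, 2500 and 7500. The zero entry is bumped to DEF_MIN_RATE, so
 * vn_weight_sum = DEF_MIN_RATE + 10000 and fairness stays enabled since
 * not all rates were zero. Had all three been zero, each would still be
 * counted as DEF_MIN_RATE, but the all_zero path above would clear
 * CMNG_FLAGS_PER_PORT_FAIRNESS_VN.
 */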

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vns share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
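
/*
 * Quota arithmetic, illustrated (again assuming RS_PERIODIC_TIMEOUT_USEC
 * == 100, with hypothetical rates): a vn capped at vn_max_rate = 2500
 * (Mbps) gets quota = (2500 * 100) / 8 = 31250, exactly the number of
 * bytes a 2.5 Gbps stream produces in one 100 usec rate-shaping period.
 */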

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case? */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		/* absolute function number */
		int func = 2*vn + BP_PORT(bp);

		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   " fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP, "rate shaping and fairness are disabled\n");
}

static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;

	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event,
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
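
/*
 * Typical call, as used by the DCC handling further down: pick a
 * DRV_MSG_CODE_* request plus an optional parameter and check the
 * FW_MSG_CODE_* reply, e.g.
 *
 *	rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *
 * A return value of 0 means the MCP never produced a matching sequence
 * number (the "FW BUG" path above).
 */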

/* must be called under rtnl_lock */
static void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
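
/*
 * Illustrative call (hypothetical caller): a normal, non-promiscuous
 * mode would be programmed as
 *
 *	bnx2x_rxq_set_mac_filters(bp, cl_id,
 *				  BNX2X_ACCEPT_UNICAST |
 *				  BNX2X_ACCEPT_MULTICAST |
 *				  BNX2X_ACCEPT_BROADCAST);
 *
 * which clears this client's three drop_all bits and, since broadcast
 * is handled as accept-all, also sets its bcast_accept_all bit.
 */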

static void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	struct tstorm_eth_function_common_config tcfg = {0};
	u16 rss_flgs;

	/* tpa */
	if (p->func_flgs & FUNC_FLG_TPA)
		tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

	/* set rss flags */
	rss_flgs = (p->rss->mode <<
		TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

	if (p->rss->cap & RSS_IPV4_CAP)
		rss_flgs |= RSS_IPV4_CAP_MASK;
	if (p->rss->cap & RSS_IPV4_TCP_CAP)
		rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_CAP)
		rss_flgs |= RSS_IPV6_CAP_MASK;
	if (p->rss->cap & RSS_IPV6_TCP_CAP)
		rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

	tcfg.config_flags |= rss_flgs;
	tcfg.rss_result_mask = p->rss->result_mask;

	storm_memset_func_cfg(bp, &tcfg, p->func_id);

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}

static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp)
{
	u16 flags = 0;

	/* calculate queue flags */
	flags |= QUEUE_FLG_CACHE_ALIGN;
	flags |= QUEUE_FLG_HC;
	flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;

	flags |= QUEUE_FLG_VLAN;
	DP(NETIF_MSG_IFUP, "vlan removal enabled\n");

	if (!fp->disable_tpa)
		flags |= QUEUE_FLG_TPA;

	flags |= QUEUE_FLG_STATS;

	return flags;
}

static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			  (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}

static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
{
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	txq_init->flags = flags;
	txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	txq_init->dscr_map = fp->tx_desc_mapping;
	txq_init->stat_id = fp->cl_id;
	txq_init->cid = HW_CID(bp, fp->cid);
	txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
	txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
	txq_init->fw_sb_id = fp->fw_sb_id;
	txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
}

static void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/* function setup */

	/*
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 */
	rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
		   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
	rss.mode = bp->multi_mode;
	rss.result_mask = MULTI_MASK;
	func_init.rss = &rss;

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	 * Congestion management values depend on the link rate.
	 * There is no active link, so the initial link rate is set to
	 * 10 Gbps. When the link comes up, the congestion management
	 * values are re-calculated according to the actual link rate.
	 */
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}

static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}

static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Only the Tx queues need to be re-enabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Do not call netif_carrier_on here - the link state check will
	 * invoke it anyway if the link is up.
	 */
}

static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
{
	DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);

	if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {

		/*
		 * This is the only place besides the function initialization
		 * where the bp->flags can change, so it is done without any
		 * locks.
		 */
		if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
			bp->flags |= MF_FUNC_DIS;

			bnx2x_e1h_disable(bp);
		} else {
			DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
			bp->flags &= ~MF_FUNC_DIS;

			bnx2x_e1h_enable(bp);
		}
		dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
	}
	if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {

		bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
		bnx2x_link_sync_notify(bp);
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
	}

	/* Report results to MCP */
	if (dcc_event)
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
	else
		bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
}

/* must be called under the spq lock */
static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
{
	struct eth_spe *next_spe = bp->spq_prod_bd;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");
	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}
	return next_spe;
}

/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	mmiowb();
}
2643
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002644/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002645int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002646 u32 data_hi, u32 data_lo, int common)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002647{
Michael Chan28912902009-10-10 13:46:53 +00002648 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002649 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002650
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002651#ifdef BNX2X_STOP_ON_ERROR
2652 if (unlikely(bp->panic))
2653 return -EIO;
2654#endif
2655
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002656 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002657
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002658 if (!atomic_read(&bp->spq_left)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002659 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002660 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002661 bnx2x_panic();
2662 return -EBUSY;
2663 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002664
Michael Chan28912902009-10-10 13:46:53 +00002665 spe = bnx2x_sp_get_next(bp);
2666
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002667 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002668 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002669 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2670 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002671
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002672 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002673 /* Common ramrods:
2674	 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC,
2675 * TRAFFIC_STOP, TRAFFIC_START
2676 */
2677 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2678 & SPE_HDR_CONN_TYPE;
2679 else
2680 /* ETH ramrods: SETUP, HALT */
2681 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2682 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002683
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002684 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2685 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002686
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002687 spe->hdr.type = cpu_to_le16(type);
2688
2689 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2690 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2691
2692	/* stats ramrod has its own slot on the spq */
2693 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2694 /* It's ok if the actual decrement is issued towards the memory
2695 * somewhere between the spin_lock and spin_unlock. Thus no
2696	 * more explicit memory barrier is needed.
2697 */
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002698 atomic_dec(&bp->spq_left);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002699
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002700 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002701 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2702 "type(0x%x) left %x\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002703 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2704 (u32)(U64_LO(bp->spq_mapping) +
2705 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002706 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002707
Michael Chan28912902009-10-10 13:46:53 +00002708 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002709 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002710 return 0;
2711}
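/*
 * Illustrative call (parameter values are examples only): halting an
 * ETH connection might be posted as
 *
 *	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, cid, 0, 0, 0);
 *
 * i.e. an ETH-type SPE with no data payload, while the common ramrods
 * listed above (FUNC_START, STATS, ...) pass common == 1 so the SPE
 * is typed NONE_CONNECTION_TYPE instead.
 */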
2712
2713/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002714static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002715{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002716 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002717 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002718
2719 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002720 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002721 val = (1UL << 31);
2722 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2723 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2724 if (val & (1L << 31))
2725 break;
2726
2727 msleep(5);
2728 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002729 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002730 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002731 rc = -EBUSY;
2732 }
2733
2734 return rc;
2735}
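/*
 * ALR protocol: writing bit 31 to the MCP scratch register at offset
 * 0x9c requests the lock, and reading bit 31 back as set means it was
 * granted. With 1000 attempts at 5 ms each, the loop above waits up
 * to roughly 5 seconds before giving up with -EBUSY.
 */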
2736
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002737/* release split MCP access lock register */
2738static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002739{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002740 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002741}
2742
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002743#define BNX2X_DEF_SB_ATT_IDX 0x0001
2744#define BNX2X_DEF_SB_IDX 0x0002
2745
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002746static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2747{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002748 struct host_sp_status_block *def_sb = bp->def_status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002749 u16 rc = 0;
2750
2751 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002752 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2753 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002754 rc |= BNX2X_DEF_SB_ATT_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002755 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002756
2757 if (bp->def_idx != def_sb->sp_sb.running_index) {
2758 bp->def_idx = def_sb->sp_sb.running_index;
2759 rc |= BNX2X_DEF_SB_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002760 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002761
2762	/* Do not reorder: index reads should complete before handling */
2763 barrier();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002764 return rc;
2765}
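/*
 * The bitmask returned here (BNX2X_DEF_SB_ATT_IDX and/or
 * BNX2X_DEF_SB_IDX) is consumed by bnx2x_sp_task(), which clears each
 * bit as the matching event class (attentions vs. slow-path
 * completions) gets handled.
 */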
2766
2767/*
2768 * slow path service functions
2769 */
2770
2771static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2772{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002773 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002774 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2775 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002776 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2777 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002778 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002779 u32 nig_mask = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002780 u32 reg_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002781
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002782 if (bp->attn_state & asserted)
2783 BNX2X_ERR("IGU ERROR\n");
2784
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002785 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2786 aeu_mask = REG_RD(bp, aeu_addr);
2787
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002788 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002789 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002790 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002791 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002792
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002793 REG_WR(bp, aeu_addr, aeu_mask);
2794 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002795
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002796 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002797 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002798 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002799
2800 if (asserted & ATTN_HARD_WIRED_MASK) {
2801 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002802
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002803 bnx2x_acquire_phy_lock(bp);
2804
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002805 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002806 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002807 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002808
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002809 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002810
2811 /* handle unicore attn? */
2812 }
2813 if (asserted & ATTN_SW_TIMER_4_FUNC)
2814 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2815
2816 if (asserted & GPIO_2_FUNC)
2817 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2818
2819 if (asserted & GPIO_3_FUNC)
2820 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2821
2822 if (asserted & GPIO_4_FUNC)
2823 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2824
2825 if (port == 0) {
2826 if (asserted & ATTN_GENERAL_ATTN_1) {
2827 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2828 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2829 }
2830 if (asserted & ATTN_GENERAL_ATTN_2) {
2831 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2832 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2833 }
2834 if (asserted & ATTN_GENERAL_ATTN_3) {
2835 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2836 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2837 }
2838 } else {
2839 if (asserted & ATTN_GENERAL_ATTN_4) {
2840 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2841 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2842 }
2843 if (asserted & ATTN_GENERAL_ATTN_5) {
2844 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2845 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2846 }
2847 if (asserted & ATTN_GENERAL_ATTN_6) {
2848 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2849 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2850 }
2851 }
2852
2853 } /* if hardwired */
2854
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002855 if (bp->common.int_block == INT_BLOCK_HC)
2856 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2857 COMMAND_REG_ATTN_BITS_SET);
2858 else
2859 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2860
2861 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2862 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2863 REG_WR(bp, reg_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002864
2865 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002866 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002867 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002868 bnx2x_release_phy_lock(bp);
2869 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002870}
2871
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002872static inline void bnx2x_fan_failure(struct bnx2x *bp)
2873{
2874 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002875 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002876 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002877 ext_phy_config =
2878 SHMEM_RD(bp,
2879 dev_info.port_hw_config[port].external_phy_config);
2880
2881 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2882 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002883 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002884 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002885
2886 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002887 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2888 " the driver to shutdown the card to prevent permanent"
2889 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002890}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002891
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002892static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2893{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002894 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002895 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002896 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002897
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002898 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2899 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002900
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002901 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002902
2903 val = REG_RD(bp, reg_offset);
2904 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2905 REG_WR(bp, reg_offset, val);
2906
2907 BNX2X_ERR("SPIO5 hw attention\n");
2908
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002909 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002910 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002911 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002912 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002913
Eilon Greenstein589abe32009-02-12 08:36:55 +00002914 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2915 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2916 bnx2x_acquire_phy_lock(bp);
2917 bnx2x_handle_module_detect_int(&bp->link_params);
2918 bnx2x_release_phy_lock(bp);
2919 }
2920
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002921 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2922
2923 val = REG_RD(bp, reg_offset);
2924 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2925 REG_WR(bp, reg_offset, val);
2926
2927 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002928 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002929 bnx2x_panic();
2930 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002931}
2932
2933static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2934{
2935 u32 val;
2936
Eilon Greenstein0626b892009-02-12 08:38:14 +00002937 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002938
2939 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2940 BNX2X_ERR("DB hw attention 0x%x\n", val);
2941 /* DORQ discard attention */
2942 if (val & 0x2)
2943 BNX2X_ERR("FATAL error from DORQ\n");
2944 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002945
2946 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2947
2948 int port = BP_PORT(bp);
2949 int reg_offset;
2950
2951 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2952 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2953
2954 val = REG_RD(bp, reg_offset);
2955 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2956 REG_WR(bp, reg_offset, val);
2957
2958 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002959 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002960 bnx2x_panic();
2961 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002962}
2963
2964static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2965{
2966 u32 val;
2967
2968 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2969
2970 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2971 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2972 /* CFC error attention */
2973 if (val & 0x2)
2974 BNX2X_ERR("FATAL error from CFC\n");
2975 }
2976
2977 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2978
2979 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2980 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2981 /* RQ_USDMDP_FIFO_OVERFLOW */
2982 if (val & 0x18000)
2983 BNX2X_ERR("FATAL error from PXP\n");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002984 if (CHIP_IS_E2(bp)) {
2985 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2986 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2987 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002988 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002989
2990 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2991
2992 int port = BP_PORT(bp);
2993 int reg_offset;
2994
2995 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2996 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2997
2998 val = REG_RD(bp, reg_offset);
2999 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
3000 REG_WR(bp, reg_offset, val);
3001
3002 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003003 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003004 bnx2x_panic();
3005 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003006}
3007
3008static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3009{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003010 u32 val;
3011
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003012 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3013
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003014 if (attn & BNX2X_PMF_LINK_ASSERT) {
3015 int func = BP_FUNC(bp);
3016
3017 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003018 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3019 func_mf_config[BP_ABS_FUNC(bp)].config);
3020 val = SHMEM_RD(bp,
3021 func_mb[BP_FW_MB_IDX(bp)].drv_status);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003022 if (val & DRV_STATUS_DCC_EVENT_MASK)
3023 bnx2x_dcc_event(bp,
3024 (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003025 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003026 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003027 bnx2x_pmf_update(bp);
3028
3029 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003030
3031 BNX2X_ERR("MC assert!\n");
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3035 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3036 bnx2x_panic();
3037
3038 } else if (attn & BNX2X_MCP_ASSERT) {
3039
3040 BNX2X_ERR("MCP assert!\n");
3041 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003042 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003043
3044 } else
3045 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3046 }
3047
3048 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003049 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3050 if (attn & BNX2X_GRC_TIMEOUT) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003051 val = CHIP_IS_E1(bp) ? 0 :
3052 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003053 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3054 }
3055 if (attn & BNX2X_GRC_RSV) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003056 val = CHIP_IS_E1(bp) ? 0 :
3057 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003058 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3059 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003060 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003061 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003062}
3063
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003064#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3065#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3066#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3067#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3068#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3069#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003070
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003071/*
3072 * should be run under rtnl lock
3073 */
3074static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3075{
3076 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3077 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3078 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3079 barrier();
3080 mmiowb();
3081}
3082
3083/*
3084 * should be run under rtnl lock
3085 */
3086static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3087{
3088 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3089 val |= (1 << 16);
3090 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3091 barrier();
3092 mmiowb();
3093}
3094
3095/*
3096 * should be run under rtnl lock
3097 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003098bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003099{
3100 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3101 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3102	return !(val & RESET_DONE_FLAG_MASK);
3103}
3104
3105/*
3106 * should be run under rtnl lock
3107 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003108inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003109{
3110 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3111
3112 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3113
3114 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3115 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3116 barrier();
3117 mmiowb();
3118}
3119
3120/*
3121 * should be run under rtnl lock
3122 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003123u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003124{
3125 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3126
3127 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3128
3129 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3130 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3131 barrier();
3132 mmiowb();
3133
3134 return val1;
3135}
3136
3137/*
3138 * should be run under rtnl lock
3139 */
3140static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3141{
3142 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3143}
3144
3145static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3146{
3147 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3148 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3149}
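/*
 * Layout of BNX2X_MISC_GEN_REG as used by the helpers above: the low
 * LOAD_COUNTER_BITS (16) bits count the loaded functions, and bit
 * RESET_DONE_FLAG_SHIFT is set while a reset is in progress. As an
 * illustrative value, 0x00010003 means a reset is in flight with
 * three functions counted as loaded.
 */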
3150
3151static inline void _print_next_block(int idx, const char *blk)
3152{
3153 if (idx)
3154 pr_cont(", ");
3155 pr_cont("%s", blk);
3156}
3157
3158static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3159{
3160 int i = 0;
3161 u32 cur_bit = 0;
3162 for (i = 0; sig; i++) {
3163 cur_bit = ((u32)0x1 << i);
3164 if (sig & cur_bit) {
3165 switch (cur_bit) {
3166 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3167 _print_next_block(par_num++, "BRB");
3168 break;
3169 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3170 _print_next_block(par_num++, "PARSER");
3171 break;
3172 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3173 _print_next_block(par_num++, "TSDM");
3174 break;
3175 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3176 _print_next_block(par_num++, "SEARCHER");
3177 break;
3178 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3179 _print_next_block(par_num++, "TSEMI");
3180 break;
3181 }
3182
3183 /* Clear the bit */
3184 sig &= ~cur_bit;
3185 }
3186 }
3187
3188 return par_num;
3189}
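/*
 * bnx2x_print_blocks_with_parity0() above and its 1/2/3 siblings
 * below share one pattern: scan the signal word bit by bit, print the
 * name of every block whose parity bit is set, and thread par_num
 * through so the console line reads e.g.
 * "eth0: Parity errors detected in blocks: BRB, TSDM" (illustrative
 * output).
 */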
3190
3191static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3192{
3193 int i = 0;
3194 u32 cur_bit = 0;
3195 for (i = 0; sig; i++) {
3196 cur_bit = ((u32)0x1 << i);
3197 if (sig & cur_bit) {
3198 switch (cur_bit) {
3199 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3200 _print_next_block(par_num++, "PBCLIENT");
3201 break;
3202 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3203 _print_next_block(par_num++, "QM");
3204 break;
3205 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3206 _print_next_block(par_num++, "XSDM");
3207 break;
3208 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3209 _print_next_block(par_num++, "XSEMI");
3210 break;
3211 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3212 _print_next_block(par_num++, "DOORBELLQ");
3213 break;
3214 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3215 _print_next_block(par_num++, "VAUX PCI CORE");
3216 break;
3217 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3218 _print_next_block(par_num++, "DEBUG");
3219 break;
3220 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3221 _print_next_block(par_num++, "USDM");
3222 break;
3223 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3224 _print_next_block(par_num++, "USEMI");
3225 break;
3226 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3227 _print_next_block(par_num++, "UPB");
3228 break;
3229 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3230 _print_next_block(par_num++, "CSDM");
3231 break;
3232 }
3233
3234 /* Clear the bit */
3235 sig &= ~cur_bit;
3236 }
3237 }
3238
3239 return par_num;
3240}
3241
3242static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3243{
3244 int i = 0;
3245 u32 cur_bit = 0;
3246 for (i = 0; sig; i++) {
3247 cur_bit = ((u32)0x1 << i);
3248 if (sig & cur_bit) {
3249 switch (cur_bit) {
3250 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3251 _print_next_block(par_num++, "CSEMI");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3254 _print_next_block(par_num++, "PXP");
3255 break;
3256 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3257 _print_next_block(par_num++,
3258 "PXPPCICLOCKCLIENT");
3259 break;
3260 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3261 _print_next_block(par_num++, "CFC");
3262 break;
3263 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3264 _print_next_block(par_num++, "CDU");
3265 break;
3266 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3267 _print_next_block(par_num++, "IGU");
3268 break;
3269 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3270 _print_next_block(par_num++, "MISC");
3271 break;
3272 }
3273
3274 /* Clear the bit */
3275 sig &= ~cur_bit;
3276 }
3277 }
3278
3279 return par_num;
3280}
3281
3282static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3283{
3284 int i = 0;
3285 u32 cur_bit = 0;
3286 for (i = 0; sig; i++) {
3287 cur_bit = ((u32)0x1 << i);
3288 if (sig & cur_bit) {
3289 switch (cur_bit) {
3290 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3291 _print_next_block(par_num++, "MCP ROM");
3292 break;
3293 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3294 _print_next_block(par_num++, "MCP UMP RX");
3295 break;
3296 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3297 _print_next_block(par_num++, "MCP UMP TX");
3298 break;
3299 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3300 _print_next_block(par_num++, "MCP SCPAD");
3301 break;
3302 }
3303
3304 /* Clear the bit */
3305 sig &= ~cur_bit;
3306 }
3307 }
3308
3309 return par_num;
3310}
3311
3312static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3313 u32 sig2, u32 sig3)
3314{
3315 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3316 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3317 int par_num = 0;
3318 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3319 "[0]:0x%08x [1]:0x%08x "
3320 "[2]:0x%08x [3]:0x%08x\n",
3321 sig0 & HW_PRTY_ASSERT_SET_0,
3322 sig1 & HW_PRTY_ASSERT_SET_1,
3323 sig2 & HW_PRTY_ASSERT_SET_2,
3324 sig3 & HW_PRTY_ASSERT_SET_3);
3325 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3326 bp->dev->name);
3327 par_num = bnx2x_print_blocks_with_parity0(
3328 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3329 par_num = bnx2x_print_blocks_with_parity1(
3330 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3331 par_num = bnx2x_print_blocks_with_parity2(
3332 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3333 par_num = bnx2x_print_blocks_with_parity3(
3334 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3335 printk("\n");
3336 return true;
3337 } else
3338 return false;
3339}
3340
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003341bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003342{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003343 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003344 int port = BP_PORT(bp);
3345
3346 attn.sig[0] = REG_RD(bp,
3347 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3348 port*4);
3349 attn.sig[1] = REG_RD(bp,
3350 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3351 port*4);
3352 attn.sig[2] = REG_RD(bp,
3353 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3354 port*4);
3355 attn.sig[3] = REG_RD(bp,
3356 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3357 port*4);
3358
3359 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3360 attn.sig[3]);
3361}
3362
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003363
3364static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3365{
3366 u32 val;
3367 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3368
3369 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3370 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3371 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3372 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3373 "ADDRESS_ERROR\n");
3374 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3375 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3376 "INCORRECT_RCV_BEHAVIOR\n");
3377 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3378 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3379 "WAS_ERROR_ATTN\n");
3380 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3381 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3382 "VF_LENGTH_VIOLATION_ATTN\n");
3383 if (val &
3384 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3385 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3386 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3387 if (val &
3388 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3389 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3390 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3391 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3392 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3393 "TCPL_ERROR_ATTN\n");
3394 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3395 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3396 "TCPL_IN_TWO_RCBS_ATTN\n");
3397 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3398 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3399 "CSSNOOP_FIFO_OVERFLOW\n");
3400 }
3401 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3402 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3403 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3404 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3405 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3406 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3407 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3408 "_ATC_TCPL_TO_NOT_PEND\n");
3409 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3410 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3411 "ATC_GPA_MULTIPLE_HITS\n");
3412 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3413 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3414 "ATC_RCPL_TO_EMPTY_CNT\n");
3415 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3416 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3417 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3418 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3419 "ATC_IREQ_LESS_THAN_STU\n");
3420 }
3421
3422 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3423 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3424 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3425 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3426 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3427 }
3428
3429}
3430
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003431static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3432{
3433 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003434 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003435 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003436 u32 reg_addr;
3437 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003438 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003439
3440 /* need to take HW lock because MCP or other port might also
3441 try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003442 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003443
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003444 if (bnx2x_chk_parity_attn(bp)) {
3445 bp->recovery_state = BNX2X_RECOVERY_INIT;
3446 bnx2x_set_reset_in_progress(bp);
3447 schedule_delayed_work(&bp->reset_task, 0);
3448 /* Disable HW interrupts */
3449 bnx2x_int_disable(bp);
3450 bnx2x_release_alr(bp);
3451 /* In case of parity errors don't handle attentions so that
3452 * other function would "see" parity errors.
3453 */
3454 return;
3455 }
3456
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003457 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3458 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3459 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3460 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003461 if (CHIP_IS_E2(bp))
3462 attn.sig[4] =
3463 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3464 else
3465 attn.sig[4] = 0;
3466
3467 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3468 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003469
3470 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3471 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003472 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003473
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003474 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3475 "%08x %08x %08x\n",
3476 index,
3477 group_mask->sig[0], group_mask->sig[1],
3478 group_mask->sig[2], group_mask->sig[3],
3479 group_mask->sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003480
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003481 bnx2x_attn_int_deasserted4(bp,
3482 attn.sig[4] & group_mask->sig[4]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003483 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003484 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003485 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003486 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003487 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003488 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003489 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003490 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003491 }
3492 }
3493
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003494 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003495
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003496 if (bp->common.int_block == INT_BLOCK_HC)
3497 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3498 COMMAND_REG_ATTN_BITS_CLR);
3499 else
3500 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003501
3502 val = ~deasserted;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003503 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3504 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003505 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003506
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003507 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003508 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003509
3510 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3511 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3512
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003513 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3514 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003515
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003516 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3517 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003518 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003519 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3520
3521 REG_WR(bp, reg_addr, aeu_mask);
3522 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003523
3524 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3525 bp->attn_state &= ~deasserted;
3526 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3527}
3528
3529static void bnx2x_attn_int(struct bnx2x *bp)
3530{
3531 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003532 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3533 attn_bits);
3534 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3535 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003536 u32 attn_state = bp->attn_state;
3537
3538 /* look for changed bits */
3539 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3540 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3541
3542 DP(NETIF_MSG_HW,
3543 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3544 attn_bits, attn_ack, asserted, deasserted);
3545
3546 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003547 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003548
3549 /* handle bits that were raised */
3550 if (asserted)
3551 bnx2x_attn_int_asserted(bp, asserted);
3552
3553 if (deasserted)
3554 bnx2x_attn_int_deasserted(bp, deasserted);
3555}
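/*
 * Worked example for the edge detection above (illustrative values):
 * attn_bits = 0x5, attn_ack = 0x1, attn_state = 0x1 gives
 * asserted = 0x5 & ~0x1 & ~0x1 = 0x4 and
 * deasserted = ~0x5 & 0x1 & 0x1 = 0x0, so only the newly raised line
 * is serviced. A line only counts as deasserted once it is clear in
 * attn_bits while still acknowledged and recorded in attn_state.
 */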
3556
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003557static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3558{
3559 /* No memory barriers */
3560 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3561 mmiowb(); /* keep prod updates ordered */
3562}
3563
3564#ifdef BCM_CNIC
3565static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3566 union event_ring_elem *elem)
3567{
3568 if (!bp->cnic_eth_dev.starting_cid ||
3569 cid < bp->cnic_eth_dev.starting_cid)
3570 return 1;
3571
3572 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3573
3574 if (unlikely(elem->message.data.cfc_del_event.error)) {
3575 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3576 cid);
3577 bnx2x_panic_dump(bp);
3578 }
3579 bnx2x_cnic_cfc_comp(bp, cid);
3580 return 0;
3581}
3582#endif
3583
3584static void bnx2x_eq_int(struct bnx2x *bp)
3585{
3586 u16 hw_cons, sw_cons, sw_prod;
3587 union event_ring_elem *elem;
3588 u32 cid;
3589 u8 opcode;
3590 int spqe_cnt = 0;
3591
3592 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3593
3594	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
 3595	 * When we reach the next-page element we need to adjust so the loop
3596 * condition below will be met. The next element is the size of a
3597 * regular element and hence incrementing by 1
3598 */
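	/* Worked example (illustrative numbers): with 256-entry pages,
	 * sw_cons runs 0-254 and then jumps to 256, skipping slot 255
	 * (the next-page element); if the chip reports hw_cons == 255,
	 * the bump below moves it to 256 so "sw_cons != hw_cons" can
	 * still terminate.
	 */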
3599 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3600 hw_cons++;
3601
3602	/* This function never runs in parallel with itself for a
 3603	 * specific bp, so there is no need for a "paired" read memory
 3604	 * barrier here.
3605 */
3606 sw_cons = bp->eq_cons;
3607 sw_prod = bp->eq_prod;
3608
3609 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003610 hw_cons, sw_cons, atomic_read(&bp->spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003611
3612 for (; sw_cons != hw_cons;
3613 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3614
3615
3616 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3617
3618 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3619 opcode = elem->message.opcode;
3620
3621
3622 /* handle eq element */
3623 switch (opcode) {
3624 case EVENT_RING_OPCODE_STAT_QUERY:
3625 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3626 /* nothing to do with stats comp */
3627 continue;
3628
3629 case EVENT_RING_OPCODE_CFC_DEL:
3630 /* handle according to cid range */
3631 /*
3632 * we may want to verify here that the bp state is
3633 * HALTING
3634 */
3635 DP(NETIF_MSG_IFDOWN,
3636 "got delete ramrod for MULTI[%d]\n", cid);
3637#ifdef BCM_CNIC
3638 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3639 goto next_spqe;
3640#endif
3641 bnx2x_fp(bp, cid, state) =
3642 BNX2X_FP_STATE_CLOSED;
3643
3644 goto next_spqe;
3645 }
3646
3647 switch (opcode | bp->state) {
3648 case (EVENT_RING_OPCODE_FUNCTION_START |
3649 BNX2X_STATE_OPENING_WAIT4_PORT):
3650 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3651 bp->state = BNX2X_STATE_FUNC_STARTED;
3652 break;
3653
3654 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3655 BNX2X_STATE_CLOSING_WAIT4_HALT):
3656 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3657 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3658 break;
3659
3660 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3661 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3662 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3663 bp->set_mac_pending = 0;
3664 break;
3665
3666 case (EVENT_RING_OPCODE_SET_MAC |
3667 BNX2X_STATE_CLOSING_WAIT4_HALT):
3668 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3669 bp->set_mac_pending = 0;
3670 break;
3671 default:
3672	/* unknown event: log the error and continue */
3673 BNX2X_ERR("Unknown EQ event %d\n",
3674 elem->message.opcode);
3675 }
3676next_spqe:
3677 spqe_cnt++;
3678 } /* for */
3679
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003680 smp_mb__before_atomic_inc();
3681 atomic_add(spqe_cnt, &bp->spq_left);
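	/* Every processed EQ element corresponds to one completed slow
	 * path entry, so the SPQ credits consumed in bnx2x_sp_post()
	 * are returned here in a single batch.
	 */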
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003682
3683 bp->eq_cons = sw_cons;
3684 bp->eq_prod = sw_prod;
3685	/* Make sure the above memory writes complete before the producer update */
3686 smp_wmb();
3687
3688 /* update producer */
3689 bnx2x_update_eq_prod(bp, bp->eq_prod);
3690}
3691
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003692static void bnx2x_sp_task(struct work_struct *work)
3693{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003694 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003695 u16 status;
3696
3697 /* Return here if interrupt is disabled */
3698 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003699 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003700 return;
3701 }
3702
3703 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003704/* if (status == 0) */
3705/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003706
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003707 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003708
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003709 /* HW attentions */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003710 if (status & BNX2X_DEF_SB_ATT_IDX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003711 bnx2x_attn_int(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003712 status &= ~BNX2X_DEF_SB_ATT_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003713 }
3714
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003715 /* SP events: STAT_QUERY and others */
3716 if (status & BNX2X_DEF_SB_IDX) {
3717
3718 /* Handle EQ completions */
3719 bnx2x_eq_int(bp);
3720
3721 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3722 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3723
3724 status &= ~BNX2X_DEF_SB_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003725 }
3726
3727 if (unlikely(status))
3728 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3729 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003730
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003731 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3732 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003733}
3734
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003735irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003736{
3737 struct net_device *dev = dev_instance;
3738 struct bnx2x *bp = netdev_priv(dev);
3739
3740 /* Return here if interrupt is disabled */
3741 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003742 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003743 return IRQ_HANDLED;
3744 }
3745
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003746 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3747 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003748
3749#ifdef BNX2X_STOP_ON_ERROR
3750 if (unlikely(bp->panic))
3751 return IRQ_HANDLED;
3752#endif
3753
Michael Chan993ac7b2009-10-10 13:46:56 +00003754#ifdef BCM_CNIC
3755 {
3756 struct cnic_ops *c_ops;
3757
3758 rcu_read_lock();
3759 c_ops = rcu_dereference(bp->cnic_ops);
3760 if (c_ops)
3761 c_ops->cnic_handler(bp->cnic_data, NULL);
3762 rcu_read_unlock();
3763 }
3764#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003765 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003766
3767 return IRQ_HANDLED;
3768}
3769
3770/* end of slow path */
3771
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003772static void bnx2x_timer(unsigned long data)
3773{
3774 struct bnx2x *bp = (struct bnx2x *) data;
3775
3776 if (!netif_running(bp->dev))
3777 return;
3778
3779 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08003780 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003781
3782 if (poll) {
3783 struct bnx2x_fastpath *fp = &bp->fp[0];
3784 int rc;
3785
Eilon Greenstein7961f792009-03-02 07:59:31 +00003786 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003787 rc = bnx2x_rx_int(fp, 1000);
3788 }
3789
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003790 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003791 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003792 u32 drv_pulse;
3793 u32 mcp_pulse;
3794
3795 ++bp->fw_drv_pulse_wr_seq;
3796 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3797 /* TBD - add SYSTEM_TIME */
3798 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003799 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003800
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003801 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003802 MCP_PULSE_SEQ_MASK);
3803 /* The delta between driver pulse and mcp response
3804 * should be 1 (before mcp response) or 0 (after mcp response)
3805 */
3806 if ((drv_pulse != mcp_pulse) &&
3807 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3808 /* someone lost a heartbeat... */
3809 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3810 drv_pulse, mcp_pulse);
3811 }
3812 }
3813
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003814 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003815 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003816
Eliezer Tamirf1410642008-02-28 11:51:50 -08003817timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003818 mod_timer(&bp->timer, jiffies + bp->current_interval);
3819}
3820
3821/* end of Statistics */
3822
3823/* nic init */
3824
3825/*
3826 * nic init service functions
3827 */
3828
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003829static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003830{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003831 u32 i;
3832 if (!(len%4) && !(addr%4))
3833 for (i = 0; i < len; i += 4)
3834 REG_WR(bp, addr + i, fill);
3835 else
3836 for (i = 0; i < len; i++)
3837 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003838
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003839}
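/*
 * bnx2x_fill() picks the widest safe access: a dword-aligned address
 * with a dword-multiple length (both in bytes) is filled with 32-bit
 * writes stepped by 4, anything else falls back to byte writes. For
 * example, bnx2x_fill(bp, addr, 0, 120) on an aligned addr issues 30
 * REG_WR()s rather than 120 REG_WR8()s.
 */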
3840
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003841/* helper: writes FP SP data to FW - data_size in dwords */
3842static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3843 int fw_sb_id,
3844 u32 *sb_data_p,
3845 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003846{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003847 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003848 for (index = 0; index < data_size; index++)
3849 REG_WR(bp, BAR_CSTRORM_INTMEM +
3850 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3851 sizeof(u32)*index,
3852 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003853}
3854
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003855static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3856{
3857 u32 *sb_data_p;
3858 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003859 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003860 struct hc_status_block_data_e1x sb_data_e1x;
3861
3862 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003863 if (CHIP_IS_E2(bp)) {
3864 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3865 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3866 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3867 sb_data_e2.common.p_func.vf_valid = false;
3868 sb_data_p = (u32 *)&sb_data_e2;
3869 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3870 } else {
3871 memset(&sb_data_e1x, 0,
3872 sizeof(struct hc_status_block_data_e1x));
3873 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3874 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3875 sb_data_e1x.common.p_func.vf_valid = false;
3876 sb_data_p = (u32 *)&sb_data_e1x;
3877 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3878 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003879 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3880
3881 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3882 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3883 CSTORM_STATUS_BLOCK_SIZE);
3884 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3885 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3886 CSTORM_SYNC_BLOCK_SIZE);
3887}
3888
3889/* helper: writes SP SB data to FW */
3890static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3891 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003892{
3893 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003894 int i;
3895 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3896 REG_WR(bp, BAR_CSTRORM_INTMEM +
3897 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3898 i*sizeof(u32),
3899 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003900}
3901
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003902static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3903{
3904 int func = BP_FUNC(bp);
3905 struct hc_sp_status_block_data sp_sb_data;
3906 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3907
3908 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3909 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3910 sp_sb_data.p_func.vf_valid = false;
3911
3912 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3913
3914 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3915 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3916 CSTORM_SP_STATUS_BLOCK_SIZE);
3917 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3918 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3919 CSTORM_SP_SYNC_BLOCK_SIZE);
3920
3921}
3922
3923
3924static inline
3925void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3926 int igu_sb_id, int igu_seg_id)
3927{
3928 hc_sm->igu_sb_id = igu_sb_id;
3929 hc_sm->igu_seg_id = igu_seg_id;
3930 hc_sm->timer_value = 0xFF;
3931 hc_sm->time_to_expire = 0xFFFFFFFF;
3932}
3933
stephen hemminger8d962862010-10-21 07:50:56 +00003934static void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003935 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3936{
3937 int igu_seg_id;
3938
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003939 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003940 struct hc_status_block_data_e1x sb_data_e1x;
3941 struct hc_status_block_sm *hc_sm_p;
3942 struct hc_index_data *hc_index_p;
3943 int data_size;
3944 u32 *sb_data_p;
3945
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003946 if (CHIP_INT_MODE_IS_BC(bp))
3947 igu_seg_id = HC_SEG_ACCESS_NORM;
3948 else
3949 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003950
3951 bnx2x_zero_fp_sb(bp, fw_sb_id);
3952
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003953 if (CHIP_IS_E2(bp)) {
3954 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3955 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3956 sb_data_e2.common.p_func.vf_id = vfid;
3957 sb_data_e2.common.p_func.vf_valid = vf_valid;
3958 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3959 sb_data_e2.common.same_igu_sb_1b = true;
3960 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3961 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3962 hc_sm_p = sb_data_e2.common.state_machine;
3963 hc_index_p = sb_data_e2.index_data;
3964 sb_data_p = (u32 *)&sb_data_e2;
3965 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3966 } else {
3967 memset(&sb_data_e1x, 0,
3968 sizeof(struct hc_status_block_data_e1x));
3969 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3970 sb_data_e1x.common.p_func.vf_id = 0xff;
3971 sb_data_e1x.common.p_func.vf_valid = false;
3972 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3973 sb_data_e1x.common.same_igu_sb_1b = true;
3974 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3975 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3976 hc_sm_p = sb_data_e1x.common.state_machine;
3977 hc_index_p = sb_data_e1x.index_data;
3978 sb_data_p = (u32 *)&sb_data_e1x;
3979 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3980 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003981
3982 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3983 igu_sb_id, igu_seg_id);
3984 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3985 igu_sb_id, igu_seg_id);
3986
3987 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3988
3989	/* write indices to HW */
3990 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3991}
3992
3993static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3994 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003995{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003996 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003997 u8 ticks = usec / BNX2X_BTR;
3998
3999 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
4000
4001 disable = disable ? 1 : (usec ? 0 : 1);
4002 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4003}
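/*
 * The usec -> ticks conversion above quantizes the requested interval
 * to BNX2X_BTR-microsecond HC timer units (for example, if BNX2X_BTR
 * is 4, a value not confirmed here, 50 us programs 12 ticks), and a
 * zero interval with disable unset still disables the index, since a
 * zero-length coalescing window is meaningless.
 */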
4004
4005static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4006 u16 tx_usec, u16 rx_usec)
4007{
4008 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4009 false, rx_usec);
4010 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4011 false, tx_usec);
4012}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004013
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004014static void bnx2x_init_def_sb(struct bnx2x *bp)
4015{
4016 struct host_sp_status_block *def_sb = bp->def_status_blk;
4017 dma_addr_t mapping = bp->def_status_blk_mapping;
4018 int igu_sp_sb_index;
4019 int igu_seg_id;
4020 int port = BP_PORT(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004021 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004022 int reg_offset;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004023 u64 section;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004024 int index;
4025 struct hc_sp_status_block_data sp_sb_data;
4026 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
4027
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004028 if (CHIP_INT_MODE_IS_BC(bp)) {
4029 igu_sp_sb_index = DEF_SB_IGU_ID;
4030 igu_seg_id = HC_SEG_ACCESS_DEF;
4031 } else {
4032 igu_sp_sb_index = bp->igu_dsb_id;
4033 igu_seg_id = IGU_SEG_ACCESS_DEF;
4034 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004035
4036 /* ATTN */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004037 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004038 atten_status_block);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004039 def_sb->atten_status_block.status_block_id = igu_sp_sb_index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004040
Eliezer Tamir49d66772008-02-28 11:53:13 -08004041 bp->attn_state = 0;
4042
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004043 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4044 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004045 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004046 int sindex;
4047 /* take care of sig[0]..sig[4] */
4048 for (sindex = 0; sindex < 4; sindex++)
4049 bp->attn_group[index].sig[sindex] =
4050 REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004051
4052 if (CHIP_IS_E2(bp))
4053 /*
4054 * enable5 is separate from the rest of the registers,
4055 * and therefore the address skip is 4
4056 * and not 16 between the different groups
4057 */
4058 bp->attn_group[index].sig[4] = REG_RD(bp,
4059 reg_offset + 0x10 + 0x4*index);
4060 else
4061 bp->attn_group[index].sig[4] = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004062 }
4063
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004064 if (bp->common.int_block == INT_BLOCK_HC) {
4065 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4066 HC_REG_ATTN_MSG0_ADDR_L);
4067
4068 REG_WR(bp, reg_offset, U64_LO(section));
4069 REG_WR(bp, reg_offset + 4, U64_HI(section));
4070 } else if (CHIP_IS_E2(bp)) {
4071 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
4072 REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
4073 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004074
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004075 section = ((u64)mapping) + offsetof(struct host_sp_status_block,
4076 sp_sb);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004077
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004078 bnx2x_zero_sp_sb(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004079
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004080 sp_sb_data.host_sb_addr.lo = U64_LO(section);
4081 sp_sb_data.host_sb_addr.hi = U64_HI(section);
4082 sp_sb_data.igu_sb_id = igu_sp_sb_index;
4083 sp_sb_data.igu_seg_id = igu_seg_id;
4084 sp_sb_data.p_func.pf_id = func;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004085 sp_sb_data.p_func.vnic_id = BP_VN(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004086 sp_sb_data.p_func.vf_id = 0xff;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004087
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004088 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004089
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004090 bp->stats_pending = 0;
Yitchak Gertner66e855f2008-08-13 15:49:05 -07004091 bp->set_mac_pending = 0;
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07004092
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004093 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004094}
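
/*
 * Illustrative note: the attention-group loop above reads with two
 * strides -- sig[0..3] of group g live at reg_offset + 0x4*s + 0x10*g,
 * while the E2-only sig[4] (enable5) registers are packed tightly at
 * reg_offset + 0x10 + 0x4*g.
 */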
4095
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004096void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004097{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004098 int i;
4099
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004100 for_each_queue(bp, i)
4101 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4102 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004103}
4104
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004105static void bnx2x_init_sp_ring(struct bnx2x *bp)
4106{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004107 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004108 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004109
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004110 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004111 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4112 bp->spq_prod_bd = bp->spq;
4113 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004114}
4115
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004116static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004117{
4118 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004119 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4120 union event_ring_elem *elem =
4121 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004122
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004123 elem->next_page.addr.hi =
4124 cpu_to_le32(U64_HI(bp->eq_mapping +
4125 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4126 elem->next_page.addr.lo =
4127 cpu_to_le32(U64_LO(bp->eq_mapping +
4128 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004129 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004130 bp->eq_cons = 0;
4131 bp->eq_prod = NUM_EQ_DESC;
4132 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004133}
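
/*
 * Illustrative sketch, not driver code: bnx2x_init_eq_ring() chains
 * NUM_EQ_PAGES page-sized rings by pointing the last element of each
 * page at the DMA address of the next one, wrapping on the final page.
 * The same pattern over a hypothetical flat array of next-pointers,
 * assuming a 4K page:
 */
#if 0	/* example only */
static void example_chain_eq_pages(u64 *next_ptrs, u64 base_dma,
				   int pages, int elems_per_page)
{
	int i;

	/* last element of 1-based page i points at 0-based page
	 * (i % pages), i.e. the next page in the ring
	 */
	for (i = 1; i <= pages; i++)
		next_ptrs[elems_per_page * i - 1] =
			base_dma + 4096ULL * (i % pages);
}
#endif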
4134
4135static void bnx2x_init_ind_table(struct bnx2x *bp)
4136{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004137 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004138 int i;
4139
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004140 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004141 return;
4142
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004143 DP(NETIF_MSG_IFUP,
4144 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004145 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004146 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004147 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004148 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004149}
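
/*
 * Illustrative sketch, not driver code: the indirection table above is
 * filled round-robin, entry i -> leading client id + (i % num_queues),
 * which spreads RSS hash buckets evenly across the Rx queues. In
 * isolation, assuming the conventional 128-entry table:
 */
#if 0	/* example only */
static void example_fill_ind_table(u8 *tbl, u8 cl_id, int num_queues)
{
	int i;

	for (i = 0; i < 128; i++)	/* assumed table size */
		tbl[i] = cl_id + (i % num_queues);
}
#endif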
4150
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004151void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004152{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004153 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004154 u16 cl_id;
4155
Eilon Greenstein581ce432009-07-29 00:20:04 +00004156 /* All but management unicast packets should pass to the host as well */
4157 u32 llh_mask =
4158 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4159 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4160 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4161 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004162
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004163 switch (mode) {
4164 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004165 cl_id = BP_L_ID(bp);
4166 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004167 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004168
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004169 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004170 cl_id = BP_L_ID(bp);
4171 bnx2x_rxq_set_mac_filters(bp, cl_id,
4172 BNX2X_ACCEPT_UNICAST |
4173 BNX2X_ACCEPT_BROADCAST |
4174 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004175 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004176
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004177 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004178 cl_id = BP_L_ID(bp);
4179 bnx2x_rxq_set_mac_filters(bp, cl_id,
4180 BNX2X_ACCEPT_UNICAST |
4181 BNX2X_ACCEPT_BROADCAST |
4182 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004184
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004185 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004186 cl_id = BP_L_ID(bp);
4187 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4188
Eilon Greenstein581ce432009-07-29 00:20:04 +00004189 /* pass management unicast packets as well */
4190 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004191 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004192
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004193 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004194 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4195 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004196 }
4197
Eilon Greenstein581ce432009-07-29 00:20:04 +00004198 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004199 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4200 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00004201 llh_mask);
4202
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004203 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4204 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4205 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4206 bp->mac_filters.ucast_drop_all,
4207 bp->mac_filters.mcast_drop_all,
4208 bp->mac_filters.bcast_drop_all,
4209 bp->mac_filters.ucast_accept_all,
4210 bp->mac_filters.mcast_accept_all,
4211 bp->mac_filters.bcast_accept_all
4212 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004213
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004214 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004215}
4216
Eilon Greenstein471de712008-08-13 15:49:35 -07004217static void bnx2x_init_internal_common(struct bnx2x *bp)
4218{
4219 int i;
4220
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004221 if (!CHIP_IS_E1(bp)) {
4222
4223 /* xstorm needs to know whether to add ovlan to packets or not,
4224 * in switch-independent mode we write 0 here */
4225 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004226 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004227 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004228 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004229 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004230 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004231 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004232 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004233 }
4234
Eilon Greenstein471de712008-08-13 15:49:35 -07004235 /* Zero this manually as its initialization is
4236 currently missing in the initTool */
4237 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4238 REG_WR(bp, BAR_USTRORM_INTMEM +
4239 USTORM_AGG_DATA_OFFSET + i * 4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004240 if (CHIP_IS_E2(bp)) {
4241 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4242 CHIP_INT_MODE_IS_BC(bp) ?
4243 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4244 }
Eilon Greenstein471de712008-08-13 15:49:35 -07004245}
4246
4247static void bnx2x_init_internal_port(struct bnx2x *bp)
4248{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004249 /* port */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004250}
4251
Eilon Greenstein471de712008-08-13 15:49:35 -07004252static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4253{
4254 switch (load_code) {
4255 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004256 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Eilon Greenstein471de712008-08-13 15:49:35 -07004257 bnx2x_init_internal_common(bp);
4258 /* no break */
4259
4260 case FW_MSG_CODE_DRV_LOAD_PORT:
4261 bnx2x_init_internal_port(bp);
4262 /* no break */
4263
4264 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004265 /* internal memory per function is
4266 initialized inside bnx2x_pf_init */
Eilon Greenstein471de712008-08-13 15:49:35 -07004267 break;
4268
4269 default:
4270 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4271 break;
4272 }
4273}
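
/*
 * Illustrative note: the deliberate fall-through above makes each load
 * level a superset of the next -- a COMMON load also runs the PORT and
 * FUNCTION stages, a PORT load runs PORT and FUNCTION, and a FUNCTION
 * load runs only its own stage.
 */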
4274
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004275static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
4276{
4277 struct bnx2x_fastpath *fp = &bp->fp[fp_idx];
4278
4279 fp->state = BNX2X_FP_STATE_CLOSED;
4280
4281 fp->index = fp->cid = fp_idx;
4282 fp->cl_id = BP_L_ID(bp) + fp_idx;
4283 fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
4284 fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
4285 /* qZone id equals to FW (per path) client id */
4286 fp->cl_qzone_id = fp->cl_id +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004287 BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
4288 ETH_MAX_RX_CLIENTS_E1H);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004289 /* init shortcut */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004290 fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
4291 USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004292 USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
4293 /* Setup SB indices */
4294 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4295 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4296
4297 DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
4298 "cl_id %d fw_sb %d igu_sb %d\n",
4299 fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
4300 fp->igu_sb_id);
4301 bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
4302 fp->fw_sb_id, fp->igu_sb_id);
4303
4304 bnx2x_update_fpsb_idx(fp);
4305}
4306
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004307void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004308{
4309 int i;
4310
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004311 for_each_queue(bp, i)
4312 bnx2x_init_fp_sb(bp, i);
Michael Chan37b091b2009-10-10 13:46:55 +00004313#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004314
4315 bnx2x_init_sb(bp, bp->cnic_sb_mapping,
4316 BNX2X_VF_ID_INVALID, false,
4317 CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));
4318
Michael Chan37b091b2009-10-10 13:46:55 +00004319#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004320
Eilon Greenstein16119782009-03-02 07:59:27 +00004321 /* ensure status block indices were read */
4322 rmb();
4323
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004324 bnx2x_init_def_sb(bp);
Eilon Greenstein5c862842008-08-13 15:51:48 -07004325 bnx2x_update_dsb_idx(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004326 bnx2x_init_rx_rings(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004327 bnx2x_init_tx_rings(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004328 bnx2x_init_sp_ring(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004329 bnx2x_init_eq_ring(bp);
Eilon Greenstein471de712008-08-13 15:49:35 -07004330 bnx2x_init_internal(bp, load_code);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004331 bnx2x_pf_init(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004332 bnx2x_init_ind_table(bp);
Eilon Greenstein0ef00452009-01-14 21:31:08 -08004333 bnx2x_stats_init(bp);
4334
4335 /* At this point, we are ready for interrupts */
4336 atomic_set(&bp->intr_sem, 0);
4337
4338 /* flush all before enabling interrupts */
4339 mb();
4340 mmiowb();
4341
Eliezer Tamir615f8fd2008-02-28 11:54:54 -08004342 bnx2x_int_enable(bp);
Eilon Greensteineb8da202009-07-21 05:47:30 +00004343
4344 /* Check for SPIO5 */
4345 bnx2x_attn_int_deasserted0(bp,
4346 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
4347 AEU_INPUTS_ATTN_BITS_SPIO5);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004348}
4349
4350/* end of nic init */
4351
4352/*
4353 * gzip service functions
4354 */
4355
4356static int bnx2x_gunzip_init(struct bnx2x *bp)
4357{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004358 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4359 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004360 if (bp->gunzip_buf == NULL)
4361 goto gunzip_nomem1;
4362
4363 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4364 if (bp->strm == NULL)
4365 goto gunzip_nomem2;
4366
4367 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4368 GFP_KERNEL);
4369 if (bp->strm->workspace == NULL)
4370 goto gunzip_nomem3;
4371
4372 return 0;
4373
4374gunzip_nomem3:
4375 kfree(bp->strm);
4376 bp->strm = NULL;
4377
4378gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004379 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4380 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004381 bp->gunzip_buf = NULL;
4382
4383gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004384 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4385 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004386 return -ENOMEM;
4387}
4388
4389static void bnx2x_gunzip_end(struct bnx2x *bp)
4390{
4391 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004392 kfree(bp->strm);
4393 bp->strm = NULL;
4394
4395 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004396 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4397 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004398 bp->gunzip_buf = NULL;
4399 }
4400}
4401
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004402static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004403{
4404 int n, rc;
4405
4406 /* check gzip header */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004407 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
4408 BNX2X_ERR("Bad gzip header\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004409 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004410 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004411
4412 n = 10;
4413
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004414#define FNAME 0x8
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004415
4416 if (zbuf[3] & FNAME)
4417 while ((n < len) && (zbuf[n++] != 0));
4418
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004419 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004420 bp->strm->avail_in = len - n;
4421 bp->strm->next_out = bp->gunzip_buf;
4422 bp->strm->avail_out = FW_BUF_SIZE;
4423
4424 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
4425 if (rc != Z_OK)
4426 return rc;
4427
4428 rc = zlib_inflate(bp->strm, Z_FINISH);
4429 if ((rc != Z_OK) && (rc != Z_STREAM_END))
Joe Perches7995c642010-02-17 15:01:52 +00004430 netdev_err(bp->dev, "Firmware decompression error: %s\n",
4431 bp->strm->msg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004432
4433 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
4434 if (bp->gunzip_outlen & 0x3)
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004435 netdev_err(bp->dev, "Firmware decompression error:"
4436 " gunzip_outlen (%d) not aligned\n",
4437 bp->gunzip_outlen);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004438 bp->gunzip_outlen >>= 2;
4439
4440 zlib_inflateEnd(bp->strm);
4441
4442 if (rc == Z_STREAM_END)
4443 return 0;
4444
4445 return rc;
4446}
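
/*
 * Illustrative sketch, not driver code: inflateInit2() with a negative
 * window size expects a raw deflate stream, which is why the function
 * above skips the 10-byte gzip header (plus the optional FNAME field)
 * by hand. A rough userspace equivalent with vanilla zlib:
 */
#if 0	/* example only -- example_gunzip() is hypothetical */
#include <zlib.h>

static int example_gunzip(const unsigned char *zbuf, int len,
			  unsigned char *out, int outlen)
{
	z_stream strm = {0};
	int n = 10, rc;			/* fixed gzip header */

	if (zbuf[3] & 0x8)		/* FNAME: skip the NUL-terminated name */
		while ((n < len) && (zbuf[n++] != 0))
			;
	strm.next_in = (Bytef *)(zbuf + n);
	strm.avail_in = len - n;
	strm.next_out = out;
	strm.avail_out = outlen;
	if (inflateInit2(&strm, -MAX_WBITS) != Z_OK)
		return -1;
	rc = inflate(&strm, Z_FINISH);
	inflateEnd(&strm);
	return (rc == Z_STREAM_END) ? 0 : -1;
}
#endif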
4447
4448/* nic load/unload */
4449
4450/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004451 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004452 */
4453
4454/* send a NIG loopback debug packet */
4455static void bnx2x_lb_pckt(struct bnx2x *bp)
4456{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004457 u32 wb_write[3];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004458
4459 /* Ethernet source and destination addresses */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004460 wb_write[0] = 0x55555555;
4461 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004462 wb_write[2] = 0x20; /* SOP */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004463 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004464
4465 /* NON-IP protocol */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004466 wb_write[0] = 0x09000000;
4467 wb_write[1] = 0x55555555;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004468 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004469 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004470}
4471
4472/* some of the internal memories
4473 * are not directly readable from the driver;
4474 * to test them we send debug packets
4475 */
4476static int bnx2x_int_mem_test(struct bnx2x *bp)
4477{
4478 int factor;
4479 int count, i;
4480 u32 val = 0;
4481
Eilon Greensteinad8d3942008-06-23 20:29:02 -07004482 if (CHIP_REV_IS_FPGA(bp))
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004483 factor = 120;
Eilon Greensteinad8d3942008-06-23 20:29:02 -07004484 else if (CHIP_REV_IS_EMUL(bp))
4485 factor = 200;
4486 else
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004487 factor = 1;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004488
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004489 /* Disable inputs of parser neighbor blocks */
4490 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4491 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4492 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004493 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004494
4495 /* Write 0 to parser credits for CFC search request */
4496 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4497
4498 /* send Ethernet packet */
4499 bnx2x_lb_pckt(bp);
4500
4501 /* TODO: do we need to reset the NIG statistics? */
4502 /* Wait until NIG register shows 1 packet of size 0x10 */
4503 count = 1000 * factor;
4504 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004506 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4507 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004508 if (val == 0x10)
4509 break;
4510
4511 msleep(10);
4512 count--;
4513 }
4514 if (val != 0x10) {
4515 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4516 return -1;
4517 }
4518
4519 /* Wait until PRS register shows 1 packet */
4520 count = 1000 * factor;
4521 while (count) {
4522 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004523 if (val == 1)
4524 break;
4525
4526 msleep(10);
4527 count--;
4528 }
4529 if (val != 0x1) {
4530 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4531 return -2;
4532 }
4533
4534 /* Reset and init BRB, PRS */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004535 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004536 msleep(50);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004537 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004538 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004539 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4540 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004541
4542 DP(NETIF_MSG_HW, "part2\n");
4543
4544 /* Disable inputs of parser neighbor blocks */
4545 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
4546 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
4547 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004548 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004549
4550 /* Write 0 to parser credits for CFC search request */
4551 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
4552
4553 /* send 10 Ethernet packets */
4554 for (i = 0; i < 10; i++)
4555 bnx2x_lb_pckt(bp);
4556
4557 /* Wait until NIG register shows 10 + 1
4558 packets of total size 11*0x10 = 0xb0 */
4559 count = 1000 * factor;
4560 while (count) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004561
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004562 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4563 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004564 if (val == 0xb0)
4565 break;
4566
4567 msleep(10);
4568 count--;
4569 }
4570 if (val != 0xb0) {
4571 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
4572 return -3;
4573 }
4574
4575 /* Wait until PRS register shows 2 packets */
4576 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4577 if (val != 2)
4578 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4579
4580 /* Write 1 to parser credits for CFC search request */
4581 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
4582
4583 /* Wait until PRS register shows 3 packets */
4584 msleep(10 * factor);
4585 /* check that the PRS register indeed shows 3 packets */
4586 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
4587 if (val != 3)
4588 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
4589
4590 /* clear NIG EOP FIFO */
4591 for (i = 0; i < 11; i++)
4592 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
4593 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
4594 if (val != 1) {
4595 BNX2X_ERR("clear of NIG failed\n");
4596 return -4;
4597 }
4598
4599 /* Reset and init BRB, PRS, NIG */
4600 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
4601 msleep(50);
4602 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
4603 msleep(50);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004604 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4605 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00004606#ifndef BCM_CNIC
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004607 /* set NIC mode */
4608 REG_WR(bp, PRS_REG_NIC_MODE, 1);
4609#endif
4610
4611 /* Enable inputs of parser neighbor blocks */
4612 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
4613 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
4614 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
Eilon Greenstein3196a882008-08-13 15:58:49 -07004615 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004616
4617 DP(NETIF_MSG_HW, "done\n");
4618
4619 return 0; /* OK */
4620}
4621
4622static void enable_blocks_attention(struct bnx2x *bp)
4623{
4624 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004625 if (CHIP_IS_E2(bp))
4626 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
4627 else
4628 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004629 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4630 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004631 /*
4632 * mask read length error interrupts in brb for parser
4633 * (parsing unit and 'checksum and crc' unit)
4634 * these errors are legal (PU reads fixed length and CAC can cause
4635 * read length error on truncated packets)
4636 */
4637 REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004638 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
4639 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
4640 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
4641 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
4642 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004643/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
4644/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004645 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
4646 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
4647 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004648/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
4649/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004650 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
4651 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
4652 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
4653 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004654/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
4655/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004656
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004657 if (CHIP_REV_IS_FPGA(bp))
4658 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004659 else if (CHIP_IS_E2(bp))
4660 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
4661 (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
4662 | PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
4663 | PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
4664 | PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
4665 | PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004666 else
4667 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004668 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
4669 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
4670 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004671/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
4672/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004673 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
4674 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004675/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
4676 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004677}
4678
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004679static const struct {
4680 u32 addr;
4681 u32 mask;
4682} bnx2x_parity_mask[] = {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004683 {PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
4684 {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
4685 {PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
4686 {HC_REG_HC_PRTY_MASK, 0x7},
4687 {MISC_REG_MISC_PRTY_MASK, 0x1},
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004688 {QM_REG_QM_PRTY_MASK, 0x0},
4689 {DORQ_REG_DORQ_PRTY_MASK, 0x0},
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004690 {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
4691 {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00004692 {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
4693 {CDU_REG_CDU_PRTY_MASK, 0x0},
4694 {CFC_REG_CFC_PRTY_MASK, 0x0},
4695 {DBG_REG_DBG_PRTY_MASK, 0x0},
4696 {DMAE_REG_DMAE_PRTY_MASK, 0x0},
4697 {BRB1_REG_BRB1_PRTY_MASK, 0x0},
4698 {PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
4699 {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
4700 {CSDM_REG_CSDM_PRTY_MASK, 0x8}, /* bit 3 */
4701 {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
4702 {XSDM_REG_XSDM_PRTY_MASK, 0x8}, /* bit 3 */
4703 {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
4704 {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
4705 {USEM_REG_USEM_PRTY_MASK_0, 0x0},
4706 {USEM_REG_USEM_PRTY_MASK_1, 0x0},
4707 {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
4708 {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
4709 {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
4710 {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004711};
4712
4713static void enable_blocks_parity(struct bnx2x *bp)
4714{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004715 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004716
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004717 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004718 REG_WR(bp, bnx2x_parity_mask[i].addr,
4719 bnx2x_parity_mask[i].mask);
4720}
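
/*
 * Illustrative note: in the table above a mask of 0x0 leaves every
 * parity source of that block enabled, while a nonzero value masks the
 * individual bits named in the inline comments as expected/benign.
 */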
4721
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004722
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004723static void bnx2x_reset_common(struct bnx2x *bp)
4724{
4725 /* reset_common */
4726 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4727 0xd3ffff7f);
4728 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4729}
4730
Eilon Greenstein573f2032009-08-12 08:24:14 +00004731static void bnx2x_init_pxp(struct bnx2x *bp)
4732{
4733 u16 devctl;
4734 int r_order, w_order;
4735
4736 pci_read_config_word(bp->pdev,
4737 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4738 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4739 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4740 if (bp->mrrs == -1)
4741 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4742 else {
4743 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4744 r_order = bp->mrrs;
4745 }
4746
4747 bnx2x_init_pxp_arb(bp, r_order, w_order);
4748}
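
/*
 * Illustrative sketch, not driver code: the Device Control decoding
 * above yields PCIe "orders" where order n corresponds to 128 << n
 * bytes (payload size in bits 7:5, read request size in bits 14:12).
 * Decoded into byte counts:
 */
#if 0	/* example only */
static void example_decode_devctl(u16 devctl, int *payload_bytes,
				  int *readrq_bytes)
{
	int w_order = (devctl >> 5) & 0x7;	/* PCI_EXP_DEVCTL_PAYLOAD */
	int r_order = (devctl >> 12) & 0x7;	/* PCI_EXP_DEVCTL_READRQ */

	*payload_bytes = 128 << w_order;
	*readrq_bytes = 128 << r_order;
}
#endif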
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004749
4750static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4751{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004752 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004753 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004754 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004755
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004756 if (BP_NOMCP(bp))
4757 return;
4758
4759 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004760 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4761 SHARED_HW_CFG_FAN_FAILURE_MASK;
4762
4763 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4764 is_required = 1;
4765
4766 /*
4767 * The fan failure mechanism is usually related to the PHY type since
4768 * the power consumption of the board is affected by the PHY. Currently,
4769 * a fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4770 */
4771 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4772 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004773 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004774 bnx2x_fan_failure_det_req(
4775 bp,
4776 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004777 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004778 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004779 }
4780
4781 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4782
4783 if (is_required == 0)
4784 return;
4785
4786 /* Fan failure is indicated by SPIO 5 */
4787 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4788 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4789
4790 /* set to active low mode */
4791 val = REG_RD(bp, MISC_REG_SPIO_INT);
4792 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004793 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004794 REG_WR(bp, MISC_REG_SPIO_INT, val);
4795
4796 /* enable interrupt to signal the IGU */
4797 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4798 val |= (1 << MISC_REGISTERS_SPIO_5);
4799 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4800}
4801
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004802static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4803{
4804 u32 offset = 0;
4805
4806 if (CHIP_IS_E1(bp))
4807 return;
4808 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4809 return;
4810
4811 switch (BP_ABS_FUNC(bp)) {
4812 case 0:
4813 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4814 break;
4815 case 1:
4816 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4817 break;
4818 case 2:
4819 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4820 break;
4821 case 3:
4822 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4823 break;
4824 case 4:
4825 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4826 break;
4827 case 5:
4828 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4829 break;
4830 case 6:
4831 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4832 break;
4833 case 7:
4834 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4835 break;
4836 default:
4837 return;
4838 }
4839
4840 REG_WR(bp, offset, pretend_func_num);
4841 REG_RD(bp, offset);
4842 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4843}
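
/*
 * Usage note (illustrative): pretending is always bracketed -- pretend
 * to be function N, touch the split (per-function) registers, then
 * pretend back to our own absolute function:
 *
 *	bnx2x_pretend_func(bp, other_fid);
 *	... per-function register accesses run as other_fid ...
 *	bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
 *
 * The REG_RD() after the write above flushes the pretend setting
 * before any dependent access.
 */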
4844
4845static void bnx2x_pf_disable(struct bnx2x *bp)
4846{
4847 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4848 val &= ~IGU_PF_CONF_FUNC_EN;
4849
4850 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4851 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4852 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4853}
4854
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004855static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004856{
4857 u32 val, i;
4858
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004859 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004860
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004861 bnx2x_reset_common(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004862 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
4863 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
4864
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004865 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004866 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004867 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004868
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004869 if (CHIP_IS_E2(bp)) {
4870 u8 fid;
4871
4872 /*
4873 * In 4-port or 2-port mode we need to turn off master-enable
4874 * for everyone; after that, we turn it back on for ourselves.
4875 * So, regardless of multi-function mode, we always disable it
4876 * for all functions on the given path: 0,2,4,6 for
4877 * path 0 and 1,3,5,7 for path 1
4878 */
4879 for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
4880 if (fid == BP_ABS_FUNC(bp)) {
4881 REG_WR(bp,
4882 PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
4883 1);
4884 continue;
4885 }
4886
4887 bnx2x_pretend_func(bp, fid);
4888 /* clear pf enable */
4889 bnx2x_pf_disable(bp);
4890 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4891 }
4892 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004893
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004894 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004895 if (CHIP_IS_E1(bp)) {
4896 /* enable HW interrupt from PXP on USDM overflow
4897 bit 16 on INT_MASK_0 */
4898 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004899 }
4900
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004901 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004902 bnx2x_init_pxp(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004903
4904#ifdef __BIG_ENDIAN
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004905 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
4906 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
4907 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
4908 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
4909 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
Eilon Greenstein8badd272009-02-12 08:36:15 +00004910 /* make sure this value is 0 */
4911 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004912
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004913/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
4914 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
4915 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
4916 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
4917 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004918#endif
4919
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004920 bnx2x_ilt_init_page_size(bp, INITOP_SET);
4921
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004922 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
4923 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004924
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004925 /* let the HW do its magic ... */
4926 msleep(100);
4927 /* finish PXP init */
4928 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
4929 if (val != 1) {
4930 BNX2X_ERR("PXP2 CFG failed\n");
4931 return -EBUSY;
4932 }
4933 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
4934 if (val != 1) {
4935 BNX2X_ERR("PXP2 RD_INIT failed\n");
4936 return -EBUSY;
4937 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004938
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004939 /* Timers bug workaround, E2 only: we need to set the entire ILT to
4940 * have entries with value "0" and valid bit on.
4941 * This needs to be done by the first PF that is loaded in a path
4942 * (i.e. common phase)
4943 */
4944 if (CHIP_IS_E2(bp)) {
4945 struct ilt_client_info ilt_cli;
4946 struct bnx2x_ilt ilt;
4947 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
4948 memset(&ilt, 0, sizeof(struct bnx2x_ilt));
4949
4950 /* initialize dummy TM client */
4951 ilt_cli.start = 0;
4952 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
4953 ilt_cli.client_num = ILT_CLIENT_TM;
4954
4955 /* Step 1: set zeroes to all ilt page entries with valid bit on
4956 * Step 2: set the timers first/last ilt entry to point
4957 * to the entire range to prevent ILT range error for 3rd/4th
4958 * vnic (this code assumes existence of the vnic)
4959 *
4960 * both steps performed by call to bnx2x_ilt_client_init_op()
4961 * with dummy TM client
4962 *
4963 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
4964 * and its counterpart are split registers
4965 */
4966 bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
4967 bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
4968 bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
4969
4970 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
4971 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
4972 REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
4973 }
4974
4975
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004976 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
4977 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004978
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004979 if (CHIP_IS_E2(bp)) {
4980 int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
4981 (CHIP_REV_IS_FPGA(bp) ? 400 : 0);
4982 bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);
4983
4984 bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);
4985
4986 /* let the HW do its magic ... */
4987 do {
4988 msleep(200);
4989 val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
4990 } while (factor-- && (val != 1));
4991
4992 if (val != 1) {
4993 BNX2X_ERR("ATC_INIT failed\n");
4994 return -EBUSY;
4995 }
4996 }
4997
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07004998 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004999
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005000 /* clean the DMAE memory */
5001 bp->dmae_ready = 1;
5002 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005003
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005004 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5005 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5006 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5007 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005008
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005009 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5010 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5011 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5012 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5013
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005014 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005015
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005016 if (CHIP_MODE_IS_4_PORT(bp))
5017 bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005018
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005019 /* QM queues pointers table */
5020 bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);
Michael Chan37b091b2009-10-10 13:46:55 +00005021
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005022 /* soft reset pulse */
5023 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5024 REG_WR(bp, QM_REG_SOFT_RESET, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005025
Michael Chan37b091b2009-10-10 13:46:55 +00005026#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005027 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005028#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005029
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005030 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005031 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);
5032
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005033 if (!CHIP_REV_IS_SLOW(bp)) {
5034 /* enable hw interrupt from doorbell Q */
5035 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5036 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005037
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005038 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005039 if (CHIP_MODE_IS_4_PORT(bp)) {
5040 REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
5041 REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
5042 }
5043
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005044 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08005045 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
Michael Chan37b091b2009-10-10 13:46:55 +00005046#ifndef BCM_CNIC
Eilon Greenstein3196a882008-08-13 15:58:49 -07005047 /* set NIC mode */
5048 REG_WR(bp, PRS_REG_NIC_MODE, 1);
Michael Chan37b091b2009-10-10 13:46:55 +00005049#endif
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005050 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005051 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005052
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005053 if (CHIP_IS_E2(bp)) {
5054 /* Bit-map indicating which L2 hdrs may appear after the
5055 basic Ethernet header */
5056 int has_ovlan = IS_MF(bp);
5057 REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5058 REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5059 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005060
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005061 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5062 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5063 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5064 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005065
Eilon Greensteinca003922009-08-12 22:53:28 -07005066 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5067 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5068 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5069 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005070
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005071 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5072 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5073 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5074 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005075
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005076 if (CHIP_MODE_IS_4_PORT(bp))
5077 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);
5078
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005079 /* sync semi rtc */
5080 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5081 0x80000000);
5082 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5083 0x80000000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005084
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005085 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5086 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5087 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005088
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005089 if (CHIP_IS_E2(bp)) {
5090 int has_ovlan = IS_MF(bp);
5091 REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
5092 REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
5093 }
5094
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005095 REG_WR(bp, SRC_REG_SOFT_RST, 1);
Tom Herbertc68ed252010-04-23 00:10:52 -07005096 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
5097 REG_WR(bp, i, random32());
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005098
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005099 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
Michael Chan37b091b2009-10-10 13:46:55 +00005100#ifdef BCM_CNIC
5101 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
5102 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
5103 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
5104 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
5105 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
5106 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
5107 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
5108 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
5109 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
5110 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
5111#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005112 REG_WR(bp, SRC_REG_SOFT_RST, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005113
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005114 if (sizeof(union cdu_context) != 1024)
5115 /* we currently assume that a context is 1024 bytes */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005116 dev_alert(&bp->pdev->dev, "please adjust the size "
5117 "of cdu_context(%ld)\n",
Joe Perches7995c642010-02-17 15:01:52 +00005118 (long)sizeof(union cdu_context));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005119
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005120 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005121 val = (4 << 24) + (0 << 12) + 1024;
5122 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005123
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005124 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005125 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08005126 /* enable context validation interrupt from CFC */
5127 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5128
5129 /* set the thresholds to prevent CFC/CDU race */
5130 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005131
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005132 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005133
5134 if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
5135 REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);
5136
5137 bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005138 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005139
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005140 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005141 /* Reset PCIE errors for debug */
5142 REG_WR(bp, 0x2814, 0xffffffff);
5143 REG_WR(bp, 0x3820, 0xffffffff);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005144
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005145 if (CHIP_IS_E2(bp)) {
5146 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
5147 (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
5148 PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
5149 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
5150 (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
5151 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
5152 PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
5153 REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
5154 (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
5155 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
5156 PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
5157 }
5158
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005159 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005160 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005161 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005162 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005163
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005164 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005165 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005166 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
5167 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005168 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005169 if (CHIP_IS_E2(bp)) {
5170 /* Bit-map indicating which L2 hdrs may appear after the
5171 basic Ethernet header */
5172 REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
5173 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005174
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005175 if (CHIP_REV_IS_SLOW(bp))
5176 msleep(200);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005177
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005178 /* finish CFC init */
5179 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5180 if (val != 1) {
5181 BNX2X_ERR("CFC LL_INIT failed\n");
5182 return -EBUSY;
5183 }
5184 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5185 if (val != 1) {
5186 BNX2X_ERR("CFC AC_INIT failed\n");
5187 return -EBUSY;
5188 }
5189 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5190 if (val != 1) {
5191 BNX2X_ERR("CFC CAM_INIT failed\n");
5192 return -EBUSY;
5193 }
5194 REG_WR(bp, CFC_REG_DEBUG0, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005195
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005196 if (CHIP_IS_E1(bp)) {
5197 /* read NIG statistic
5198 to see if this is our first up since powerup */
5199 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5200 val = *bnx2x_sp(bp, wb_data[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005201
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005202 /* do internal memory self test */
5203 if ((val == 0) && bnx2x_int_mem_test(bp)) {
5204 BNX2X_ERR("internal mem self test failed\n");
5205 return -EBUSY;
5206 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005207 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005208
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005209 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005210 bp->common.shmem_base,
5211 bp->common.shmem2_base);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005212
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00005213 bnx2x_setup_fan_failure_detection(bp);
5214
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005215 /* clear PXP2 attentions */
5216 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005217
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005218 enable_blocks_attention(bp);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00005219 if (CHIP_PARITY_SUPPORTED(bp))
5220 enable_blocks_parity(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005221
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005222 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005223 /* In E2 2-PORT mode, same ext phy is used for the two paths */
5224 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
5225 CHIP_IS_E1x(bp)) {
5226 u32 shmem_base[2], shmem2_base[2];
5227 shmem_base[0] = bp->common.shmem_base;
5228 shmem2_base[0] = bp->common.shmem2_base;
5229 if (CHIP_IS_E2(bp)) {
5230 shmem_base[1] =
5231 SHMEM2_RD(bp, other_shmem_base_addr);
5232 shmem2_base[1] =
5233 SHMEM2_RD(bp, other_shmem2_base_addr);
5234 }
5235 bnx2x_acquire_phy_lock(bp);
5236 bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
5237 bp->common.chip_id);
5238 bnx2x_release_phy_lock(bp);
5239 }
Yaniv Rosner6bbca912008-08-13 15:57:28 -07005240 } else
5241 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5242
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005243 return 0;
5244}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005245
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005246static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005247{
5248 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005249 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005250 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005251 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005252
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005253 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005254
5255 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005256
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005257 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005258 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005259
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005260 /* Timers bug workaround: the common phase disables the pf_master bit
5261 * in pglue, so we need to enable it here before any DMAE accesses are
5262 * attempted. Therefore we manually added the enable-master to the
5263 * port phase (it also happens in the function phase)
5264 */
5265 if (CHIP_IS_E2(bp))
5266 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5267
Eilon Greensteinca003922009-08-12 22:53:28 -07005268 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5269 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5270 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005271 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005272
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005273 /* QM cid (connection) count */
5274 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005275
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005276#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005277 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00005278 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5279 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005280#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005281
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005282 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005283
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005284 if (CHIP_MODE_IS_4_PORT(bp))
5285 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005286
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005287 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5288 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5289 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5290 /* no pause for emulation and FPGA */
5291 low = 0;
5292 high = 513;
5293 } else {
5294 if (IS_MF(bp))
5295 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5296 else if (bp->dev->mtu > 4096) {
5297 if (bp->flags & ONE_PORT_FLAG)
5298 low = 160;
5299 else {
5300 val = bp->dev->mtu;
5301 /* (24*1024 + val*4)/256 */
5302 low = 96 + (val/64) +
5303 ((val % 64) ? 1 : 0);
5304 }
5305 } else
5306 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5307 high = low + 56; /* 14*1024/256 */
5308 }
5309 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5310 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5311 }
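	/*
	 * Illustrative note: the large-MTU "low" watermark above is
	 * (24*1024 + mtu*4)/256 rounded up, i.e. 96 + DIV_ROUND_UP(mtu, 64)
	 * in 256-byte BRB blocks, and "high" adds 56 blocks (14*1024/256)
	 * of headroom on top of it.
	 */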
5312
5313 if (CHIP_MODE_IS_4_PORT(bp)) {
5314 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5315 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5316 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5317 BRB1_REG_MAC_GUARANTIED_0), 40);
5318 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005319
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005320 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005321
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005322 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005323 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005324 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005325 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005326
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005327 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5328 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5329 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5330 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005331 if (CHIP_MODE_IS_4_PORT(bp))
5332 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005333
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005334 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005335 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005336
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005337 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005338
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005339 if (!CHIP_IS_E2(bp)) {
5340 /* configure PBF to work without PAUSE mtu 9000 */
5341 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005342
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005343 /* update threshold */
5344 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5345 /* update init credit */
5346 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005347
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005348 /* probe changes */
5349 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5350 udelay(50);
5351 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5352 }
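	/* Worked numbers (a reading of the constants above, not from the
	 * spec): 9040/16 = 565, so the arbiter threshold is presumably 565
	 * units of 16 bytes (~9 KB MTU plus header room), and the initial
	 * credit is 565 + 553 - 22 = 1096 such units.
	 */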
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005353
Michael Chan37b091b2009-10-10 13:46:55 +00005354#ifdef BCM_CNIC
5355 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005356#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005357 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005358 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005359
5360 if (CHIP_IS_E1(bp)) {
5361 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5362 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5363 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005364 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005365
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005366 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5367
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005368 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005369 /* init aeu_mask_attn_func_0/1:
5370 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5371 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5372 * bits 4-7 are used for "per vn group attention" */
5373 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005374 (IS_MF(bp) ? 0xF7 : 0x7));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005375
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005376 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005377 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005378 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005379 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005380 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005381
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005382 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005383
5384 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5385
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005386 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005387 /* 0x2 disable mf_ov, 0x1 enable */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005388 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005389 (IS_MF(bp) ? 0x1 : 0x2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005390
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005391 if (CHIP_IS_E2(bp)) {
5392 val = 0;
5393 switch (bp->mf_mode) {
5394 case MULTI_FUNCTION_SD:
5395 val = 1;
5396 break;
5397 case MULTI_FUNCTION_SI:
5398 val = 2;
5399 break;
5400 }
5401
5402 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5403 NIG_REG_LLH0_CLS_TYPE), val);
5404 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005405		REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5406		REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5407		REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005410 }
5411
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005412 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005413 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005414 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005415 bp->common.shmem_base,
5416 bp->common.shmem2_base);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005417 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005418 bp->common.shmem2_base, port)) {
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005419 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5420 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5421 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005422 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005423 REG_WR(bp, reg_addr, val);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005424 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07005425 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005426
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005427 return 0;
5428}
5429
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005430static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5431{
5432 int reg;
5433
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005434 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005435 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005436 else
5437 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005438
5439 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5440}
5441
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005442static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
5443{
5444 bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
5445}
5446
5447static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5448{
5449 u32 i, base = FUNC_ILT_BASE(func);
5450 for (i = base; i < base + ILT_PER_FUNC; i++)
5451 bnx2x_ilt_wr(bp, i, 0);
5452}
5453
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005454static int bnx2x_init_hw_func(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005455{
5456 int port = BP_PORT(bp);
5457 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005458 struct bnx2x_ilt *ilt = BP_ILT(bp);
5459 u16 cdu_ilt_start;
Eilon Greenstein8badd272009-02-12 08:36:15 +00005460 u32 addr, val;
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005461 u32 main_mem_base, main_mem_size, main_mem_prty_clr;
5462 int i, main_mem_width;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005463
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005464 DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005465
Eilon Greenstein8badd272009-02-12 08:36:15 +00005466 /* set MSI reconfigure capability */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005467 if (bp->common.int_block == INT_BLOCK_HC) {
5468 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5469 val = REG_RD(bp, addr);
5470 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5471 REG_WR(bp, addr, val);
5472 }
Eilon Greenstein8badd272009-02-12 08:36:15 +00005473
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005474 ilt = BP_ILT(bp);
5475 cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005476
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005477 for (i = 0; i < L2_ILT_LINES(bp); i++) {
5478 ilt->lines[cdu_ilt_start + i].page =
5479 bp->context.vcxt + (ILT_PAGE_CIDS * i);
5480 ilt->lines[cdu_ilt_start + i].page_mapping =
5481 bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
5482 /* cdu ilt pages are allocated manually so there's no need to
5483 set the size */
5484 }
5485 bnx2x_ilt_init_op(bp, INITOP_SET);
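	/* Note on the loop above (a reading of the code, not authoritative):
	 * ILT line i gets the virtual address bp->context.vcxt +
	 * ILT_PAGE_CIDS * i (pointer arithmetic in cdu_context units) and
	 * the bus address cxt_mapping + CDU_ILT_PAGE_SZ * i, which presumes
	 * CDU_ILT_PAGE_SZ == ILT_PAGE_CIDS * sizeof(union cdu_context).
	 */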
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005486
Michael Chan37b091b2009-10-10 13:46:55 +00005487#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005488 bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);
Michael Chan37b091b2009-10-10 13:46:55 +00005489
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005490 /* T1 hash bits value determines the T1 number of entries */
5491 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
Michael Chan37b091b2009-10-10 13:46:55 +00005492#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005493
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005494#ifndef BCM_CNIC
5495 /* set NIC mode */
5496 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5497#endif /* BCM_CNIC */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005498
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005499 if (CHIP_IS_E2(bp)) {
5500 u32 pf_conf = IGU_PF_CONF_FUNC_EN;
5501
5502 /* Turn on a single ISR mode in IGU if driver is going to use
5503 * INT#x or MSI
5504 */
5505 if (!(bp->flags & USING_MSIX_FLAG))
5506 pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
5507 /*
5508		 * Timers bug workaround: function init part.
5509		 * Wait 20msec after initializing the ILT to make sure
5510		 * there are no requests in any of the PXP internal
5511		 * queues with "old" ILT addresses.
5512 */
5513 msleep(20);
5514 /*
5515		 * Master enable - needed because WB DMAE writes are performed
5516		 * before this register is re-initialized as part of the
5517		 * regular function init
5518 */
5519 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5520 /* Enable the function in IGU */
5521 REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
5522 }
5523
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005524 bp->dmae_ready = 1;
5525
5526 bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);
5527
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005528 if (CHIP_IS_E2(bp))
5529 REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);
5530
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005531 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
5532 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
5533 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
5534 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
5535 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
5536 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
5537 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
5538 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
5539 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
5540
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005541 if (CHIP_IS_E2(bp)) {
5542 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
5543 BP_PATH(bp));
5544 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
5545 BP_PATH(bp));
5546 }
5547
5548 if (CHIP_MODE_IS_4_PORT(bp))
5549 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);
5550
5551 if (CHIP_IS_E2(bp))
5552 REG_WR(bp, QM_REG_PF_EN, 1);
5553
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005554 bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005555
5556 if (CHIP_MODE_IS_4_PORT(bp))
5557 bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);
5558
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005559 bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
5560 bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
5561 bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
5562 bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
5563 bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
5564 bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
5565 bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
5566 bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
5567 bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
5568 bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
5569 bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005570 if (CHIP_IS_E2(bp))
5571 REG_WR(bp, PBF_REG_DISABLE_PF, 0);
5572
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005573 bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);
5574
5575 bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);
5576
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005577 if (CHIP_IS_E2(bp))
5578 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);
5579
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005580 if (IS_MF(bp)) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005581 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005582 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005583 }
5584
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005585 bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);
5586
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005587 /* HC init per function */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005588 if (bp->common.int_block == INT_BLOCK_HC) {
5589 if (CHIP_IS_E1H(bp)) {
5590 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5591
5592 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5593 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5594 }
5595 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
5596
5597 } else {
5598 int num_segs, sb_idx, prod_offset;
5599
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5601
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005602 if (CHIP_IS_E2(bp)) {
5603 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
5604 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
5605 }
5606
5607 bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);
5608
5609 if (CHIP_IS_E2(bp)) {
5610 int dsb_idx = 0;
5611			/**
5612			 * Producer memory:
5613			 * E2 mode: addresses 0-135 map to the mapping memory;
5614			 * 136 - PF0 default prod; 137 - PF1 default prod;
5615			 * 138 - PF2 default prod; 139 - PF3 default prod;
5616			 * 140 - PF0 attn prod; 141 - PF1 attn prod;
5617			 * 142 - PF2 attn prod; 143 - PF3 attn prod;
5618			 * 144-147 reserved.
5619			 *
5620			 * E1.5 mode - in backward compatible mode:
5621			 * for each non-default SB, each even line in the
5622			 * memory holds the U producer and each odd line holds
5623			 * the C producer. The first 128 producers are for
5624			 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
5625			 * producers are for the DSBs, one set per PF.
5626			 * Each PF has five segments (the order inside each
5627			 * segment is PF0; PF1; PF2; PF3): 128-131 U prods;
5628			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
5629			 * 144-147 attn prods.
5630			 */
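			/* Worked example (a sketch from the layout above, not
			 * extra driver logic): each producer line is 4 bytes
			 * wide, so in E2 normal mode PF2's default producer
			 * lives at line 138 and its attention producer at
			 * line 142, i.e.
			 *   addr = IGU_REG_PROD_CONS_MEMORY + 138 * 4;
			 *   attn = IGU_REG_PROD_CONS_MEMORY + 142 * 4;
			 * which is what the prod_offset arithmetic below
			 * computes via IGU_NORM_BASE_DSB_PROD + dsb_idx.
			 */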
5631 /* non-default-status-blocks */
5632 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5633 IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
5634 for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
5635 prod_offset = (bp->igu_base_sb + sb_idx) *
5636 num_segs;
5637
5638 for (i = 0; i < num_segs; i++) {
5639 addr = IGU_REG_PROD_CONS_MEMORY +
5640 (prod_offset + i) * 4;
5641 REG_WR(bp, addr, 0);
5642 }
5643 /* send consumer update with value 0 */
5644 bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
5645 USTORM_ID, 0, IGU_INT_NOP, 1);
5646 bnx2x_igu_clear_sb(bp,
5647 bp->igu_base_sb + sb_idx);
5648 }
5649
5650 /* default-status-blocks */
5651 num_segs = CHIP_INT_MODE_IS_BC(bp) ?
5652 IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;
5653
5654 if (CHIP_MODE_IS_4_PORT(bp))
5655 dsb_idx = BP_FUNC(bp);
5656 else
5657 dsb_idx = BP_E1HVN(bp);
5658
5659 prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
5660 IGU_BC_BASE_DSB_PROD + dsb_idx :
5661 IGU_NORM_BASE_DSB_PROD + dsb_idx);
5662
5663 for (i = 0; i < (num_segs * E1HVN_MAX);
5664 i += E1HVN_MAX) {
5665 addr = IGU_REG_PROD_CONS_MEMORY +
5666 (prod_offset + i)*4;
5667 REG_WR(bp, addr, 0);
5668 }
5669 /* send consumer update with 0 */
5670 if (CHIP_INT_MODE_IS_BC(bp)) {
5671 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5672 USTORM_ID, 0, IGU_INT_NOP, 1);
5673 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5674 CSTORM_ID, 0, IGU_INT_NOP, 1);
5675 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5676 XSTORM_ID, 0, IGU_INT_NOP, 1);
5677 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5678 TSTORM_ID, 0, IGU_INT_NOP, 1);
5679 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5680 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5681 } else {
5682 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5683 USTORM_ID, 0, IGU_INT_NOP, 1);
5684 bnx2x_ack_sb(bp, bp->igu_dsb_id,
5685 ATTENTION_ID, 0, IGU_INT_NOP, 1);
5686 }
5687 bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);
5688
5689 /* !!! these should become driver const once
5690 rf-tool supports split-68 const */
5691 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
5692 REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
5693 REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
5694 REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
5695 REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
5696 REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
5697 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005698 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005699
Eliezer Tamirc14423f2008-02-28 11:49:42 -08005700 /* Reset PCIE errors for debug */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005701 REG_WR(bp, 0x2114, 0xffffffff);
5702 REG_WR(bp, 0x2120, 0xffffffff);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005703
5704 bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
5705 bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
5706 bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
5707 bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
5708 bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
5709 bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);
5710
Vladislav Zolotarovf4a66892010-10-19 05:13:09 +00005711 if (CHIP_IS_E1x(bp)) {
5712 main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /*dwords*/
5713 main_mem_base = HC_REG_MAIN_MEMORY +
5714 BP_PORT(bp) * (main_mem_size * 4);
5715 main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
5716 main_mem_width = 8;
5717
5718 val = REG_RD(bp, main_mem_prty_clr);
5719 if (val)
5720 DP(BNX2X_MSG_MCP, "Hmmm... Parity errors in HC "
5721 "block during "
5722 "function init (0x%x)!\n", val);
5723
5724 /* Clear "false" parity errors in MSI-X table */
5725 for (i = main_mem_base;
5726 i < main_mem_base + main_mem_size * 4;
5727 i += main_mem_width) {
5728 bnx2x_read_dmae(bp, i, main_mem_width / 4);
5729 bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
5730 i, main_mem_width / 4);
5731 }
5732 /* Clear HC parity attention */
5733 REG_RD(bp, main_mem_prty_clr);
5734 }
5735
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00005736 bnx2x_phy_probe(&bp->link_params);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005737
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005738 return 0;
5739}
5740
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005741int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005742{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005743 int rc = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005744
5745 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005746 BP_ABS_FUNC(bp), load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005747
5748 bp->dmae_ready = 0;
5749 mutex_init(&bp->dmae_mutex);
Eilon Greenstein54016b22009-08-12 08:23:48 +00005750 rc = bnx2x_gunzip_init(bp);
5751 if (rc)
5752 return rc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005753
5754 switch (load_code) {
5755 case FW_MSG_CODE_DRV_LOAD_COMMON:
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005756 case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005757 rc = bnx2x_init_hw_common(bp, load_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005758 if (rc)
5759 goto init_hw_err;
5760 /* no break */
5761
5762 case FW_MSG_CODE_DRV_LOAD_PORT:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005763 rc = bnx2x_init_hw_port(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005764 if (rc)
5765 goto init_hw_err;
5766 /* no break */
5767
5768 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005769 rc = bnx2x_init_hw_func(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005770 if (rc)
5771 goto init_hw_err;
5772 break;
5773
5774 default:
5775 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5776 break;
5777 }
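	/* Note the intentional fall-through above: DRV_LOAD_COMMON[_CHIP]
	 * runs common, port and function init; DRV_LOAD_PORT runs port and
	 * function init; DRV_LOAD_FUNCTION runs function init only.
	 */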
5778
5779 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005780 int mb_idx = BP_FW_MB_IDX(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005781
5782 bp->fw_drv_pulse_wr_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005783 (SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005784 DRV_PULSE_SEQ_MASK);
Eilon Greenstein6fe49bb2009-08-12 08:23:17 +00005785 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
5786 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005787
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005788init_hw_err:
5789 bnx2x_gunzip_end(bp);
5790
5791 return rc;
5792}
5793
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005794void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005795{
5796
5797#define BNX2X_PCI_FREE(x, y, size) \
5798 do { \
5799 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005800 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005801 x = NULL; \
5802 y = 0; \
5803 } \
5804 } while (0)
5805
5806#define BNX2X_FREE(x) \
5807 do { \
5808 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005809 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005810 x = NULL; \
5811 } \
5812 } while (0)
5813
5814 int i;
5815
5816 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005817 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005818 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005819 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005820 if (CHIP_IS_E2(bp))
5821 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5822 bnx2x_fp(bp, i, status_blk_mapping),
5823 sizeof(struct host_hc_status_block_e2));
5824 else
5825 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5826 bnx2x_fp(bp, i, status_blk_mapping),
5827 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005828 }
5829 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005830 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005831
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005832 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005833 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5834 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5835 bnx2x_fp(bp, i, rx_desc_mapping),
5836 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5837
5838 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5839 bnx2x_fp(bp, i, rx_comp_mapping),
5840 sizeof(struct eth_fast_path_rx_cqe) *
5841 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005842
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005843 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005844 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005845 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5846 bnx2x_fp(bp, i, rx_sge_mapping),
5847 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5848 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005849 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005850 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005851
5852 /* fastpath tx rings: tx_buf tx_desc */
5853 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5854 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5855 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005856 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005857 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005858 /* end of fastpath */
5859
5860 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005861 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005862
5863 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005864 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005865
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005866 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5867 bp->context.size);
5868
5869 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5870
5871 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005872
Michael Chan37b091b2009-10-10 13:46:55 +00005873#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005874 if (CHIP_IS_E2(bp))
5875 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5876 sizeof(struct host_hc_status_block_e2));
5877 else
5878 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5879 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005880
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005881 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005882#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005883
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005884 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005885
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005886 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5887 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5888
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005889#undef BNX2X_PCI_FREE
5890#undef BNX2X_FREE
5891}
5892
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005893static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5894{
5895 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5896 if (CHIP_IS_E2(bp)) {
5897 bnx2x_fp(bp, index, sb_index_values) =
5898 (__le16 *)status_blk.e2_sb->sb.index_values;
5899 bnx2x_fp(bp, index, sb_running_index) =
5900 (__le16 *)status_blk.e2_sb->sb.running_index;
5901 } else {
5902 bnx2x_fp(bp, index, sb_index_values) =
5903 (__le16 *)status_blk.e1x_sb->sb.index_values;
5904 bnx2x_fp(bp, index, sb_running_index) =
5905 (__le16 *)status_blk.e1x_sb->sb.running_index;
5906 }
5907}
5908
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005909int bnx2x_alloc_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005910{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005911#define BNX2X_PCI_ALLOC(x, y, size) \
5912 do { \
FUJITA Tomonori1a983142010-04-04 01:51:03 +00005913 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005914 if (x == NULL) \
5915 goto alloc_mem_err; \
5916 memset(x, 0, size); \
5917 } while (0)
5918
5919#define BNX2X_ALLOC(x, size) \
5920 do { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005921 x = kzalloc(size, GFP_KERNEL); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005922 if (x == NULL) \
5923 goto alloc_mem_err; \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005924 } while (0)
5925
5926 int i;
5927
5928 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005929 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005930 for_each_queue(bp, i) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005931 union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005932 bnx2x_fp(bp, i, bp) = bp;
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005933 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005934 if (CHIP_IS_E2(bp))
5935 BNX2X_PCI_ALLOC(sb->e2_sb,
5936 &bnx2x_fp(bp, i, status_blk_mapping),
5937 sizeof(struct host_hc_status_block_e2));
5938 else
5939 BNX2X_PCI_ALLOC(sb->e1x_sb,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005940 &bnx2x_fp(bp, i, status_blk_mapping),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005941 sizeof(struct host_hc_status_block_e1x));
5942
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005943 set_sb_shortcuts(bp, i);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005944 }
5945 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005946 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005947
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005948 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005949 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
5950 sizeof(struct sw_rx_bd) * NUM_RX_BD);
5951 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
5952 &bnx2x_fp(bp, i, rx_desc_mapping),
5953 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5954
5955 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
5956 &bnx2x_fp(bp, i, rx_comp_mapping),
5957 sizeof(struct eth_fast_path_rx_cqe) *
5958 NUM_RCQ_BD);
5959
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005960 /* SGE ring */
5961 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
5962 sizeof(struct sw_rx_page) * NUM_RX_SGE);
5963 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
5964 &bnx2x_fp(bp, i, rx_sge_mapping),
5965 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005966 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005967 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005968 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005969
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005970 /* fastpath tx rings: tx_buf tx_desc */
5971 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
5972 sizeof(struct sw_tx_bd) * NUM_TX_BD);
5973 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
5974 &bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005975 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005976 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005977 /* end of fastpath */
5978
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005979#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005980 if (CHIP_IS_E2(bp))
5981 BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
5982 sizeof(struct host_hc_status_block_e2));
5983 else
5984 BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
5985 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005986
5987 /* allocate searcher T2 table */
5988 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
5989#endif
5990
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005992 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005993 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005994
5995 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
5996 sizeof(struct bnx2x_slowpath));
5997
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005998 bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005999
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006000 BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
6001 bp->context.size);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006002
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006003 BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006004
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006005 if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
6006 goto alloc_mem_err;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006007
6008 /* Slow path ring */
6009 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6010
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006011 /* EQ */
6012 BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
6013 BCM_PAGE_SIZE * NUM_EQ_PAGES);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006014 return 0;
6015
6016alloc_mem_err:
6017 bnx2x_free_mem(bp);
6018 return -ENOMEM;
6019
6020#undef BNX2X_PCI_ALLOC
6021#undef BNX2X_ALLOC
6022}
6023
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006024/*
6025 * Init service functions
6026 */
stephen hemminger8d962862010-10-21 07:50:56 +00006027static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6028 int *state_p, int flags);
6029
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006030int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006031{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006032 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006033
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006034 /* Wait for completion */
6035 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
6036 WAIT_RAMROD_COMMON);
6037}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006038
stephen hemminger8d962862010-10-21 07:50:56 +00006039static int bnx2x_func_stop(struct bnx2x *bp)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006040{
6041 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006042
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006043 /* Wait for completion */
6044 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6045 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006046}
6047
Michael Chane665bfd2009-10-10 13:46:54 +00006048/**
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006049 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
Michael Chane665bfd2009-10-10 13:46:54 +00006050 *
6051 * @param bp driver descriptor
6052 * @param set set or clear an entry (1 or 0)
6053 * @param mac pointer to a buffer containing a MAC
6054 * @param cl_bit_vec bit vector of clients to register a MAC for
6055 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006056 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00006057 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006058static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006059 u32 cl_bit_vec, u8 cam_offset,
6060 u8 is_bcast)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006061{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006062 struct mac_configuration_cmd *config =
6063 (struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
6064 int ramrod_flags = WAIT_RAMROD_COMMON;
6065
6066 bp->set_mac_pending = 1;
6067 smp_wmb();
6068
Eilon Greenstein8d9c5f32009-02-15 23:24:08 -08006069 config->hdr.length = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006070 config->hdr.offset = cam_offset;
6071 config->hdr.client_id = 0xff;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006072 config->hdr.reserved1 = 0;
6073
6074 /* primary MAC */
6075 config->config_table[0].msb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006076 swab16(*(u16 *)&mac[0]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006077 config->config_table[0].middle_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006078 swab16(*(u16 *)&mac[2]);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006079 config->config_table[0].lsb_mac_addr =
Michael Chane665bfd2009-10-10 13:46:54 +00006080 swab16(*(u16 *)&mac[4]);
Eilon Greensteinca003922009-08-12 22:53:28 -07006081 config->config_table[0].clients_bit_vector =
Michael Chane665bfd2009-10-10 13:46:54 +00006082 cpu_to_le32(cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006083 config->config_table[0].vlan_id = 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006084 config->config_table[0].pf_id = BP_FUNC(bp);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006085 if (set)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006086 SET_FLAG(config->config_table[0].flags,
6087 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6088 T_ETH_MAC_COMMAND_SET);
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006089 else
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006090 SET_FLAG(config->config_table[0].flags,
6091 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6092 T_ETH_MAC_COMMAND_INVALIDATE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006093
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006094 if (is_bcast)
6095 SET_FLAG(config->config_table[0].flags,
6096 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);
6097
6098 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006099 (set ? "setting" : "clearing"),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006100 config->config_table[0].msb_mac_addr,
6101 config->config_table[0].middle_mac_addr,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006102 config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006103
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006104 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006105 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006106 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);
6107
6108 /* Wait for a completion */
6109 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006110}
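/* Byte-order sketch (illustrative only; a little-endian host is assumed):
 * for the MAC 00:11:22:33:44:55, *(u16 *)&mac[0] reads 0x1100, so swab16()
 * yields msb_mac_addr = 0x0011, middle = 0x2233, lsb = 0x4455 - matching
 * the "(%04x:%04x:%04x)" form printed by the DP() above.
 */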
6111
stephen hemminger8d962862010-10-21 07:50:56 +00006112static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6113 int *state_p, int flags)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006114{
6115 /* can take a while if any port is running */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006116 int cnt = 5000;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006117 u8 poll = flags & WAIT_RAMROD_POLL;
6118 u8 common = flags & WAIT_RAMROD_COMMON;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006119
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006120 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6121 poll ? "polling" : "waiting", state, idx);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006122
6123 might_sleep();
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006124 while (cnt--) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006125 if (poll) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006126 if (common)
6127 bnx2x_eq_int(bp);
6128 else {
6129 bnx2x_rx_int(bp->fp, 10);
6130 /* if index is different from 0
6131 * the reply for some commands will
6132 * be on the non default queue
6133 */
6134 if (idx)
6135 bnx2x_rx_int(&bp->fp[idx], 10);
6136 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006137 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006138
Yitchak Gertner3101c2b2008-08-13 15:52:28 -07006139 mb(); /* state is changed by bnx2x_sp_event() */
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006140 if (*state_p == state) {
6141#ifdef BNX2X_STOP_ON_ERROR
6142 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6143#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006144 return 0;
Eilon Greenstein8b3a0f02009-02-12 08:37:23 +00006145 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006146
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006147 msleep(1);
Eilon Greensteine3553b22009-08-12 08:23:31 +00006148
6149 if (bp->panic)
6150 return -EIO;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006151 }
6152
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006153 /* timeout! */
Eliezer Tamir49d66772008-02-28 11:53:13 -08006154 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6155 poll ? "polling" : "waiting", state, idx);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006156#ifdef BNX2X_STOP_ON_ERROR
6157 bnx2x_panic();
6158#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006159
Eliezer Tamir49d66772008-02-28 11:53:13 -08006160 return -EBUSY;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006161}
6162
stephen hemminger8d962862010-10-21 07:50:56 +00006163static u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006164{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006165 if (CHIP_IS_E1H(bp))
6166 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6167 else if (CHIP_MODE_IS_4_PORT(bp))
6168 return BP_FUNC(bp) * 32 + rel_offset;
6169 else
6170 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006171}
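/* Illustration (assuming E1H_FUNC_MAX is 8, as defined elsewhere in the
 * driver): on E1H, function 3 with rel_offset 1 gets CAM entry
 * 8*1 + 3 = 11, so each relative line is a bank of eight per-function
 * entries. In 4-port mode each function instead owns a 32-entry window
 * (func*32 + rel_offset), and otherwise each VN does (vn*32 + rel_offset).
 */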
6172
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006173void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006174{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006175 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6176 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6177
6178 /* networking MAC */
6179 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6180			       (1 << bp->fp->cl_id), cam_offset, 0);
6181
6182 if (CHIP_IS_E1(bp)) {
6183 /* broadcast MAC */
6184 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6185 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6186 }
6187}

6188static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
6189{
6190 int i = 0, old;
6191 struct net_device *dev = bp->dev;
6192 struct netdev_hw_addr *ha;
6193 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6194 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6195
6196 netdev_for_each_mc_addr(ha, dev) {
6197 /* copy mac */
6198 config_cmd->config_table[i].msb_mac_addr =
6199 swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
6200 config_cmd->config_table[i].middle_mac_addr =
6201 swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
6202 config_cmd->config_table[i].lsb_mac_addr =
6203 swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);
6204
6205 config_cmd->config_table[i].vlan_id = 0;
6206 config_cmd->config_table[i].pf_id = BP_FUNC(bp);
6207 config_cmd->config_table[i].clients_bit_vector =
6208 cpu_to_le32(1 << BP_L_ID(bp));
6209
6210 SET_FLAG(config_cmd->config_table[i].flags,
6211 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6212 T_ETH_MAC_COMMAND_SET);
6213
6214 DP(NETIF_MSG_IFUP,
6215 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6216 config_cmd->config_table[i].msb_mac_addr,
6217 config_cmd->config_table[i].middle_mac_addr,
6218 config_cmd->config_table[i].lsb_mac_addr);
6219 i++;
6220 }
6221 old = config_cmd->hdr.length;
6222 if (old > i) {
6223 for (; i < old; i++) {
6224 if (CAM_IS_INVALID(config_cmd->
6225 config_table[i])) {
6226 /* already invalidated */
6227 break;
6228 }
6229 /* invalidate */
6230 SET_FLAG(config_cmd->config_table[i].flags,
6231 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6232 T_ETH_MAC_COMMAND_INVALIDATE);
6233 }
6234 }
6235
6236 config_cmd->hdr.length = i;
6237 config_cmd->hdr.offset = offset;
6238 config_cmd->hdr.client_id = 0xff;
6239 config_cmd->hdr.reserved1 = 0;
6240
6241 bp->set_mac_pending = 1;
Michael Chane665bfd2009-10-10 13:46:54 +00006242 smp_wmb();
6243
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006244 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6245 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
6246}

6247static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
6248{
6249 int i;
6250 struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
6251 dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
6252 int ramrod_flags = WAIT_RAMROD_COMMON;
6253
6254 bp->set_mac_pending = 1;
6255 smp_wmb();
6256
6257 for (i = 0; i < config_cmd->hdr.length; i++)
6258 SET_FLAG(config_cmd->config_table[i].flags,
6259 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
6260 T_ETH_MAC_COMMAND_INVALIDATE);
6261
6262 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
6263 U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
Michael Chane665bfd2009-10-10 13:46:54 +00006264
6265 /* Wait for a completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006266 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
6267 ramrod_flags);
6268
Michael Chane665bfd2009-10-10 13:46:54 +00006269}
6270
Michael Chan993ac7b2009-10-10 13:46:56 +00006271#ifdef BCM_CNIC
6272/**
6273 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
6274 * MAC(s). This function will wait until the ramrod completion
6275 * returns.
6276 *
6277 * @param bp driver handle
6278 * @param set set or clear the CAM entry
6279 *
6280 * @return 0 if success, -ENODEV if the ramrod doesn't complete.
6281 */
stephen hemminger8d962862010-10-21 07:50:56 +00006282static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00006283{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006284 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6285 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6286 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6287 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00006288
6289 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006290 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6291 cam_offset, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00006292 return 0;
6293}
6294#endif
6295
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006296static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
6297 struct bnx2x_client_init_params *params,
6298 u8 activate,
6299 struct client_init_ramrod_data *data)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006300{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006301 /* Clear the buffer */
6302 memset(data, 0, sizeof(*data));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006303
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006304 /* general */
6305 data->general.client_id = params->rxq_params.cl_id;
6306 data->general.statistics_counter_id = params->rxq_params.stat_id;
6307 data->general.statistics_en_flg =
6308 (params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
6309 data->general.activate_flg = activate;
6310 data->general.sp_client_id = params->rxq_params.spcl_id;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006311
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006312 /* Rx data */
6313 data->rx.tpa_en_flg =
6314 (params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
6315 data->rx.vmqueue_mode_en_flg = 0;
6316 data->rx.cache_line_alignment_log_size =
6317 params->rxq_params.cache_line_log;
6318 data->rx.enable_dynamic_hc =
6319 (params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
6320 data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
6321 data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
6322 data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;
6323
6324 /* We don't set drop flags */
6325 data->rx.drop_ip_cs_err_flg = 0;
6326 data->rx.drop_tcp_cs_err_flg = 0;
6327 data->rx.drop_ttl0_flg = 0;
6328 data->rx.drop_udp_cs_err_flg = 0;
6329
6330 data->rx.inner_vlan_removal_enable_flg =
6331 (params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
6332 data->rx.outer_vlan_removal_enable_flg =
6333 (params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
6334 data->rx.status_block_id = params->rxq_params.fw_sb_id;
6335 data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
6336 data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
6337 data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
6338 data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
6339 data->rx.bd_page_base.lo =
6340 cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
6341 data->rx.bd_page_base.hi =
6342 cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
6343 data->rx.sge_page_base.lo =
6344 cpu_to_le32(U64_LO(params->rxq_params.sge_map));
6345 data->rx.sge_page_base.hi =
6346 cpu_to_le32(U64_HI(params->rxq_params.sge_map));
6347 data->rx.cqe_page_base.lo =
6348 cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
6349 data->rx.cqe_page_base.hi =
6350 cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
6351 data->rx.is_leading_rss =
6352 (params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
6353 data->rx.is_approx_mcast = data->rx.is_leading_rss;
6354
6355 /* Tx data */
6356 data->tx.enforce_security_flg = 0; /* VF specific */
6357 data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
6358 data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
6359 data->tx.mtu = 0; /* VF specific */
6360 data->tx.tx_bd_page_base.lo =
6361 cpu_to_le32(U64_LO(params->txq_params.dscr_map));
6362 data->tx.tx_bd_page_base.hi =
6363 cpu_to_le32(U64_HI(params->txq_params.dscr_map));
6364
6365 /* flow control data */
6366 data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
6367 data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
6368 data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
6369 data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
6370 data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
6371 data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
6372 data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);
6373
6374 data->fc.safc_group_num = params->txq_params.cos;
6375 data->fc.safc_group_en_flg =
6376 (params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
6377 data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
6378}
6379
6380static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6381{
6382 /* ustorm cxt validation */
6383 cxt->ustorm_ag_context.cdu_usage =
6384 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6385 ETH_CONNECTION_TYPE);
6386 /* xcontext validation */
6387 cxt->xstorm_ag_context.cdu_reserved =
6388 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6389 ETH_CONNECTION_TYPE);
6390}
6391
stephen hemminger8d962862010-10-21 07:50:56 +00006392static int bnx2x_setup_fw_client(struct bnx2x *bp,
6393 struct bnx2x_client_init_params *params,
6394 u8 activate,
6395 struct client_init_ramrod_data *data,
6396 dma_addr_t data_mapping)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006397{
6398 u16 hc_usec;
6399 int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
6400 int ramrod_flags = 0, rc;
6401
6402 /* HC and context validation values */
6403 hc_usec = params->txq_params.hc_rate ?
6404 1000000 / params->txq_params.hc_rate : 0;
6405 bnx2x_update_coalesce_sb_index(bp,
6406 params->txq_params.fw_sb_id,
6407 params->txq_params.sb_cq_index,
6408 !(params->txq_params.flags & QUEUE_FLG_HC),
6409 hc_usec);
6410
6411 *(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;
6412
6413 hc_usec = params->rxq_params.hc_rate ?
6414 1000000 / params->rxq_params.hc_rate : 0;
6415 bnx2x_update_coalesce_sb_index(bp,
6416 params->rxq_params.fw_sb_id,
6417 params->rxq_params.sb_cq_index,
6418 !(params->rxq_params.flags & QUEUE_FLG_HC),
6419 hc_usec);
6420
6421 bnx2x_set_ctx_validation(params->rxq_params.cxt,
6422 params->rxq_params.cid);
6423
6424 /* zero stats */
6425 if (params->txq_params.flags & QUEUE_FLG_STATS)
6426 storm_memset_xstats_zero(bp, BP_PORT(bp),
6427 params->txq_params.stat_id);
6428
6429 if (params->rxq_params.flags & QUEUE_FLG_STATS) {
6430 storm_memset_ustats_zero(bp, BP_PORT(bp),
6431 params->rxq_params.stat_id);
6432 storm_memset_tstats_zero(bp, BP_PORT(bp),
6433 params->rxq_params.stat_id);
6434 }
6435
6436 /* Fill the ramrod data */
6437 bnx2x_fill_cl_init_data(bp, params, activate, data);
6438
6439 /* SETUP ramrod.
6440 *
6441	 * bnx2x_sp_post() takes a spin_lock, thus no explicit memory
6442	 * barrier other than mmiowb() is needed to impose a
6443	 * proper ordering of memory operations.
6444 */
6445 mmiowb();
6446
6448 bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
6449 U64_HI(data_mapping), U64_LO(data_mapping), 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006450
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006451 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006452 rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
6453 params->ramrod_params.index,
6454 params->ramrod_params.pstate,
6455 ramrod_flags);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006456 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006457}
6458
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006459/**
6460 * Configure interrupt mode according to current configuration.
6461 * In case of MSI-X it will also try to enable MSI-X.
6462 *
6463 * @param bp
6464 *
6465 * @return int
6466 */
6467static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006468{
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006469 int rc = 0;
Eilon Greensteinca003922009-08-12 22:53:28 -07006470
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006471 switch (bp->int_mode) {
6472 case INT_MODE_MSI:
6473 bnx2x_enable_msi(bp);
6474 /* falling through... */
6475 case INT_MODE_INTx:
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006476 bp->num_queues = 1;
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006477 DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
Eilon Greensteinca003922009-08-12 22:53:28 -07006478 break;
Eilon Greensteinca003922009-08-12 22:53:28 -07006479 default:
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006480 /* Set number of queues according to bp->multi_mode value */
6481 bnx2x_set_num_queues(bp);
6482
6483 DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
6484 bp->num_queues);
6485
6486 /* if we can't use MSI-X we only need one fp,
6487 * so try to enable MSI-X with the requested number of fp's
6488 * and fallback to MSI or legacy INTx with one fp
6489 */
6490 rc = bnx2x_enable_msix(bp);
6491 if (rc) {
6492 /* failed to enable MSI-X */
6493 if (bp->multi_mode)
6494 DP(NETIF_MSG_IFUP,
6495 "Multi requested but failed to "
6496 "enable MSI-X (%d), "
6497 "set number of queues to %d\n",
6498 bp->num_queues,
6499 1);
6500 bp->num_queues = 1;
6501
6502 if (!(bp->flags & DISABLE_MSI_FLAG))
6503 bnx2x_enable_msi(bp);
6504 }
6505
Eilon Greensteinca003922009-08-12 22:53:28 -07006506 break;
6507 }
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006508
6509 return rc;
Eilon Greensteinca003922009-08-12 22:53:28 -07006510}
6511
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00006512/* must be called prior to any HW initialization */
6513static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6514{
6515 return L2_ILT_LINES(bp);
6516}
6517
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006518void bnx2x_ilt_set_info(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006519{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006520 struct ilt_client_info *ilt_client;
6521 struct bnx2x_ilt *ilt = BP_ILT(bp);
6522 u16 line = 0;
6523
6524 ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
6525 DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);
6526
6527 /* CDU */
6528 ilt_client = &ilt->clients[ILT_CLIENT_CDU];
6529 ilt_client->client_num = ILT_CLIENT_CDU;
6530 ilt_client->page_size = CDU_ILT_PAGE_SZ;
6531 ilt_client->flags = ILT_CLIENT_SKIP_MEM;
6532 ilt_client->start = line;
6533 line += L2_ILT_LINES(bp);
6534#ifdef BCM_CNIC
6535 line += CNIC_ILT_LINES;
6536#endif
6537 ilt_client->end = line - 1;
6538
6539 DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
6540 "flags 0x%x, hw psz %d\n",
6541 ilt_client->start,
6542 ilt_client->end,
6543 ilt_client->page_size,
6544 ilt_client->flags,
6545 ilog2(ilt_client->page_size >> 12));
6546
6547 /* QM */
6548 if (QM_INIT(bp->qm_cid_count)) {
6549 ilt_client = &ilt->clients[ILT_CLIENT_QM];
6550 ilt_client->client_num = ILT_CLIENT_QM;
6551 ilt_client->page_size = QM_ILT_PAGE_SZ;
6552 ilt_client->flags = 0;
6553 ilt_client->start = line;
6554
6555 /* 4 bytes for each cid */
6556 line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
6557 QM_ILT_PAGE_SZ);
6558
6559 ilt_client->end = line - 1;
6560
6561 DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
6562 "flags 0x%x, hw psz %d\n",
6563 ilt_client->start,
6564 ilt_client->end,
6565 ilt_client->page_size,
6566 ilt_client->flags,
6567 ilog2(ilt_client->page_size >> 12));
6568
6569 }
6570 /* SRC */
6571 ilt_client = &ilt->clients[ILT_CLIENT_SRC];
6572#ifdef BCM_CNIC
6573 ilt_client->client_num = ILT_CLIENT_SRC;
6574 ilt_client->page_size = SRC_ILT_PAGE_SZ;
6575 ilt_client->flags = 0;
6576 ilt_client->start = line;
6577 line += SRC_ILT_LINES;
6578 ilt_client->end = line - 1;
6579
6580 DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
6581 "flags 0x%x, hw psz %d\n",
6582 ilt_client->start,
6583 ilt_client->end,
6584 ilt_client->page_size,
6585 ilt_client->flags,
6586 ilog2(ilt_client->page_size >> 12));
6587
6588#else
6589 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6590#endif
6591
6592 /* TM */
6593 ilt_client = &ilt->clients[ILT_CLIENT_TM];
6594#ifdef BCM_CNIC
6595 ilt_client->client_num = ILT_CLIENT_TM;
6596 ilt_client->page_size = TM_ILT_PAGE_SZ;
6597 ilt_client->flags = 0;
6598 ilt_client->start = line;
6599 line += TM_ILT_LINES;
6600 ilt_client->end = line - 1;
6601
6602 DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
6603 "flags 0x%x, hw psz %d\n",
6604 ilt_client->start,
6605 ilt_client->end,
6606 ilt_client->page_size,
6607 ilt_client->flags,
6608 ilog2(ilt_client->page_size >> 12));
6609
6610#else
6611 ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
6612#endif
6613}
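
/*
 * Resulting per-function ILT line layout, in allocation order (a sketch;
 * exact counts depend on L2_ILT_LINES(bp), CNIC_ILT_LINES and the
 * QM/SRC/TM sizing for the chip): CDU first, then QM (when
 * QM_INIT(bp->qm_cid_count) is set), then SRC and TM (BCM_CNIC builds
 * only). The "hw psz" value in the debug prints encodes the page size
 * as ilog2(page_size >> 12): 0 for 4KB pages, 1 for 8KB, and so on.
 */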
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006614
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006615int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6616 int is_leading)
6617{
6618 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006619 int rc;
6620
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006621 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6622 IGU_INT_ENABLE, 0);
6623
6624 params.ramrod_params.pstate = &fp->state;
6625 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6626 params.ramrod_params.index = fp->index;
6627 params.ramrod_params.cid = fp->cid;
6628
6629 if (is_leading)
6630 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6631
6632 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6633
6634 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6635
6636 rc = bnx2x_setup_fw_client(bp, &params, 1,
6637 bnx2x_sp(bp, client_init_data),
6638 bnx2x_sp_mapping(bp, client_init_data));
6639 return rc;
6640}
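
/*
 * A minimal usage sketch (modeled on the load path, not a verbatim
 * excerpt): the leading RSS client is brought up first, then the
 * remaining queues:
 *
 *	rc = bnx2x_setup_client(bp, &bp->fp[0], 1);
 *	for_each_nondefault_queue(bp, i)
 *		if (!rc)
 *			rc = bnx2x_setup_client(bp, &bp->fp[i], 0);
 */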
6641
stephen hemminger8d962862010-10-21 07:50:56 +00006642static int bnx2x_stop_fw_client(struct bnx2x *bp,
6643 struct bnx2x_client_ramrod_params *p)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006644{
6645 int rc;
6646
6647 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6648
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006649 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006650 *p->pstate = BNX2X_FP_STATE_HALTING;
6651 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6652 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006653
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006654 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006655 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6656 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006657 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006658 return rc;
6659
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006660 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6661 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6662 p->cl_id, 0);
6663 /* Wait for completion */
6664 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6665 p->pstate, poll_flag);
6666 if (rc) /* timeout */
6667 return rc;
6668
6669
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006670 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006671 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006672
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006673 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006674 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6675 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006676 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006677}
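
/*
 * In short, tearing a client down walks the state machine
 * OPEN -> HALTING -> HALTED -> TERMINATING -> TERMINATED -> CLOSED,
 * posting the ETH_HALT, ETH_TERMINATE and common CFC_DEL ramrods and
 * waiting for each completion before issuing the next command.
 */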
6678
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006679static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006680{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006681 struct bnx2x_client_ramrod_params client_stop = {0};
6682 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006683
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006684 client_stop.index = index;
6685 client_stop.cid = fp->cid;
6686 client_stop.cl_id = fp->cl_id;
6687 client_stop.pstate = &(fp->state);
6688 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006689
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006690 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006691}
6692
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006693
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006694static void bnx2x_reset_func(struct bnx2x *bp)
6695{
6696 int port = BP_PORT(bp);
6697 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006698 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006699 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006700 (CHIP_IS_E2(bp) ?
6701 offsetof(struct hc_status_block_data_e2, common) :
6702 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006703 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6704 int pfid_offset = offsetof(struct pci_entity, pf_id);
6705
6706 /* Disable the function in the FW */
6707 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6708 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6709 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6710 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6711
6712 /* FP SBs */
6713 for_each_queue(bp, i) {
6714 struct bnx2x_fastpath *fp = &bp->fp[i];
6715 REG_WR8(bp,
6716 BAR_CSTRORM_INTMEM +
6717 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6718 + pfunc_offset_fp + pfid_offset,
6719 HC_FUNCTION_DISABLED);
6720 }
6721
6722 /* SP SB */
6723 REG_WR8(bp,
6724 BAR_CSTRORM_INTMEM +
6725 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6726 pfunc_offset_sp + pfid_offset,
6727 HC_FUNCTION_DISABLED);
6728
6729
 6730	for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
 6731		REG_WR(bp, BAR_XSTRORM_INTMEM +
 6732		       XSTORM_SPQ_DATA_OFFSET(func) + i * 4, 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006733
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006734 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006735 if (bp->common.int_block == INT_BLOCK_HC) {
6736 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6737 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6738 } else {
6739 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6740 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6741 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006742
Michael Chan37b091b2009-10-10 13:46:55 +00006743#ifdef BCM_CNIC
6744 /* Disable Timer scan */
6745 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6746 /*
 6747	 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6748 * complete
6749 */
6750 for (i = 0; i < 200; i++) {
6751 msleep(10);
6752 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6753 break;
6754 }
6755#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006756 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006757 bnx2x_clear_func_ilt(bp, func);
6758
 6759	/* Timers workaround for an E2 bug: if this is vnic-3,
 6760	 * we need to set the entire ILT range for these timers.
6761 */
6762 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6763 struct ilt_client_info ilt_cli;
6764 /* use dummy TM client */
6765 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6766 ilt_cli.start = 0;
6767 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6768 ilt_cli.client_num = ILT_CLIENT_TM;
6769
6770 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6771 }
6772
 6773	/* this assumes that reset_port() is called before reset_func() */
6774 if (CHIP_IS_E2(bp))
6775 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006776
6777 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006778}
6779
6780static void bnx2x_reset_port(struct bnx2x *bp)
6781{
6782 int port = BP_PORT(bp);
6783 u32 val;
6784
6785 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6786
6787 /* Do not rcv packets to BRB */
6788 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6789 /* Do not direct rcv packets that are not for MCP to the BRB */
6790 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6791 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6792
6793 /* Configure AEU */
6794 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6795
6796 msleep(100);
6797 /* Check for BRB port occupancy */
6798 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6799 if (val)
6800 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07006801		   "BRB1 is not empty, %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006802
6803 /* TODO: Close Doorbell port? */
6804}
6805
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006806static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6807{
6808 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006809 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006810
6811 switch (reset_code) {
6812 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6813 bnx2x_reset_port(bp);
6814 bnx2x_reset_func(bp);
6815 bnx2x_reset_common(bp);
6816 break;
6817
6818 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6819 bnx2x_reset_port(bp);
6820 bnx2x_reset_func(bp);
6821 break;
6822
6823 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6824 bnx2x_reset_func(bp);
6825 break;
6826
6827 default:
6828 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6829 break;
6830 }
6831}
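
/*
 * The reset codes returned by the MCP form a strict hierarchy:
 * UNLOAD_COMMON (last function on the chip) resets function, port and
 * common blocks; UNLOAD_PORT (last function on the port) resets function
 * and port; UNLOAD_FUNCTION resets only the function itself.
 */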
6832
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006833void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006834{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006835 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006836 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006837 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006838
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006839 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006840 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006841 struct bnx2x_fastpath *fp = &bp->fp[i];
6842
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006843 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006844 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006845
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006846 if (!cnt) {
6847 BNX2X_ERR("timeout waiting for queue[%d]\n",
6848 i);
6849#ifdef BNX2X_STOP_ON_ERROR
6850 bnx2x_panic();
6851 return -EBUSY;
6852#else
6853 break;
6854#endif
6855 }
6856 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006857 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006858 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006859 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006860 /* Give HW time to discard old tx messages */
6861 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006862
Yitchak Gertner65abd742008-08-25 15:26:24 -07006863 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006864 /* invalidate mc list,
6865 * wait and poll (interrupts are off)
6866 */
6867 bnx2x_invlidate_e1_mc_list(bp);
6868 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006869
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006870 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006871 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6872
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006873 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006874
6875 for (i = 0; i < MC_HASH_SIZE; i++)
6876 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6877 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006878
Michael Chan993ac7b2009-10-10 13:46:56 +00006879#ifdef BCM_CNIC
6880 /* Clear iSCSI L2 MAC */
6881 mutex_lock(&bp->cnic_mutex);
6882 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6883 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6884 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6885 }
6886 mutex_unlock(&bp->cnic_mutex);
6887#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006888
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006889 if (unload_mode == UNLOAD_NORMAL)
6890 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006891
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006892 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006893 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006894
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006895 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006896 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006897 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006898 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006899 /* The mac address is written to entries 1-4 to
 6900		   preserve entry 0, which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006901 u8 entry = (BP_E1HVN(bp) + 1)*8;
6902
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006903 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006904 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006905
6906 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6907 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006908 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006909
6910 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006911
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006912 } else
6913 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6914
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006915	/* Close multi and leading connections;
 6916	   completions for the ramrods are collected synchronously */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006917 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006918
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006919 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006920#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006921 return;
6922#else
6923 goto unload_error;
6924#endif
6925
6926 rc = bnx2x_func_stop(bp);
6927 if (rc) {
6928 BNX2X_ERR("Function stop failed!\n");
6929#ifdef BNX2X_STOP_ON_ERROR
6930 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006931#else
6932 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006933#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006934 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006935#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006936unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006937#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006938 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006939 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006940 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006941 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6942 "%d, %d, %d\n", BP_PATH(bp),
6943 load_count[BP_PATH(bp)][0],
6944 load_count[BP_PATH(bp)][1],
6945 load_count[BP_PATH(bp)][2]);
6946 load_count[BP_PATH(bp)][0]--;
6947 load_count[BP_PATH(bp)][1 + port]--;
6948 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6949 "%d, %d, %d\n", BP_PATH(bp),
6950 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6951 load_count[BP_PATH(bp)][2]);
6952 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006953 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006954 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006955 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6956 else
6957 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6958 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006959
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006960 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6961 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6962 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006963
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006964 /* Disable HW interrupts, NAPI */
6965 bnx2x_netif_stop(bp, 1);
6966
6967 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006968 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006969
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006970 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006971 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006972
6973 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006974 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006975 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006976
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006977}
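
/*
 * Note on the reset_code selection above: UNLOAD_NORMAL requests
 * WOL_DIS, a WoL-incapable part defers wake-up handling to the MCP
 * (WOL_MCP), and an enabled WoL programs the MAC into EMAC match
 * entries 1-4 before requesting WOL_EN. Without an MCP the driver
 * keeps per-path load counts itself to derive the COMMON/PORT/FUNCTION
 * unload scope.
 */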
6978
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006979void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006980{
6981 u32 val;
6982
6983 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6984
6985 if (CHIP_IS_E1(bp)) {
6986 int port = BP_PORT(bp);
6987 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6988 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6989
6990 val = REG_RD(bp, addr);
6991 val &= ~(0x300);
6992 REG_WR(bp, addr, val);
6993 } else if (CHIP_IS_E1H(bp)) {
6994 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6995 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6996 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6997 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6998 }
6999}
7000
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007001/* Close gates #2, #3 and #4: */
7002static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
7003{
7004 u32 val, addr;
7005
7006 /* Gates #2 and #4a are closed/opened for "not E1" only */
7007 if (!CHIP_IS_E1(bp)) {
7008 /* #4 */
7009 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7010 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7011 close ? (val | 0x1) : (val & (~(u32)1)));
7012 /* #2 */
7013 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7014 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7015 close ? (val | 0x1) : (val & (~(u32)1)));
7016 }
7017
7018 /* #3 */
7019 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7020 val = REG_RD(bp, addr);
7021 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7022
7023 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7024 close ? "closing" : "opening");
7025 mmiowb();
7026}
7027
7028#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7029
7030static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7031{
7032 /* Do some magic... */
7033 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7034 *magic_val = val & SHARED_MF_CLP_MAGIC;
7035 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7036}
7037
7038/* Restore the value of the `magic' bit.
7039 *
 7040 * @param bp Driver handle.
7041 * @param magic_val Old value of the `magic' bit.
7042 */
7043static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7044{
7045 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007046 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7047 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7048 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7049}
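
/*
 * The two helpers above are a save/restore pair around an MCP reset.
 * A sketch of the intended pairing (as used by bnx2x_reset_mcp_prep()
 * and bnx2x_reset_mcp_comp() below):
 *
 *	u32 magic;
 *
 *	bnx2x_clp_reset_prep(bp, &magic);
 *	... reset the MCP ...
 *	bnx2x_clp_reset_done(bp, magic);
 */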
7050
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007051/**
7052 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007053 *
7054 * @param bp
7055 * @param magic_val Old value of 'magic' bit.
7056 */
7057static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7058{
7059 u32 shmem;
7060 u32 validity_offset;
7061
7062 DP(NETIF_MSG_HW, "Starting\n");
7063
7064 /* Set `magic' bit in order to save MF config */
7065 if (!CHIP_IS_E1(bp))
7066 bnx2x_clp_reset_prep(bp, magic_val);
7067
7068 /* Get shmem offset */
7069 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7070 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7071
7072 /* Clear validity map flags */
7073 if (shmem > 0)
7074 REG_WR(bp, shmem + validity_offset, 0);
7075}
7076
7077#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7078#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7079
7080/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7081 * depending on the HW type.
7082 *
7083 * @param bp
7084 */
7085static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7086{
7087 /* special handling for emulation and FPGA,
7088 wait 10 times longer */
7089 if (CHIP_REV_IS_SLOW(bp))
7090 msleep(MCP_ONE_TIMEOUT*10);
7091 else
7092 msleep(MCP_ONE_TIMEOUT);
7093}
7094
7095static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7096{
7097 u32 shmem, cnt, validity_offset, val;
7098 int rc = 0;
7099
7100 msleep(100);
7101
7102 /* Get shmem offset */
7103 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7104 if (shmem == 0) {
7105 BNX2X_ERR("Shmem 0 return failure\n");
7106 rc = -ENOTTY;
7107 goto exit_lbl;
7108 }
7109
7110 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7111
7112 /* Wait for MCP to come up */
7113 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
 7114		/* TBD: it's best to check the validity map of the last port;
7115 * currently checks on port 0.
7116 */
7117 val = REG_RD(bp, shmem + validity_offset);
7118 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7119 shmem + validity_offset, val);
7120
7121 /* check that shared memory is valid. */
7122 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7123 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7124 break;
7125
7126 bnx2x_mcp_wait_one(bp);
7127 }
7128
7129 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7130
7131 /* Check that shared memory is valid. This indicates that MCP is up. */
7132 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7133 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
 7134		BNX2X_ERR("Shmem signature not present. MCP is not up!!\n");
7135 rc = -ENOTTY;
7136 goto exit_lbl;
7137 }
7138
7139exit_lbl:
7140 /* Restore the `magic' bit value */
7141 if (!CHIP_IS_E1(bp))
7142 bnx2x_clp_reset_done(bp, magic_val);
7143
7144 return rc;
7145}
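
/*
 * Worst-case wait above: MCP_TIMEOUT / MCP_ONE_TIMEOUT = 5000 / 100 = 50
 * polls of 100ms each (or 1s each on slow emulation/FPGA platforms), on
 * top of the initial 100ms sleep. The MCP is considered up only when
 * both SHR_MEM_VALIDITY_DEV_INFO and SHR_MEM_VALIDITY_MB are set.
 */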
7146
7147static void bnx2x_pxp_prep(struct bnx2x *bp)
7148{
7149 if (!CHIP_IS_E1(bp)) {
7150 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7151 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7152 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7153 mmiowb();
7154 }
7155}
7156
7157/*
7158 * Reset the whole chip except for:
7159 * - PCIE core
7160 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7161 * one reset bit)
7162 * - IGU
7163 * - MISC (including AEU)
7164 * - GRC
7165 * - RBCN, RBCP
7166 */
7167static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7168{
7169 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7170
7171 not_reset_mask1 =
7172 MISC_REGISTERS_RESET_REG_1_RST_HC |
7173 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7174 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7175
7176 not_reset_mask2 =
7177 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7178 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7179 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7180 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7181 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7182 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7183 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7184 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7185
7186 reset_mask1 = 0xffffffff;
7187
7188 if (CHIP_IS_E1(bp))
7189 reset_mask2 = 0xffff;
7190 else
7191 reset_mask2 = 0x1ffff;
7192
7193 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7194 reset_mask1 & (~not_reset_mask1));
7195 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7196 reset_mask2 & (~not_reset_mask2));
7197
7198 barrier();
7199 mmiowb();
7200
7201 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7202 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7203 mmiowb();
7204}
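
/*
 * Reset-register semantics assumed above (consistent with the usage
 * here and in bnx2x_undi_unload()): writing a bit to
 * MISC_REGISTERS_RESET_REG_x_CLEAR asserts reset for that block, and
 * writing it to MISC_REGISTERS_RESET_REG_x_SET releases it. The
 * not_reset masks keep HC, PXP, MISC, GRC, RBCN, the EMAC hard cores
 * and the MCP alive across the kill.
 */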
7205
7206static int bnx2x_process_kill(struct bnx2x *bp)
7207{
7208 int cnt = 1000;
7209 u32 val = 0;
7210 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7211
7212
7213 /* Empty the Tetris buffer, wait for 1s */
7214 do {
7215 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7216 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7217 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7218 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7219 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7220 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7221 ((port_is_idle_0 & 0x1) == 0x1) &&
7222 ((port_is_idle_1 & 0x1) == 0x1) &&
7223 (pgl_exp_rom2 == 0xffffffff))
7224 break;
7225 msleep(1);
7226 } while (cnt-- > 0);
7227
7228 if (cnt <= 0) {
 7229		DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7230 " are still"
7231 " outstanding read requests after 1s!\n");
7232 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7233 " port_is_idle_0=0x%08x,"
7234 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7235 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7236 pgl_exp_rom2);
7237 return -EAGAIN;
7238 }
7239
7240 barrier();
7241
7242 /* Close gates #2, #3 and #4 */
7243 bnx2x_set_234_gates(bp, true);
7244
7245 /* TBD: Indicate that "process kill" is in progress to MCP */
7246
7247 /* Clear "unprepared" bit */
7248 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7249 barrier();
7250
7251 /* Make sure all is written to the chip before the reset */
7252 mmiowb();
7253
7254 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7255 * PSWHST, GRC and PSWRD Tetris buffer.
7256 */
7257 msleep(1);
7258
7259 /* Prepare to chip reset: */
7260 /* MCP */
7261 bnx2x_reset_mcp_prep(bp, &val);
7262
7263 /* PXP */
7264 bnx2x_pxp_prep(bp);
7265 barrier();
7266
7267 /* reset the chip */
7268 bnx2x_process_kill_chip_reset(bp);
7269 barrier();
7270
7271 /* Recover after reset: */
7272 /* MCP */
7273 if (bnx2x_reset_mcp_comp(bp, val))
7274 return -EAGAIN;
7275
7276 /* PXP */
7277 bnx2x_pxp_prep(bp);
7278
7279 /* Open the gates #2, #3 and #4 */
7280 bnx2x_set_234_gates(bp, false);
7281
7282 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7283 * reset state, re-enable attentions. */
7284
7285 return 0;
7286}
7287
7288static int bnx2x_leader_reset(struct bnx2x *bp)
7289{
7290 int rc = 0;
7291 /* Try to recover after the failure */
7292 if (bnx2x_process_kill(bp)) {
 7293		printk(KERN_ERR "%s: Something bad happened! Aii!\n",
7294 bp->dev->name);
7295 rc = -EAGAIN;
7296 goto exit_leader_reset;
7297 }
7298
7299 /* Clear "reset is in progress" bit and update the driver state */
7300 bnx2x_set_reset_done(bp);
7301 bp->recovery_state = BNX2X_RECOVERY_DONE;
7302
7303exit_leader_reset:
7304 bp->is_leader = 0;
7305 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7306 smp_wmb();
7307 return rc;
7308}
7309
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007310/* Assumption: runs under rtnl lock. This, together with the fact
 7311 * that it's called only from bnx2x_reset_task(), ensures that it
7312 * will never be called when netif_running(bp->dev) is false.
7313 */
7314static void bnx2x_parity_recover(struct bnx2x *bp)
7315{
7316 DP(NETIF_MSG_HW, "Handling parity\n");
7317 while (1) {
7318 switch (bp->recovery_state) {
7319 case BNX2X_RECOVERY_INIT:
7320 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7321 /* Try to get a LEADER_LOCK HW lock */
7322 if (bnx2x_trylock_hw_lock(bp,
7323 HW_LOCK_RESOURCE_RESERVED_08))
7324 bp->is_leader = 1;
7325
7326 /* Stop the driver */
7327 /* If interface has been removed - break */
7328 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7329 return;
7330
7331 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7332 /* Ensure "is_leader" and "recovery_state"
7333 * update values are seen on other CPUs
7334 */
7335 smp_wmb();
7336 break;
7337
7338 case BNX2X_RECOVERY_WAIT:
7339 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7340 if (bp->is_leader) {
7341 u32 load_counter = bnx2x_get_load_cnt(bp);
7342 if (load_counter) {
7343 /* Wait until all other functions get
7344 * down.
7345 */
7346 schedule_delayed_work(&bp->reset_task,
7347 HZ/10);
7348 return;
7349 } else {
7350 /* If all other functions got down -
7351 * try to bring the chip back to
7352 * normal. In any case it's an exit
7353 * point for a leader.
7354 */
7355 if (bnx2x_leader_reset(bp) ||
7356 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7357 printk(KERN_ERR"%s: Recovery "
7358 "has failed. Power cycle is "
7359 "needed.\n", bp->dev->name);
7360 /* Disconnect this device */
7361 netif_device_detach(bp->dev);
7362 /* Block ifup for all function
7363 * of this ASIC until
7364 * "process kill" or power
7365 * cycle.
7366 */
7367 bnx2x_set_reset_in_progress(bp);
7368 /* Shut down the power */
7369 bnx2x_set_power_state(bp,
7370 PCI_D3hot);
7371 return;
7372 }
7373
7374 return;
7375 }
7376 } else { /* non-leader */
7377 if (!bnx2x_reset_is_done(bp)) {
7378 /* Try to get a LEADER_LOCK HW lock as
7379 * long as a former leader may have
7380 * been unloaded by the user or
7381 * released a leadership by another
7382 * reason.
7383 */
7384 if (bnx2x_trylock_hw_lock(bp,
7385 HW_LOCK_RESOURCE_RESERVED_08)) {
7386 /* I'm a leader now! Restart a
7387 * switch case.
7388 */
7389 bp->is_leader = 1;
7390 break;
7391 }
7392
7393 schedule_delayed_work(&bp->reset_task,
7394 HZ/10);
7395 return;
7396
7397 } else { /* A leader has completed
7398 * the "process kill". It's an exit
7399 * point for a non-leader.
7400 */
7401 bnx2x_nic_load(bp, LOAD_NORMAL);
7402 bp->recovery_state =
7403 BNX2X_RECOVERY_DONE;
7404 smp_wmb();
7405 return;
7406 }
7407 }
7408 default:
7409 return;
7410 }
7411 }
7412}
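
/*
 * Recovery state machine in brief: every function starts in
 * BNX2X_RECOVERY_INIT, races for the RESERVED_08 HW lock to become the
 * leader, and unloads itself into BNX2X_RECOVERY_WAIT. The leader waits
 * for the global load count to drain, performs the "process kill" chip
 * reset and reloads; non-leaders poll until the leader marks the reset
 * done (or inherit leadership if the lock frees up), then reload and
 * move to BNX2X_RECOVERY_DONE.
 */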
7413
 7414/* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
 7415 * scheduled on a general queue in order to prevent a deadlock.
7416 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007417static void bnx2x_reset_task(struct work_struct *work)
7418{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007419 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007420
7421#ifdef BNX2X_STOP_ON_ERROR
7422 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7423 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007424		  " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007425 return;
7426#endif
7427
7428 rtnl_lock();
7429
7430 if (!netif_running(bp->dev))
7431 goto reset_task_exit;
7432
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007433 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7434 bnx2x_parity_recover(bp);
7435 else {
7436 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7437 bnx2x_nic_load(bp, LOAD_NORMAL);
7438 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007439
7440reset_task_exit:
7441 rtnl_unlock();
7442}
7443
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007444/* end of nic load/unload */
7445
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007446/*
7447 * Init service functions
7448 */
7449
stephen hemminger8d962862010-10-21 07:50:56 +00007450static u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007451{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007452 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7453 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7454 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007455}
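
/*
 * For illustration, with hypothetical register offsets: if
 * PXP2_REG_PGL_PRETEND_FUNC_F0 were 0x120078 and ..._F1 were 0x120080,
 * the stride would be 8 and absolute function 3 would pretend through
 * 0x120078 + 3 * 8 = 0x120090.
 */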
7456
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007457static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007458{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007459 u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007460
7461 /* Flush all outstanding writes */
7462 mmiowb();
7463
7464 /* Pretend to be function 0 */
7465 REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007466 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007467
7468 /* From now we are in the "like-E1" mode */
7469 bnx2x_int_disable(bp);
7470
7471 /* Flush all outstanding writes */
7472 mmiowb();
7473
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007474 /* Restore the original function */
7475 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7476 REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007477}
7478
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007479static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007480{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007481 if (CHIP_IS_E1(bp))
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007482 bnx2x_int_disable(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007483 else
7484 bnx2x_undi_int_disable_e1h(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007485}
7486
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007487static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007488{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007489 u32 val;
7490
7491 /* Check if there is any driver already loaded */
7492 val = REG_RD(bp, MISC_REG_UNPREPARED);
7493 if (val == 0x1) {
 7494		/* Check if it is the UNDI driver:
 7495		 * the UNDI driver initializes the normal doorbell CID offset to 0x7
7496 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007497 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007498 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7499 if (val == 0x7) {
7500 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007501 /* save our pf_num */
7502 int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007503 u32 swap_en;
7504 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007505
Eilon Greensteinb4661732009-01-14 06:43:56 +00007506 /* clear the UNDI indication */
7507 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7508
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007509 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7510
7511 /* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007512 bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007513 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007514 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007515 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007516 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007517
7518 /* if UNDI is loaded on the other port */
7519 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7520
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007521 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007522 bnx2x_fw_command(bp,
7523 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007524
7525 /* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007526 bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007527 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007528 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007529 DRV_MSG_SEQ_NUMBER_MASK);
7530 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007531
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007532 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007533 }
7534
Eilon Greensteinb4661732009-01-14 06:43:56 +00007535 /* now it's safe to release the lock */
7536 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7537
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007538 bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007539
7540 /* close input traffic and wait for it */
7541 /* Do not rcv packets to BRB */
7542 REG_WR(bp,
7543 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7544 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7545 /* Do not direct rcv packets that are not for MCP to
7546 * the BRB */
7547 REG_WR(bp,
7548 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7549 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7550 /* clear AEU */
7551 REG_WR(bp,
7552 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7553 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7554 msleep(10);
7555
7556 /* save NIG port swap info */
7557 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7558 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007559 /* reset device */
7560 REG_WR(bp,
7561 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007562 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007563 REG_WR(bp,
7564 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7565 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007566 /* take the NIG out of reset and restore swap values */
7567 REG_WR(bp,
7568 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7569 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7570 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7571 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7572
7573 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007574 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007575
7576 /* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007577 bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007578 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007579 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007580 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007581 } else
7582 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007583 }
7584}
7585
7586static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7587{
7588 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007589 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007590
7591 /* Get the chip revision id and number. */
7592 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7593 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7594 id = ((val & 0xffff) << 16);
7595 val = REG_RD(bp, MISC_REG_CHIP_REV);
7596 id |= ((val & 0xf) << 12);
7597 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7598 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007599 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007600 id |= (val & 0xf);
7601 bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007602
7603 /* Set doorbell size */
7604 bp->db_size = (1 << BNX2X_DB_SHIFT);
7605
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007606 if (CHIP_IS_E2(bp)) {
7607 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7608 if ((val & 1) == 0)
7609 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7610 else
7611 val = (val >> 1) & 1;
7612 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7613 "2_PORT_MODE");
7614 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7615 CHIP_2_PORT_MODE;
7616
7617 if (CHIP_MODE_IS_4_PORT(bp))
7618 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7619 else
7620 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7621 } else {
7622 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7623 bp->pfid = bp->pf_num; /* 0..7 */
7624 }
7625
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007626 /*
7627 * set base FW non-default (fast path) status block id, this value is
7628 * used to initialize the fw_sb_id saved on the fp/queue structure to
7629 * determine the id used by the FW.
7630 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007631 if (CHIP_IS_E1x(bp))
7632 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7633 else /* E2 */
7634 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7635
7636 bp->link_params.chip_id = bp->common.chip_id;
7637 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007638
Eilon Greenstein1c063282009-02-12 08:36:43 +00007639 val = (REG_RD(bp, 0x2874) & 0x55);
7640 if ((bp->common.chip_id & 0x1) ||
7641 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7642 bp->flags |= ONE_PORT_FLAG;
7643 BNX2X_DEV_INFO("single port device\n");
7644 }
7645
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007646 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7647 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7648 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7649 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7650 bp->common.flash_size, bp->common.flash_size);
7651
7652 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007653 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7654 MISC_REG_GENERIC_CR_1 :
7655 MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007656 bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007657 bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007658 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7659 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007660
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007661 if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007662 BNX2X_DEV_INFO("MCP not active\n");
7663 bp->flags |= NO_MCP_FLAG;
7664 return;
7665 }
7666
7667 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7668 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7669 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007670 BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007671
7672 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007673 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007674
7675 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7676 SHARED_HW_CFG_LED_MODE_MASK) >>
7677 SHARED_HW_CFG_LED_MODE_SHIFT);
7678
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007679 bp->link_params.feature_config_flags = 0;
7680 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7681 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7682 bp->link_params.feature_config_flags |=
7683 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7684 else
7685 bp->link_params.feature_config_flags &=
7686 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7687
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007688 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7689 bp->common.bc_ver = val;
7690 BNX2X_DEV_INFO("bc_ver %X\n", val);
7691 if (val < BNX2X_BC_VER) {
7692 /* for now only warn
7693 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007694 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7695 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007696 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007697 bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007698 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007699 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7700
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007701 bp->link_params.feature_config_flags |=
7702 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7703 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007704
7705 if (BP_E1HVN(bp) == 0) {
7706 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7707 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7708 } else {
7709 /* no WOL capability for E1HVN != 0 */
7710 bp->flags |= NO_WOL_FLAG;
7711 }
7712 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007713 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007714
7715 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7716 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7717 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7718 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7719
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007720 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7721 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007722}
7723
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007724#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7725#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7726
7727static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7728{
7729 int pfid = BP_FUNC(bp);
7730 int vn = BP_E1HVN(bp);
7731 int igu_sb_id;
7732 u32 val;
7733 u8 fid;
7734
7735 bp->igu_base_sb = 0xff;
7736 bp->igu_sb_cnt = 0;
7737 if (CHIP_INT_MODE_IS_BC(bp)) {
7738 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7739 bp->l2_cid_count);
7740
7741 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7742 FP_SB_MAX_E1x;
7743
7744 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7745 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7746
7747 return;
7748 }
7749
7750 /* IGU in normal mode - read CAM */
7751 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7752 igu_sb_id++) {
7753 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7754 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7755 continue;
7756 fid = IGU_FID(val);
7757 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7758 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7759 continue;
7760 if (IGU_VEC(val) == 0)
7761 /* default status block */
7762 bp->igu_dsb_id = igu_sb_id;
7763 else {
7764 if (bp->igu_base_sb == 0xff)
7765 bp->igu_base_sb = igu_sb_id;
7766 bp->igu_sb_cnt++;
7767 }
7768 }
7769 }
7770 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7771 if (bp->igu_sb_cnt == 0)
7772 BNX2X_ERR("CAM configuration error\n");
7773}
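
/*
 * IGU CAM scan above, in brief: each valid entry maps an IGU status
 * block to a function (FID) and a vector number. Vector 0 of our PF
 * becomes the default status block (igu_dsb_id); the first non-zero
 * vector found becomes igu_base_sb and the rest are counted into
 * igu_sb_cnt, capped by the L2 CID count.
 */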
7774
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007775static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7776 u32 switch_cfg)
7777{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007778 int cfg_size = 0, idx, port = BP_PORT(bp);
7779
7780 /* Aggregation of supported attributes of all external phys */
7781 bp->port.supported[0] = 0;
7782 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007783 switch (bp->link_params.num_phys) {
7784 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007785 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7786 cfg_size = 1;
7787 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007788 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007789 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7790 cfg_size = 1;
7791 break;
7792 case 3:
7793 if (bp->link_params.multi_phy_config &
7794 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7795 bp->port.supported[1] =
7796 bp->link_params.phy[EXT_PHY1].supported;
7797 bp->port.supported[0] =
7798 bp->link_params.phy[EXT_PHY2].supported;
7799 } else {
7800 bp->port.supported[0] =
7801 bp->link_params.phy[EXT_PHY1].supported;
7802 bp->port.supported[1] =
7803 bp->link_params.phy[EXT_PHY2].supported;
7804 }
7805 cfg_size = 2;
7806 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007807 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007808
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007809 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007810		BNX2X_ERR("NVRAM config error. BAD phy config. "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007811 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007812 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007813 dev_info.port_hw_config[port].external_phy_config),
7814 SHMEM_RD(bp,
7815 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007816 return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007817 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007818
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007819 switch (switch_cfg) {
7820 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007821 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7822 port*0x10);
7823 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007824 break;
7825
7826 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007827 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7828 port*0x18);
7829 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007830 break;
7831
7832 default:
7833 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007834 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007835 return;
7836 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007837 /* mask what we support according to speed_cap_mask per configuration */
7838 for (idx = 0; idx < cfg_size; idx++) {
7839 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007840 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007841 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007842
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007843 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007844 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007845 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007846
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007847 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007848 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007849 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007850
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007851 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007852 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007853 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007854
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007855 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007856 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007857 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007858 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007859
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007860 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007861 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007862 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007863
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007864 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007865 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007866 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007867
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007868 }
7869
7870 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7871 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007872}
7873
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007874static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007875{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007876 u32 link_config, idx, cfg_size = 0;
7877 bp->port.advertising[0] = 0;
7878 bp->port.advertising[1] = 0;
7879 switch (bp->link_params.num_phys) {
7880 case 1:
7881 case 2:
7882 cfg_size = 1;
7883 break;
7884 case 3:
7885 cfg_size = 2;
7886 break;
7887 }
7888 for (idx = 0; idx < cfg_size; idx++) {
7889 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7890 link_config = bp->port.link_config[idx];
7891 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007892 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007893 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7894 bp->link_params.req_line_speed[idx] =
7895 SPEED_AUTO_NEG;
7896 bp->port.advertising[idx] |=
7897 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007898 } else {
7899 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007900 bp->link_params.req_line_speed[idx] =
7901 SPEED_10000;
7902 bp->port.advertising[idx] |=
7903 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007904 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007905 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007906 }
7907 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007908
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007909 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007910 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7911 bp->link_params.req_line_speed[idx] =
7912 SPEED_10;
7913 bp->port.advertising[idx] |=
7914 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007915 ADVERTISED_TP);
7916 } else {
7917 BNX2X_ERROR("NVRAM config error. "
7918 "Invalid link_config 0x%x"
7919 " speed_cap_mask 0x%x\n",
7920 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007921 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007922 return;
7923 }
7924 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007925
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007926 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007927 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
7928 bp->link_params.req_line_speed[idx] =
7929 SPEED_10;
7930 bp->link_params.req_duplex[idx] =
7931 DUPLEX_HALF;
7932 bp->port.advertising[idx] |=
7933 (ADVERTISED_10baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007934 ADVERTISED_TP);
7935 } else {
7936 BNX2X_ERROR("NVRAM config error. "
7937 "Invalid link_config 0x%x"
7938 " speed_cap_mask 0x%x\n",
7939 link_config,
7940 bp->link_params.speed_cap_mask[idx]);
7941 return;
7942 }
7943 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007944
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007945 case PORT_FEATURE_LINK_SPEED_100M_FULL:
7946 if (bp->port.supported[idx] &
7947 SUPPORTED_100baseT_Full) {
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007948 bp->link_params.req_line_speed[idx] =
7949 SPEED_100;
7950 bp->port.advertising[idx] |=
7951 (ADVERTISED_100baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007952 ADVERTISED_TP);
7953 } else {
7954 BNX2X_ERROR("NVRAM config error. "
7955 "Invalid link_config 0x%x"
7956 " speed_cap_mask 0x%x\n",
7957 link_config,
7958 bp->link_params.speed_cap_mask[idx]);
7959 return;
7960 }
7961 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007962
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007963 case PORT_FEATURE_LINK_SPEED_100M_HALF:
7964 if (bp->port.supported[idx] &
7965 SUPPORTED_100baseT_Half) {
7966 bp->link_params.req_line_speed[idx] =
7967 SPEED_100;
7968 bp->link_params.req_duplex[idx] =
7969 DUPLEX_HALF;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007970 bp->port.advertising[idx] |=
7971 (ADVERTISED_100baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007972 ADVERTISED_TP);
7973 } else {
7974 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007975 "Invalid link_config 0x%x"
7976 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007977 link_config,
7978 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007979 return;
7980 }
7981 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007982
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007983 case PORT_FEATURE_LINK_SPEED_1G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007984 if (bp->port.supported[idx] &
7985 SUPPORTED_1000baseT_Full) {
7986 bp->link_params.req_line_speed[idx] =
7987 SPEED_1000;
7988 bp->port.advertising[idx] |=
7989 (ADVERTISED_1000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007990 ADVERTISED_TP);
7991 } else {
7992 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007993 "Invalid link_config 0x%x"
7994 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007995 link_config,
7996 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007997 return;
7998 }
7999 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008000
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008001 case PORT_FEATURE_LINK_SPEED_2_5G:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008002 if (bp->port.supported[idx] &
8003 SUPPORTED_2500baseX_Full) {
8004 bp->link_params.req_line_speed[idx] =
8005 SPEED_2500;
8006 bp->port.advertising[idx] |=
8007 (ADVERTISED_2500baseX_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008008 ADVERTISED_TP);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008009 } else {
8010 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008011 "Invalid link_config 0x%x"
8012 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008013 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008014 bp->link_params.speed_cap_mask[idx]);
8015 return;
8016 }
8017 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008018
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008019 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8020 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8021 case PORT_FEATURE_LINK_SPEED_10G_KR:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008022 if (bp->port.supported[idx] &
8023 SUPPORTED_10000baseT_Full) {
8024 bp->link_params.req_line_speed[idx] =
8025 SPEED_10000;
8026 bp->port.advertising[idx] |=
8027 (ADVERTISED_10000baseT_Full |
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008028 ADVERTISED_FIBRE);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008029 } else {
8030 BNX2X_ERROR("NVRAM config error. "
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008031 "Invalid link_config 0x%x"
8032 " speed_cap_mask 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008033 link_config,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008034 bp->link_params.speed_cap_mask[idx]);
8035 return;
8036 }
8037 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008038
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008039 default:
8040 BNX2X_ERROR("NVRAM config error. "
8041 "BAD link speed link_config 0x%x\n",
8042 link_config);
8043 bp->link_params.req_line_speed[idx] =
8044 SPEED_AUTO_NEG;
8045 bp->port.advertising[idx] =
8046 bp->port.supported[idx];
8047 break;
8048 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008049
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008050 bp->link_params.req_flow_ctrl[idx] = (link_config &
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008051 PORT_FEATURE_FLOW_CONTROL_MASK);
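		/* BNX2X_FLOW_CTRL_AUTO only makes sense when the PHY can
		 * autonegotiate; fall back to no flow control otherwise. */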
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008052 if ((bp->link_params.req_flow_ctrl[idx] ==
8053 BNX2X_FLOW_CTRL_AUTO) &&
8054 !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
8055 bp->link_params.req_flow_ctrl[idx] =
8056 BNX2X_FLOW_CTRL_NONE;
8057 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008058
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008059 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
8060 " 0x%x advertising 0x%x\n",
8061 bp->link_params.req_line_speed[idx],
8062 bp->link_params.req_duplex[idx],
8063 bp->link_params.req_flow_ctrl[idx],
8064 bp->port.advertising[idx]);
8065 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008066}
8067
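/* A hexadecimal walk-through (hypothetical values): with mac_hi = 0x0010
 * and mac_lo = 0x18428a9b, the cpu_to_be{16,32} conversions below lay the
 * six bytes out in network order, so mac_buf ends up as 00:10:18:42:8a:9b.
 */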
Michael Chane665bfd2009-10-10 13:46:54 +00008068static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8069{
8070 mac_hi = cpu_to_be16(mac_hi);
8071 mac_lo = cpu_to_be32(mac_lo);
8072 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8073 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8074}
8075
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008076static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008077{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008078 int port = BP_PORT(bp);
8079 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00008080 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008081	u32 ext_phy_type, ext_phy_config;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008082
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008083 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008084 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008085
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008086 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008087 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008088
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008089 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008090 SHMEM_RD(bp,
8091 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008092 bp->link_params.speed_cap_mask[1] =
8093 SHMEM_RD(bp,
8094 dev_info.port_hw_config[port].speed_capability_mask2);
8095 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008096 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8097
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008098 bp->port.link_config[1] =
8099 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008100
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008101 bp->link_params.multi_phy_config =
8102 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008103 /* If the device is capable of WoL, set the default state according
8104 * to the HW
8105 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008106 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008107 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8108 (config & PORT_FEATURE_WOL_ENABLED));
8109
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008110 BNX2X_DEV_INFO("lane_config 0x%08x "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008111 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008112 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008113 bp->link_params.speed_cap_mask[0],
8114 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008115
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008116 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008117 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008118 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008119 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008120
8121 bnx2x_link_settings_requested(bp);
8122
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008123 /*
8124 * If connected directly, work with the internal PHY, otherwise, work
8125 * with the external PHY
8126 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008127 ext_phy_config =
8128 SHMEM_RD(bp,
8129 dev_info.port_hw_config[port].external_phy_config);
8130 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008131 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008132 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008133
8134 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8135 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8136 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008137 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008138
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008139 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8140 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00008141 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008142 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8143 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00008144
8145#ifdef BCM_CNIC
8146 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8147 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8148 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8149#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008150}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008151
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008152static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8153{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008154 int func = BP_ABS_FUNC(bp);
8155 int vn;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008156 u32 val, val2;
8157 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008158
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008159 bnx2x_get_common_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008160
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008161 if (CHIP_IS_E1x(bp)) {
8162 bp->common.int_block = INT_BLOCK_HC;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008163
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008164 bp->igu_dsb_id = DEF_SB_IGU_ID;
8165 bp->igu_base_sb = 0;
8166 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
8167 } else {
8168 bp->common.int_block = INT_BLOCK_IGU;
8169 val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
8170 if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
8171 DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
8172 bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
8173 } else
8174 DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");
8175
8176 bnx2x_get_igu_cam_info(bp);
8177
8178 }
8179 DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
8180 bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);
8181
8182 /*
8183 * Initialize MF configuration
8184 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008185
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008186 bp->mf_ov = 0;
8187 bp->mf_mode = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008188 vn = BP_E1HVN(bp);
8189 if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
8190 if (SHMEM2_HAS(bp, mf_cfg_addr))
8191 bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
8192 else
8193 bp->common.mf_cfg_base = bp->common.shmem_base +
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008194 offsetof(struct shmem_region, func_mb) +
8195 E1H_FUNC_MAX * sizeof(struct drv_func_mb);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008196 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008197 MF_CFG_RD(bp, func_mf_config[func].config);
8198
8199 val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
Eilon Greenstein3196a882008-08-13 15:58:49 -07008200 FUNC_MF_CFG_E1HOV_TAG_MASK);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008201 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008202 bp->mf_mode = 1;
Eilon Greenstein2691d512009-08-12 08:22:08 +00008203 BNX2X_DEV_INFO("%s function mode\n",
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008204 IS_MF(bp) ? "multi" : "single");
Eilon Greenstein2691d512009-08-12 08:22:08 +00008205
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008206 if (IS_MF(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008207 val = (MF_CFG_RD(bp, func_mf_config[func].
Eilon Greenstein2691d512009-08-12 08:22:08 +00008208 e1hov_tag) &
8209 FUNC_MF_CFG_E1HOV_TAG_MASK);
8210 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008211 bp->mf_ov = val;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008212 BNX2X_DEV_INFO("MF OV for func %d is %d "
Eilon Greenstein2691d512009-08-12 08:22:08 +00008213 "(0x%04x)\n",
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008214 func, bp->mf_ov, bp->mf_ov);
Eilon Greenstein2691d512009-08-12 08:22:08 +00008215 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008216 BNX2X_ERROR("No valid MF OV for func %d,"
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008217 " aborting\n", func);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008218 rc = -EPERM;
8219 }
Eilon Greenstein2691d512009-08-12 08:22:08 +00008220 } else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008221 if (BP_VN(bp)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008222 BNX2X_ERROR("VN %d in single function mode,"
8223 " aborting\n", BP_E1HVN(bp));
Eilon Greenstein2691d512009-08-12 08:22:08 +00008224 rc = -EPERM;
8225 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008226 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008227 }
8228
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008229 /* adjust igu_sb_cnt to MF for E1x */
8230 if (CHIP_IS_E1x(bp) && IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008231 bp->igu_sb_cnt /= E1HVN_MAX;
8232
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008233 /*
8234	 * adjust E2 sb count: to be removed once the FW supports
8235	 * more than 16 L2 clients
8236 */
8237#define MAX_L2_CLIENTS 16
8238 if (CHIP_IS_E2(bp))
8239 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
8240 MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));
8241
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008242 if (!BP_NOMCP(bp)) {
8243 bnx2x_get_port_hwinfo(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008244
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008245 bp->fw_seq =
8246 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
8247 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008248 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8249 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008250
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00008251 if (IS_MF(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008252 val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
8253 val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
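		/* mac_upper carries station-address bytes 0-1 and mac_lower
		 * bytes 2-5, most significant byte first (see the shifts
		 * below). */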
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008254 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8255 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8256 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8257 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8258 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8259 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8260 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8261 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8262 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8263 ETH_ALEN);
8264 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8265 ETH_ALEN);
8266 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008267
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008268 return rc;
8269 }
8270
8271 if (BP_NOMCP(bp)) {
8272 /* only supposed to happen on emulation/FPGA */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008273 BNX2X_ERROR("warning: random MAC workaround active\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008274 random_ether_addr(bp->dev->dev_addr);
8275 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8276 }
8277
8278 return rc;
8279}
8280
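/* Sketch of the flow below: read the PCI VPD, locate the read-only LRDT
 * section, and when the manufacturer-ID keyword matches the Dell vendor
 * ID, copy the VENDOR0 keyword's payload into bp->fw_ver.
 */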
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00008281static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
8282{
8283 int cnt, i, block_end, rodi;
8284 char vpd_data[BNX2X_VPD_LEN+1];
8285 char str_id_reg[VENDOR_ID_LEN+1];
8286 char str_id_cap[VENDOR_ID_LEN+1];
8287 u8 len;
8288
8289 cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
8290 memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
8291
8292 if (cnt < BNX2X_VPD_LEN)
8293 goto out_not_found;
8294
8295 i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
8296 PCI_VPD_LRDT_RO_DATA);
8297 if (i < 0)
8298 goto out_not_found;
8299
8300
8301 block_end = i + PCI_VPD_LRDT_TAG_SIZE +
8302 pci_vpd_lrdt_size(&vpd_data[i]);
8303
8304 i += PCI_VPD_LRDT_TAG_SIZE;
8305
8306 if (block_end > BNX2X_VPD_LEN)
8307 goto out_not_found;
8308
8309 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8310 PCI_VPD_RO_KEYWORD_MFR_ID);
8311 if (rodi < 0)
8312 goto out_not_found;
8313
8314 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8315
8316 if (len != VENDOR_ID_LEN)
8317 goto out_not_found;
8318
8319 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8320
8321 /* vendor specific info */
8322 snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
8323 snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
8324 if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
8325 !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
8326
8327 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
8328 PCI_VPD_RO_KEYWORD_VENDOR0);
8329 if (rodi >= 0) {
8330 len = pci_vpd_info_field_size(&vpd_data[rodi]);
8331
8332 rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
8333
8334 if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
8335 memcpy(bp->fw_ver, &vpd_data[rodi], len);
8336 bp->fw_ver[len] = ' ';
8337 }
8338 }
8339 return;
8340 }
8341out_not_found:
8342 return;
8343}
8344
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008345static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8346{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008347 int func;
Eilon Greenstein87942b42009-02-12 08:36:49 +00008348 int timer_interval;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008349 int rc;
8350
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008351 /* Disable interrupt handling until HW is initialized */
8352 atomic_set(&bp->intr_sem, 1);
Eilon Greensteine1510702009-07-21 05:47:41 +00008353 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07008354
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008355 mutex_init(&bp->port.phy_mutex);
Eilon Greensteinc4ff7cb2009-10-15 00:18:27 -07008356 mutex_init(&bp->fw_mb_mutex);
David S. Millerbb7e95c2010-07-27 21:01:35 -07008357 spin_lock_init(&bp->stats_lock);
Michael Chan993ac7b2009-10-10 13:46:56 +00008358#ifdef BCM_CNIC
8359 mutex_init(&bp->cnic_mutex);
8360#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008361
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08008362 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008363 INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008364
8365 rc = bnx2x_get_hwinfo(bp);
8366
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008367 if (!rc)
8368 rc = bnx2x_alloc_mem_bp(bp);
8369
Vladislav Zolotarov34f24c72010-04-19 01:13:23 +00008370 bnx2x_read_fwinfo(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008371
8372 func = BP_FUNC(bp);
8373
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008374 /* need to reset chip if undi was active */
8375 if (!BP_NOMCP(bp))
8376 bnx2x_undi_unload(bp);
8377
8378 if (CHIP_REV_IS_FPGA(bp))
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008379 dev_err(&bp->pdev->dev, "FPGA detected\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008380
8381 if (BP_NOMCP(bp) && (func == 0))
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008382 dev_err(&bp->pdev->dev, "MCP disabled, "
8383 "must load devices in order!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008384
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008385 /* Set multi queue mode */
Eilon Greenstein8badd272009-02-12 08:36:15 +00008386 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8387 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008388 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
8389 "requested is not MSI-X\n");
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008390 multi_mode = ETH_RSS_MODE_DISABLED;
8391 }
8392 bp->multi_mode = multi_mode;
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +00008393 bp->int_mode = int_mode;
Eilon Greenstein555f6c72009-02-12 08:36:11 +00008394
Dmitry Kravkov4fd89b7a2010-04-01 19:45:34 -07008395 bp->dev->features |= NETIF_F_GRO;
8396
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008397 /* Set TPA flags */
8398 if (disable_tpa) {
8399 bp->flags &= ~TPA_ENABLE_FLAG;
8400 bp->dev->features &= ~NETIF_F_LRO;
8401 } else {
8402 bp->flags |= TPA_ENABLE_FLAG;
8403 bp->dev->features |= NETIF_F_LRO;
8404 }
Dmitry Kravkov5d7cd492010-07-27 12:32:19 +00008405 bp->disable_tpa = disable_tpa;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008406
Eilon Greensteina18f5122009-08-12 08:23:26 +00008407 if (CHIP_IS_E1(bp))
8408 bp->dropless_fc = 0;
8409 else
8410 bp->dropless_fc = dropless_fc;
8411
Eilon Greenstein8d5726c2009-02-12 08:37:19 +00008412 bp->mrrs = mrrs;
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07008413
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008414 bp->tx_ring_size = MAX_TX_AVAIL;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008415
8416 bp->rx_csum = 1;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008417
Eilon Greenstein7d323bf2009-11-09 06:09:35 +00008418 /* make sure that the numbers are in the right granularity */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008419 bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
8420 bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008421
Eilon Greenstein87942b42009-02-12 08:36:49 +00008422 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8423 bp->current_interval = (poll ? poll : timer_interval);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008424
8425 init_timer(&bp->timer);
8426 bp->timer.expires = jiffies + bp->current_interval;
8427 bp->timer.data = (unsigned long) bp;
8428 bp->timer.function = bnx2x_timer;
8429
8430 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008431}
8432
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008433
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008434/****************************************************************************
8435* General service functions
8436****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008437
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008438/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008439static int bnx2x_open(struct net_device *dev)
8440{
8441 struct bnx2x *bp = netdev_priv(dev);
8442
Eilon Greenstein6eccabb2009-01-22 03:37:48 +00008443 netif_carrier_off(dev);
8444
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008445 bnx2x_set_power_state(bp, PCI_D0);
8446
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008447 if (!bnx2x_reset_is_done(bp)) {
8448 do {
8449			/* Reset the MCP mailbox sequence if there is an
8450			 * ongoing recovery
8451			 */
8452 bp->fw_seq = 0;
8453
8454			/* If it's the first function to load and reset done
8455			 * is still not cleared, a recovery may be pending.
8456			 * We don't check the attention state here because it
8457			 * may already have been cleared by a "common" reset,
8458			 * but we shall proceed with "process kill" anyway.
8459			 */
8460 if ((bnx2x_get_load_cnt(bp) == 0) &&
8461 bnx2x_trylock_hw_lock(bp,
8462 HW_LOCK_RESOURCE_RESERVED_08) &&
8463 (!bnx2x_leader_reset(bp))) {
8464 DP(NETIF_MSG_HW, "Recovered in open\n");
8465 break;
8466 }
8467
8468 bnx2x_set_power_state(bp, PCI_D3hot);
8469
8470			printk(KERN_ERR "%s: Recovery flow hasn't been properly"
8471			       " completed yet. Try again later. If you still see"
8472			       " this message after a few retries then a power"
8473			       " cycle is required.\n", bp->dev->name);
8474
8475 return -EAGAIN;
8476 } while (0);
8477 }
8478
8479 bp->recovery_state = BNX2X_RECOVERY_DONE;
8480
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008481 return bnx2x_nic_load(bp, LOAD_OPEN);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008482}
8483
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008484/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008485static int bnx2x_close(struct net_device *dev)
8486{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008487 struct bnx2x *bp = netdev_priv(dev);
8488
8489 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008490 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008491 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008492
8493 return 0;
8494}
8495
Eilon Greensteinf5372252009-02-12 08:38:30 +00008496/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008497void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008498{
8499 struct bnx2x *bp = netdev_priv(dev);
8500 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8501 int port = BP_PORT(bp);
8502
8503 if (bp->state != BNX2X_STATE_OPEN) {
8504 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8505 return;
8506 }
8507
8508 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8509
8510 if (dev->flags & IFF_PROMISC)
8511 rx_mode = BNX2X_RX_MODE_PROMISC;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008512 else if ((dev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00008513 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8514 CHIP_IS_E1(bp)))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008515 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008516 else { /* some multicasts */
8517 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008518 /*
8519 * set mc list, do not wait as wait implies sleep
8520 * and set_rx_mode can be invoked from non-sleepable
8521 * context
8522 */
8523 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8524 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8525 BNX2X_MAX_MULTICAST*(1 + port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008526
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008527 bnx2x_set_e1_mc_list(bp, offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008528 } else { /* E1H */
8529 /* Accept one or more multicasts */
Jiri Pirko22bedad32010-04-01 21:22:57 +00008530 struct netdev_hw_addr *ha;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008531 u32 mc_filter[MC_HASH_SIZE];
8532 u32 crc, bit, regidx;
8533 int i;
8534
8535 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8536
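			/* Each multicast MAC is hashed with CRC32c; the top
			 * eight CRC bits pick one of MC_HASH_SIZE * 32
			 * filter bits (regidx selects the register, bit the
			 * position within it). */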
Jiri Pirko22bedad32010-04-01 21:22:57 +00008537 netdev_for_each_mc_addr(ha, dev) {
Johannes Berg7c510e42008-10-27 17:47:26 -07008538 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008539 bnx2x_mc_addr(ha));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008540
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008541 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8542 ETH_ALEN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008543 bit = (crc >> 24) & 0xff;
8544 regidx = bit >> 5;
8545 bit &= 0x1f;
8546 mc_filter[regidx] |= (1 << bit);
8547 }
8548
8549 for (i = 0; i < MC_HASH_SIZE; i++)
8550 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8551 mc_filter[i]);
8552 }
8553 }
8554
8555 bp->rx_mode = rx_mode;
8556 bnx2x_set_storm_rx_mode(bp);
8557}
8558
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008559/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008560static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8561 int devad, u16 addr)
8562{
8563 struct bnx2x *bp = netdev_priv(netdev);
8564 u16 value;
8565 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008566
8567 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8568 prtad, devad, addr);
8569
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008570 /* The HW expects different devad if CL22 is used */
8571 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8572
8573 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008574 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008575 bnx2x_release_phy_lock(bp);
8576 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8577
8578 if (!rc)
8579 rc = value;
8580 return rc;
8581}
8582
8583/* called with rtnl_lock */
8584static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8585 u16 addr, u16 value)
8586{
8587 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008588 int rc;
8589
8590 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8591 " value 0x%x\n", prtad, devad, addr, value);
8592
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008593 /* The HW expects different devad if CL22 is used */
8594 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8595
8596 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008597 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008598 bnx2x_release_phy_lock(bp);
8599 return rc;
8600}
8601
8602/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008603static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8604{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008605 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008606 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008607
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008608 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8609 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008610
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008611 if (!netif_running(dev))
8612 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008613
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008614 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008615}
8616
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00008617#ifdef CONFIG_NET_POLL_CONTROLLER
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008618static void poll_bnx2x(struct net_device *dev)
8619{
8620 struct bnx2x *bp = netdev_priv(dev);
8621
8622 disable_irq(bp->pdev->irq);
8623 bnx2x_interrupt(bp->pdev->irq, dev);
8624 enable_irq(bp->pdev->irq);
8625}
8626#endif
8627
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08008628static const struct net_device_ops bnx2x_netdev_ops = {
8629 .ndo_open = bnx2x_open,
8630 .ndo_stop = bnx2x_close,
8631 .ndo_start_xmit = bnx2x_start_xmit,
Eilon Greenstein356e2382009-02-12 08:38:32 +00008632 .ndo_set_multicast_list = bnx2x_set_rx_mode,
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08008633 .ndo_set_mac_address = bnx2x_change_mac_addr,
8634 .ndo_validate_addr = eth_validate_addr,
8635 .ndo_do_ioctl = bnx2x_ioctl,
8636 .ndo_change_mtu = bnx2x_change_mtu,
8637 .ndo_tx_timeout = bnx2x_tx_timeout,
Alexey Dobriyan257ddbd2010-01-27 10:17:41 +00008638#ifdef CONFIG_NET_POLL_CONTROLLER
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08008639 .ndo_poll_controller = poll_bnx2x,
8640#endif
8641};
8642
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008643static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
8644 struct net_device *dev)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008645{
8646 struct bnx2x *bp;
8647 int rc;
8648
8649 SET_NETDEV_DEV(dev, &pdev->dev);
8650 bp = netdev_priv(dev);
8651
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008652 bp->dev = dev;
8653 bp->pdev = pdev;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008654 bp->flags = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008655 bp->pf_num = PCI_FUNC(pdev->devfn);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008656
8657 rc = pci_enable_device(pdev);
8658 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008659 dev_err(&bp->pdev->dev,
8660 "Cannot enable PCI device, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008661 goto err_out;
8662 }
8663
8664 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008665 dev_err(&bp->pdev->dev,
8666 "Cannot find PCI device base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008667 rc = -ENODEV;
8668 goto err_out_disable;
8669 }
8670
8671 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008672 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
8673 " base address, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008674 rc = -ENODEV;
8675 goto err_out_disable;
8676 }
8677
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008678 if (atomic_read(&pdev->enable_cnt) == 1) {
8679 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
8680 if (rc) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008681 dev_err(&bp->pdev->dev,
8682 "Cannot obtain PCI resources, aborting\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008683 goto err_out_disable;
8684 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008685
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008686 pci_set_master(pdev);
8687 pci_save_state(pdev);
8688 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008689
8690 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8691 if (bp->pm_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008692 dev_err(&bp->pdev->dev,
8693 "Cannot find power management capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008694 rc = -EIO;
8695 goto err_out_release;
8696 }
8697
8698 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
8699 if (bp->pcie_cap == 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008700 dev_err(&bp->pdev->dev,
8701 "Cannot find PCI Express capability, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008702 rc = -EIO;
8703 goto err_out_release;
8704 }
8705
FUJITA Tomonori1a983142010-04-04 01:51:03 +00008706 if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008707 bp->flags |= USING_DAC_FLAG;
FUJITA Tomonori1a983142010-04-04 01:51:03 +00008708 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008709 dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
8710 " failed, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008711 rc = -EIO;
8712 goto err_out_release;
8713 }
8714
FUJITA Tomonori1a983142010-04-04 01:51:03 +00008715 } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008716 dev_err(&bp->pdev->dev,
8717 "System does not support DMA, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008718 rc = -EIO;
8719 goto err_out_release;
8720 }
8721
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008722 dev->mem_start = pci_resource_start(pdev, 0);
8723 dev->base_addr = dev->mem_start;
8724 dev->mem_end = pci_resource_end(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008725
8726 dev->irq = pdev->irq;
8727
Arjan van de Ven275f1652008-10-20 21:42:39 -07008728 bp->regview = pci_ioremap_bar(pdev, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008729 if (!bp->regview) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008730 dev_err(&bp->pdev->dev,
8731 "Cannot map register space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008732 rc = -ENOMEM;
8733 goto err_out_release;
8734 }
8735
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008736 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008737 min_t(u64, BNX2X_DB_SIZE(bp),
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008738 pci_resource_len(pdev, 2)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008739 if (!bp->doorbells) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008740 dev_err(&bp->pdev->dev,
8741 "Cannot map doorbell space, aborting\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008742 rc = -ENOMEM;
8743 goto err_out_unmap;
8744 }
8745
8746 bnx2x_set_power_state(bp, PCI_D0);
8747
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008748 /* clean indirect addresses */
8749 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
8750 PCICFG_VENDOR_ID_OFFSET);
8751 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
8752 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
8753 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
8754 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008755
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00008756 /* Reset the load counter */
8757 bnx2x_clear_load_cnt(bp);
8758
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008759 dev->watchdog_timeo = TX_TIMEOUT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008760
Stephen Hemmingerc64213c2008-11-21 17:36:04 -08008761 dev->netdev_ops = &bnx2x_netdev_ops;
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008762 bnx2x_set_ethtool_ops(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008763 dev->features |= NETIF_F_SG;
8764 dev->features |= NETIF_F_HW_CSUM;
8765 if (bp->flags & USING_DAC_FLAG)
8766 dev->features |= NETIF_F_HIGHDMA;
Eilon Greenstein5316bc02009-07-21 05:47:43 +00008767 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8768 dev->features |= NETIF_F_TSO6;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008769 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
Eilon Greenstein5316bc02009-07-21 05:47:43 +00008770
8771 dev->vlan_features |= NETIF_F_SG;
8772 dev->vlan_features |= NETIF_F_HW_CSUM;
8773 if (bp->flags & USING_DAC_FLAG)
8774 dev->vlan_features |= NETIF_F_HIGHDMA;
8775 dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
8776 dev->vlan_features |= NETIF_F_TSO6;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008777
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008778 /* get_port_hwinfo() will set prtad and mmds properly */
8779 bp->mdio.prtad = MDIO_PRTAD_NONE;
8780 bp->mdio.mmds = 0;
8781 bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
8782 bp->mdio.dev = dev;
8783 bp->mdio.mdio_read = bnx2x_mdio_read;
8784 bp->mdio.mdio_write = bnx2x_mdio_write;
8785
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008786 return 0;
8787
8788err_out_unmap:
8789 if (bp->regview) {
8790 iounmap(bp->regview);
8791 bp->regview = NULL;
8792 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008793 if (bp->doorbells) {
8794 iounmap(bp->doorbells);
8795 bp->doorbells = NULL;
8796 }
8797
8798err_out_release:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008799 if (atomic_read(&pdev->enable_cnt) == 1)
8800 pci_release_regions(pdev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008801
8802err_out_disable:
8803 pci_disable_device(pdev);
8804 pci_set_drvdata(pdev, NULL);
8805
8806err_out:
8807 return rc;
8808}
8809
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008810static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8811 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08008812{
8813 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8814
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008815 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8816
8817 /* return value of 1=2.5GHz 2=5GHz */
8818 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08008819}
8820
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008821static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008822{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008823 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008824 struct bnx2x_fw_file_hdr *fw_hdr;
8825 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008826 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008827 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008828 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008829 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008830
8831 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8832 return -EINVAL;
8833
8834 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8835 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8836
8837 /* Make sure none of the offsets and sizes make us read beyond
8838 * the end of the firmware data */
8839 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8840 offset = be32_to_cpu(sections[i].offset);
8841 len = be32_to_cpu(sections[i].len);
8842 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008843 dev_err(&bp->pdev->dev,
8844 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008845 return -EINVAL;
8846 }
8847 }
8848
8849 /* Likewise for the init_ops offsets */
8850 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8851 ops_offsets = (u16 *)(firmware->data + offset);
8852 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8853
8854 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8855 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008856 dev_err(&bp->pdev->dev,
8857 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008858 return -EINVAL;
8859 }
8860 }
8861
8862 /* Check FW version */
8863 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8864 fw_ver = firmware->data + offset;
8865 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8866 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8867 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8868 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008869 dev_err(&bp->pdev->dev,
8870 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008871 fw_ver[0], fw_ver[1], fw_ver[2],
8872 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8873 BCM_5710_FW_MINOR_VERSION,
8874 BCM_5710_FW_REVISION_VERSION,
8875 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008876 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008877 }
8878
8879 return 0;
8880}
8881
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008882static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008883{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008884 const __be32 *source = (const __be32 *)_source;
8885 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008886 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008887
8888 for (i = 0; i < n/4; i++)
8889 target[i] = be32_to_cpu(source[i]);
8890}
8891
8892/*
8893 Ops array is stored in the following format:
8894 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8895 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008896static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008897{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008898 const __be32 *source = (const __be32 *)_source;
8899 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008900 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008901
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008902 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008903 tmp = be32_to_cpu(source[j]);
8904 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008905 target[i].offset = tmp & 0xffffff;
8906 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008907 }
8908}
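/* Worked example (assumed input): the big-endian byte sequence
 * {0x05,0x01,0x02,0x03, 0x11,0x22,0x33,0x44} decodes to
 * op = 0x05, offset = 0x010203, raw_data = 0x11223344.
 */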
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008909
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008910/**
8911 * IRO array is stored in the following format:
8912 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8913 */
8914static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8915{
8916 const __be32 *source = (const __be32 *)_source;
8917 struct iro *target = (struct iro *)_target;
8918 u32 i, j, tmp;
8919
8920 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8921 target[i].base = be32_to_cpu(source[j]);
8922 j++;
8923 tmp = be32_to_cpu(source[j]);
8924 target[i].m1 = (tmp >> 16) & 0xffff;
8925 target[i].m2 = tmp & 0xffff;
8926 j++;
8927 tmp = be32_to_cpu(source[j]);
8928 target[i].m3 = (tmp >> 16) & 0xffff;
8929 target[i].size = tmp & 0xffff;
8930 j++;
8931 }
8932}
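/* Worked example (assumed input): the three big-endian words
 * 0x00001000 0x00040008 0x000c0020 decode to base = 0x1000,
 * m1 = 4, m2 = 8, m3 = 0xc, size = 0x20. Note the code reads
 * 'base' as a full 32-bit word even though the format comment
 * above calls it 24-bit.
 */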
8933
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008934static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008935{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008936 const __be16 *source = (const __be16 *)_source;
8937 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008938 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008939
8940 for (i = 0; i < n/2; i++)
8941 target[i] = be16_to_cpu(source[i]);
8942}
8943
Joe Perches7995c642010-02-17 15:01:52 +00008944#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
8945do { \
8946 u32 len = be32_to_cpu(fw_hdr->arr.len); \
8947 bp->arr = kmalloc(len, GFP_KERNEL); \
8948 if (!bp->arr) { \
8949 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
8950 goto lbl; \
8951 } \
8952 func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
8953 (u8 *)bp->arr, len); \
8954} while (0)
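/* BNX2X_ALLOC_AND_SET expects 'bp' and 'fw_hdr' in the caller's scope;
 * on allocation failure it jumps to 'lbl', and on success 'func' converts
 * the big-endian firmware section into host order in the new bp->arr.
 */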
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008955
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008956int bnx2x_init_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008957{
Ben Hutchings45229b42009-11-07 11:53:39 +00008958 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008959 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +00008960 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008961
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008962 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008963 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008964 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008965 fw_file_name = FW_FILE_NAME_E1H;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008966 else if (CHIP_IS_E2(bp))
8967 fw_file_name = FW_FILE_NAME_E2;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008968 else {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008969 BNX2X_ERR("Unsupported chip revision\n");
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008970 return -EINVAL;
8971 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008972
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008973 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008974
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008975 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008976 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008977 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008978 goto request_firmware_exit;
8979 }
8980
8981 rc = bnx2x_check_firmware(bp);
8982 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008983 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008984 goto request_firmware_exit;
8985 }
8986
8987 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8988
8989 /* Initialize the pointers to the init arrays */
8990 /* Blob */
8991 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8992
8993 /* Opcodes */
8994 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8995
8996 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008997 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8998 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008999
9000 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +00009001 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9002 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
9003 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
9004 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
9005 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9006 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
9007 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
9008 be32_to_cpu(fw_hdr->usem_pram_data.offset);
9009 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9010 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
9011 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
9012 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
9013 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
9014 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
9015 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
9016 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009017 /* IRO */
9018 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009019
9020 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00009021
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009022iro_alloc_err:
9023 kfree(bp->init_ops_offsets);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009024init_offsets_alloc_err:
9025 kfree(bp->init_ops);
9026init_ops_alloc_err:
9027 kfree(bp->init_data);
9028request_firmware_exit:
9029 release_firmware(bp->firmware);
9030
9031 return rc;
9032}
9033
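/* Round the L2 (plus CNIC, when built in) context count up to a
 * QM_CID_ROUND multiple -- presumably the allocation granularity of the
 * queue manager's CID space.
 */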
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009034static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
9035{
9036 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009037
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009038#ifdef BCM_CNIC
9039 cid_count += CNIC_CID_MAX;
9040#endif
9041 return roundup(cid_count, QM_CID_ROUND);
9042}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009043
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009044static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9045 const struct pci_device_id *ent)
9046{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009047 struct net_device *dev = NULL;
9048 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009049 int pcie_width, pcie_speed;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009050 int rc, cid_count;
9051
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009052 switch (ent->driver_data) {
9053 case BCM57710:
9054 case BCM57711:
9055 case BCM57711E:
9056 cid_count = FP_SB_MAX_E1x;
9057 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009058
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009059 case BCM57712:
9060 case BCM57712E:
9061 cid_count = FP_SB_MAX_E2;
9062 break;
9063
9064 default:
9065 pr_err("Unknown board_type (%ld), aborting\n",
9066 ent->driver_data);
9067		return -ENODEV;
9068 }
9069
9070 cid_count += CNIC_CONTEXT_USE;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009071
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009072 /* dev zeroed in init_etherdev */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009073 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009074 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009075 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009076 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009077 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009078
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009079 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00009080 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009081
Eilon Greensteindf4770de2009-08-12 08:23:28 +00009082 pci_set_drvdata(pdev, dev);
9083
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009084 bp->l2_cid_count = cid_count;
9085
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009086 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009087 if (rc < 0) {
9088 free_netdev(dev);
9089 return rc;
9090 }
9091
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009092 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009093 if (rc)
9094 goto init_one_exit;
9095
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009096 /* calc qm_cid_count */
9097 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9098
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009099 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009100 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009101 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009102 goto init_one_exit;
9103 }
9104
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009105	/* Configure interrupt mode: try to enable MSI-X/MSI if
9106 * needed, set bp->num_queues appropriately.
9107 */
9108 bnx2x_set_int_mode(bp);
9109
9110 /* Add all NAPI objects */
9111 bnx2x_add_all_napi(bp);
9112
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009113 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009114
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009115 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9116 " IRQ %d, ", board_info[ent->driver_data].name,
9117 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009118 pcie_width,
9119 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9120 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9121 "5GHz (Gen2)" : "2.5GHz",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009122 dev->base_addr, bp->pdev->irq);
9123 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00009124
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009125 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009126
9127init_one_exit:
9128 if (bp->regview)
9129 iounmap(bp->regview);
9130
9131 if (bp->doorbells)
9132 iounmap(bp->doorbells);
9133
9134 free_netdev(dev);
9135
9136 if (atomic_read(&pdev->enable_cnt) == 1)
9137 pci_release_regions(pdev);
9138
9139 pci_disable_device(pdev);
9140 pci_set_drvdata(pdev, NULL);
9141
9142 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009143}
9144
9145static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
9146{
9147 struct net_device *dev = pci_get_drvdata(pdev);
Eliezer Tamir228241e2008-02-28 11:56:57 -08009148 struct bnx2x *bp;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009149
Eliezer Tamir228241e2008-02-28 11:56:57 -08009150 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009151 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
Eliezer Tamir228241e2008-02-28 11:56:57 -08009152 return;
9153 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08009154 bp = netdev_priv(dev);
9155
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009156 unregister_netdev(dev);
9157
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009158 /* Delete all NAPI objects */
9159 bnx2x_del_all_napi(bp);
9160
9161 /* Disable MSI/MSI-X */
9162 bnx2x_disable_msi(bp);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009163
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009164 /* Make sure RESET task is not scheduled before continuing */
9165 cancel_delayed_work_sync(&bp->reset_task);
9166
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009167 if (bp->regview)
9168 iounmap(bp->regview);
9169
9170 if (bp->doorbells)
9171 iounmap(bp->doorbells);
9172
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009173 bnx2x_free_mem_bp(bp);
9174
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009175 free_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009176
9177 if (atomic_read(&pdev->enable_cnt) == 1)
9178 pci_release_regions(pdev);
9179
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009180 pci_disable_device(pdev);
9181 pci_set_drvdata(pdev, NULL);
9182}
9183
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009184static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9185{
9186 int i;
9187
9188 bp->state = BNX2X_STATE_ERROR;
9189
9190 bp->rx_mode = BNX2X_RX_MODE_NONE;
9191
9192 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009193 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009194
9195 del_timer_sync(&bp->timer);
9196 bp->stats_state = STATS_STATE_DISABLED;
9197 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9198
9199 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009200 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009201
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009202 /* Free SKBs, SGEs, TPA pool and driver internals */
9203 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009204
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00009205 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009206 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009207
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009208 bnx2x_free_mem(bp);
9209
9210 bp->state = BNX2X_STATE_CLOSED;
9211
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009212 return 0;
9213}
9214
9215static void bnx2x_eeh_recover(struct bnx2x *bp)
9216{
9217 u32 val;
9218
9219 mutex_init(&bp->port.phy_mutex);
9220
9221 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
9222 bp->link_params.shmem_base = bp->common.shmem_base;
9223 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
9224
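	/* A shmem base of zero, or one outside the 0xA0000-0xBFFFF window,
	 * is taken to mean the MCP is not running. */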
9225 if (!bp->common.shmem_base ||
9226 (bp->common.shmem_base < 0xA0000) ||
9227 (bp->common.shmem_base >= 0xC0000)) {
9228 BNX2X_DEV_INFO("MCP not active\n");
9229 bp->flags |= NO_MCP_FLAG;
9230 return;
9231 }
9232
9233 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
9234 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9235 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
9236 BNX2X_ERR("BAD MCP validity signature\n");
9237
9238 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009239 bp->fw_seq =
9240 (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
9241 DRV_MSG_SEQ_NUMBER_MASK);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009242 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
9243 }
9244}
9245
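/* PCI Error Recovery (EEH) callbacks. The PCI core invokes these in
 * order: error_detected, then slot_reset, then resume (see the kernel's
 * PCI error-recovery documentation for the full state machine).
 */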
/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current PCI connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

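/* Module init creates the single-threaded slow-path workqueue before
 * registering the PCI driver, so it already exists by the time probe
 * runs and work can be queued; the error path unwinds in the opposite
 * order.
 */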
static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

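/* kwqes submitted by CNIC are staged in the cnic_kwq shadow ring and
 * drained into the hardware slow-path queue (SPQ) here, subject to two
 * separate credit pools: spq_left for L2/COMMON ramrods, and
 * cnic_spq_pending (bounded by max_kwqe_pending) for L5/iSCSI work.
 */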
/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* No more than 8 L2 and COMMON SPEs and no more than 8 L5
		 * SPEs may be in flight at once.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

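/* Entry point exported to CNIC as cp->drv_submit_kwqes_16 (see
 * bnx2x_cnic_probe() below). An illustrative caller-side sketch, with
 * hypothetical names, of how a batch would be submitted:
 *
 *	struct kwqe_16 *wqes[] = { &my_kwqe };	(hypothetical kwqe)
 *	int posted = cp->drv_submit_kwqes_16(netdev, wqes, 1);
 *
 * The return value is the number of kwqes actually staged, which may
 * be less than 'count' if the shadow ring fills up.
 */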
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

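/* Two variants for delivering control events to CNIC:
 * bnx2x_cnic_ctl_send() serializes against (un)registration with
 * cnic_mutex and is meant for process context, while the _bh variant
 * relies on RCU so it is safe from softirq context, where sleeping on
 * the mutex is not allowed.
 */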
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

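/* Dispatcher for control commands that CNIC issues through the
 * cp->drv_ctl hook; each case below unpacks the member of the
 * drv_ctl_info union that corresponds to its command.
 */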
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring. Accept all
		 * multicasts because this is the only way for the UIO
		 * Client to receive them: in non-promiscuous mode only
		 * one Client per function gets multicast packets (the
		 * leading Client, in our case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on the iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

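/* Export to CNIC the interrupt resources it should attach to: MSI-X
 * table slot 1 (slot 0 stays with the driver's own slow-path interrupt)
 * plus the CNIC and default status blocks.
 */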
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

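/* Probe hook through which CNIC binds to this device: fills the shared
 * cnic_eth_dev with chip resources (BAR mappings, ILT context-table
 * geometry, the iSCSI L2 client id/CID) and the driver ops defined
 * above. Note that ctx_tbl_offset starts past bnx2x_cid_ilt_lines(bp),
 * i.e. past the ILT lines already claimed for the L2 CIDs.
 */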
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

#endif /* BCM_CNIC */