blob: 7a9556b5b55d38ab63e5b99a0ff1ea165d184f3a [file] [log] [blame]
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001/* bnx2x_main.c: Broadcom Everest network driver.
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002 *
Vladislav Zolotarov3359fce2010-02-17 13:35:01 -08003 * Copyright (c) 2007-2010 Broadcom Corporation
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
Eilon Greenstein24e3fce2008-06-12 14:30:28 -07009 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
10 * Written by: Eliezer Tamir
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020011 * Based on code from Michael Chan's bnx2 driver
12 * UDP CSUM errata workaround by Arik Gendelman
Eilon Greensteinca003922009-08-12 22:53:28 -070013 * Slowpath and fastpath rework by Vladislav Zolotarov
Eliezer Tamirc14423f2008-02-28 11:49:42 -080014 * Statistics and Link management by Yitchak Gertner
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020015 *
16 */
17
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020018#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/kernel.h>
21#include <linux/device.h> /* for dev_info() */
22#include <linux/timer.h>
23#include <linux/errno.h>
24#include <linux/ioport.h>
25#include <linux/slab.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020026#include <linux/interrupt.h>
27#include <linux/pci.h>
28#include <linux/init.h>
29#include <linux/netdevice.h>
30#include <linux/etherdevice.h>
31#include <linux/skbuff.h>
32#include <linux/dma-mapping.h>
33#include <linux/bitops.h>
34#include <linux/irq.h>
35#include <linux/delay.h>
36#include <asm/byteorder.h>
37#include <linux/time.h>
38#include <linux/ethtool.h>
39#include <linux/mii.h>
Eilon Greenstein0c6671b2009-01-14 21:26:51 -080040#include <linux/if_vlan.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020041#include <net/ip.h>
42#include <net/tcp.h>
43#include <net/checksum.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070044#include <net/ip6_checksum.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020045#include <linux/workqueue.h>
46#include <linux/crc32.h>
Eilon Greenstein34f80b02008-06-23 20:33:01 -070047#include <linux/crc32c.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020048#include <linux/prefetch.h>
49#include <linux/zlib.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020050#include <linux/io.h>
Ben Hutchings45229b42009-11-07 11:53:39 +000051#include <linux/stringify.h>
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020052
Dmitry Kravkovb0efbb92010-07-27 12:33:43 +000053#define BNX2X_MAIN
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020054#include "bnx2x.h"
55#include "bnx2x_init.h"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070056#include "bnx2x_init_ops.h"
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +000057#include "bnx2x_cmn.h"
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020058
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070059#include <linux/firmware.h>
60#include "bnx2x_fw_file_hdr.h"
61/* FW files */
Ben Hutchings45229b42009-11-07 11:53:39 +000062#define FW_FILE_VERSION \
63 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
64 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
65 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
66 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
Dmitry Kravkov560131f2010-10-06 03:18:47 +000067#define FW_FILE_NAME_E1 "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
68#define FW_FILE_NAME_E1H "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000069#define FW_FILE_NAME_E2 "bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -070070
Eilon Greenstein34f80b02008-06-23 20:33:01 -070071/* Time in jiffies before concluding the transmitter is hung */
72#define TX_TIMEOUT (5*HZ)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020073
Andrew Morton53a10562008-02-09 23:16:41 -080074static char version[] __devinitdata =
Eilon Greenstein34f80b02008-06-23 20:33:01 -070075 "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020076 DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
Eilon Greenstein24e3fce2008-06-12 14:30:28 -070078MODULE_AUTHOR("Eliezer Tamir");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000079MODULE_DESCRIPTION("Broadcom NetXtreme II "
80 "BCM57710/57711/57711E/57712/57712E Driver");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020081MODULE_LICENSE("GPL");
82MODULE_VERSION(DRV_MODULE_VERSION);
Ben Hutchings45229b42009-11-07 11:53:39 +000083MODULE_FIRMWARE(FW_FILE_NAME_E1);
84MODULE_FIRMWARE(FW_FILE_NAME_E1H);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +000085MODULE_FIRMWARE(FW_FILE_NAME_E2);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +020086
Eilon Greenstein555f6c72009-02-12 08:36:11 +000087static int multi_mode = 1;
88module_param(multi_mode, int, 0);
Eilon Greensteinca003922009-08-12 22:53:28 -070089MODULE_PARM_DESC(multi_mode, " Multi queue mode "
90 "(0 Disable; 1 Enable (default))");
91
Dmitry Kravkovd6214d72010-10-06 03:32:10 +000092int num_queues;
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +000093module_param(num_queues, int, 0);
94MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
95 " (default is as a number of CPUs)");
Eilon Greenstein555f6c72009-02-12 08:36:11 +000096
Eilon Greenstein19680c42008-08-13 15:47:33 -070097static int disable_tpa;
Eilon Greenstein19680c42008-08-13 15:47:33 -070098module_param(disable_tpa, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +000099MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000100
101static int int_mode;
102module_param(int_mode, int, 0);
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +0000103MODULE_PARM_DESC(int_mode, " Force interrupt mode other then MSI-X "
104 "(1 INT#x; 2 MSI)");
Eilon Greenstein8badd272009-02-12 08:36:15 +0000105
Eilon Greensteina18f5122009-08-12 08:23:26 +0000106static int dropless_fc;
107module_param(dropless_fc, int, 0);
108MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");
109
Eilon Greenstein9898f862009-02-12 08:38:27 +0000110static int poll;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200111module_param(poll, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000112MODULE_PARM_DESC(poll, " Use polling (for debug)");
Eilon Greenstein8d5726c2009-02-12 08:37:19 +0000113
114static int mrrs = -1;
115module_param(mrrs, int, 0);
116MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");
117
Eilon Greenstein9898f862009-02-12 08:38:27 +0000118static int debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200119module_param(debug, int, 0);
Eilon Greenstein9898f862009-02-12 08:38:27 +0000120MODULE_PARM_DESC(debug, " Default debug msglevel");
121
Eilon Greenstein1cf167f2009-01-14 21:22:18 -0800122static struct workqueue_struct *bnx2x_wq;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200123
124enum bnx2x_board_type {
125 BCM57710 = 0,
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700126 BCM57711 = 1,
127 BCM57711E = 2,
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000128 BCM57712 = 3,
129 BCM57712E = 4
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200130};
131
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700132/* indexed by board_type, above */
Andrew Morton53a10562008-02-09 23:16:41 -0800133static struct {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200134 char *name;
135} board_info[] __devinitdata = {
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700136 { "Broadcom NetXtreme II BCM57710 XGb" },
137 { "Broadcom NetXtreme II BCM57711 XGb" },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000138 { "Broadcom NetXtreme II BCM57711E XGb" },
139 { "Broadcom NetXtreme II BCM57712 XGb" },
140 { "Broadcom NetXtreme II BCM57712E XGb" }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200141};
142
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000143#ifndef PCI_DEVICE_ID_NX2_57712
144#define PCI_DEVICE_ID_NX2_57712 0x1662
145#endif
146#ifndef PCI_DEVICE_ID_NX2_57712E
147#define PCI_DEVICE_ID_NX2_57712E 0x1663
148#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -0700149
Alexey Dobriyana3aa1882010-01-07 11:58:11 +0000150static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
Eilon Greensteine4ed7112009-08-12 08:24:10 +0000151 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
152 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
153 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
Dmitry Kravkovf2e08992010-10-06 03:28:26 +0000154 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
155 { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200156 { 0 }
157};
158
159MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
160
161/****************************************************************************
162* General service functions
163****************************************************************************/
164
/* Write a 64-bit DMA address into STORM internal memory as two 32-bit
 * register writes: low dword at 'addr', high dword at 'addr' + 4.
 */
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}
171
/* Fill 'size' bytes of STORM internal memory starting at 'addr' with the
 * dword 'val', one 32-bit register write at a time.
 * NOTE(review): any remainder of size not divisible by 4 is silently
 * dropped by the size/4 loop bound — callers pass sizeof(struct), which
 * is presumably dword-aligned; confirm against the HSI structures.
 */
static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}
179
/* Zero the per-client statistics block for (port, stat_id) in USTORM
 * internal memory.
 */
static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
			USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

/* Zero the per-client statistics block for (port, stat_id) in TSTORM
 * internal memory.
 */
static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

/* Zero the per-client statistics block for (port, stat_id) in XSTORM
 * internal memory.
 */
static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
			XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}
212
213
/* Program the slow-path queue (SPQ) page base address for function
 * 'abs_fid' into XSTORM fast memory.
 */
static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
			XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Write the E1H outer-VLAN (ov) tag for function 'abs_fid' into XSTORM
 * internal memory.
 */
static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}
227
/* Copy the common function configuration 'tcfg' for function 'abs_fid'
 * into TSTORM internal memory.
 */
static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
			TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}
239
/* Copy the statistics indication flags for function 'abs_fid' into
 * XSTORM internal memory.
 */
static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Copy the statistics indication flags for function 'abs_fid' into
 * TSTORM internal memory.
 */
static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Copy the statistics indication flags for function 'abs_fid' into
 * USTORM internal memory.
 */
static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

/* Copy the statistics indication flags for function 'abs_fid' into
 * CSTORM internal memory.
 */
static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}
283
/* Program the DMA address of the ethernet statistics query buffer for
 * function 'abs_fid' into XSTORM internal memory.
 */
static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Same as above, for TSTORM. */
static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Same as above, for USTORM. */
static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

/* Same as above, for CSTORM. */
static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}
319
/* Record the VF-to-PF mapping for function 'abs_fid' in all four STORM
 * internal memories (X, C, T, U).
 */
static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

/* Set the function-enable flag for 'abs_fid' in all four STORM internal
 * memories (X, C, T, U).
 */
static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}
345
/* Copy the event-queue ring descriptor 'eq_data' for PF 'pfid' into
 * CSTORM internal memory.
 */
static inline void storm_memset_eq_data(struct bnx2x *bp,
				struct event_ring_data *eq_data,
				u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

/* Update the event-queue producer index for PF 'pfid' in CSTORM
 * internal memory.
 */
static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}
363
/* Set the host-coalescing timeout (in 'ticks') for one index of the
 * status block 'fw_sb_id' in CSTORM internal memory.  The index_data
 * layout differs between E2 and E1x chips, hence the offsetof() choice.
 */
static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}

/* Enable or disable host coalescing for one index of status block
 * 'fw_sb_id': read-modify-write the HC_ENABLED bit in the index flags
 * word in CSTORM internal memory ('disable' non-zero clears the bit).
 */
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}
402
/* used only at init
 * locking is done by mcp
 */
/* Indirect register write through PCI config space: program the GRC
 * address window, write the value, then restore the window to the
 * vendor-ID offset so normal config accesses keep working.
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
413
/* Indirect register read through PCI config space: counterpart of
 * bnx2x_reg_wr_ind().  Restores the GRC window after the read.
 */
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200425
/* Format fragments for DMAE debug prints (addresses differ between GRC
 * and PCI sources/destinations).
 */
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

/* Debug-print a DMAE command at message level 'msglvl'.  The printed
 * format depends on the source/destination encoded in dmae->opcode:
 * GRC addresses are dword addresses (byte address >> 2), PCI addresses
 * are printed as hi:lo 64-bit pairs.  No side effects beyond logging.
 */
void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		/* no destination (e.g. loopback/none) */
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x] len [%d * 4] "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}
494
/* Per-channel "go" registers: writing 1 kicks off the DMAE command
 * loaded into that channel's command memory.
 */
const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
/* Copy 'dmae' dword-by-dword into the command memory slot for channel
 * 'idx', then trigger execution via the channel's go register.  Does not
 * wait for completion — see bnx2x_issue_dmae_with_comp() for that.
 */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
517
/* Add completion configuration to a DMAE opcode: set the completion
 * destination type and enable completion writes.
 */
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

/* Clear the source-reset bit from a DMAE opcode. */
u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

/* Build a DMAE opcode for the given source/destination types, including
 * port/VN routing, error policy and endianness, and — when 'with_comp'
 * is set — completion configuration of type 'comp_type'.
 */
u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	/* route by port and E1H virtual network */
	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}
553
/* Initialize 'dmae' for a src_type -> dst_type transfer that reports
 * completion by writing DMAE_COMP_VAL to the slowpath wb_comp buffer
 * over PCI.  Caller fills in addresses and length afterwards.
 */
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}
568
/* issue a dmae command over the init-channel and wait for completion */
/* Posts 'dmae' on the init DMAE channel under bp->dmae_mutex and polls
 * the PCI completion word until DMAE_COMP_VAL appears.  Returns 0 on
 * success, DMAE_TIMEOUT if the poll budget (longer on emulation/FPGA,
 * per CHIP_REV_IS_SLOW) is exhausted, or DMAE_PCI_ERROR if the
 * completion word carries the PCI error flag.
 */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}
615
/* DMA 'len32' dwords from host memory at 'dma_addr' to GRC address
 * 'dst_addr' (byte address; the DMAE engine takes it >> 2).  Before the
 * DMAE engine is ready, falls back to indirect register writes.
 * NOTE(review): the fallback writes the slowpath wb_data buffer rather
 * than the caller's dma_addr buffer — presumably callers stage data
 * there pre-init; confirm against early-init call sites.
 */
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
645
/* DMA 'len32' dwords from GRC address 'src_addr' into the slowpath
 * wb_data buffer.  Before the DMAE engine is ready, falls back to
 * indirect register reads into the same buffer.
 */
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +0200676
/* DMA-write an arbitrarily long buffer ('len' dwords) from 'phys_addr'
 * to GRC address 'addr', splitting it into chunks no larger than the
 * per-command DMAE write limit.  'offset' advances in bytes on the host
 * side and in dwords on the GRC side (addr is a dword offset there).
 */
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	/* final (possibly short) chunk */
	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}
692
/* used only for slowpath so not inlined */
/* Write a 64-bit wide-bus value (hi:lo) to register 'reg' via DMAE. */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
/* Read a 64-bit wide-bus value from register 'reg' via DMAE.
 * Compiled out unless USE_WB_RD is defined.
 */
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
713
/* Scan the assert lists of all four STORM processors (X, T, C, U) in
 * internal memory and print every recorded firmware assert.  Each list
 * entry is four dwords; scanning stops at the first entry whose opcode
 * word equals COMMON_ASM_INVALID_ASSERT_OPCODE (end-of-list marker).
 * Returns the total number of asserts found across all storms.
 */
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
Eliezer Tamirc14423f2008-02-28 11:49:42 -0800834
/* Dump the MCP firmware trace buffer to the kernel log.
 *
 * The trace is a circular buffer in MCP scratchpad memory; the dword
 * just below the shmem base holds the "mark" (current write position).
 * The buffer is printed in two passes - from the mark to the end, then
 * from the start up to the mark - so output comes out in order.
 */
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	/* Path 1 (two-path chips) keeps its shmem base in shmem2 */
	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	/* Translate the mark from an MCP-internal address into a BAR
	 * offset (dword-aligned); the scratchpad base depends on the chip.
	 */
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	/* first pass: from the mark to the end of the trace buffer */
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		/* NUL-terminate so the 32 bytes print as a C string */
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	/* second pass: from the start of the buffer up to the mark */
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}
872
/* Dump driver and chip state to the log on a fatal error.
 *
 * Prints default/fastpath status-block indices, the per-queue status
 * block data as read back from CSTORM internal memory and - when
 * BNX2X_STOP_ON_ERROR is set - windows of the Rx/Tx descriptor rings.
 * Ends by dumping the MCP firmware trace and the STORM assert lists.
 * Statistics are disabled first so they stop touching the hardware.
 */
void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x) def_att_idx(0x%x) attn_state(0x%x)"
		  " spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x) ack(0x%x) id(0x%x) idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR(" def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ") " : " ");

	/* Read the slow-path status block data back from CSTORM memory,
	 * one dword at a time, into sp_sb_data.
	 */
	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x) igu_seg_id (0x%x) "
		"pf_id(0x%x) vnic_id(0x%x) "
		"vf_id(0x%x) vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		/* E2 and E1x chips use different status-block layouts;
		 * pick pointers into whichever struct applies.
		 */
		struct hc_status_block_sm *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x) rx_bd_cons(0x%x)"
			  " rx_comp_prod(0x%x)"
			  " rx_comp_cons(0x%x) *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(0x%x) last_max_sge(0x%x)"
			  " fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x) tx_pkt_cons(0x%x)"
			  " tx_bd_prod(0x%x) tx_bd_cons(0x%x)"
			  " *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR(" run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR(" indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x) vf_id (0x%x) vf_valid(0x%x) "
				"vnic_id(0x%x) same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x) igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window around the current Rx consumer */
		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* dump a window around the current Tx consumer */
		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
1089
/* Enable interrupts through the HC (host coalescing) block.
 *
 * Programs the per-port HC_REG_CONFIG register according to the
 * interrupt mode in use (MSI-X / MSI / INTx) and initializes the
 * leading/trailing attention edge registers on non-E1 chips.
 */
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		/* INTx: first write with the MSI/MSI-X bit still set... */
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		/* ...then clear it so the final write below leaves pure
		 * INTx mode (intentional two-step sequence).
		 */
		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1149
/* Enable interrupts through the IGU block (used when
 * bp->common.int_block != INT_BLOCK_HC; see bnx2x_int_enable()).
 *
 * Programs IGU_REG_PF_CONFIGURATION for the interrupt mode in use
 * (MSI-X / MSI / INTx) and sets up the attention edge latches.
 */
static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		/* MSI-X: per-vector ISRs, INTx line disabled */
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
1200
1201void bnx2x_int_enable(struct bnx2x *bp)
1202{
1203 if (bp->common.int_block == INT_BLOCK_HC)
1204 bnx2x_hc_int_enable(bp);
1205 else
1206 bnx2x_igu_int_enable(bp);
1207}
1208
/* Disable interrupts through the HC block by clearing all interrupt
 * enable bits in the per-port HC_REG_CONFIG register.  Reads the
 * register back to verify the write reached the chip.
 */
static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1230
/* Disable interrupts through the IGU block by clearing the interrupt
 * enable bits in IGU_REG_PF_CONFIGURATION.  Reads the register back
 * to verify the write reached the chip.
 */
static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
1248
1249void bnx2x_int_disable(struct bnx2x *bp)
1250{
1251 if (bp->common.int_block == INT_BLOCK_HC)
1252 bnx2x_hc_int_disable(bp);
1253 else
1254 bnx2x_igu_int_disable(bp);
1255}
1256
/* Disable interrupt processing and wait until no handler is running.
 *
 * @disable_hw: when non-zero, also mask interrupts at the chip level.
 *
 * After this returns, no ISR or slow-path work item for this device
 * is executing, so state may be torn down safely.
 */
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		/* vector 0 is slow-path; CNIC (if built in) takes the
		 * next one, fastpath queues follow.
		 */
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
1286
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001287/* fast path */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001288
1289/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001290 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001291 */
1292
/* Try once (without blocking) to take a HW resource lock.
 * Return true if succeeded to acquire the lock, false otherwise.
 */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	/* Each PCI function has its own DRIVER_CONTROL register pair */
	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock; reading the control register back
	 * tells us whether the set actually took effect.
	 */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}
1326
Michael Chan993ac7b2009-10-10 13:46:56 +00001327#ifdef BCM_CNIC
1328static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
1329#endif
Eilon Greenstein3196a882008-08-13 15:58:49 -07001330
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001331void bnx2x_sp_event(struct bnx2x_fastpath *fp,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001332 union eth_rx_cqe *rr_cqe)
1333{
1334 struct bnx2x *bp = fp->bp;
1335 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1336 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
1337
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001338 DP(BNX2X_MSG_SP,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001339 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
Eilon Greenstein0626b892009-02-12 08:38:14 +00001340 fp->index, cid, command, bp->state,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001341 rr_cqe->ramrod_cqe.ramrod_type);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001342
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001343 switch (command | fp->state) {
1344 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
1345 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
1346 fp->state = BNX2X_FP_STATE_OPEN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001347 break;
1348
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001349 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
1350 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001351 fp->state = BNX2X_FP_STATE_HALTED;
1352 break;
1353
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001354 case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1355 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1356 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001357 break;
1358
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001359 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001360 BNX2X_ERR("unexpected MC reply (%d) "
1361 "fp[%d] state is %x\n",
1362 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001363 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001364 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001365
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001366 smp_mb__before_atomic_inc();
1367 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001368 /* push the change in fp->state and towards the memory */
1369 smp_wmb();
1370
1371 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001372}
1373
/* Main (non-MSI-X) interrupt handler.
 *
 * Acks the chip to get the interrupt status bitmap, then dispatches:
 * one bit per fastpath queue (NAPI is scheduled for each), the CNIC
 * bit when built in, and bit 0 for slow-path work (queued to the
 * bnx2x workqueue).
 */
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		/* each queue's status bit, offset past the CNIC context */
		mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
		if (status & mask) {
			/* Handle Rx and Tx according to SB id */
			prefetch(fp->rx_cons_sb);
			prefetch(fp->tx_cons_sb);
			prefetch(&fp->sb_running_index[SM_RX_ID]);
			napi_schedule(&bnx2x_fp(bp, fp->index, napi));
			status &= ~mask;
		}
	}

#ifdef BCM_CNIC
	mask = 0x2;
	if (status & (mask | 0x1)) {
		struct cnic_ops *c_ops = NULL;

		/* hand the event to the CNIC driver if it is registered */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();

		status &= ~mask;
	}
#endif

	if (unlikely(status & 0x1)) {
		/* slow-path interrupt: defer to process context */
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	return IRQ_HANDLED;
}
1442
1443/* end of fast path */
1444
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001445
1446/* Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001447
1448/*
1449 * General service functions
1450 */
1451
/* Acquire a HW resource lock, polling until it is granted.
 *
 * Returns 0 on success, -EINVAL for an out-of-range resource,
 * -EEXIST if the lock is already held, and -EAGAIN on timeout
 * (about 5 seconds of polling every 5 ms).
 */
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Each PCI function has its own DRIVER_CONTROL register pair */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
			(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 second every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
1496
/* Release a HW resource lock previously taken with bnx2x_acquire_hw_lock().
 *
 * Returns 0 on success, -EINVAL if @resource is out of range, or -EFAULT
 * if the lock bit was not actually set (i.e. the lock was not held).
 */
int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Each function has its own lock-control register; functions 6 and 7
	 * live in a second register bank (DRIVER_CONTROL_7 onwards) */
	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	/* Writing the resource bit to the base register releases the lock
	 * (acquire writes it to base + 4) */
	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
1532
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001533
Eilon Greenstein4acac6a2009-02-12 08:36:52 +00001534int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
1535{
1536 /* The GPIO should be swapped if swap register is set and active */
1537 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
1538 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
1539 int gpio_shift = gpio_num +
1540 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1541 u32 gpio_mask = (1 << gpio_shift);
1542 u32 gpio_reg;
1543 int value;
1544
1545 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1546 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1547 return -EINVAL;
1548 }
1549
1550 /* read GPIO value */
1551 gpio_reg = REG_RD(bp, MISC_REG_GPIO);
1552
1553 /* get the requested pin value */
1554 if ((gpio_reg & gpio_mask) == gpio_mask)
1555 value = 1;
1556 else
1557 value = 0;
1558
1559 DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);
1560
1561 return value;
1562}
1563
/* Drive a GPIO pin to the requested @mode (output low/high or hi-Z input),
 * accounting for port swapping. The GPIO register is modified under the
 * GPIO HW lock. Returns 0 on success, -EINVAL for an invalid GPIO number.
 */
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: the float-masked value is written back
		 * unchanged below */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1616
/* Set or clear the interrupt state of a GPIO pin (via MISC_REG_GPIO_INT),
 * accounting for port swapping. Performed under the GPIO HW lock.
 * Returns 0 on success, -EINVAL for an invalid GPIO number.
 */
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		/* unknown mode: register is written back unmodified */
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
1662
/* Drive an SPIO pin to the requested @mode (output low/high or hi-Z input).
 * Only SPIO_4..SPIO_7 are accepted. Performed under the SPIO HW lock.
 * Returns 0 on success, -EINVAL for an invalid SPIO number.
 */
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		/* unknown mode: the float-masked value is written back */
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
1708
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001709int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
1710{
1711 u32 sel_phy_idx = 0;
1712 if (bp->link_vars.link_up) {
1713 sel_phy_idx = EXT_PHY1;
1714 /* In case link is SERDES, check if the EXT_PHY2 is the one */
1715 if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
1716 (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
1717 sel_phy_idx = EXT_PHY2;
1718 } else {
1719
1720 switch (bnx2x_phy_selection(&bp->link_params)) {
1721 case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
1722 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
1723 case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
1724 sel_phy_idx = EXT_PHY1;
1725 break;
1726 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
1727 case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
1728 sel_phy_idx = EXT_PHY2;
1729 break;
1730 }
1731 }
1732 /*
1733 * The selected actived PHY is always after swapping (in case PHY
1734 * swapping is enabled). So when swapping is enabled, we need to reverse
1735 * the configuration
1736 */
1737
1738 if (bp->link_params.multi_phy_config &
1739 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
1740 if (sel_phy_idx == EXT_PHY1)
1741 sel_phy_idx = EXT_PHY2;
1742 else if (sel_phy_idx == EXT_PHY2)
1743 sel_phy_idx = EXT_PHY1;
1744 }
1745 return LINK_CONFIG_IDX(sel_phy_idx);
1746}
1747
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001748void bnx2x_calc_fc_adv(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001749{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001750 u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
Eilon Greensteinad33ea32009-01-14 21:24:57 -08001751 switch (bp->link_vars.ieee_fc &
1752 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001753 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001754 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001755 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001756 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001757
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001758 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001759 bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001760 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001761 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001762
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001763 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001764 bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
Eliezer Tamirf1410642008-02-28 11:51:50 -08001765 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00001766
Eliezer Tamirf1410642008-02-28 11:51:50 -08001767 default:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001768 bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001769 ADVERTISED_Pause);
Eliezer Tamirf1410642008-02-28 11:51:50 -08001770 break;
1771 }
1772}
1773
/* Perform the initial PHY/link bring-up for the given @load_mode.
 * Requires a working bootcode (MCP); returns the bnx2x_phy_init() result,
 * or -EINVAL when the bootcode is missing.
 */
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		/* saved so the diagnostics override below can be undone */
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		/* diagnostics load forces XGXS loopback at 10G */
		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		/* restore the requested line speed saved above */
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
1811
/* Re-establish the link: reset it and re-run PHY init under the PHY lock,
 * then recompute the flow-control advertisement. No-op (with an error log)
 * when the bootcode is missing.
 */
void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
1824
/* Reset the link under the PHY lock. No-op (with an error log) when the
 * bootcode is missing.
 */
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
1834
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001835u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001836{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001837 u8 rc = 0;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001838
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001839 if (!BP_NOMCP(bp)) {
1840 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00001841 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
1842 is_serdes);
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00001843 bnx2x_release_phy_lock(bp);
1844 } else
1845 BNX2X_ERR("Bootcode is missing - can not test link\n");
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07001846
1847 return rc;
1848}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001849
/* Initialize the per-port rate-shaping and fairness parameters in bp->cmng
 * from the current line speed. Timeouts are expressed in SDM ticks
 * (1 tick = 4 usec).
 */
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	/* r_param: line rate in bytes per usec */
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer in-accuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
1884
/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		/* min bandwidth is stored in 100 Mbps units */
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   " fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}
1928
/* Compute the per-VN min/max rate-shaping and fairness parameters from the
 * multi-function configuration and write them to the XSTORM internal memory
 * for function (2*vn + port).
 */
static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		/* min/max bandwidth is stored in 100 Mbps units */
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory - copied word (u32) by word */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00001992
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001993static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
1994{
1995 if (CHIP_REV_IS_SLOW(bp))
1996 return CMNG_FNS_NONE;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00001997 if (IS_MF(bp))
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001998 return CMNG_FNS_MINMAX;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001999
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002000 return CMNG_FNS_NONE;
2001}
2002
2003static void bnx2x_read_mf_cfg(struct bnx2x *bp)
2004{
2005 int vn;
2006
2007 if (BP_NOMCP(bp))
2008 return; /* what should be the default bvalue in this case */
2009
2010 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2011 int /*abs*/func = 2*vn + BP_PORT(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002012 bp->mf_config[vn] =
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002013 MF_CFG_RD(bp, func_mf_config[func].config);
2014 }
2015}
2016
2017static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
2018{
2019
2020 if (cmng_type == CMNG_FNS_MINMAX) {
2021 int vn;
2022
2023 /* clear cmng_enables */
2024 bp->cmng.flags.cmng_enables = 0;
2025
2026 /* read mf conf from shmem */
2027 if (read_cfg)
2028 bnx2x_read_mf_cfg(bp);
2029
2030 /* Init rate shaping and fairness contexts */
2031 bnx2x_init_port_minmax(bp);
2032
2033 /* vn_weight_sum and enable fairness if not 0 */
2034 bnx2x_calc_vn_weight_sum(bp);
2035
2036 /* calculate and set min-max rate for each vn */
2037 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2038 bnx2x_init_vn_minmax(bp, vn);
2039
2040 /* always enable rate shaping and fairness */
2041 bp->cmng.flags.cmng_enables |=
2042 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
2043 if (!bp->vn_weight_sum)
2044 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2045 " fairness will be disabled\n");
2046 return;
2047 }
2048
2049 /* rate shaping and fairness are disabled */
2050 DP(NETIF_MSG_IFUP,
2051 "rate shaping and fairness are disabled\n");
2052}
2053
2054static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
2055{
2056 int port = BP_PORT(bp);
2057 int func;
2058 int vn;
2059
2060 /* Set the attention towards other drivers on the same port */
2061 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2062 if (vn == BP_E1HVN(bp))
2063 continue;
2064
2065 func = ((vn << 1) | port);
2066 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2067 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2068 }
2069}
Eilon Greenstein8a1c38d2009-02-12 08:36:40 +00002070
/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			/* publish the pause state to the USTORM firmware */
			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	/* in multi-function mode, tell the other VN drivers on this port */
	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}
2126
/* Refresh the cached link status and report it. Only runs when the device
 * is open and the function is not disabled by the MF configuration.
 */
void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}
2146
/* Take over the Port Management Function (PMF) role: mark this function as
 * PMF, enable the NIG link attention for it (via the HC or IGU edge
 * registers, depending on the interrupt block), and kick the statistics
 * state machine.
 */
static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}
2167
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002168/* end of Link */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002169
2170/* slow path */
2171
2172/*
2173 * General service functions
2174 */
2175
/* send the MCP a request, block until there is a reply */
/* Writes @command/@param to the function's shmem mailbox (serialized by
 * fw_mb_mutex) and polls for a response with a matching sequence number.
 * Returns the FW reply masked with FW_MSG_CODE_MASK, or 0 on timeout.
 */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	/* sequence number distinguishes this request's reply */
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 second (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
2216
/* must be called under rtnl_lock */
/* Translate the BNX2X_* filter flags for client @cl_id into the per-client
 * bits of bp->mac_filters (drop-all / accept-all masks for unicast,
 * multicast and broadcast traffic).
 */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	/* fold each flag into this client's bit of the shared masks */
	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	/* never set by this function; always clears the client's bit */
	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
2284
/*
 * Program the per-function FW configuration into the storm RAMs:
 * common TSTORM config (TPA/RSS) when requested, the function-enable
 * bit, statistics collection flags/addresses and the slow-path queue
 * producer. All parameters come from @p, filled by the caller.
 */
void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
{
	if (FUNC_CONFIG(p->func_flgs)) {
		struct tstorm_eth_function_common_config tcfg = {0};

		/* tpa */
		if (p->func_flgs & FUNC_FLG_TPA)
			tcfg.config_flags |=
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;

		/* set rss flags */
		if (p->func_flgs & FUNC_FLG_RSS) {
			u16 rss_flgs = (p->rss->mode <<
			TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);

			/* translate the capability bits into FW config flags */
			if (p->rss->cap & RSS_IPV4_CAP)
				rss_flgs |= RSS_IPV4_CAP_MASK;
			if (p->rss->cap & RSS_IPV4_TCP_CAP)
				rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_CAP)
				rss_flgs |= RSS_IPV6_CAP_MASK;
			if (p->rss->cap & RSS_IPV6_TCP_CAP)
				rss_flgs |= RSS_IPV6_TCP_CAP_MASK;

			tcfg.config_flags |= rss_flgs;
			tcfg.rss_result_mask = p->rss->result_mask;

		}

		storm_memset_func_cfg(bp, &tcfg, p->func_id);
	}

	/* Enable the function in the FW */
	storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
	storm_memset_func_en(bp, p->func_id, 1);

	/* statistics: point all four storms at the same stats buffer */
	if (p->func_flgs & FUNC_FLG_STATS) {
		struct stats_indication_flags stats_flags = {0};
		stats_flags.collect_eth = 1;

		storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
		storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);

		storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
		storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
	}

	/* spq: base address plus initial producer value */
	if (p->func_flgs & FUNC_FLG_SPQ) {
		storm_memset_spq_addr(bp, p->spq_map, p->func_id);
		REG_WR(bp, XSEM_REG_FAST_MEMORY +
		       XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
	}
}
2346
2347static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2348 struct bnx2x_fastpath *fp)
2349{
2350 u16 flags = 0;
2351
2352 /* calculate queue flags */
2353 flags |= QUEUE_FLG_CACHE_ALIGN;
2354 flags |= QUEUE_FLG_HC;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002355 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002356
2357#ifdef BCM_VLAN
2358 flags |= QUEUE_FLG_VLAN;
2359 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2360#endif
2361
2362 if (!fp->disable_tpa)
2363 flags |= QUEUE_FLG_TPA;
2364
2365 flags |= QUEUE_FLG_STATS;
2366
2367 return flags;
2368}
2369
/*
 * Fill @pause and @rxq_init with the Rx-client parameters for
 * fastpath queue @fp prior to sending the client setup ramrod.
 */
static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
	struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
	struct bnx2x_rxq_init_params *rxq_init)
{
	u16 max_sge = 0;
	u16 sge_sz = 0;
	u16 tpa_agg_size = 0;

	/* calculate queue flags */
	u16 flags = bnx2x_get_cl_flags(bp, fp);

	if (!fp->disable_tpa) {
		/* SGE ring pause thresholds; units per FW/HSI convention —
		 * TODO confirm against the HSI headers */
		pause->sge_th_hi = 250;
		pause->sge_th_lo = 150;
		/* cap the TPA aggregation size at 64K (0xffff) */
		tpa_agg_size = min_t(u32,
			(min_t(u32, 8, MAX_SKB_FRAGS) *
			SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
		/* max SGEs per packet: MTU rounded up to whole SGE pages */
		max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
			SGE_PAGE_SHIFT;
		max_sge = ((max_sge + PAGES_PER_SGE - 1) &
			   (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
		sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
				    0xffff);
	}

	/* pause - not for e1 */
	if (!CHIP_IS_E1(bp)) {
		pause->bd_th_hi = 350;
		pause->bd_th_lo = 250;
		pause->rcq_th_hi = 350;
		pause->rcq_th_lo = 250;
		pause->sge_th_hi = 0;
		pause->sge_th_lo = 0;
		pause->pri_map = 1;
	}

	/* rxq setup: DMA mappings, client ids and HC parameters */
	rxq_init->flags = flags;
	rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
	rxq_init->dscr_map = fp->rx_desc_mapping;
	rxq_init->sge_map = fp->rx_sge_mapping;
	rxq_init->rcq_map = fp->rx_comp_mapping;
	/* "next page" half of the RCQ lives one BCM page after its base */
	rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
	rxq_init->mtu = bp->dev->mtu;
	rxq_init->buf_sz = bp->rx_buf_size;
	rxq_init->cl_qzone_id = fp->cl_qzone_id;
	/* client id is reused as special/statistics id */
	rxq_init->cl_id = fp->cl_id;
	rxq_init->spcl_id = fp->cl_id;
	rxq_init->stat_id = fp->cl_id;
	rxq_init->tpa_agg_sz = tpa_agg_size;
	rxq_init->sge_buf_sz = sge_sz;
	rxq_init->max_sges_pkt = max_sge;
	rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
	rxq_init->fw_sb_id = fp->fw_sb_id;

	rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;

	rxq_init->cid = HW_CID(bp, fp->cid);

	/* host-coalescing rate: interrupts/sec from the rx tick interval */
	rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
}
2431
2432static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2433 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2434{
2435 u16 flags = bnx2x_get_cl_flags(bp, fp);
2436
2437 txq_init->flags = flags;
2438 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2439 txq_init->dscr_map = fp->tx_desc_mapping;
2440 txq_init->stat_id = fp->cl_id;
2441 txq_init->cid = HW_CID(bp, fp->cid);
2442 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2443 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2444 txq_init->fw_sb_id = fp->fw_sb_id;
2445 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2446}
2447
/*
 * PF-level FW/HW initialization: outer-VLAN, IGU statistics reset (E2),
 * function configuration (stats/SPQ/optional RSS), congestion management
 * defaults, initial Rx mode and the event queue.
 */
void bnx2x_pf_init(struct bnx2x *bp)
{
	struct bnx2x_func_init_params func_init = {0};
	struct bnx2x_rss_params rss = {0};
	struct event_ring_data eq_data = { {0} };
	u16 flags;

	/* pf specific setups */
	if (!CHIP_IS_E1(bp))
		storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));

	if (CHIP_IS_E2(bp)) {
		/* reset IGU PF statistics: MSIX + ATTN */
		/* PF */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
		/* ATTN */
		REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
			   BNX2X_IGU_STAS_MSG_VF_CNT*4 +
			   BNX2X_IGU_STAS_MSG_PF_CNT*4 +
			   (CHIP_MODE_IS_4_PORT(bp) ?
				BP_FUNC(bp) : BP_VN(bp))*4, 0);
	}

	/* function setup flags */
	flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);

	/* E1x honors the TPA_ENABLE_FLAG; E2 always enables TPA */
	if (CHIP_IS_E1x(bp))
		flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
	else
		flags |= FUNC_FLG_TPA;

	/**
	 * Although RSS is meaningless when there is a single HW queue we
	 * still need it enabled in order to have HW Rx hash generated.
	 *
	 * if (is_eth_multi(bp))
	 *	flags |= FUNC_FLG_RSS;
	 */

	/* function setup */
	if (flags & FUNC_FLG_RSS) {
		rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
			   RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
		rss.mode = bp->multi_mode;
		rss.result_mask = MULTI_MASK;
		func_init.rss = &rss;
	}

	func_init.func_flgs = flags;
	func_init.pf_id = BP_FUNC(bp);
	func_init.func_id = BP_FUNC(bp);
	func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
	func_init.spq_map = bp->spq_mapping;
	func_init.spq_prod = bp->spq_prod_idx;

	bnx2x_func_init(bp, &func_init);

	memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));

	/*
	Congestion management values depend on the link rate
	There is no active link so initial link rate is set to 10 Gbps.
	When the link comes up The congestion management values are
	re-calculated according to the actual link rate.
	*/
	bp->link_vars.line_speed = SPEED_10000;
	bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));

	/* Only the PMF sets the HW */
	if (bp->port.pmf)
		storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));

	/* no rx until link is up */
	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	/* init Event Queue */
	eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
	eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
	eq_data.producer = bp->eq_prod;
	eq_data.index_id = HC_SP_INDEX_EQ_CONS;
	eq_data.sb_id = DEF_SB_ID;
	storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
}
2535
2536
/*
 * Disable this E1H function: stop the Tx queues, close the NIG LLH
 * gate for the function and drop the carrier indication.
 */
static void bnx2x_e1h_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	netif_tx_disable(bp->dev);

	/* close the per-function NIG gate */
	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

	netif_carrier_off(bp->dev);
}
2547
/*
 * Re-enable this E1H function: reopen the NIG LLH gate and wake the
 * Tx queues. The counterpart of bnx2x_e1h_disable().
 */
static void bnx2x_e1h_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);

	/* Tx queue should be only reenabled */
	netif_tx_wake_all_queues(bp->dev);

	/*
	 * Should not call netif_carrier_on since it will be called if the link
	 * is up when checking for link state
	 */
}
2562
Eilon Greenstein2691d512009-08-12 08:22:08 +00002563static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2564{
Eilon Greenstein2691d512009-08-12 08:22:08 +00002565 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002566
2567 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2568
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002569 /*
2570 * This is the only place besides the function initialization
2571 * where the bp->flags can change so it is done without any
2572 * locks
2573 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002574 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002575 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002576 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002577
2578 bnx2x_e1h_disable(bp);
2579 } else {
2580 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002581 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002582
2583 bnx2x_e1h_enable(bp);
2584 }
2585 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2586 }
2587 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2588
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002589 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2590 bnx2x_link_sync_notify(bp);
2591 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002592 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2593 }
2594
2595 /* Report results to MCP */
2596 if (dcc_event)
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002597 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002598 else
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002599 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002600}
2601
Michael Chan28912902009-10-10 13:46:53 +00002602/* must be called under the spq lock */
2603static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2604{
2605 struct eth_spe *next_spe = bp->spq_prod_bd;
2606
2607 if (bp->spq_prod_bd == bp->spq_last_bd) {
2608 bp->spq_prod_bd = bp->spq;
2609 bp->spq_prod_idx = 0;
2610 DP(NETIF_MSG_TIMER, "end of spq\n");
2611 } else {
2612 bp->spq_prod_bd++;
2613 bp->spq_prod_idx++;
2614 }
2615 return next_spe;
2616}
2617
/* must be called under the spq lock */
static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	/* Make sure that BD data is updated before writing the producer */
	wmb();

	/* tell the FW about the new SPQ producer */
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
		 bp->spq_prod_idx);
	/* order the MMIO write before the subsequent spin_unlock */
	mmiowb();
}
2630
/* the slow path queue is odd since completions arrive on the fastpath ring */
/*
 * Post a slow-path element (ramrod) on the SPQ.
 * @command:  ramrod command id
 * @cid:      connection id (HW-encoded together with the port)
 * @data_hi/@data_lo: 64-bit ramrod data address
 * @common:   non-zero for common (NONE_CONNECTION_TYPE) ramrods,
 *            zero for ETH ramrods
 * Returns 0 on success, -EBUSY if the SPQ ring is full (also panics),
 * -EIO when the driver is already in panic state.
 */
int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
		  u32 data_hi, u32 data_lo, int common)
{
	struct eth_spe *spe;
	u16 type;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!atomic_read(&bp->spq_left)) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	spe = bnx2x_sp_get_next(bp);

	/* CID needs port number to be encoded in it */
	spe->hdr.conn_and_cmd_data =
			cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
				    HW_CID(bp, cid));

	if (common)
		/* Common ramrods:
		 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
		 * TRAFFIC_STOP, TRAFFIC_START
		 */
		type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;
	else
		/* ETH ramrods: SETUP, HALT */
		type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
			& SPE_HDR_CONN_TYPE;

	type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
		 SPE_HDR_FUNCTION_ID);

	spe->hdr.type = cpu_to_le16(type);

	spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
	spe->data.update_data_addr.lo = cpu_to_le32(data_lo);

	/* stats ramrod has its own slot on the spq */
	if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
		/* It's ok if the actual decrement is issued towards the memory
		 * somewhere between the spin_lock and spin_unlock. Thus no
		 * more explicit memory barrier is needed.
		 */
		atomic_dec(&bp->spq_left);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
	   "type(0x%x) left %x\n",
	   bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
	   (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));

	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
2699
2700/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002701static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002702{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002703 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002704 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002705
2706 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002707 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002708 val = (1UL << 31);
2709 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2710 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2711 if (val & (1L << 31))
2712 break;
2713
2714 msleep(5);
2715 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002716 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002717 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002718 rc = -EBUSY;
2719 }
2720
2721 return rc;
2722}
2723
/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	/* writing 0 clears the lock bit taken in bnx2x_acquire_alr() */
	REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
}
2729
/* bits returned by bnx2x_update_dsb_idx(): which indices advanced */
#define BNX2X_DEF_SB_ATT_IDX	0x0001
#define BNX2X_DEF_SB_IDX	0x0002

/*
 * Sample the default status block and update the driver's cached
 * attention/slow-path indices. Returns a mask of BNX2X_DEF_SB_*
 * bits indicating which indices changed since the last call.
 */
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= BNX2X_DEF_SB_ATT_IDX;
	}

	if (bp->def_idx != def_sb->sp_sb.running_index) {
		bp->def_idx = def_sb->sp_sb.running_index;
		rc |= BNX2X_DEF_SB_IDX;
	}

	/* Do not reorder: indices reading should complete before handling */
	barrier();
	return rc;
}
2753
2754/*
2755 * slow path service functions
2756 */
2757
/*
 * Handle newly asserted attention bits: mask them in the AEU, record
 * them in bp->attn_state, service the hard-wired sources (NIG/link,
 * GPIOs, general attentions) and finally ack the bits towards the
 * HC or IGU interrupt block.
 */
static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;
	u32 reg_addr;

	/* bits should not already be marked asserted */
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	/* mask the newly asserted bits in the AEU (under the HW lock) */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		/* general attentions are port-specific; writing 0 clears them */
		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	/* ack the attention bits via HC or IGU, depending on the int block */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_SET);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);

	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}
2858
/*
 * Record a fan failure in shared memory (so other drivers/FW see the
 * PHY marked as failed) and log a shutdown warning to the user.
 */
static inline void bnx2x_fan_failure(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 ext_phy_config;
	/* mark the failure */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);

	/* replace the ext PHY type with the FAILURE marker */
	ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
	ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
	SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
		 ext_phy_config);

	/* log the failure */
	netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
	       " the driver to shutdown the card to prevent permanent"
	       " damage.  Please contact OEM Support for assistance\n");
}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002878
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002879static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2880{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002881 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002882 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002883 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002884
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002885 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2886 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002887
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002888 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002889
2890 val = REG_RD(bp, reg_offset);
2891 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2892 REG_WR(bp, reg_offset, val);
2893
2894 BNX2X_ERR("SPIO5 hw attention\n");
2895
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002896 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002897 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002898 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002899 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002900
Eilon Greenstein589abe32009-02-12 08:36:55 +00002901 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2902 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2903 bnx2x_acquire_phy_lock(bp);
2904 bnx2x_handle_module_detect_int(&bp->link_params);
2905 bnx2x_release_phy_lock(bp);
2906 }
2907
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002908 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2909
2910 val = REG_RD(bp, reg_offset);
2911 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2912 REG_WR(bp, reg_offset, val);
2913
2914 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002915 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002916 bnx2x_panic();
2917 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002918}
2919
/*
 * Service group-1 deasserted attentions: doorbell queue (DORQ) errors
 * and fatal HW block attentions (which panic).
 */
static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {

		/* reading the CLR register acknowledges the interrupt */
		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		/* mask the fatal bits, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}
2950
/*
 * Service group-2 deasserted attentions: CFC and PXP errors and fatal
 * HW block attentions (which panic).
 */
static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
		/* E2 has a second PXP interrupt status register */
		if (CHIP_IS_E2(bp)) {
			val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
			BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		/* mask the fatal bits, then panic */
		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}
2994
/*
 * Service group-3 deasserted attentions: general attentions (PMF link
 * events / DCC, MC and MCP asserts) and latched attentions (GRC
 * timeout/reserved), clearing the latch signal when done.
 */
static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			/* re-read the MF configuration - it may have changed */
			bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
					func_mf_config[BP_ABS_FUNC(bp)].config);
			val = SHMEM_RD(bp,
				       func_mb[BP_FW_MB_IDX(bp)].drv_status);
			if (val & DRV_STATUS_DCC_EVENT_MASK)
				bnx2x_dcc_event(bp,
					    (val & DRV_STATUS_DCC_EVENT_MASK));
			bnx2x__link_status_update(bp);
			if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			/* the GRC attn registers do not exist on E1 */
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1(bp) ? 0 :
					REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}
3050
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003051#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3052#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3053#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3054#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3055#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3056#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003057
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003058/*
3059 * should be run under rtnl lock
3060 */
3061static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3062{
3063 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3064 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3065 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3066 barrier();
3067 mmiowb();
3068}
3069
3070/*
3071 * should be run under rtnl lock
3072 */
3073static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3074{
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076 val |= (1 << 16);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3080}
3081
3082/*
3083 * should be run under rtnl lock
3084 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003085bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003086{
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3089 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3090}
3091
3092/*
3093 * should be run under rtnl lock
3094 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003095inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003096{
3097 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3098
3099 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3100
3101 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3102 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3103 barrier();
3104 mmiowb();
3105}
3106
3107/*
3108 * should be run under rtnl lock
3109 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003110u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003111{
3112 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3113
3114 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3115
3116 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3117 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3118 barrier();
3119 mmiowb();
3120
3121 return val1;
3122}
3123
3124/*
3125 * should be run under rtnl lock
3126 */
3127static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3128{
3129 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3130}
3131
3132static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3133{
3134 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3135 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3136}
3137
/* Continuation printer for the parity-error block list: prepend a comma
 * separator for every entry after the first.
 */
static inline void _print_next_block(int idx, const char *blk)
{
	if (idx != 0)
		pr_cont(", ");

	pr_cont("%s", blk);
}
3144
3145static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3146{
3147 int i = 0;
3148 u32 cur_bit = 0;
3149 for (i = 0; sig; i++) {
3150 cur_bit = ((u32)0x1 << i);
3151 if (sig & cur_bit) {
3152 switch (cur_bit) {
3153 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3154 _print_next_block(par_num++, "BRB");
3155 break;
3156 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3157 _print_next_block(par_num++, "PARSER");
3158 break;
3159 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3160 _print_next_block(par_num++, "TSDM");
3161 break;
3162 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3163 _print_next_block(par_num++, "SEARCHER");
3164 break;
3165 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3166 _print_next_block(par_num++, "TSEMI");
3167 break;
3168 }
3169
3170 /* Clear the bit */
3171 sig &= ~cur_bit;
3172 }
3173 }
3174
3175 return par_num;
3176}
3177
3178static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3179{
3180 int i = 0;
3181 u32 cur_bit = 0;
3182 for (i = 0; sig; i++) {
3183 cur_bit = ((u32)0x1 << i);
3184 if (sig & cur_bit) {
3185 switch (cur_bit) {
3186 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3187 _print_next_block(par_num++, "PBCLIENT");
3188 break;
3189 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3190 _print_next_block(par_num++, "QM");
3191 break;
3192 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3193 _print_next_block(par_num++, "XSDM");
3194 break;
3195 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3196 _print_next_block(par_num++, "XSEMI");
3197 break;
3198 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3199 _print_next_block(par_num++, "DOORBELLQ");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3202 _print_next_block(par_num++, "VAUX PCI CORE");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3205 _print_next_block(par_num++, "DEBUG");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3208 _print_next_block(par_num++, "USDM");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3211 _print_next_block(par_num++, "USEMI");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3214 _print_next_block(par_num++, "UPB");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3217 _print_next_block(par_num++, "CSDM");
3218 break;
3219 }
3220
3221 /* Clear the bit */
3222 sig &= ~cur_bit;
3223 }
3224 }
3225
3226 return par_num;
3227}
3228
3229static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3230{
3231 int i = 0;
3232 u32 cur_bit = 0;
3233 for (i = 0; sig; i++) {
3234 cur_bit = ((u32)0x1 << i);
3235 if (sig & cur_bit) {
3236 switch (cur_bit) {
3237 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3238 _print_next_block(par_num++, "CSEMI");
3239 break;
3240 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3241 _print_next_block(par_num++, "PXP");
3242 break;
3243 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3244 _print_next_block(par_num++,
3245 "PXPPCICLOCKCLIENT");
3246 break;
3247 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3248 _print_next_block(par_num++, "CFC");
3249 break;
3250 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3251 _print_next_block(par_num++, "CDU");
3252 break;
3253 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3254 _print_next_block(par_num++, "IGU");
3255 break;
3256 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3257 _print_next_block(par_num++, "MISC");
3258 break;
3259 }
3260
3261 /* Clear the bit */
3262 sig &= ~cur_bit;
3263 }
3264 }
3265
3266 return par_num;
3267}
3268
3269static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3270{
3271 int i = 0;
3272 u32 cur_bit = 0;
3273 for (i = 0; sig; i++) {
3274 cur_bit = ((u32)0x1 << i);
3275 if (sig & cur_bit) {
3276 switch (cur_bit) {
3277 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3278 _print_next_block(par_num++, "MCP ROM");
3279 break;
3280 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3281 _print_next_block(par_num++, "MCP UMP RX");
3282 break;
3283 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3284 _print_next_block(par_num++, "MCP UMP TX");
3285 break;
3286 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3287 _print_next_block(par_num++, "MCP SCPAD");
3288 break;
3289 }
3290
3291 /* Clear the bit */
3292 sig &= ~cur_bit;
3293 }
3294 }
3295
3296 return par_num;
3297}
3298
/*
 * Check the four attention signals for HW block parity errors and, if any
 * are set, log which blocks are affected. Returns true when at least one
 * parity error was found (the caller then starts the recovery flow),
 * false otherwise.
 */
static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
				     u32 sig2, u32 sig3)
{
	if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
	    (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
		int par_num = 0;
		DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
			"[0]:0x%08x [1]:0x%08x "
			"[2]:0x%08x [3]:0x%08x\n",
			  sig0 & HW_PRTY_ASSERT_SET_0,
			  sig1 & HW_PRTY_ASSERT_SET_1,
			  sig2 & HW_PRTY_ASSERT_SET_2,
			  sig3 & HW_PRTY_ASSERT_SET_3);
		printk(KERN_ERR"%s: Parity errors detected in blocks: ",
		       bp->dev->name);
		/* each helper prints the block names for one signal and
		 * returns the running count so the list stays
		 * comma-separated across all four signals */
		par_num = bnx2x_print_blocks_with_parity0(
			sig0 & HW_PRTY_ASSERT_SET_0, par_num);
		par_num = bnx2x_print_blocks_with_parity1(
			sig1 & HW_PRTY_ASSERT_SET_1, par_num);
		par_num = bnx2x_print_blocks_with_parity2(
			sig2 & HW_PRTY_ASSERT_SET_2, par_num);
		par_num = bnx2x_print_blocks_with_parity3(
			sig3 & HW_PRTY_ASSERT_SET_3, par_num);
		printk("\n");
		return true;
	} else
		return false;
}
3327
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003328bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003329{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003330 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003331 int port = BP_PORT(bp);
3332
3333 attn.sig[0] = REG_RD(bp,
3334 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3335 port*4);
3336 attn.sig[1] = REG_RD(bp,
3337 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3338 port*4);
3339 attn.sig[2] = REG_RD(bp,
3340 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3341 port*4);
3342 attn.sig[3] = REG_RD(bp,
3343 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3344 port*4);
3345
3346 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3347 attn.sig[3]);
3348}
3349
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003350
/*
 * Handle deasserted attentions from group 4: decode and log PGLUE and ATC
 * interrupt status bits, and report set-4 parity attentions.
 * NOTE(review): the *_INT_STS_CLR register names suggest the reads also
 * clear the status - confirm against the register spec.
 */
static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
{
	u32 val;
	if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {

		val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
		BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
		/* log every individual PGLUE error cause that is set */
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "ADDRESS_ERROR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "INCORRECT_RCV_BEHAVIOR\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "WAS_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_LENGTH_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_GRC_SPACE_VIOLATION_ATTN\n");
		if (val &
		    PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "VF_MSIX_BAR_VIOLATION_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_ERROR_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "TCPL_IN_TWO_RCBS_ATTN\n");
		if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
			BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
				  "CSSNOOP_FIFO_OVERFLOW\n");
	}
	if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
		val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
		BNX2X_ERR("ATC hw attention 0x%x\n", val);
		/* log every individual ATC error cause that is set */
		if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
			BNX2X_ERR("ATC_ATC_INT_STS_REG"
				  "_ATC_TCPL_TO_NOT_PEND\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_GPA_MULTIPLE_HITS\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_RCPL_TO_EMPTY_CNT\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
		if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
			BNX2X_ERR("ATC_ATC_INT_STS_REG_"
				  "ATC_IREQ_LESS_THAN_STU\n");
	}

	if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
		    AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
		BNX2X_ERR("FATAL parity attention set4 0x%x\n",
		(u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
			      AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
	}

}
3417
/*
 * Process newly deasserted attention bits: dispatch each attention group's
 * signals to the per-group handlers, then clear the attention bits in
 * HC/IGU, re-enable the corresponding AEU mask bits and update the driver's
 * attention state. Runs under the ALR (attention lock register) to
 * serialize against the MCP and the other port.
 */
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn, *group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	if (bnx2x_chk_parity_attn(bp)) {
		/* parity error: kick off the recovery worker and bail out */
		bp->recovery_state = BNX2X_RECOVERY_INIT;
		bnx2x_set_reset_in_progress(bp);
		schedule_delayed_work(&bp->reset_task, 0);
		/* Disable HW interrupts */
		bnx2x_int_disable(bp);
		bnx2x_release_alr(bp);
		/* In case of parity errors don't handle attentions so that
		 * other function would "see" parity errors.
		 */
		return;
	}

	/* read the current (after-invert) attention signals */
	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	/* the fifth signal exists only on E2 */
	if (CHIP_IS_E2(bp))
		attn.sig[4] =
		      REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
	else
		attn.sig[4] = 0;

	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);

	/* dispatch the signals of every deasserted group, masked by that
	 * group's configured signal mask */
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = &bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
					 "%08x %08x %08x\n",
			   index,
			   group_mask->sig[0], group_mask->sig[1],
			   group_mask->sig[2], group_mask->sig[3],
			   group_mask->sig[4]);

			bnx2x_attn_int_deasserted4(bp,
					attn.sig[4] & group_mask->sig[4]);
			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask->sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask->sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask->sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask->sig[0]);
		}
	}

	bnx2x_release_alr(bp);

	/* clear the handled attention bits - via HC or IGU depending on
	 * the interrupt block in use */
	if (bp->common.int_block == INT_BLOCK_HC)
		reg_addr = (HC_REG_COMMAND_REG + port*32 +
			    COMMAND_REG_ATTN_BITS_CLR);
	else
		reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
	   (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
	REG_WR(bp, reg_addr, val);

	/* every deasserted bit must have been tracked as asserted */
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	/* re-enable the deasserted lines in the AEU mask under the per-port
	 * attention-mask HW lock */
	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0x3ff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
3515
3516static void bnx2x_attn_int(struct bnx2x *bp)
3517{
3518 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003519 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3520 attn_bits);
3521 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3522 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003523 u32 attn_state = bp->attn_state;
3524
3525 /* look for changed bits */
3526 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3527 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3528
3529 DP(NETIF_MSG_HW,
3530 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3531 attn_bits, attn_ack, asserted, deasserted);
3532
3533 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003534 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003535
3536 /* handle bits that were raised */
3537 if (asserted)
3538 bnx2x_attn_int_asserted(bp, asserted);
3539
3540 if (deasserted)
3541 bnx2x_attn_int_deasserted(bp, deasserted);
3542}
3543
/* Publish a new event-queue producer value to the storm for this function;
 * mmiowb() keeps successive producer updates ordered.
 */
static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
{
	/* No memory barriers */
	storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
	mmiowb(); /* keep prod updates ordered */
}
3550
3551#ifdef BCM_CNIC
/*
 * Filter a CFC-delete completion for CNIC. Returns 1 when @cid is not in
 * the CNIC CID range (the caller must handle the event itself), 0 when the
 * event was consumed and completed towards CNIC. A delete with the error
 * flag set triggers a diagnostic dump before completing.
 */
static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
				     union event_ring_elem *elem)
{
	/* CIDs below starting_cid (or when no CNIC range was set up)
	 * belong to regular ethernet queues */
	if (!bp->cnic_eth_dev.starting_cid ||
	    cid < bp->cnic_eth_dev.starting_cid)
		return 1;

	DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);

	if (unlikely(elem->message.data.cfc_del_event.error)) {
		BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
			  cid);
		bnx2x_panic_dump(bp);
	}
	bnx2x_cnic_cfc_comp(bp, cid);
	return 0;
}
3569#endif
3570
/*
 * Drain the slow-path event queue (EQ): walk the ring from the software
 * consumer to the hardware consumer, handle each completion (statistics,
 * CFC delete, function start/stop, set-MAC), then return the consumed
 * entries to the SPQ budget and publish the new producer.
 */
static void bnx2x_eq_int(struct bnx2x *bp)
{
	u16 hw_cons, sw_cons, sw_prod;
	union event_ring_elem *elem;
	u32 cid;
	u8 opcode;
	int spqe_cnt = 0;

	hw_cons = le16_to_cpu(*bp->eq_cons_sb);

	/* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
	 * when we get the next page we need to adjust so the loop
	 * condition below will be met. The next element is the size of a
	 * regular element and hence incrementing by 1
	 */
	if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
		hw_cons++;

	/* This function may never run in parallel with itself for a
	 * specific bp, thus there is no need in "paired" read memory
	 * barrier here.
	 */
	sw_cons = bp->eq_cons;
	sw_prod = bp->eq_prod;

	DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
			hw_cons, sw_cons, atomic_read(&bp->spq_left));

	for (; sw_cons != hw_cons;
	      sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {


		elem = &bp->eq_ring[EQ_DESC(sw_cons)];

		cid = SW_CID(elem->message.data.cfc_del_event.cid);
		opcode = elem->message.opcode;


		/* handle eq element */
		switch (opcode) {
		case EVENT_RING_OPCODE_STAT_QUERY:
			DP(NETIF_MSG_TIMER, "got statistics comp event\n");
			/* nothing to do with stats comp */
			continue;

		case EVENT_RING_OPCODE_CFC_DEL:
			/* handle according to cid range */
			/*
			 * we may want to verify here that the bp state is
			 * HALTING
			 */
			DP(NETIF_MSG_IFDOWN,
			   "got delete ramrod for MULTI[%d]\n", cid);
#ifdef BCM_CNIC
			/* CNIC CIDs are completed by the CNIC helper */
			if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
				goto next_spqe;
#endif
			bnx2x_fp(bp, cid, state) =
						BNX2X_FP_STATE_CLOSED;

			goto next_spqe;
		}

		/* remaining opcodes are qualified by the current bp state */
		switch (opcode | bp->state) {
		case (EVENT_RING_OPCODE_FUNCTION_START |
		      BNX2X_STATE_OPENING_WAIT4_PORT):
			DP(NETIF_MSG_IFUP, "got setup ramrod\n");
			bp->state = BNX2X_STATE_FUNC_STARTED;
			break;

		case (EVENT_RING_OPCODE_FUNCTION_STOP |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
			bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
			break;

		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
		case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
			DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;

		case (EVENT_RING_OPCODE_SET_MAC |
		      BNX2X_STATE_CLOSING_WAIT4_HALT):
			DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
			bp->set_mac_pending = 0;
			break;
		default:
			/* unknown event log error and continue */
			BNX2X_ERR("Unknown EQ event %d\n",
				  elem->message.opcode);
		}
next_spqe:
		spqe_cnt++;
	} /* for */

	/* return the consumed entries to the slow-path queue budget */
	smp_mb__before_atomic_inc();
	atomic_add(spqe_cnt, &bp->spq_left);

	bp->eq_cons = sw_cons;
	bp->eq_prod = sw_prod;
	/* Make sure that above mem writes were issued towards the memory */
	smp_wmb();

	/* update producer */
	bnx2x_update_eq_prod(bp, bp->eq_prod);
}
3678
/*
 * Slow-path work handler (scheduled from the slow-path interrupt): updates
 * the default status block indices, handles HW attentions and event-queue
 * completions, and re-enables the attention interrupt when done.
 */
static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	/* status bits tell which default-SB indices have changed */
	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);

	/* HW attentions */
	if (status & BNX2X_DEF_SB_ATT_IDX) {
		bnx2x_attn_int(bp);
		status &= ~BNX2X_DEF_SB_ATT_IDX;
	}

	/* SP events: STAT_QUERY and others */
	if (status & BNX2X_DEF_SB_IDX) {

		/* Handle EQ completions */
		bnx2x_eq_int(bp);

		/* ack the default SB index without re-enabling interrupts */
		bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
			le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);

		status &= ~BNX2X_DEF_SB_IDX;
	}

	if (unlikely(status))
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
		   status);

	/* ack the attention index and re-enable the interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
	     le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
}
3721
/*
 * Slow-path MSI-X interrupt handler: acks the default status block with
 * interrupts left disabled, gives CNIC (if built in) a chance to process
 * the event, and defers the real work to the sp_task workqueue.
 */
irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	/* ack the SB; interrupts stay disabled until sp_task re-enables */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
		     IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

#ifdef BCM_CNIC
	{
		struct cnic_ops *c_ops;

		/* call the registered CNIC handler, if any, under RCU */
		rcu_read_lock();
		c_ops = rcu_dereference(bp->cnic_ops);
		if (c_ops)
			c_ops->cnic_handler(bp->cnic_data, NULL);
		rcu_read_unlock();
	}
#endif
	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}
3756
3757/* end of slow path */
3758
/*
 * Periodic driver timer: in poll mode services TX/RX of the first fastpath
 * queue; maintains the driver<->MCP heartbeat pulse and kicks periodic
 * statistics updates. Re-arms itself at bp->current_interval.
 */
static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	/* interrupts are disabled - skip the work but keep the timer alive */
	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		/* advance and publish the driver heartbeat sequence */
		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if (bp->state == BNX2X_STATE_OPEN)
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
3807
3808/* end of Statistics */
3809
3810/* nic init */
3811
3812/*
3813 * nic init service functions
3814 */
3815
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003816static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003817{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003818 u32 i;
3819 if (!(len%4) && !(addr%4))
3820 for (i = 0; i < len; i += 4)
3821 REG_WR(bp, addr + i, fill);
3822 else
3823 for (i = 0; i < len; i++)
3824 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003825
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003826}
3827
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003828/* helper: writes FP SP data to FW - data_size in dwords */
3829static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3830 int fw_sb_id,
3831 u32 *sb_data_p,
3832 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003833{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003834 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003835 for (index = 0; index < data_size; index++)
3836 REG_WR(bp, BAR_CSTRORM_INTMEM +
3837 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3838 sizeof(u32)*index,
3839 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003840}
3841
/*
 * Disable a fastpath status block in FW: write SB data marking its
 * function as disabled (chip-specific E2/E1x layout), then zero the
 * status block and its sync block in CSTORM internal memory.
 */
static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
{
	u32 *sb_data_p;
	u32 data_size = 0;
	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;

	/* disable the function first */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e2.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);

	/* zero the status block itself and its sync area */
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_STATUS_BLOCK_SIZE);
	bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
			CSTORM_SYNC_BLOCK_SIZE);
}
3875
3876/* helper: writes SP SB data to FW */
3877static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3878 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003879{
3880 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003881 int i;
3882 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3883 REG_WR(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3885 i*sizeof(u32),
3886 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003887}
3888
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003889static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3890{
3891 int func = BP_FUNC(bp);
3892 struct hc_sp_status_block_data sp_sb_data;
3893 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3894
3895 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3896 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3897 sp_sb_data.p_func.vf_valid = false;
3898
3899 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3900
3901 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3902 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3903 CSTORM_SP_STATUS_BLOCK_SIZE);
3904 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3905 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3906 CSTORM_SP_SYNC_BLOCK_SIZE);
3907
3908}
3909
3910
3911static inline
3912void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3913 int igu_sb_id, int igu_seg_id)
3914{
3915 hc_sm->igu_sb_id = igu_sb_id;
3916 hc_sm->igu_seg_id = igu_seg_id;
3917 hc_sm->timer_value = 0xFF;
3918 hc_sm->time_to_expire = 0xFFFFFFFF;
3919}
3920
/*
 * bnx2x_init_sb - program a fastpath (non-default) status block in the FW
 * @mapping:	DMA address of the host status block
 * @vfid:	VF id to associate with the SB (E2 only)
 * @vf_valid:	whether @vfid is meaningful (E2 only)
 * @fw_sb_id:	FW status block id to program
 * @igu_sb_id:	IGU status block id both state machines are bound to
 *
 * The SB is first disabled/zeroed, then a fresh data image (host address,
 * owning function, Rx/Tx state machines) is written to the FW.
 */
void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
			  u8 vf_valid, int fw_sb_id, int igu_sb_id)
{
	int igu_seg_id;

	struct hc_status_block_data_e2 sb_data_e2;
	struct hc_status_block_data_e1x sb_data_e1x;
	struct hc_status_block_sm *hc_sm_p;
	struct hc_index_data *hc_index_p;
	int data_size;
	u32 *sb_data_p;

	/* in backward-compatible mode the IGU is reached via HC segments */
	if (CHIP_INT_MODE_IS_BC(bp))
		igu_seg_id = HC_SEG_ACCESS_NORM;
	else
		igu_seg_id = IGU_SEG_ACCESS_NORM;

	bnx2x_zero_fp_sb(bp, fw_sb_id);

	/* build the chip-specific SB data image; both variants carry the
	 * same logical content, only the layout differs */
	if (CHIP_IS_E2(bp)) {
		memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
		sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e2.common.p_func.vf_id = vfid;
		sb_data_e2.common.p_func.vf_valid = vf_valid;
		sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e2.common.same_igu_sb_1b = true;
		sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e2.common.state_machine;
		hc_index_p = sb_data_e2.index_data;
		sb_data_p = (u32 *)&sb_data_e2;
		data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
	} else {
		memset(&sb_data_e1x, 0,
		       sizeof(struct hc_status_block_data_e1x));
		sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
		sb_data_e1x.common.p_func.vf_id = 0xff; /* no VFs on E1x */
		sb_data_e1x.common.p_func.vf_valid = false;
		sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
		sb_data_e1x.common.same_igu_sb_1b = true;
		sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
		sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
		hc_sm_p = sb_data_e1x.common.state_machine;
		hc_index_p = sb_data_e1x.index_data;
		sb_data_p = (u32 *)&sb_data_e1x;
		data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
	}

	/* both the Rx and Tx state machines point at the same IGU SB */
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
				       igu_sb_id, igu_seg_id);
	bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
				       igu_sb_id, igu_seg_id);

	DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);

	/* write indices to HW */
	bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
}
3979
3980static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3981 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003982{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003983 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003984 u8 ticks = usec / BNX2X_BTR;
3985
3986 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3987
3988 disable = disable ? 1 : (usec ? 0 : 1);
3989 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
3990}
3991
3992static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
3993 u16 tx_usec, u16 rx_usec)
3994{
3995 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
3996 false, rx_usec);
3997 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
3998 false, tx_usec);
3999}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004000
/*
 * bnx2x_init_def_sb - initialize the default (slowpath) status block
 *
 * Programs the attention part (attn group signal masks, attn message
 * address) and the slowpath SB data image in the FW, then ACKs the
 * default SB to enable its interrupt.
 */
static void bnx2x_init_def_sb(struct bnx2x *bp)
{
	struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	/* pick IGU ids: backward-compatible (HC) vs. normal IGU mode */
	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	/* cache the AEU enable masks for each dynamic attention group */
	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	/* tell the interrupt block where attention messages should land */
	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	/* disable/zero the old SB contents before programming new data */
	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	/* ACK the default SB and enable its interrupt */
	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
4082
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004083void bnx2x_update_coalesce(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004084{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004085 int i;
4086
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004087 for_each_queue(bp, i)
4088 bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
4089 bp->rx_ticks, bp->tx_ticks);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004090}
4091
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004092static void bnx2x_init_sp_ring(struct bnx2x *bp)
4093{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004094 spin_lock_init(&bp->spq_lock);
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00004095 atomic_set(&bp->spq_left, MAX_SPQ_PENDING);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004096
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004097 bp->spq_prod_idx = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004098 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4099 bp->spq_prod_bd = bp->spq;
4100 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004101}
4102
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004103static void bnx2x_init_eq_ring(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004104{
4105 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004106 for (i = 1; i <= NUM_EQ_PAGES; i++) {
4107 union event_ring_elem *elem =
4108 &bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004109
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004110 elem->next_page.addr.hi =
4111 cpu_to_le32(U64_HI(bp->eq_mapping +
4112 BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
4113 elem->next_page.addr.lo =
4114 cpu_to_le32(U64_LO(bp->eq_mapping +
4115 BCM_PAGE_SIZE*(i % NUM_EQ_PAGES)));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004116 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004117 bp->eq_cons = 0;
4118 bp->eq_prod = NUM_EQ_DESC;
4119 bp->eq_cons_sb = BNX2X_EQ_INDEX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004120}
4121
4122static void bnx2x_init_ind_table(struct bnx2x *bp)
4123{
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004124 int func = BP_FUNC(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004125 int i;
4126
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004127 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004128 return;
4129
Eilon Greenstein555f6c72009-02-12 08:36:11 +00004130 DP(NETIF_MSG_IFUP,
4131 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004132 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004133 REG_WR8(bp, BAR_TSTRORM_INTMEM +
Eilon Greenstein26c8fa42009-01-14 21:29:55 -08004134 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00004135 bp->fp->cl_id + (i % bp->num_queues));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004136}
4137
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00004138void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004139{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004140 int mode = bp->rx_mode;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004141 u16 cl_id;
4142
Eilon Greenstein581ce432009-07-29 00:20:04 +00004143 /* All but management unicast packets should pass to the host as well */
4144 u32 llh_mask =
4145 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4146 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4147 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4148 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004149
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004150 switch (mode) {
4151 case BNX2X_RX_MODE_NONE: /* no Rx */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004152 cl_id = BP_L_ID(bp);
4153 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004154 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004155
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004156 case BNX2X_RX_MODE_NORMAL:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004157 cl_id = BP_L_ID(bp);
4158 bnx2x_rxq_set_mac_filters(bp, cl_id,
4159 BNX2X_ACCEPT_UNICAST |
4160 BNX2X_ACCEPT_BROADCAST |
4161 BNX2X_ACCEPT_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004162 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004163
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004164 case BNX2X_RX_MODE_ALLMULTI:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004165 cl_id = BP_L_ID(bp);
4166 bnx2x_rxq_set_mac_filters(bp, cl_id,
4167 BNX2X_ACCEPT_UNICAST |
4168 BNX2X_ACCEPT_BROADCAST |
4169 BNX2X_ACCEPT_ALL_MULTICAST);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004170 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004171
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004172 case BNX2X_RX_MODE_PROMISC:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004173 cl_id = BP_L_ID(bp);
4174 bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);
4175
Eilon Greenstein581ce432009-07-29 00:20:04 +00004176 /* pass management unicast packets as well */
4177 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004178 break;
Eilon Greenstein356e2382009-02-12 08:38:32 +00004179
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004180 default:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004181 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4182 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004183 }
4184
Eilon Greenstein581ce432009-07-29 00:20:04 +00004185 REG_WR(bp,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004186 BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
4187 NIG_REG_LLH0_BRB1_DRV_MASK,
Eilon Greenstein581ce432009-07-29 00:20:04 +00004188 llh_mask);
4189
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004190 DP(NETIF_MSG_IFUP, "rx mode %d\n"
4191 "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
4192 "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
4193 bp->mac_filters.ucast_drop_all,
4194 bp->mac_filters.mcast_drop_all,
4195 bp->mac_filters.bcast_drop_all,
4196 bp->mac_filters.ucast_accept_all,
4197 bp->mac_filters.mcast_accept_all,
4198 bp->mac_filters.bcast_accept_all
4199 );
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004200
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004201 storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004202}
4203
Eilon Greenstein471de712008-08-13 15:49:35 -07004204static void bnx2x_init_internal_common(struct bnx2x *bp)
4205{
4206 int i;
4207
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004208 if (!CHIP_IS_E1(bp)) {
4209
4210 /* xstorm needs to know whether to add ovlan to packets or not,
4211 * in switch-independent we'll write 0 to here... */
4212 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004213 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004214 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004215 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004216 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004217 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004218 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00004219 bp->mf_mode);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004220 }
4221
Eilon Greenstein471de712008-08-13 15:49:35 -07004222 /* Zero this manually as its initialization is
4223 currently missing in the initTool */
4224 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4225 REG_WR(bp, BAR_USTRORM_INTMEM +
4226 USTORM_AGG_DATA_OFFSET + i * 4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004227 if (CHIP_IS_E2(bp)) {
4228 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
4229 CHIP_INT_MODE_IS_BC(bp) ?
4230 HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
4231 }
Eilon Greenstein471de712008-08-13 15:49:35 -07004232}
4233
static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
	/* Intentionally empty: no per-port internal (storm) memory needs
	 * initialization at present; the hook is kept for symmetry with
	 * the common/function init stages dispatched by bnx2x_init_internal().
	 */
}
4238
/*
 * bnx2x_init_internal - dispatch internal memory init by MCP load type
 *
 * The switch cases deliberately fall through: a COMMON load implies the
 * PORT stage, which in turn implies the FUNCTION stage.
 */
static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
4261
/*
 * bnx2x_init_fp_sb - initialize fastpath queue @fp_idx and its status block
 *
 * Derives the queue's client/FW-SB/IGU-SB ids from the base ids in @bp,
 * programs the FW status block and caches the shortcut offsets the
 * fastpath uses at runtime.
 */
static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	/* FW/IGU SB ids skip the slots reserved for the CNIC context */
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals to FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			   BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
				ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut to the USTORM Rx producers location */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			    USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			    USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]: bnx2x_init_sb(%p,%p) "
				   "cl_id %d fw_sb %d igu_sb %d\n",
		   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
		   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
4293
/*
 * bnx2x_nic_init - top-level NIC internal initialization
 * @load_code: MCP response that selects how much shared internal memory
 *	       (common/port/function) must be (re)initialized
 *
 * Initializes all fastpath SBs (plus the CNIC SB when compiled in), the
 * default SB, the Rx/Tx/slowpath/event rings and the storm internal
 * memories, and finally enables interrupts.  Ordering matters: SBs and
 * rings must be ready before interrupts are enabled.
 */
void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
4336
4337/* end of nic init */
4338
4339/*
4340 * gzip service functions
4341 */
4342
4343static int bnx2x_gunzip_init(struct bnx2x *bp)
4344{
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004345 bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
4346 &bp->gunzip_mapping, GFP_KERNEL);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004347 if (bp->gunzip_buf == NULL)
4348 goto gunzip_nomem1;
4349
4350 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
4351 if (bp->strm == NULL)
4352 goto gunzip_nomem2;
4353
4354 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
4355 GFP_KERNEL);
4356 if (bp->strm->workspace == NULL)
4357 goto gunzip_nomem3;
4358
4359 return 0;
4360
4361gunzip_nomem3:
4362 kfree(bp->strm);
4363 bp->strm = NULL;
4364
4365gunzip_nomem2:
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004366 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4367 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004368 bp->gunzip_buf = NULL;
4369
4370gunzip_nomem1:
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004371 netdev_err(bp->dev, "Cannot allocate firmware buffer for"
4372 " un-compression\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004373 return -ENOMEM;
4374}
4375
4376static void bnx2x_gunzip_end(struct bnx2x *bp)
4377{
4378 kfree(bp->strm->workspace);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004379 kfree(bp->strm);
4380 bp->strm = NULL;
4381
4382 if (bp->gunzip_buf) {
FUJITA Tomonori1a983142010-04-04 01:51:03 +00004383 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
4384 bp->gunzip_mapping);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004385 bp->gunzip_buf = NULL;
4386 }
4387}
4388
/*
 * bnx2x_gunzip - decompress a gzipped firmware image into bp->gunzip_buf
 * @zbuf: gzip stream (header + deflate data)
 * @len:  length of @zbuf in bytes
 *
 * On success returns 0 with the output length, in dwords, stored in
 * bp->gunzip_outlen; otherwise returns a zlib error code or -EINVAL for
 * a malformed gzip header.
 */
static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	/* the fixed part of a gzip header is 10 bytes */
	n = 10;

#define FNAME				0x8

	/* skip the optional original-file-name field, if present.
	 * NOTE(review): assumes the name is NUL-terminated within @len;
	 * the (n < len) guard stops a runaway scan but does not reject a
	 * truncated header - confirm callers only pass well-formed images.
	 */
	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	/* negative windowBits: raw deflate, the gzip header was consumed above */
	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	/* firmware is loaded into the chip in dword units */
	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
4434
4435/* nic load/unload */
4436
4437/*
Eilon Greenstein34f80b02008-06-23 20:33:01 -07004438 * General service functions
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004439 */
4440
/* send a NIG loopback debug packet: two 3-dword cells written to the
 * NIG debug-packet register - first carries the addresses with the SOP
 * flag, second the payload with the EOP flag */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
4458
/* some of the internal memories
 * are not directly readable from the driver
 * to test them we send debug packets
 */
/*
 * Sends loopback debug packets through the NIG/BRB/PRS path and checks
 * the expected packet counts at each stage.  Returns 0 on success or a
 * negative stage-specific code (-1..-4) on timeout/failure.  Leaves the
 * parser path re-enabled (and, without CNIC, in NIC mode) on exit.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;		/* timeout scale for FPGA/emulation targets */
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO do i reset NIG statistic? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}
4608
/*
 * enable_blocks_attention - unmask attention interrupts in the HW blocks.
 *
 * Writes the per-block interrupt mask registers: 0 means "no bits masked"
 * (all attentions enabled).  Non-zero values deliberately keep specific
 * known-benign sources masked (see inline comments).  The commented-out
 * SEM/MISC writes are intentionally left disabled.
 */
static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	/* E2 keeps bit 6 (0x40) of PXP INT_MASK_1 masked; E1/E1H unmask all */
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	/* PXP2 INT_MASK_0 value depends on the platform flavor */
	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18);		/* bit 3,4 masked */
}
4665
/*
 * Per-block parity-mask register table, consumed by enable_blocks_parity().
 * For each HW block: the address of its parity mask register and the value
 * to program there.  A mask of 0x0 enables all parity attentions for that
 * block; non-zero values keep the commented bit positions masked.
 */
static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK, 0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1, 0x7f},
	{HC_REG_HC_PRTY_MASK, 0x7},
	{MISC_REG_MISC_PRTY_MASK, 0x1},
	{QM_REG_QM_PRTY_MASK, 0x0},
	{DORQ_REG_DORQ_PRTY_MASK, 0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK, 0x4},	/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK, 0x0},
	{CFC_REG_CFC_PRTY_MASK, 0x0},
	{DBG_REG_DBG_PRTY_MASK, 0x0},
	{DMAE_REG_DMAE_PRTY_MASK, 0x0},
	{BRB1_REG_BRB1_PRTY_MASK, 0x0},
	{PRS_REG_PRS_PRTY_MASK, (1<<6)},/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK, 0x18},	/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK, 0x38},	/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK, 0x8},	/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
	{USEM_REG_USEM_PRTY_MASK_0, 0x0},
	{USEM_REG_USEM_PRTY_MASK_1, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
};
4699
4700static void enable_blocks_parity(struct bnx2x *bp)
4701{
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004702 int i;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004703
Nikitas Angelinascbd9da72010-09-08 11:20:37 +00004704 for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00004705 REG_WR(bp, bnx2x_parity_mask[i].addr,
4706 bnx2x_parity_mask[i].mask);
4707}
4708
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02004709
Eilon Greenstein81f75bb2009-01-22 03:37:31 +00004710static void bnx2x_reset_common(struct bnx2x *bp)
4711{
4712 /* reset_common */
4713 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4714 0xd3ffff7f);
4715 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
4716}
4717
Eilon Greenstein573f2032009-08-12 08:24:14 +00004718static void bnx2x_init_pxp(struct bnx2x *bp)
4719{
4720 u16 devctl;
4721 int r_order, w_order;
4722
4723 pci_read_config_word(bp->pdev,
4724 bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
4725 DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
4726 w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
4727 if (bp->mrrs == -1)
4728 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
4729 else {
4730 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
4731 r_order = bp->mrrs;
4732 }
4733
4734 bnx2x_init_pxp_arb(bp, r_order, w_order);
4735}
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004736
4737static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
4738{
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004739 int is_required;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004740 u32 val;
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004741 int port;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004742
Vladislav Zolotarov2145a922010-04-19 01:13:49 +00004743 if (BP_NOMCP(bp))
4744 return;
4745
4746 is_required = 0;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004747 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
4748 SHARED_HW_CFG_FAN_FAILURE_MASK;
4749
4750 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
4751 is_required = 1;
4752
4753 /*
4754 * The fan failure mechanism is usually related to the PHY type since
4755 * the power consumption of the board is affected by the PHY. Currently,
4756 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
4757 */
4758 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
4759 for (port = PORT_0; port < PORT_MAX; port++) {
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004760 is_required |=
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004761 bnx2x_fan_failure_det_req(
4762 bp,
4763 bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00004764 bp->common.shmem2_base,
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00004765 port);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004766 }
4767
4768 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
4769
4770 if (is_required == 0)
4771 return;
4772
4773 /* Fan failure is indicated by SPIO 5 */
4774 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
4775 MISC_REGISTERS_SPIO_INPUT_HI_Z);
4776
4777 /* set to active low mode */
4778 val = REG_RD(bp, MISC_REG_SPIO_INT);
4779 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00004780 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00004781 REG_WR(bp, MISC_REG_SPIO_INT, val);
4782
4783 /* enable interrupt to signal the IGU */
4784 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
4785 val |= (1 << MISC_REGISTERS_SPIO_5);
4786 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
4787}
4788
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004789static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
4790{
4791 u32 offset = 0;
4792
4793 if (CHIP_IS_E1(bp))
4794 return;
4795 if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
4796 return;
4797
4798 switch (BP_ABS_FUNC(bp)) {
4799 case 0:
4800 offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
4801 break;
4802 case 1:
4803 offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
4804 break;
4805 case 2:
4806 offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
4807 break;
4808 case 3:
4809 offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
4810 break;
4811 case 4:
4812 offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
4813 break;
4814 case 5:
4815 offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
4816 break;
4817 case 6:
4818 offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
4819 break;
4820 case 7:
4821 offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
4822 break;
4823 default:
4824 return;
4825 }
4826
4827 REG_WR(bp, offset, pretend_func_num);
4828 REG_RD(bp, offset);
4829 DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
4830}
4831
4832static void bnx2x_pf_disable(struct bnx2x *bp)
4833{
4834 u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
4835 val &= ~IGU_PF_CONF_FUNC_EN;
4836
4837 REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
4838 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
4839 REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
4840}
4841
/*
 * bnx2x_init_hw_common - one-time, chip-wide (COMMON stage) HW init.
 * @bp:		driver handle
 * @load_code:	MCP load response; FW_MSG_CODE_DRV_LOAD_COMMON_CHIP selects
 *		the external-PHY common init path at the end
 *
 * Brings the common blocks out of reset and runs the COMMON_STAGE init
 * for every HW block, with E1/E1H/E2 specific fixups along the way.
 * The statement order follows the HW init sequence and must not be
 * rearranged.  Returns 0 on success or -EBUSY when an init-done poll
 * fails.
 */
static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_ABS_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/*
		 * 4-port mode or 2-port mode we need to turn of master-enable
		 * for everyone, after that, turn it back on for self.
		 * so, we disregard multi-function or not, and always disable
		 * for all functions on the given path, this means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	/* big-endian hosts: enable byte-swapping in the PXP2 queues */
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do it's magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initalize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existance of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and his brother are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}


	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		/* poll budget: longer on emulation/FPGA, single try on ASIC */
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do it's magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	/* zero the STORM internal fast memory */
	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	/* fill the RSS key registers with random values while the
	 * searcher block is held in soft reset */
	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
					       bp->common.shmem_base,
					       bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, same ext phy is used for the two paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005232
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005233static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005234{
5235 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005236 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005237 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005238 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005239
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005240 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005241
5242 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005243
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005244 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005245 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005246
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005247 /* Timers bug workaround: disables the pf_master bit in pglue at
5248 * common phase, we need to enable it here before any dmae access are
5249 * attempted. Therefore we manually added the enable-master to the
5250 * port phase (it also happens in the function phase)
5251 */
5252 if (CHIP_IS_E2(bp))
5253 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5254
Eilon Greensteinca003922009-08-12 22:53:28 -07005255 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5256 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5257 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005258 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005259
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005260 /* QM cid (connection) count */
5261 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005262
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005263#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005264 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00005265 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5266 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005267#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005268
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005269 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005270
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005271 if (CHIP_MODE_IS_4_PORT(bp))
5272 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005273
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005274 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5275 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5276 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5277 /* no pause for emulation and FPGA */
5278 low = 0;
5279 high = 513;
5280 } else {
5281 if (IS_MF(bp))
5282 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5283 else if (bp->dev->mtu > 4096) {
5284 if (bp->flags & ONE_PORT_FLAG)
5285 low = 160;
5286 else {
5287 val = bp->dev->mtu;
5288 /* (24*1024 + val*4)/256 */
5289 low = 96 + (val/64) +
5290 ((val % 64) ? 1 : 0);
5291 }
5292 } else
5293 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5294 high = low + 56; /* 14*1024/256 */
5295 }
5296 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5297 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5298 }
5299
5300 if (CHIP_MODE_IS_4_PORT(bp)) {
5301 REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
5302 REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
5303 REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
5304 BRB1_REG_MAC_GUARANTIED_0), 40);
5305 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005306
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005307 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005308
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005309 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005310 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005311 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005312 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005313
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005314 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5315 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5316 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5317 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005318 if (CHIP_MODE_IS_4_PORT(bp))
5319 bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005320
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005321 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005322 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005323
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005324 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005325
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005326 if (!CHIP_IS_E2(bp)) {
5327 /* configure PBF to work without PAUSE mtu 9000 */
5328 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005329
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005330 /* update threshold */
5331 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5332 /* update init credit */
5333 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005334
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005335 /* probe changes */
5336 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5337 udelay(50);
5338 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5339 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005340
Michael Chan37b091b2009-10-10 13:46:55 +00005341#ifdef BCM_CNIC
5342 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005343#endif
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005344 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005345 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005346
5347 if (CHIP_IS_E1(bp)) {
5348 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5349 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5350 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005351 bnx2x_init_block(bp, HC_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005352
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005353 bnx2x_init_block(bp, IGU_BLOCK, init_stage);
5354
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005355 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005356 /* init aeu_mask_attn_func_0/1:
5357 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5358 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5359 * bits 4-7 are used for "per vn group attention" */
5360 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005361 (IS_MF(bp) ? 0xF7 : 0x7));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005362
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005363 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005364 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005365 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005366 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005367 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
Eilon Greenstein356e2382009-02-12 08:38:32 +00005368
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005369 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005370
5371 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5372
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005373 if (!CHIP_IS_E1(bp)) {
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005374 /* 0x2 disable mf_ov, 0x1 enable */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005375 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00005376 (IS_MF(bp) ? 0x1 : 0x2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005377
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005378 if (CHIP_IS_E2(bp)) {
5379 val = 0;
5380 switch (bp->mf_mode) {
5381 case MULTI_FUNCTION_SD:
5382 val = 1;
5383 break;
5384 case MULTI_FUNCTION_SI:
5385 val = 2;
5386 break;
5387 }
5388
5389 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
5390 NIG_REG_LLH0_CLS_TYPE), val);
5391 }
Eilon Greenstein1c063282009-02-12 08:36:43 +00005392 {
5393 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5394 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5395 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5396 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005397 }
5398
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005399 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005400 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005401 bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005402 bp->common.shmem_base,
5403 bp->common.shmem2_base);
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00005404 if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00005405 bp->common.shmem2_base, port)) {
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005406 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
5407 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5408 val = REG_RD(bp, reg_addr);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005409 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
Eilon Greenstein4d295db2009-07-21 05:47:47 +00005410 REG_WR(bp, reg_addr, val);
Eliezer Tamirf1410642008-02-28 11:51:50 -08005411 }
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07005412 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005413
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005414 return 0;
5415}
5416
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005417static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5418{
5419 int reg;
5420
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005421 if (CHIP_IS_E1(bp))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005422 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005423 else
5424 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005425
5426 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5427}
5428
/* Clear a single IGU status block; thin PF-flavoured wrapper around the
 * generic bnx2x_igu_clear_sb_gen() helper (third argument selects PF vs VF).
 */
static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}
5433
5434static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
5435{
5436 u32 i, base = FUNC_ILT_BASE(func);
5437 for (i = base; i < base + ILT_PER_FUNC; i++)
5438 bnx2x_ilt_wr(bp, i, 0);
5439}
5440
/* Per-function (PF) stage of hardware initialization.
 *
 * Programs the CDU ILT lines with the function's L2 context memory, runs
 * the FUNC0_STAGE init pass over every relevant HW block, brings up the
 * interrupt path (legacy HC or IGU, depending on bp->common.int_block),
 * clears latched PCIE error registers and probes the PHY.
 *
 * NOTE: the order of the register writes and bnx2x_init_block() calls
 * below follows the chip init sequence and must not be rearranged.
 *
 * Returns 0 (there are no failure paths in this stage).
 */
static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	int i;

	DP(BNX2X_MSG_MCP, "starting func init func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	ilt = BP_ILT(bp);
	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	/* Point each CDU ILT line at its slice of the pre-allocated
	 * connection-context memory (bp->context). */
	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need to
		   set the size */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers workaround bug: function init part.
		 * Need to wait 20msec after initializing ILT,
		 * needed to make sure there are no requests in
		 * one of the PXP internal queues with "old" ILT addresses
		 */
		msleep(20);
		/*
		 * Master enable - Due to WB DMAE writes performed before this
		 * register is re-initialized as part of the regular function
		 * init
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	/* From this point on DMAE transactions are safe to issue */
	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		/* Tell the storms which path (dual-engine half) we are on */
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
							BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		/* Multi-function mode: enable the LLH for this function and
		 * program its outer VLAN tag */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		/* IGU interrupt path (E1.5 backward-compatible or E2 mode) */
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/**
			 * Producer memory:
			 * E2 mode: address 0-135 match to the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - In backward compatible mode;
			 * for non default SB; each even line in the memory
			 * holds the U producer and each odd line hold
			 * the C producer. The first 128 producers are for
			 * NDSB (PF0 - 0-31; PF1 - 32-63 and so on). The last 20
			 * producers are for the DSB for each PF.
			 * Each PF has five segments: (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods;
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				/* Zero all producer entries of this SB */
				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);

			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}
5701
/* Top-level hardware init dispatcher.
 *
 * Runs the common/port/function init stages according to the load_code
 * the MCP handed back: a COMMON load falls through to PORT and FUNCTION,
 * a PORT load falls through to FUNCTION, and a FUNCTION load runs only
 * the per-function stage.  The switch fall-throughs below are therefore
 * intentional (marked "no break").
 *
 * Also latches the current driver-pulse sequence from shmem when an MCP
 * is present.  Returns 0 on success or the failing stage's error code.
 */
int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	/* DMAE is unusable until the function stage flips this back on */
	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		/* Seed the driver-pulse sequence from the firmware mailbox */
		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
5754
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00005755void bnx2x_free_mem(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005756{
5757
5758#define BNX2X_PCI_FREE(x, y, size) \
5759 do { \
5760 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005761 dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005762 x = NULL; \
5763 y = 0; \
5764 } \
5765 } while (0)
5766
5767#define BNX2X_FREE(x) \
5768 do { \
5769 if (x) { \
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005770 kfree((void *)x); \
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005771 x = NULL; \
5772 } \
5773 } while (0)
5774
5775 int i;
5776
5777 /* fastpath */
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005778 /* Common */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005779 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005780 /* status blocks */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005781 if (CHIP_IS_E2(bp))
5782 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
5783 bnx2x_fp(bp, i, status_blk_mapping),
5784 sizeof(struct host_hc_status_block_e2));
5785 else
5786 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
5787 bnx2x_fp(bp, i, status_blk_mapping),
5788 sizeof(struct host_hc_status_block_e1x));
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005789 }
5790 /* Rx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005791 for_each_queue(bp, i) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005792
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005793 /* fastpath rx rings: rx_buf rx_desc rx_comp */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005794 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5795 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5796 bnx2x_fp(bp, i, rx_desc_mapping),
5797 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5798
5799 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5800 bnx2x_fp(bp, i, rx_comp_mapping),
5801 sizeof(struct eth_fast_path_rx_cqe) *
5802 NUM_RCQ_BD);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005803
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005804 /* SGE ring */
Eilon Greenstein32626232008-08-13 15:51:07 -07005805 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005806 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
5807 bnx2x_fp(bp, i, rx_sge_mapping),
5808 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
5809 }
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005810 /* Tx */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00005811 for_each_queue(bp, i) {
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005812
5813 /* fastpath tx rings: tx_buf tx_desc */
5814 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
5815 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
5816 bnx2x_fp(bp, i, tx_desc_mapping),
Eilon Greensteinca003922009-08-12 22:53:28 -07005817 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
Eilon Greenstein555f6c72009-02-12 08:36:11 +00005818 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005819 /* end of fastpath */
5820
5821 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005822 sizeof(struct host_sp_status_block));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005823
5824 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005825 sizeof(struct bnx2x_slowpath));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005826
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005827 BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
5828 bp->context.size);
5829
5830 bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);
5831
5832 BNX2X_FREE(bp->ilt->lines);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005833
Michael Chan37b091b2009-10-10 13:46:55 +00005834#ifdef BCM_CNIC
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005835 if (CHIP_IS_E2(bp))
5836 BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
5837 sizeof(struct host_hc_status_block_e2));
5838 else
5839 BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
5840 sizeof(struct host_hc_status_block_e1x));
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005841
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005842 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005843#endif
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00005844
Vladislav Zolotarov7a9b2552008-06-23 20:34:36 -07005845 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005846
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005847 BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
5848 BCM_PAGE_SIZE * NUM_EQ_PAGES);
5849
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005850#undef BNX2X_PCI_FREE
5851#undef BNX2X_KFREE
5852}
5853
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005854static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
5855{
5856 union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);
5857 if (CHIP_IS_E2(bp)) {
5858 bnx2x_fp(bp, index, sb_index_values) =
5859 (__le16 *)status_blk.e2_sb->sb.index_values;
5860 bnx2x_fp(bp, index, sb_running_index) =
5861 (__le16 *)status_blk.e2_sb->sb.running_index;
5862 } else {
5863 bnx2x_fp(bp, index, sb_index_values) =
5864 (__le16 *)status_blk.e1x_sb->sb.index_values;
5865 bnx2x_fp(bp, index, sb_running_index) =
5866 (__le16 *)status_blk.e1x_sb->sb.running_index;
5867 }
5868}
5869
/* Allocate every host buffer the driver needs: per-queue status blocks
 * and rx/tx rings, optional CNIC status block and searcher T2 table, the
 * default status block, slowpath buffer, connection-context memory, ILT
 * lines, the slowpath (SPQ) ring and the event queue.
 *
 * Uses the classic goto-cleanup pattern: if any allocation fails, the
 * helper macros jump to alloc_mem_err, which frees everything allocated
 * so far via bnx2x_free_mem() (safe on partial state because that
 * function NULL-checks every pointer).
 *
 * Returns 0 on success, -ENOMEM on any allocation failure.
 */
int bnx2x_alloc_mem(struct bnx2x *bp)
{
/* DMA-coherent allocation, zeroed; jumps to alloc_mem_err on failure */
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

/* Regular zeroed kernel allocation; jumps to alloc_mem_err on failure */
#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);
		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif


	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	/* One CDU context per L2 connection id */
	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

/* The #undefs take effect despite following the returns - they are
 * preprocessor directives, not statements */
#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
5984
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005985/*
5986 * Init service functions
5987 */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005988int bnx2x_func_start(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005989{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005990 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005991
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005992 /* Wait for completion */
5993 return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
5994 WAIT_RAMROD_COMMON);
5995}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005996
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005997int bnx2x_func_stop(struct bnx2x *bp)
5998{
5999 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006001 /* Wait for completion */
6002 return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
6003 0, &(bp->state), WAIT_RAMROD_COMMON);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006004}
6005
Michael Chane665bfd2009-10-10 13:46:54 +00006006/**
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006007 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
Michael Chane665bfd2009-10-10 13:46:54 +00006008 *
6009 * @param bp driver descriptor
6010 * @param set set or clear an entry (1 or 0)
6011 * @param mac pointer to a buffer containing a MAC
6012 * @param cl_bit_vec bit vector of clients to register a MAC for
6013 * @param cam_offset offset in a CAM to use
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006014 * @param is_bcast is the set MAC a broadcast address (for E1 only)
Michael Chane665bfd2009-10-10 13:46:54 +00006015 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	/* The command is built in the slow-path DMA buffer and handed to
	 * the FW via a SET_MAC ramrod; completion is signalled by
	 * bnx2x_sp_event() clearing bp->set_mac_pending.
	 */
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* Raise the pending flag BEFORE posting the ramrod; smp_wmb()
	 * orders the flag write against the sp_post below (paired with
	 * the mb() in bnx2x_wait_ramrod()).
	 */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* Single CAM entry at 'cam_offset'; 0xff client_id marks the
	 * header as not tied to a specific L2 client.
	 */
	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC - the CAM stores each 16-bit word byte-swapped */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	/* 'set' selects between programming and invalidating the entry */
	if (set)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) PF_ID %d CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}
6069
/* Sleep (or actively poll, with WAIT_RAMROD_POLL) until *state_p -
 * updated asynchronously from ramrod completions - reaches 'state',
 * or ~5 seconds elapse.  WAIT_RAMROD_COMMON means the completion is
 * delivered over the event queue rather than a fastpath Rx ring.
 *
 * Returns 0 on success, -EBUSY on timeout, -EIO if the driver
 * paniced while waiting.
 */
int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			/* Service the completion source by hand (used when
			 * interrupts can't be relied upon).
			 */
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		/* bail out early if the driver already declared disaster */
		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
6120
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006121u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
Michael Chane665bfd2009-10-10 13:46:54 +00006122{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006123 if (CHIP_IS_E1H(bp))
6124 return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
6125 else if (CHIP_MODE_IS_4_PORT(bp))
6126 return BP_FUNC(bp) * 32 + rel_offset;
6127 else
6128 return BP_VN(bp) * 32 + rel_offset;
Michael Chane665bfd2009-10-10 13:46:54 +00006129}
6130
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006131void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
Michael Chane665bfd2009-10-10 13:46:54 +00006132{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006133 u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
6134 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));
6135
6136 /* networking MAC */
6137 bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
6138 (1 << bp->fp->cl_id), cam_offset , 0);
6139
6140 if (CHIP_IS_E1(bp)) {
6141 /* broadcast MAC */
6142 u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
6143 bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
6144 }
6145}
/* Build and post the E1 multicast filter table: one CAM entry per
 * address on the netdev's MC list, starting at CAM line 'offset'.
 * Entries used by the previous (longer) list are invalidated.  The
 * ramrod is posted but NOT waited for here; the caller synchronizes
 * on bp->set_mac_pending.
 */
static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac - the CAM stores each 16-bit word byte-swapped */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	/* hdr.length still holds the size of the PREVIOUS list (the
	 * command lives in a persistent slow-path buffer); invalidate
	 * any trailing entries the new, shorter list no longer uses.
	 */
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->
					   config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	/* flag must be visible before the ramrod is posted; pairs with
	 * the mb() in bnx2x_wait_ramrod()
	 */
	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		   U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}
/* Invalidate every entry of the previously programmed E1 multicast
 * CAM table and wait for the SET_MAC ramrod to complete.
 * NOTE(review): "invlidate" is a historical misspelling of
 * "invalidate"; renaming would require touching all callers.
 */
static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	/* flag must be visible before the ramrod is posted; pairs with
	 * the mb() in bnx2x_wait_ramrod()
	 */
	bp->set_mac_pending = 1;
	smp_wmb();

	/* hdr.length still describes the last list programmed into the
	 * persistent slow-path buffer; flip every entry to INVALIDATE
	 */
	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending,
				ramrod_flags);

}
6228
Michael Chan993ac7b2009-10-10 13:46:56 +00006229#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if ramrod doesn't return.
 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006240int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
Michael Chan993ac7b2009-10-10 13:46:56 +00006241{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006242 u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
6243 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
6244 u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
6245 u32 cl_bit_vec = (1 << iscsi_l2_cl_id);
Michael Chan993ac7b2009-10-10 13:46:56 +00006246
6247 /* Send a SET_MAC ramrod */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006248 bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
6249 cam_offset, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00006250 return 0;
6251}
6252#endif
6253
/* Translate the driver-side client-init parameters into the
 * little-endian client_init_ramrod_data layout consumed by the FW.
 * 'activate' selects whether the client comes up active.
 */
static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	/* DMA addresses of the BD, SGE and RCQ rings, split into 32-bit
	 * halves as the FW expects them
	 */
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}
6337
6338static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
6339{
6340 /* ustorm cxt validation */
6341 cxt->ustorm_ag_context.cdu_usage =
6342 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
6343 ETH_CONNECTION_TYPE);
6344 /* xcontext validation */
6345 cxt->xstorm_ag_context.cdu_reserved =
6346 CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
6347 ETH_CONNECTION_TYPE);
6348}
6349
/* Bring up an L2 client in the FW: program HC coalescing for its Tx/Rx
 * status blocks, validate its CDU context, zero its statistics, fill
 * the ramrod data buffer and post the CLIENT_SETUP ramrod, then wait
 * for the FW to move *pstate to the requested state.
 * Returns 0 on success or a bnx2x_wait_ramrod() error code.
 */
int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values; hc_rate is in interrupts/sec,
	 * the HW wants a period in usec (0 disables coalescing timeout)
	 */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	/* must be set before the ramrod completion can flip it to OPEN */
	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock thus no other explicit memory
	 * barrier except from mmiowb() is needed to impose a
	 * proper ordering of memory operations.
	 */
	mmiowb();


	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
				 params->ramrod_params.index,
				 params->ramrod_params.pstate,
				 ramrod_flags);
	return rc;
}
6416
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006417/**
6418 * Configure interrupt mode according to current configuration.
6419 * In case of MSI-X it will also try to enable MSI-X.
6420 *
6421 * @param bp
6422 *
6423 * @return int
6424 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		/* forced MSI/INTx: a single vector means a single queue */
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
					  "Multi requested but failed to "
					  "enable MSI-X (%d), "
					  "set number of queues to %d\n",
				   bp->num_queues,
				   1);
			bp->num_queues = 1;

			/* NOTE(review): rc from the failed MSI-X attempt is
			 * still returned even though the MSI/INTx fallback
			 * below may succeed - confirm callers treat a
			 * non-zero return here as non-fatal.
			 */
			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}
6469
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00006470/* must be called prioir to any HW initializations */
6471static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
6472{
6473 return L2_ILT_LINES(bp);
6474}
6475
/* Lay out this function's ILT: consecutive line ranges for the CDU,
 * QM (only if in use), SRC and TM clients, starting at the function's
 * ILT base.  SRC and TM are only populated with BCM_CNIC; otherwise
 * they are marked skip-init/skip-mem.
 */
void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU: L2 connection contexts (plus CNIC lines if compiled in) */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
							 QM_ILT_PAGE_SZ);

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
						 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));

	}
	/* SRC: searcher tables (CNIC only) */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM: timers (CNIC only) */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
					 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00006572
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006573int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6574 int is_leading)
6575{
6576 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006577 int rc;
6578
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006579 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6580 IGU_INT_ENABLE, 0);
6581
6582 params.ramrod_params.pstate = &fp->state;
6583 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6584 params.ramrod_params.index = fp->index;
6585 params.ramrod_params.cid = fp->cid;
6586
6587 if (is_leading)
6588 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6589
6590 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6591
6592 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6593
6594 rc = bnx2x_setup_fw_client(bp, &params, 1,
6595 bnx2x_sp(bp, client_init_data),
6596 bnx2x_sp_mapping(bp, client_init_data));
6597 return rc;
6598}
6599
/* Tear down an L2 client in the strict FW-mandated order:
 * HALT -> TERMINATE -> CFC delete, waiting for each ramrod to
 * complete before issuing the next.  Returns 0 on success or the
 * first bnx2x_wait_ramrod() timeout error.
 */
int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
{
	int rc;

	int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;

	/* halt the connection */
	*p->pstate = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
						  p->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;

	/* terminate the connection */
	*p->pstate = BNX2X_FP_STATE_TERMINATING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
						       p->cl_id, 0);
	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
			       p->pstate, poll_flag);
	if (rc) /* timeout */
		return rc;


	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);

	/* Wait for completion (CFC delete is a common ramrod - the
	 * completion arrives over the event queue)
	 */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
			       p->pstate, WAIT_RAMROD_COMMON);
	return rc;
}
6635
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006636static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006637{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006638 struct bnx2x_client_ramrod_params client_stop = {0};
6639 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006640
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006641 client_stop.index = index;
6642 client_stop.cid = fp->cid;
6643 client_stop.cl_id = fp->cl_id;
6644 client_stop.pstate = &(fp->state);
6645 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006646
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006647 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006648}
6649
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006650
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006651static void bnx2x_reset_func(struct bnx2x *bp)
6652{
6653 int port = BP_PORT(bp);
6654 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006655 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006656 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006657 (CHIP_IS_E2(bp) ?
6658 offsetof(struct hc_status_block_data_e2, common) :
6659 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006660 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6661 int pfid_offset = offsetof(struct pci_entity, pf_id);
6662
6663 /* Disable the function in the FW */
6664 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6665 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6666 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6667 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6668
6669 /* FP SBs */
6670 for_each_queue(bp, i) {
6671 struct bnx2x_fastpath *fp = &bp->fp[i];
6672 REG_WR8(bp,
6673 BAR_CSTRORM_INTMEM +
6674 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6675 + pfunc_offset_fp + pfid_offset,
6676 HC_FUNCTION_DISABLED);
6677 }
6678
6679 /* SP SB */
6680 REG_WR8(bp,
6681 BAR_CSTRORM_INTMEM +
6682 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6683 pfunc_offset_sp + pfid_offset,
6684 HC_FUNCTION_DISABLED);
6685
6686
6687 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6688 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_DATA_OFFSET(func),
6689 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006690
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006691 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006692 if (bp->common.int_block == INT_BLOCK_HC) {
6693 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6694 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6695 } else {
6696 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6697 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6698 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006699
Michael Chan37b091b2009-10-10 13:46:55 +00006700#ifdef BCM_CNIC
6701 /* Disable Timer scan */
6702 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6703 /*
6704 * Wait for at least 10ms and up to 2 second for the timers scan to
6705 * complete
6706 */
6707 for (i = 0; i < 200; i++) {
6708 msleep(10);
6709 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6710 break;
6711 }
6712#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006713 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006714 bnx2x_clear_func_ilt(bp, func);
6715
6716 /* Timers workaround bug for E2: if this is vnic-3,
6717 * we need to set the entire ilt range for this timers.
6718 */
6719 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6720 struct ilt_client_info ilt_cli;
6721 /* use dummy TM client */
6722 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6723 ilt_cli.start = 0;
6724 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6725 ilt_cli.client_num = ILT_CLIENT_TM;
6726
6727 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6728 }
6729
6730 /* this assumes that reset_port() called before reset_func()*/
6731 if (CHIP_IS_E2(bp))
6732 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006733
6734 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006735}
6736
/* Quiesce one port on the way down: mask its NIG interrupts, stop
 * traffic into the BRB, mask its AEU attentions and then verify the
 * BRB has drained.  Register write order follows the HW shutdown flow.
 */
static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	/* Mask NIG interrupts for this port */
	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	/* give in-flight packets time to drain before checking the BRB */
	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}
6762
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006763static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6764{
6765 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006766 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006767
6768 switch (reset_code) {
6769 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6770 bnx2x_reset_port(bp);
6771 bnx2x_reset_func(bp);
6772 bnx2x_reset_common(bp);
6773 break;
6774
6775 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6776 bnx2x_reset_port(bp);
6777 bnx2x_reset_func(bp);
6778 break;
6779
6780 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6781 bnx2x_reset_func(bp);
6782 break;
6783
6784 default:
6785 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6786 break;
6787 }
6788}
6789
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006790void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006791{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006792 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006793 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006794 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006795
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006796 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006797 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006798 struct bnx2x_fastpath *fp = &bp->fp[i];
6799
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006800 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006801 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006802
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006803 if (!cnt) {
6804 BNX2X_ERR("timeout waiting for queue[%d]\n",
6805 i);
6806#ifdef BNX2X_STOP_ON_ERROR
6807 bnx2x_panic();
6808 return -EBUSY;
6809#else
6810 break;
6811#endif
6812 }
6813 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006814 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006815 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006816 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006817 /* Give HW time to discard old tx messages */
6818 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006819
Yitchak Gertner65abd742008-08-25 15:26:24 -07006820 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006821 /* invalidate mc list,
6822 * wait and poll (interrupts are off)
6823 */
6824 bnx2x_invlidate_e1_mc_list(bp);
6825 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006826
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006827 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006828 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6829
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006830 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006831
6832 for (i = 0; i < MC_HASH_SIZE; i++)
6833 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6834 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006835
Michael Chan993ac7b2009-10-10 13:46:56 +00006836#ifdef BCM_CNIC
6837 /* Clear iSCSI L2 MAC */
6838 mutex_lock(&bp->cnic_mutex);
6839 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6840 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6841 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6842 }
6843 mutex_unlock(&bp->cnic_mutex);
6844#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006845
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006846 if (unload_mode == UNLOAD_NORMAL)
6847 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006848
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006849 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006850 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006851
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006852 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006853 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006854 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006855 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006856 /* The mac address is written to entries 1-4 to
6857 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006858 u8 entry = (BP_E1HVN(bp) + 1)*8;
6859
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006860 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006861 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006862
6863 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6864 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006865 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006866
6867 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006868
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006869 } else
6870 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6871
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006872 /* Close multi and leading connections
6873 Completions for ramrods are collected in a synchronous way */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006874 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006875
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006876 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006877#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006878 return;
6879#else
6880 goto unload_error;
6881#endif
6882
6883 rc = bnx2x_func_stop(bp);
6884 if (rc) {
6885 BNX2X_ERR("Function stop failed!\n");
6886#ifdef BNX2X_STOP_ON_ERROR
6887 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006888#else
6889 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006890#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006891 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006892#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006893unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006894#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006895 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006896 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006897 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006898 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6899 "%d, %d, %d\n", BP_PATH(bp),
6900 load_count[BP_PATH(bp)][0],
6901 load_count[BP_PATH(bp)][1],
6902 load_count[BP_PATH(bp)][2]);
6903 load_count[BP_PATH(bp)][0]--;
6904 load_count[BP_PATH(bp)][1 + port]--;
6905 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6906 "%d, %d, %d\n", BP_PATH(bp),
6907 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6908 load_count[BP_PATH(bp)][2]);
6909 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006910 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006911 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006912 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6913 else
6914 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6915 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006916
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006917 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6918 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6919 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006920
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006921 /* Disable HW interrupts, NAPI */
6922 bnx2x_netif_stop(bp, 1);
6923
6924 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006925 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006926
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006927 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006928 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006929
6930 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006931 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006932 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006933
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006934}
6935
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006936void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006937{
6938 u32 val;
6939
6940 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6941
6942 if (CHIP_IS_E1(bp)) {
6943 int port = BP_PORT(bp);
6944 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6945 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6946
6947 val = REG_RD(bp, addr);
6948 val &= ~(0x300);
6949 REG_WR(bp, addr, val);
6950 } else if (CHIP_IS_E1H(bp)) {
6951 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6952 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6953 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6954 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6955 }
6956}
6957
/* Close gates #2, #3 and #4: */
static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
{
	u32 val, addr;

	/* Gates #2 and #4a are closed/opened for "not E1" only */
	if (!CHIP_IS_E1(bp)) {
		/* #4 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
		REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
		       close ? (val | 0x1) : (val & (~(u32)1)));
		/* #2 */
		val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
		REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
		       close ? (val | 0x1) : (val & (~(u32)1)));
	}

	/* #3 */
	addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	val = REG_RD(bp, addr);
	/* NOTE(review): the sense is inverted vs gates #2/#4 - "closing"
	 * gate #3 CLEARS bit 0 of the HC config register; presumably that
	 * bit is an enable rather than a discard - confirm against the
	 * HC register spec. */
	REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));

	DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
	   close ? "closing" : "opening");
	/* ensure the gate writes reach the chip before returning */
	mmiowb();
}
6984
6985#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
6986
6987static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
6988{
6989 /* Do some magic... */
6990 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
6991 *magic_val = val & SHARED_MF_CLP_MAGIC;
6992 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
6993}
6994
6995/* Restore the value of the `magic' bit.
6996 *
6997 * @param pdev Device handle.
6998 * @param magic_val Old value of the `magic' bit.
6999 */
7000static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7001{
7002 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007003 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7004 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7005 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7006}
7007
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007008/**
7009 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007010 *
7011 * @param bp
7012 * @param magic_val Old value of 'magic' bit.
7013 */
7014static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7015{
7016 u32 shmem;
7017 u32 validity_offset;
7018
7019 DP(NETIF_MSG_HW, "Starting\n");
7020
7021 /* Set `magic' bit in order to save MF config */
7022 if (!CHIP_IS_E1(bp))
7023 bnx2x_clp_reset_prep(bp, magic_val);
7024
7025 /* Get shmem offset */
7026 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7027 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7028
7029 /* Clear validity map flags */
7030 if (shmem > 0)
7031 REG_WR(bp, shmem + validity_offset, 0);
7032}
7033
7034#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7035#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7036
7037/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7038 * depending on the HW type.
7039 *
7040 * @param bp
7041 */
7042static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7043{
7044 /* special handling for emulation and FPGA,
7045 wait 10 times longer */
7046 if (CHIP_REV_IS_SLOW(bp))
7047 msleep(MCP_ONE_TIMEOUT*10);
7048 else
7049 msleep(MCP_ONE_TIMEOUT);
7050}
7051
/* Wait for the MCP to come back up after a reset by polling the shmem
 * validity map, then restore the `magic' bit.  Returns 0 on success,
 * -ENOTTY if shmem is unreadable or the signature never became valid
 * within MCP_TIMEOUT.
 */
static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
{
	u32 shmem, cnt, validity_offset, val;
	int rc = 0;

	/* give the MCP a head start before polling */
	msleep(100);

	/* Get shmem offset */
	shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	if (shmem == 0) {
		BNX2X_ERR("Shmem 0 return failure\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

	validity_offset = offsetof(struct shmem_region, validity_map[0]);

	/* Wait for MCP to come up */
	for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
		/* TBD: its best to check validity map of last port.
		 * currently checks on port 0.
		 */
		val = REG_RD(bp, shmem + validity_offset);
		DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
		   shmem + validity_offset, val);

		/* check that shared memory is valid. */
		if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		    == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
			break;

		bnx2x_mcp_wait_one(bp);
	}

	DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);

	/* Check that shared memory is valid. This indicates that MCP is up. */
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
	    (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
		BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
		rc = -ENOTTY;
		goto exit_lbl;
	}

exit_lbl:
	/* Restore the `magic' bit value */
	if (!CHIP_IS_E1(bp))
		bnx2x_clp_reset_done(bp, magic_val);

	return rc;
}
7103
/* Clear the PXP2 read-path init/done indications ahead of (and again
 * after) a chip reset.  Not needed on E1. */
static void bnx2x_pxp_prep(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
		REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
		REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
		/* make sure the writes reach the chip before proceeding */
		mmiowb();
	}
}
7113
/*
 * Reset the whole chip except for:
 *      - PCIE core
 *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
 *              one reset bit)
 *      - IGU
 *      - MISC (including AEU)
 *      - GRC
 *      - RBCN, RBCP
 */
static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
{
	u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;

	/* RESET_REG_1 blocks that must stay out of reset */
	not_reset_mask1 =
		MISC_REGISTERS_RESET_REG_1_RST_HC |
		MISC_REGISTERS_RESET_REG_1_RST_PXPV |
		MISC_REGISTERS_RESET_REG_1_RST_PXP;

	/* RESET_REG_2 blocks that must stay out of reset */
	not_reset_mask2 =
		MISC_REGISTERS_RESET_REG_2_RST_MDIO |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_RBCN |
		MISC_REGISTERS_RESET_REG_2_RST_GRC |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
		MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;

	reset_mask1 = 0xffffffff;

	/* RESET_REG_2 mask width differs between E1 and later chips */
	if (CHIP_IS_E1(bp))
		reset_mask2 = 0xffff;
	else
		reset_mask2 = 0x1ffff;

	/* put everything except the excluded blocks into reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       reset_mask1 & (~not_reset_mask1));
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
	       reset_mask2 & (~not_reset_mask2));

	barrier();
	mmiowb();

	/* take all blocks back out of reset */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
	mmiowb();
}
7162
/* The "process kill" recovery flow: wait for the chip's read path
 * (Tetris buffer) to drain, close gates #2-#4, prepare the MCP and PXP
 * for reset, reset the chip, then recover the MCP/PXP and re-open the
 * gates.  Returns 0 on success, -EAGAIN if the Tetris buffer never
 * drained or the MCP failed to come back up.
 */
static int bnx2x_process_kill(struct bnx2x *bp)
{
	int cnt = 1000;
	u32 val = 0;
	u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;


	/* Empty the Tetris buffer, wait for 1s */
	do {
		sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
		blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
		port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
		port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
		pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
		/* expected idle values - all counters drained, both ports
		 * idle and no expansion-ROM reads pending */
		if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
		    ((port_is_idle_0 & 0x1) == 0x1) &&
		    ((port_is_idle_1 & 0x1) == 0x1) &&
		    (pgl_exp_rom2 == 0xffffffff))
			break;
		msleep(1);
	} while (cnt-- > 0);

	if (cnt <= 0) {
		DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there"
			  " are still"
			  " outstanding read requests after 1s!\n");
		DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
			  " port_is_idle_0=0x%08x,"
			  " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
			  sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
			  pgl_exp_rom2);
		return -EAGAIN;
	}

	barrier();

	/* Close gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, true);

	/* TBD: Indicate that "process kill" is in progress to MCP */

	/* Clear "unprepared" bit */
	REG_WR(bp, MISC_REG_UNPREPARED, 0);
	barrier();

	/* Make sure all is written to the chip before the reset */
	mmiowb();

	/* Wait for 1ms to empty GLUE and PCI-E core queues,
	 * PSWHST, GRC and PSWRD Tetris buffer.
	 */
	msleep(1);

	/* Prepare to chip reset: */
	/* MCP */
	bnx2x_reset_mcp_prep(bp, &val);

	/* PXP */
	bnx2x_pxp_prep(bp);
	barrier();

	/* reset the chip */
	bnx2x_process_kill_chip_reset(bp);
	barrier();

	/* Recover after reset: */
	/* MCP */
	if (bnx2x_reset_mcp_comp(bp, val))
		return -EAGAIN;

	/* PXP */
	bnx2x_pxp_prep(bp);

	/* Open the gates #2, #3 and #4 */
	bnx2x_set_234_gates(bp, false);

	/* TBD: IGU/AEU preparation bring back the AEU/IGU to a
	 * reset state, re-enable attentions. */

	return 0;
}
7244
7245static int bnx2x_leader_reset(struct bnx2x *bp)
7246{
7247 int rc = 0;
7248 /* Try to recover after the failure */
7249 if (bnx2x_process_kill(bp)) {
7250 printk(KERN_ERR "%s: Something bad had happen! Aii!\n",
7251 bp->dev->name);
7252 rc = -EAGAIN;
7253 goto exit_leader_reset;
7254 }
7255
7256 /* Clear "reset is in progress" bit and update the driver state */
7257 bnx2x_set_reset_done(bp);
7258 bp->recovery_state = BNX2X_RECOVERY_DONE;
7259
7260exit_leader_reset:
7261 bp->is_leader = 0;
7262 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7263 smp_wmb();
7264 return rc;
7265}
7266
/* Assumption: runs under rtnl lock. This together with the fact
 * that it's called only from bnx2x_reset_task() ensure that it
 * will never be called when netif_running(bp->dev) is false.
 *
 * State machine: INIT (try to become leader, unload the nic) ->
 * WAIT (leader: wait for all functions down, then bnx2x_leader_reset();
 * non-leader: wait for the leader to finish or take over leadership).
 * Re-invocations are driven by rescheduling bp->reset_task.
 */
static void bnx2x_parity_recover(struct bnx2x *bp)
{
	DP(NETIF_MSG_HW, "Handling parity\n");
	while (1) {
		switch (bp->recovery_state) {
		case BNX2X_RECOVERY_INIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
			/* Try to get a LEADER_LOCK HW lock */
			if (bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08))
				bp->is_leader = 1;

			/* Stop the driver */
			/* If interface has been removed - break */
			if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
				return;

			bp->recovery_state = BNX2X_RECOVERY_WAIT;
			/* Ensure "is_leader" and "recovery_state"
			 *  update values are seen on other CPUs
			 */
			smp_wmb();
			/* fall back to the top of the loop to handle WAIT */
			break;

		case BNX2X_RECOVERY_WAIT:
			DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
			if (bp->is_leader) {
				u32 load_counter = bnx2x_get_load_cnt(bp);
				if (load_counter) {
					/* Wait until all other functions get
					 * down.
					 */
					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;
				} else {
					/* If all other functions got down -
					 * try to bring the chip back to
					 * normal. In any case it's an exit
					 * point for a leader.
					 */
					if (bnx2x_leader_reset(bp) ||
					    bnx2x_nic_load(bp, LOAD_NORMAL)) {
						printk(KERN_ERR"%s: Recovery "
						"has failed. Power cycle is "
						"needed.\n", bp->dev->name);
						/* Disconnect this device */
						netif_device_detach(bp->dev);
						/* Block ifup for all function
						 * of this ASIC until
						 * "process kill" or power
						 * cycle.
						 */
						bnx2x_set_reset_in_progress(bp);
						/* Shut down the power */
						bnx2x_set_power_state(bp,
								PCI_D3hot);
						return;
					}

					return;
				}
			} else { /* non-leader */
				if (!bnx2x_reset_is_done(bp)) {
					/* Try to get a LEADER_LOCK HW lock as
					 * long as a former leader may have
					 * been unloaded by the user or
					 * released a leadership by another
					 * reason.
					 */
					if (bnx2x_trylock_hw_lock(bp,
					    HW_LOCK_RESOURCE_RESERVED_08)) {
						/* I'm a leader now! Restart a
						 * switch case.
						 */
						bp->is_leader = 1;
						break;
					}

					schedule_delayed_work(&bp->reset_task,
								HZ/10);
					return;

				} else { /* A leader has completed
					  * the "process kill". It's an exit
					  * point for a non-leader.
					  */
					bnx2x_nic_load(bp, LOAD_NORMAL);
					bp->recovery_state =
						BNX2X_RECOVERY_DONE;
					smp_wmb();
					return;
				}
			}
		default:
			/* BNX2X_RECOVERY_DONE (or unknown) - nothing to do */
			return;
		}
	}
}
7370
/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
 * scheduled on a general queue in order to prevent a dead lock.
 */
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);

#ifdef BNX2X_STOP_ON_ERROR
	/* with STOP_ON_ERROR the chip state is preserved for debugging */
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	/* a pending parity recovery takes precedence over a plain reset */
	if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
		bnx2x_parity_recover(bp);
	else {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
	}

reset_task_exit:
	rtnl_unlock();
}
7400
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007401/* end of nic load/unload */
7402
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007403/*
7404 * Init service functions
7405 */
7406
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007407u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007408{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007409 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7410 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7411 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007412}
7413
/* Disable interrupts on E1H-and-newer chips by temporarily "pretending"
 * to be function 0 via the PGL pretend register.  The write/read pairs
 * and mmiowb()s enforce ordering of the pretend transitions around the
 * interrupt-disable itself.
 */
static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
{
	u32 reg = bnx2x_get_pretend_reg(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	REG_RD(bp, reg);	/* Flush the GRC transaction (in the chip) */

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function */
	REG_WR(bp, reg, BP_ABS_FUNC(bp));
	REG_RD(bp, reg);	/* flush the restore as well */
}
7435
/* Disable chip interrupts, selecting the chip-appropriate flow:
 * E1 disables directly, later chips go through the function-0
 * "pretend" sequence. */
static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
{
	if (!CHIP_IS_E1(bp))
		bnx2x_undi_int_disable_e1h(bp);
	else
		bnx2x_int_disable(bp);
}
7443
/* If a previously loaded driver (detected as UNDI via the doorbell CID
 * offset) left the device initialized, unload it through the MCP on
 * both ports, quiesce traffic and reset the chip so this driver starts
 * from a clean state.
 */
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our pf_num */
			int orig_pf_num = bp->pf_num;
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->pf_num = 0;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code, 0);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp,
					     DRV_MSG_CODE_UNLOAD_DONE, 0);

				/* unload UNDI on port 1 */
				bp->pf_num = 1;
				bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code, 0);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			       NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
			       NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			       MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);

			/* restore our func and fw_seq */
			bp->pf_num = orig_pf_num;
			bp->fw_seq =
			      (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
7542
/*
 * bnx2x_get_common_hwinfo - read chip-wide (port/function independent) HW info.
 *
 * Reads the chip identification registers, doorbell size, port mode (E2),
 * base fast-path status block id, shmem/shmem2 bases and the
 * shared-configuration words (flash size, LED mode, feature flags,
 * bootcode version, WoL capability, part number) into bp->common and
 * bp->link_params.  Sets NO_MCP_FLAG and returns early when no shmem
 * base is published (MCP not running).
 *
 * NOTE(review): the register reads below are ordered as the hardware
 * expects; do not reorder them.
 */
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;

	/* Set doorbell size */
	bp->db_size = (1 << BNX2X_DB_SHIFT);

	if (CHIP_IS_E2(bp)) {
		/* 4-port mode: override register wins over the strap when
		 * its enable bit (bit 0) is set */
		val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
		if ((val & 1) == 0)
			val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
		else
			val = (val >> 1) & 1;
		BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
						       "2_PORT_MODE");
		bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
						 CHIP_2_PORT_MODE;

		if (CHIP_MODE_IS_4_PORT(bp))
			bp->pfid = (bp->pf_num >> 1);	/* 0..3 */
		else
			bp->pfid = (bp->pf_num & 0x6);	/* 0, 2, 4, 6 */
	} else {
		bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
		bp->pfid = bp->pf_num;			/* 0..7 */
	}

	/*
	 * set base FW non-default (fast path) status block id, this value is
	 * used to initialize the fw_sb_id saved on the fp/queue structure to
	 * determine the id used by the FW.
	 */
	if (CHIP_IS_E1x(bp))
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
	else /* E2 */
		bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;

	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	/* detect single-port devices from the bond-id/strap bits */
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	/* shmem2 base comes from a per-path generic register on E2 */
	bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
					MISC_REG_GENERIC_CR_1 :
					MISC_REG_GENERIC_CR_0));
	bp->link_params.shmem_base = bp->common.shmem_base;
	bp->link_params.shmem2_base = bp->common.shmem2_base;
	BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
		       bp->common.shmem_base, bp->common.shmem2_base);

	if (!bp->common.shmem_base) {
		/* no shared memory published - bootcode is not running */
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X, "
			  "please upgrade BC\n", BNX2X_BC_VER, val);
	}
	/* optic module verification support depends on bootcode version */
	bp->link_params.feature_config_flags |=
				(val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
				FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	/* part number is stored as four consecutive 32-bit shmem words */
	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
		 val, val2, val3, val4);
}
7680
/* Decode the function-id and vector-number fields of an IGU CAM entry */
#define IGU_FID(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
#define IGU_VEC(val)	GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)

7684static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7685{
7686 int pfid = BP_FUNC(bp);
7687 int vn = BP_E1HVN(bp);
7688 int igu_sb_id;
7689 u32 val;
7690 u8 fid;
7691
7692 bp->igu_base_sb = 0xff;
7693 bp->igu_sb_cnt = 0;
7694 if (CHIP_INT_MODE_IS_BC(bp)) {
7695 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7696 bp->l2_cid_count);
7697
7698 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7699 FP_SB_MAX_E1x;
7700
7701 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7702 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7703
7704 return;
7705 }
7706
7707 /* IGU in normal mode - read CAM */
7708 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7709 igu_sb_id++) {
7710 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7711 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7712 continue;
7713 fid = IGU_FID(val);
7714 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7715 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7716 continue;
7717 if (IGU_VEC(val) == 0)
7718 /* default status block */
7719 bp->igu_dsb_id = igu_sb_id;
7720 else {
7721 if (bp->igu_base_sb == 0xff)
7722 bp->igu_base_sb = igu_sb_id;
7723 bp->igu_sb_cnt++;
7724 }
7725 }
7726 }
7727 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7728 if (bp->igu_sb_cnt == 0)
7729 BNX2X_ERR("CAM configuration error\n");
7730}
7731
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007732static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7733 u32 switch_cfg)
7734{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007735 int cfg_size = 0, idx, port = BP_PORT(bp);
7736
7737 /* Aggregation of supported attributes of all external phys */
7738 bp->port.supported[0] = 0;
7739 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007740 switch (bp->link_params.num_phys) {
7741 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007742 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7743 cfg_size = 1;
7744 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007745 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007746 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7747 cfg_size = 1;
7748 break;
7749 case 3:
7750 if (bp->link_params.multi_phy_config &
7751 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7752 bp->port.supported[1] =
7753 bp->link_params.phy[EXT_PHY1].supported;
7754 bp->port.supported[0] =
7755 bp->link_params.phy[EXT_PHY2].supported;
7756 } else {
7757 bp->port.supported[0] =
7758 bp->link_params.phy[EXT_PHY1].supported;
7759 bp->port.supported[1] =
7760 bp->link_params.phy[EXT_PHY2].supported;
7761 }
7762 cfg_size = 2;
7763 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007764 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007765
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007766 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007767 BNX2X_ERR("NVRAM config error. BAD phy config."
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007768 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007769 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007770 dev_info.port_hw_config[port].external_phy_config),
7771 SHMEM_RD(bp,
7772 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007773 return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007774 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007775
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007776 switch (switch_cfg) {
7777 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007778 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7779 port*0x10);
7780 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007781 break;
7782
7783 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007784 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7785 port*0x18);
7786 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007787 break;
7788
7789 default:
7790 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007791 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007792 return;
7793 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007794 /* mask what we support according to speed_cap_mask per configuration */
7795 for (idx = 0; idx < cfg_size; idx++) {
7796 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007797 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007798 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007799
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007800 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007801 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007802 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007803
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007804 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007805 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007806 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007807
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007808 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007809 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007810 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007811
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007812 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007813 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007814 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007815 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007816
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007817 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007818 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007819 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007820
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007821 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007822 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007823 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007824
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007825 }
7826
7827 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7828 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007829}
7830
/*
 * bnx2x_link_settings_requested - translate NVRAM link_config into requests.
 *
 * For each link configuration (one for a single/one-external-PHY setup,
 * two when two external PHYs are present) decode the requested speed from
 * bp->port.link_config[idx] into bp->link_params.req_line_speed/req_duplex
 * and the matching ADVERTISED_* bits in bp->port.advertising.  A speed the
 * PHY does not support is a fatal NVRAM error and aborts the whole parse.
 * Finally the requested flow control is extracted; AUTO flow control is
 * downgraded to NONE when autoneg is not supported.
 */
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	u32 link_config, idx, cfg_size = 0;
	bp->port.advertising[0] = 0;
	bp->port.advertising[1] = 0;
	/* number of link configurations to decode, per PHY topology */
	switch (bp->link_params.num_phys) {
	case 1:
	case 2:
		cfg_size = 1;
		break;
	case 3:
		cfg_size = 2;
		break;
	}
	for (idx = 0; idx < cfg_size; idx++) {
		bp->link_params.req_duplex[idx] = DUPLEX_FULL;
		link_config = bp->port.link_config[idx];
		switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
		case PORT_FEATURE_LINK_SPEED_AUTO:
			if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
				bp->link_params.req_line_speed[idx] =
					SPEED_AUTO_NEG;
				bp->port.advertising[idx] |=
					bp->port.supported[idx];
			} else {
				/* force 10G, no AN */
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
				/* NOTE: continue also skips the flow-control
				 * setup below for this configuration */
				continue;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_FULL:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10M_HALF:
			if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			/* unknown speed request: fall back to autoneg with
			 * everything the PHY supports */
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
		/* AUTO flow control needs autoneg; otherwise force NONE */
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}
8024
Michael Chane665bfd2009-10-10 13:46:54 +00008025static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8026{
8027 mac_hi = cpu_to_be16(mac_hi);
8028 mac_lo = cpu_to_be32(mac_lo);
8029 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8030 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8031}
8032
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008033static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008034{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008035 int port = BP_PORT(bp);
8036 u32 val, val2;
Eilon Greenstein589abe32009-02-12 08:36:55 +00008037 u32 config;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008038 u32 ext_phy_type, ext_phy_config;;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008039
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008040 bp->link_params.bp = bp;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008041 bp->link_params.port = port;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008042
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008043 bp->link_params.lane_config =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008044 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008045
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008046 bp->link_params.speed_cap_mask[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008047 SHMEM_RD(bp,
8048 dev_info.port_hw_config[port].speed_capability_mask);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008049 bp->link_params.speed_cap_mask[1] =
8050 SHMEM_RD(bp,
8051 dev_info.port_hw_config[port].speed_capability_mask2);
8052 bp->port.link_config[0] =
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008053 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8054
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008055 bp->port.link_config[1] =
8056 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00008057
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008058 bp->link_params.multi_phy_config =
8059 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008060 /* If the device is capable of WoL, set the default state according
8061 * to the HW
8062 */
Eilon Greenstein4d295db2009-07-21 05:47:47 +00008063 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
Eilon Greenstein3ce2c3f2009-02-12 08:37:52 +00008064 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8065 (config & PORT_FEATURE_WOL_ENABLED));
8066
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008067 BNX2X_DEV_INFO("lane_config 0x%08x "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008068 "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008069 bp->link_params.lane_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008070 bp->link_params.speed_cap_mask[0],
8071 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008072
Yaniv Rosnera22f0782010-09-07 11:41:20 +00008073 bp->link_params.switch_cfg = (bp->port.link_config[0] &
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00008074 PORT_FEATURE_CONNECTED_SWITCH_MASK);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008075 bnx2x_phy_probe(&bp->link_params);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008076 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008077
8078 bnx2x_link_settings_requested(bp);
8079
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008080 /*
8081 * If connected directly, work with the internal PHY, otherwise, work
8082 * with the external PHY
8083 */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008084 ext_phy_config =
8085 SHMEM_RD(bp,
8086 dev_info.port_hw_config[port].external_phy_config);
8087 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008088 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008089 bp->mdio.prtad = bp->port.phy_addr;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008090
8091 else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
8092 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
8093 bp->mdio.prtad =
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00008094 XGXS_EXT_PHY_ADDR(ext_phy_config);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008095
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008096 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8097 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
Michael Chane665bfd2009-10-10 13:46:54 +00008098 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008099 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8100 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
Michael Chan37b091b2009-10-10 13:46:55 +00008101
8102#ifdef BCM_CNIC
8103 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8104 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8105 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8106#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008107}
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008108
/*
 * bnx2x_get_hwinfo - top-level probe-time hardware information gathering.
 *
 * Reads common chip info, selects the interrupt block (HC on E1x, IGU on
 * E2 - including IGU CAM discovery), parses the multi-function (MF)
 * configuration from shared memory, adjusts the status-block budget for
 * MF/E2 limits, reads per-port info and the firmware mailbox sequence,
 * and finally resolves the MAC address (MF override, or a random MAC on
 * MCP-less emulation/FPGA setups).
 *
 * Returns 0 on success or -EPERM on an invalid MF configuration.
 */
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		/* E1x: host coalescing block, fixed SB layout */
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		/* E2: IGU block; layout depends on backward-compat mode */
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);

	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d  igu_base_sb %d  igu_sb_cnt %d\n",
			     bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		/* locate the MF config area: shmem2 pointer if published,
		 * otherwise it follows the function mailboxes in shmem */
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		/* a non-default outer-VLAN tag on function 0 indicates that
		 * the board is in multi-function mode */
		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				/* MF mode requires a valid outer VLAN tag
				 * for every function */
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_VN(bp)) {
				/* non-zero VN is only legal in MF mode */
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed when FW will support
	 * more then 16 L2 clients
	 */
#define MAX_L2_CLIENTS				16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		/* latch the current bootcode mailbox sequence number */
		bp->fw_seq =
		       (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
			DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		/* in MF mode the per-function MAC from the MF config
		 * overrides the port MAC read in bnx2x_get_port_hwinfo() */
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
8237
/*
 * Read the PCI Vital Product Data and, for Dell-branded boards, copy the
 * vendor-specific (V0) field into bp->fw_ver.
 *
 * bp->fw_ver is zeroed first, so on any parse failure it is left empty;
 * all failure paths simply return (best effort, no error reported).
 */
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	/* require the full VPD window to have been read */
	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	/* locate the read-only large-resource data tag */
	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;


	/* end of the RO block = tag start + header + payload size */
	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	/* reject a block that claims to extend past the buffer we read */
	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	/* manufacturer-ID keyword must be present and exactly 4 chars */
	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
				   PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	/* compare against both lower- and upper-case hex spellings of the
	 * Dell vendor ID, since the VPD string case is not guaranteed
	 */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			/* bound the copy by both bp->fw_ver (32 bytes) and
			 * the VPD buffer; terminate with a space so a
			 * version string can be appended after it
			 */
			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}
8301
/*
 * One-time software initialization of the bnx2x private state: locks,
 * work items, hardware info, module-parameter-derived settings and the
 * periodic timer.  Called once during probe, before register_netdev().
 *
 * Returns 0 on success or the error from bnx2x_get_hwinfo()/
 * bnx2x_alloc_mem_bp() (note: initialization continues even after such an
 * error and the code is returned at the end).
 */
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	/* RSS requires MSI-X; fall back to a single queue otherwise */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	/* dropless flow control is not supported on E1 silicon */
	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	/* slow (emulation/FPGA) chips tick 5x less often */
	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
8389
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008390
Dmitry Kravkovde0c62d2010-07-27 12:35:24 +00008391/****************************************************************************
8392* General service functions
8393****************************************************************************/
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008394
/* called with rtnl_lock */
/*
 * ndo_open handler: power the device up, complete any pending parity/HW
 * recovery ("process kill") flow if one is in progress, then load the NIC.
 *
 * Returns 0 on success, -EAGAIN while a recovery owned by another function
 * is still incomplete, or the error from bnx2x_nic_load().
 */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	/* a cleared "reset done" flag means a global HW reset/recovery is
	 * still pending; the do { } while (0) below exists only so that
	 * "break" can skip to the common exit of this recovery attempt
	 */
	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is on going
			 * recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared it may mean that. We don't
			 * check the attention state here because it may have
			 * already been cleared by a "common" reset but we
			 * shell proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
				bnx2x_trylock_hw_lock(bp,
				HW_LOCK_RESOURCE_RESERVED_08) &&
				(!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			/* recovery not possible from here: power back down
			 * and ask the user to retry later
			 */
			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR"%s: Recovery flow hasn't been properly"
			" completed yet. Try again later. If u still see this"
			" message after a few retries then power cycle is"
			" required.\n", bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8440
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008441/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008442static int bnx2x_close(struct net_device *dev)
8443{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008444 struct bnx2x *bp = netdev_priv(dev);
8445
8446 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008447 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008448 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008449
8450 return 0;
8451}
8452
Eilon Greensteinf5372252009-02-12 08:38:30 +00008453/* called with netif_tx_lock from dev_mcast.c */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00008454void bnx2x_set_rx_mode(struct net_device *dev)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008455{
8456 struct bnx2x *bp = netdev_priv(dev);
8457 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
8458 int port = BP_PORT(bp);
8459
8460 if (bp->state != BNX2X_STATE_OPEN) {
8461 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
8462 return;
8463 }
8464
8465 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
8466
8467 if (dev->flags & IFF_PROMISC)
8468 rx_mode = BNX2X_RX_MODE_PROMISC;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008469 else if ((dev->flags & IFF_ALLMULTI) ||
Jiri Pirko4cd24ea2010-02-08 04:30:35 +00008470 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
8471 CHIP_IS_E1(bp)))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008472 rx_mode = BNX2X_RX_MODE_ALLMULTI;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008473 else { /* some multicasts */
8474 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008475 /*
8476 * set mc list, do not wait as wait implies sleep
8477 * and set_rx_mode can be invoked from non-sleepable
8478 * context
8479 */
8480 u8 offset = (CHIP_REV_IS_SLOW(bp) ?
8481 BNX2X_MAX_EMUL_MULTI*(1 + port) :
8482 BNX2X_MAX_MULTICAST*(1 + port));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008483
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008484 bnx2x_set_e1_mc_list(bp, offset);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008485 } else { /* E1H */
8486 /* Accept one or more multicasts */
Jiri Pirko22bedad32010-04-01 21:22:57 +00008487 struct netdev_hw_addr *ha;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008488 u32 mc_filter[MC_HASH_SIZE];
8489 u32 crc, bit, regidx;
8490 int i;
8491
8492 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
8493
Jiri Pirko22bedad32010-04-01 21:22:57 +00008494 netdev_for_each_mc_addr(ha, dev) {
Johannes Berg7c510e42008-10-27 17:47:26 -07008495 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008496 bnx2x_mc_addr(ha));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008497
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008498 crc = crc32c_le(0, bnx2x_mc_addr(ha),
8499 ETH_ALEN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07008500 bit = (crc >> 24) & 0xff;
8501 regidx = bit >> 5;
8502 bit &= 0x1f;
8503 mc_filter[regidx] |= (1 << bit);
8504 }
8505
8506 for (i = 0; i < MC_HASH_SIZE; i++)
8507 REG_WR(bp, MC_HASH_OFFSET(bp, i),
8508 mc_filter[i]);
8509 }
8510 }
8511
8512 bp->rx_mode = rx_mode;
8513 bnx2x_set_storm_rx_mode(bp);
8514}
8515
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008516/* called with rtnl_lock */
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008517static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
8518 int devad, u16 addr)
8519{
8520 struct bnx2x *bp = netdev_priv(netdev);
8521 u16 value;
8522 int rc;
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008523
8524 DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
8525 prtad, devad, addr);
8526
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008527 /* The HW expects different devad if CL22 is used */
8528 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8529
8530 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008531 rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008532 bnx2x_release_phy_lock(bp);
8533 DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
8534
8535 if (!rc)
8536 rc = value;
8537 return rc;
8538}
8539
8540/* called with rtnl_lock */
8541static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
8542 u16 addr, u16 value)
8543{
8544 struct bnx2x *bp = netdev_priv(netdev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008545 int rc;
8546
8547 DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
8548 " value 0x%x\n", prtad, devad, addr, value);
8549
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008550 /* The HW expects different devad if CL22 is used */
8551 devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
8552
8553 bnx2x_acquire_phy_lock(bp);
Yaniv Rosnere10bc842010-09-07 11:40:50 +00008554 rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008555 bnx2x_release_phy_lock(bp);
8556 return rc;
8557}
8558
8559/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008560static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
8561{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008562 struct bnx2x *bp = netdev_priv(dev);
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008563 struct mii_ioctl_data *mdio = if_mii(ifr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008564
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008565 DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
8566 mdio->phy_id, mdio->reg_num, mdio->val_in);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008567
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008568 if (!netif_running(dev))
8569 return -EAGAIN;
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07008570
Eilon Greenstein01cd4522009-08-12 08:23:08 +00008571 return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008572}
8573
#ifdef CONFIG_NET_POLL_CONTROLLER
/* netpoll entry point: invoke the interrupt handler with the IRQ masked */
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int irq = bp->pdev->irq;

	disable_irq(irq);
	bnx2x_interrupt(irq, dev);
	enable_irq(irq);
}
#endif
8584
/* net_device operations table wired up in bnx2x_init_dev() */
static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};
8602
/*
 * PCI-level device initialization: enable the device, claim BARs, set up
 * DMA masks, map the register and doorbell windows, and populate the
 * net_device ops/features and MDIO glue.
 *
 * Resources are released in reverse order via the goto-cleanup labels on
 * any failure.  Returns 0 on success or a negative errno.
 */
static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	/* BAR0 = register window, BAR2 = doorbells; both must be MMIO */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	/* regions are shared between functions; only the first enabler of
	 * this PCI device requests them
	 */
	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	/* prefer 64-bit DMA, fall back to 32-bit */
	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	/* map only as much of the doorbell BAR as the chip needs */
	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	/* mirror the offload capabilities for VLAN-tagged traffic */
	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}
8772
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008773static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
8774 int *width, int *speed)
Eliezer Tamir25047952008-02-28 11:50:16 -08008775{
8776 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
8777
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008778 *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
8779
8780 /* return value of 1=2.5GHz 2=5GHz */
8781 *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
Eliezer Tamir25047952008-02-28 11:50:16 -08008782}
8783
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008784static int bnx2x_check_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008785{
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008786 const struct firmware *firmware = bp->firmware;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008787 struct bnx2x_fw_file_hdr *fw_hdr;
8788 struct bnx2x_fw_file_section *sections;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008789 u32 offset, len, num_ops;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008790 u16 *ops_offsets;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008791 int i;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00008792 const u8 *fw_ver;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008793
8794 if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
8795 return -EINVAL;
8796
8797 fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
8798 sections = (struct bnx2x_fw_file_section *)fw_hdr;
8799
8800 /* Make sure none of the offsets and sizes make us read beyond
8801 * the end of the firmware data */
8802 for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
8803 offset = be32_to_cpu(sections[i].offset);
8804 len = be32_to_cpu(sections[i].len);
8805 if (offset + len > firmware->size) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008806 dev_err(&bp->pdev->dev,
8807 "Section %d length is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008808 return -EINVAL;
8809 }
8810 }
8811
8812 /* Likewise for the init_ops offsets */
8813 offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
8814 ops_offsets = (u16 *)(firmware->data + offset);
8815 num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
8816
8817 for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
8818 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008819 dev_err(&bp->pdev->dev,
8820 "Section offset %d is out of bounds\n", i);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008821 return -EINVAL;
8822 }
8823 }
8824
8825 /* Check FW version */
8826 offset = be32_to_cpu(fw_hdr->fw_version.offset);
8827 fw_ver = firmware->data + offset;
8828 if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
8829 (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
8830 (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
8831 (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008832 dev_err(&bp->pdev->dev,
8833 "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008834 fw_ver[0], fw_ver[1], fw_ver[2],
8835 fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
8836 BCM_5710_FW_MINOR_VERSION,
8837 BCM_5710_FW_REVISION_VERSION,
8838 BCM_5710_FW_ENGINEERING_VERSION);
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008839 return -EINVAL;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008840 }
8841
8842 return 0;
8843}
8844
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008845static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008846{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008847 const __be32 *source = (const __be32 *)_source;
8848 u32 *target = (u32 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008849 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008850
8851 for (i = 0; i < n/4; i++)
8852 target[i] = be32_to_cpu(source[i]);
8853}
8854
8855/*
8856 Ops array is stored in the following format:
8857 {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
8858 */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008859static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008860{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008861 const __be32 *source = (const __be32 *)_source;
8862 struct raw_op *target = (struct raw_op *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008863 u32 i, j, tmp;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008864
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008865 for (i = 0, j = 0; i < n/8; i++, j += 2) {
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008866 tmp = be32_to_cpu(source[j]);
8867 target[i].op = (tmp >> 24) & 0xff;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008868 target[i].offset = tmp & 0xffffff;
8869 target[i].raw_data = be32_to_cpu(source[j + 1]);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008870 }
8871}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008872
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008873/**
8874 * IRO array is stored in the following format:
8875 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
8876 */
8877static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
8878{
8879 const __be32 *source = (const __be32 *)_source;
8880 struct iro *target = (struct iro *)_target;
8881 u32 i, j, tmp;
8882
8883 for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
8884 target[i].base = be32_to_cpu(source[j]);
8885 j++;
8886 tmp = be32_to_cpu(source[j]);
8887 target[i].m1 = (tmp >> 16) & 0xffff;
8888 target[i].m2 = tmp & 0xffff;
8889 j++;
8890 tmp = be32_to_cpu(source[j]);
8891 target[i].m3 = (tmp >> 16) & 0xffff;
8892 target[i].size = tmp & 0xffff;
8893 j++;
8894 }
8895}
8896
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008897static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008898{
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008899 const __be16 *source = (const __be16 *)_source;
8900 u16 *target = (u16 *)_target;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008901 u32 i;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008902
8903 for (i = 0; i < n/2; i++)
8904 target[i] = be16_to_cpu(source[i]);
8905}
8906
/*
 * Allocate bp->arr and fill it from the firmware-blob section described by
 * fw_hdr->arr, converting endianness/layout via 'func'.  On allocation
 * failure jumps to label 'lbl' in the enclosing function.  Expects 'bp'
 * and 'fw_hdr' to be in scope at the expansion site.
 */
#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
do { \
	u32 len = be32_to_cpu(fw_hdr->arr.len); \
	bp->arr = kmalloc(len, GFP_KERNEL); \
	if (!bp->arr) { \
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl; \
	} \
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset), \
	     (u8 *)bp->arr, len); \
} while (0)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008918
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008919int bnx2x_init_firmware(struct bnx2x *bp)
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008920{
Ben Hutchings45229b42009-11-07 11:53:39 +00008921 const char *fw_file_name;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008922 struct bnx2x_fw_file_hdr *fw_hdr;
Ben Hutchings45229b42009-11-07 11:53:39 +00008923 int rc;
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008924
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008925 if (CHIP_IS_E1(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008926 fw_file_name = FW_FILE_NAME_E1;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008927 else if (CHIP_IS_E1H(bp))
Ben Hutchings45229b42009-11-07 11:53:39 +00008928 fw_file_name = FW_FILE_NAME_E1H;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00008929 else if (CHIP_IS_E2(bp))
8930 fw_file_name = FW_FILE_NAME_E2;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008931 else {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008932 BNX2X_ERR("Unsupported chip revision\n");
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00008933 return -EINVAL;
8934 }
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008935
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008936 BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008937
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008938 rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008939 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008940 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008941 goto request_firmware_exit;
8942 }
8943
8944 rc = bnx2x_check_firmware(bp);
8945 if (rc) {
Dmitry Kravkov6891dd22010-08-03 21:49:40 +00008946 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008947 goto request_firmware_exit;
8948 }
8949
8950 fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
8951
8952 /* Initialize the pointers to the init arrays */
8953 /* Blob */
8954 BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
8955
8956 /* Opcodes */
8957 BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
8958
8959 /* Offsets */
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008960 BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
8961 be16_to_cpu_n);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008962
8963 /* STORMs firmware */
Eilon Greenstein573f2032009-08-12 08:24:14 +00008964 INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8965 be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
8966 INIT_TSEM_PRAM_DATA(bp) = bp->firmware->data +
8967 be32_to_cpu(fw_hdr->tsem_pram_data.offset);
8968 INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8969 be32_to_cpu(fw_hdr->usem_int_table_data.offset);
8970 INIT_USEM_PRAM_DATA(bp) = bp->firmware->data +
8971 be32_to_cpu(fw_hdr->usem_pram_data.offset);
8972 INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8973 be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
8974 INIT_XSEM_PRAM_DATA(bp) = bp->firmware->data +
8975 be32_to_cpu(fw_hdr->xsem_pram_data.offset);
8976 INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
8977 be32_to_cpu(fw_hdr->csem_int_table_data.offset);
8978 INIT_CSEM_PRAM_DATA(bp) = bp->firmware->data +
8979 be32_to_cpu(fw_hdr->csem_pram_data.offset);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008980 /* IRO */
8981 BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008982
8983 return 0;
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00008984
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008985iro_alloc_err:
8986 kfree(bp->init_ops_offsets);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07008987init_offsets_alloc_err:
8988 kfree(bp->init_ops);
8989init_ops_alloc_err:
8990 kfree(bp->init_data);
8991request_firmware_exit:
8992 release_firmware(bp->firmware);
8993
8994 return rc;
8995}
8996
Dmitry Kravkov523224a2010-10-06 03:23:26 +00008997static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
8998{
8999 int cid_count = L2_FP_COUNT(l2_cid_count);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009000
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009001#ifdef BCM_CNIC
9002 cid_count += CNIC_CID_MAX;
9003#endif
9004 return roundup(cid_count, QM_CID_ROUND);
9005}
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009006
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009007static int __devinit bnx2x_init_one(struct pci_dev *pdev,
9008 const struct pci_device_id *ent)
9009{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009010 struct net_device *dev = NULL;
9011 struct bnx2x *bp;
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009012 int pcie_width, pcie_speed;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009013 int rc, cid_count;
9014
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009015 switch (ent->driver_data) {
9016 case BCM57710:
9017 case BCM57711:
9018 case BCM57711E:
9019 cid_count = FP_SB_MAX_E1x;
9020 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009021
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009022 case BCM57712:
9023 case BCM57712E:
9024 cid_count = FP_SB_MAX_E2;
9025 break;
9026
9027 default:
9028 pr_err("Unknown board_type (%ld), aborting\n",
9029 ent->driver_data);
9030 return ENODEV;
9031 }
9032
9033 cid_count += CNIC_CONTEXT_USE;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00009034
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009035 /* dev zeroed in init_etherdev */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009036 dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009037 if (!dev) {
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009038 dev_err(&pdev->dev, "Cannot allocate net device\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009039 return -ENOMEM;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009040 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009041
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009042 bp = netdev_priv(dev);
Joe Perches7995c642010-02-17 15:01:52 +00009043 bp->msg_enable = debug;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009044
Eilon Greensteindf4770de2009-08-12 08:23:28 +00009045 pci_set_drvdata(pdev, dev);
9046
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009047 bp->l2_cid_count = cid_count;
9048
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009049 rc = bnx2x_init_dev(pdev, dev);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009050 if (rc < 0) {
9051 free_netdev(dev);
9052 return rc;
9053 }
9054
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009055 rc = bnx2x_init_bp(bp);
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009056 if (rc)
9057 goto init_one_exit;
9058
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009059 /* calc qm_cid_count */
9060 bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);
9061
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009062 rc = register_netdev(dev);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009063 if (rc) {
Eilon Greenstein693fc0d2009-01-14 06:43:52 +00009064 dev_err(&pdev->dev, "Cannot register net device\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009065 goto init_one_exit;
9066 }
9067
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009068 /* Configure interupt mode: try to enable MSI-X/MSI if
9069 * needed, set bp->num_queues appropriately.
9070 */
9071 bnx2x_set_int_mode(bp);
9072
9073 /* Add all NAPI objects */
9074 bnx2x_add_all_napi(bp);
9075
Eilon Greenstein37f9ce62009-08-12 08:23:34 +00009076 bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009077
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009078 netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
9079 " IRQ %d, ", board_info[ent->driver_data].name,
9080 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009081 pcie_width,
9082 ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
9083 (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
9084 "5GHz (Gen2)" : "2.5GHz",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00009085 dev->base_addr, bp->pdev->irq);
9086 pr_cont("node addr %pM\n", dev->dev_addr);
Eilon Greensteinc0162012009-03-02 08:01:05 +00009087
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009088 return 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07009089
9090init_one_exit:
9091 if (bp->regview)
9092 iounmap(bp->regview);
9093
9094 if (bp->doorbells)
9095 iounmap(bp->doorbells);
9096
9097 free_netdev(dev);
9098
9099 if (atomic_read(&pdev->enable_cnt) == 1)
9100 pci_release_regions(pdev);
9101
9102 pci_disable_device(pdev);
9103 pci_set_drvdata(pdev, NULL);
9104
9105 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009106}
9107
/**
 * bnx2x_remove_one - PCI remove callback: tear down one bnx2x device.
 * @pdev:	PCI device being removed
 *
 * Reverses bnx2x_init_one(): unregisters the netdev first (so no new
 * traffic can start), then drops NAPI/IRQ resources, cancels deferred
 * work, unmaps BARs and releases the PCI device.  The order matters:
 * the netdev must be gone before the resources it uses are freed.
 */
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
9146
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009147static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
9148{
9149 int i;
9150
9151 bp->state = BNX2X_STATE_ERROR;
9152
9153 bp->rx_mode = BNX2X_RX_MODE_NONE;
9154
9155 bnx2x_netif_stop(bp, 0);
Stanislaw Gruszkac89af1a2010-05-17 17:35:38 -07009156 netif_carrier_off(bp->dev);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009157
9158 del_timer_sync(&bp->timer);
9159 bp->stats_state = STATS_STATE_DISABLED;
9160 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
9161
9162 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009163 bnx2x_free_irq(bp);
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009164
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009165 /* Free SKBs, SGEs, TPA pool and driver internals */
9166 bnx2x_free_skbs(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009167
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00009168 for_each_queue(bp, i)
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009169 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00009170
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009171 bnx2x_free_mem(bp);
9172
9173 bp->state = BNX2X_STATE_CLOSED;
9174
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009175 return 0;
9176}
9177
/**
 * bnx2x_eeh_recover - re-establish MCP communication after a PCI error.
 * @bp:	driver handle
 *
 * Re-reads the shared-memory base from the chip, validates that the
 * management CPU (MCP) is present and its validity signature is intact,
 * and resyncs the driver's firmware mailbox sequence number.  If the
 * shmem base is outside the expected window the MCP is considered
 * inactive and NO_MCP_FLAG is set.
 */
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	/* 0xA0000-0xC0000 is the window where a live MCP places shmem —
	 * anything else means the management firmware is not running
	 */
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		/* resync driver/firmware mailbox sequence counter */
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		    DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
9208
Wendy Xiong493adb12008-06-23 20:36:22 -07009209/**
9210 * bnx2x_io_error_detected - called when PCI error is detected
9211 * @pdev: Pointer to PCI device
9212 * @state: The current pci connection state
9213 *
9214 * This function is called after a PCI bus error affecting
9215 * this device has been detected.
9216 */
9217static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
9218 pci_channel_state_t state)
9219{
9220 struct net_device *dev = pci_get_drvdata(pdev);
9221 struct bnx2x *bp = netdev_priv(dev);
9222
9223 rtnl_lock();
9224
9225 netif_device_detach(dev);
9226
Dean Nelson07ce50e2009-07-31 09:13:25 +00009227 if (state == pci_channel_io_perm_failure) {
9228 rtnl_unlock();
9229 return PCI_ERS_RESULT_DISCONNECT;
9230 }
9231
Wendy Xiong493adb12008-06-23 20:36:22 -07009232 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009233 bnx2x_eeh_nic_unload(bp);
Wendy Xiong493adb12008-06-23 20:36:22 -07009234
9235 pci_disable_device(pdev);
9236
9237 rtnl_unlock();
9238
9239 /* Request a slot reset */
9240 return PCI_ERS_RESULT_NEED_RESET;
9241}
9242
9243/**
9244 * bnx2x_io_slot_reset - called after the PCI bus has been reset
9245 * @pdev: Pointer to PCI device
9246 *
9247 * Restart the card from scratch, as if from a cold-boot.
9248 */
9249static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
9250{
9251 struct net_device *dev = pci_get_drvdata(pdev);
9252 struct bnx2x *bp = netdev_priv(dev);
9253
9254 rtnl_lock();
9255
9256 if (pci_enable_device(pdev)) {
9257 dev_err(&pdev->dev,
9258 "Cannot re-enable PCI device after reset\n");
9259 rtnl_unlock();
9260 return PCI_ERS_RESULT_DISCONNECT;
9261 }
9262
9263 pci_set_master(pdev);
9264 pci_restore_state(pdev);
9265
9266 if (netif_running(dev))
9267 bnx2x_set_power_state(bp, PCI_D0);
9268
9269 rtnl_unlock();
9270
9271 return PCI_ERS_RESULT_RECOVERED;
9272}
9273
9274/**
9275 * bnx2x_io_resume - called when traffic can start flowing again
9276 * @pdev: Pointer to PCI device
9277 *
9278 * This callback is called when the error recovery driver tells us that
9279 * its OK to resume normal operation.
9280 */
9281static void bnx2x_io_resume(struct pci_dev *pdev)
9282{
9283 struct net_device *dev = pci_get_drvdata(pdev);
9284 struct bnx2x *bp = netdev_priv(dev);
9285
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009286 if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009287 printk(KERN_ERR "Handling parity error recovery. "
9288 "Try again later\n");
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00009289 return;
9290 }
9291
Wendy Xiong493adb12008-06-23 20:36:22 -07009292 rtnl_lock();
9293
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009294 bnx2x_eeh_recover(bp);
9295
Wendy Xiong493adb12008-06-23 20:36:22 -07009296 if (netif_running(dev))
Yitchak Gertnerf8ef6e42008-09-09 05:07:25 -07009297 bnx2x_nic_load(bp, LOAD_NORMAL);
Wendy Xiong493adb12008-06-23 20:36:22 -07009298
9299 netif_device_attach(dev);
9300
9301 rtnl_unlock();
9302}
9303
/* PCI error-recovery (EEH/AER) callbacks for this driver */
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};
9309
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009310static struct pci_driver bnx2x_pci_driver = {
Wendy Xiong493adb12008-06-23 20:36:22 -07009311 .name = DRV_MODULE_NAME,
9312 .id_table = bnx2x_pci_tbl,
9313 .probe = bnx2x_init_one,
9314 .remove = __devexit_p(bnx2x_remove_one),
9315 .suspend = bnx2x_suspend,
9316 .resume = bnx2x_resume,
9317 .err_handler = &bnx2x_err_handler,
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009318};
9319
9320static int __init bnx2x_init(void)
9321{
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009322 int ret;
9323
Joe Perches7995c642010-02-17 15:01:52 +00009324 pr_info("%s", version);
Eilon Greenstein938cf542009-08-12 08:23:37 +00009325
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009326 bnx2x_wq = create_singlethread_workqueue("bnx2x");
9327 if (bnx2x_wq == NULL) {
Joe Perches7995c642010-02-17 15:01:52 +00009328 pr_err("Cannot create workqueue\n");
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08009329 return -ENOMEM;
9330 }
9331
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009332 ret = pci_register_driver(&bnx2x_pci_driver);
9333 if (ret) {
Joe Perches7995c642010-02-17 15:01:52 +00009334 pr_err("Cannot register driver\n");
Stanislaw Gruszkadd21ca62009-05-05 23:22:01 +00009335 destroy_workqueue(bnx2x_wq);
9336 }
9337 return ret;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02009338}
9339
/* Module exit point: unregister from the PCI core first (which tears
 * down any remaining devices), then destroy the workqueue
 */
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);
9349
Michael Chan993ac7b2009-10-10 13:46:56 +00009350#ifdef BCM_CNIC
9351
/* count denotes the number of new completions we have seen */
/* Drain the CNIC kernel work queue (kwq) into the slow-path queue (SPQ),
 * respecting per-type credit limits.  Called with count==0 to opportunistically
 * push pending entries, or with count>0 from the completion path to return
 * that many SPQ credits first.  Runs under bp->spq_lock (BH-disabled).
 */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;


	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					     hdr.conn_and_cmd_data) >>
				SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* There may be not more than 8 L2 and COMMON SPEs and not more
		 * than 8 L5 SPEs in the air.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			/* L2/COMMON entries consume the shared SPQ credit */
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			/* L5 entries are bounded by the CNIC kwqe limit */
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		/* copy the consumed kwq entry into the next SPQ slot */
		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		/* advance the kwq consumer, wrapping at the ring end */
		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}
9421
/* CNIC callback: enqueue up to @count 16-byte kernel work queue entries
 * onto the driver's kwq ring, then kick bnx2x_cnic_sp_post() to drain
 * them towards the hardware SPQ when credits allow.
 *
 * Returns the number of entries actually queued (may be fewer than
 * @count if the ring fills up), or -EIO when the driver has panicked.
 */
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		/* stop once the kwq ring is full; caller retries later */
		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		/* advance the producer, wrapping at the ring end */
		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	/* push to the SPQ immediately if there is credit available */
	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}
9464
/* Deliver a control message to the registered CNIC driver, if any.
 * Uses cnic_mutex, so it may sleep — process context only.
 * Returns the CNIC callback's result, or 0 when no CNIC is registered.
 */
static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}
9478
/* Same as bnx2x_cnic_ctl_send() but safe from atomic/BH context:
 * protects the cnic_ops pointer with RCU instead of the mutex.
 */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}
9492
9493/*
9494 * for commands that have no data
9495 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009496int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
Michael Chan993ac7b2009-10-10 13:46:56 +00009497{
9498 struct cnic_ctl_info ctl = {0};
9499
9500 ctl.cmd = cmd;
9501
9502 return bnx2x_cnic_ctl_send(bp, &ctl);
9503}
9504
9505static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
9506{
9507 struct cnic_ctl_info ctl;
9508
9509 /* first we tell CNIC and only then we count this as a completion */
9510 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
9511 ctl.data.comp.cid = cid;
9512
9513 bnx2x_cnic_ctl_send_bh(bp, &ctl);
Dmitry Kravkovc2bff632010-10-06 03:33:18 +00009514 bnx2x_cnic_sp_post(bp, 0);
Michael Chan993ac7b2009-10-10 13:46:56 +00009515}
9516
/* CNIC -> bnx2x control entry point: dispatch commands from the CNIC
 * driver (context-table writes, SPQ credit returns, iSCSI L2 ring
 * start/stop).  Returns 0 on success or -EINVAL for an unknown command.
 */
static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	/* write one ILT/context-table entry on CNIC's behalf */
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	/* return L5 (iSCSI) SPQ credits and drain pending kwq entries */
	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		/* order the MAC programming before enabling filters */
		mmiowb();
		barrier();

		/* Start accepting on iSCSI L2 ring. Accept all multicasts
		 * because it's the only way for UIO Client to accept
		 * multicasts (in non-promiscuous mode only one Client per
		 * function will receive multicast packets (leading in our
		 * case).
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held.  */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		/* ensure filters are off before removing the MAC */
		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	/* return L2 SPQ credits to the shared spq_left counter */
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}
9594
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00009595void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
Michael Chan993ac7b2009-10-10 13:46:56 +00009596{
9597 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
9598
9599 if (bp->flags & USING_MSIX_FLAG) {
9600 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
9601 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
9602 cp->irq_arr[0].vector = bp->msix_table[1].vector;
9603 } else {
9604 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
9605 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
9606 }
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00009607 if (CHIP_IS_E2(bp))
9608 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
9609 else
9610 cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;
9611
Michael Chan993ac7b2009-10-10 13:46:56 +00009612 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009613 cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
Michael Chan993ac7b2009-10-10 13:46:56 +00009614 cp->irq_arr[1].status_blk = bp->def_status_blk;
9615 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00009616 cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;
Michael Chan993ac7b2009-10-10 13:46:56 +00009617
9618 cp->num_irq = 2;
9619}
9620
/* CNIC callback: register a CNIC driver instance with this device.
 * Allocates the kernel work queue ring and publishes the ops pointer
 * last (rcu_assign_pointer) so readers never see a half-initialized
 * state.  Returns 0 on success or a negative errno.
 */
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	/* refuse registration while interrupts are disabled */
	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	/* publish last, after all state above is initialized */
	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}
9656
/* CNIC callback: unregister the CNIC driver.  Clears the ops pointer
 * under the mutex, then waits for all RCU readers (the BH send path)
 * to finish before freeing the kwq ring.  Always returns 0.
 */
static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	/* wait out in-flight rcu_read_lock sections before freeing */
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}
9676
/* Exported probe hook for the CNIC module: fill in and hand back the
 * cnic_eth_dev descriptor (BAR mappings, context-table geometry, iSCSI
 * client/CID ids and the callback table) for this device.
 */
struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	/* CNIC's context table starts right after the L2 ILT lines */
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);
9709
9710#endif /* BCM_CNIC */
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07009711