/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION					\
	__stringify(BCM_5710_FW_MAJOR_VERSION) "."	\
	__stringify(BCM_5710_FW_MINOR_VERSION) "."	\
	__stringify(BCM_5710_FW_REVISION_VERSION) "."	\
	__stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1		"bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H	"bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E2		"bnx2x/bnx2x-e2-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II "
		   "BCM57710/57711/57711E/57712/57712E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);
MODULE_FIRMWARE(FW_FILE_NAME_E2);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
			     " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
			   "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
	BCM57712 = 3,
	BCM57712E = 4
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" },
	{ "Broadcom NetXtreme II BCM57712 XGb" },
	{ "Broadcom NetXtreme II BCM57712E XGb" }
};

#ifndef PCI_DEVICE_ID_NX2_57712
#define PCI_DEVICE_ID_NX2_57712		0x1662
#endif
#ifndef PCI_DEVICE_ID_NX2_57712E
#define PCI_DEVICE_ID_NX2_57712E	0x1663
#endif

static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712), BCM57712 },
	{ PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57712E), BCM57712E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

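/* The helpers below program the STORM processors' internal memories
 * through the BAR: a 64-bit DMA address is written as two 32-bit GRC
 * writes (low dword first), and fixed-size regions are filled or
 * zeroed one dword at a time.
 */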
static inline void __storm_memset_dma_mapping(struct bnx2x *bp,
					      u32 addr, dma_addr_t mapping)
{
	REG_WR(bp, addr, U64_LO(mapping));
	REG_WR(bp, addr + 4, U64_HI(mapping));
}

static inline void __storm_memset_fill(struct bnx2x *bp,
				       u32 addr, size_t size, u32 val)
{
	int i;
	for (i = 0; i < size/4; i++)
		REG_WR(bp, addr + (i * 4), val);
}

static inline void storm_memset_ustats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct ustorm_per_client_stats);

	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_tstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct tstorm_per_client_stats);

	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}

static inline void storm_memset_xstats_zero(struct bnx2x *bp,
					    u8 port, u16 stat_id)
{
	size_t size = sizeof(struct xstorm_per_client_stats);

	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, stat_id);

	__storm_memset_fill(bp, addr, size, 0);
}


static inline void storm_memset_spq_addr(struct bnx2x *bp,
					 dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = XSEM_REG_FAST_MEMORY +
		   XSTORM_SPQ_PAGE_BASE_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ov(struct bnx2x *bp, u16 ov, u16 abs_fid)
{
	REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(abs_fid), ov);
}

static inline void storm_memset_func_cfg(struct bnx2x *bp,
				struct tstorm_eth_function_common_config *tcfg,
				u16 abs_fid)
{
	size_t size = sizeof(struct tstorm_eth_function_common_config);

	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)tcfg);
}

static inline void storm_memset_xstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_tstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_ustats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_cstats_flags(struct bnx2x *bp,
				struct stats_indication_flags *flags,
				u16 abs_fid)
{
	size_t size = sizeof(struct stats_indication_flags);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(abs_fid);

	__storm_memset_struct(bp, addr, size, (u32 *)flags);
}

static inline void storm_memset_xstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_XSTRORM_INTMEM +
		   XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_tstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_TSTRORM_INTMEM +
		   TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_ustats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_USTRORM_INTMEM +
		   USTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_cstats_addr(struct bnx2x *bp,
					    dma_addr_t mapping, u16 abs_fid)
{
	u32 addr = BAR_CSTRORM_INTMEM +
		   CSTORM_ETH_STATS_QUERY_ADDR_OFFSET(abs_fid);

	__storm_memset_dma_mapping(bp, addr, mapping);
}

static inline void storm_memset_vf_to_pf(struct bnx2x *bp, u16 abs_fid,
					 u16 pf_id)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_VF_TO_PF_OFFSET(abs_fid),
		pf_id);
}

static inline void storm_memset_func_en(struct bnx2x *bp, u16 abs_fid,
					u8 enable)
{
	REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
	REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(abs_fid),
		enable);
}

static inline void storm_memset_eq_data(struct bnx2x *bp,
					struct event_ring_data *eq_data,
					u16 pfid)
{
	size_t size = sizeof(struct event_ring_data);

	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_DATA_OFFSET(pfid);

	__storm_memset_struct(bp, addr, size, (u32 *)eq_data);
}

static inline void storm_memset_eq_prod(struct bnx2x *bp, u16 eq_prod,
					u16 pfid)
{
	u32 addr = BAR_CSTRORM_INTMEM + CSTORM_EVENT_RING_PROD_OFFSET(pfid);
	REG_WR16(bp, addr, eq_prod);
}

static inline void storm_memset_hc_timeout(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 ticks)
{

	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, timeout);
	REG_WR8(bp, addr, ticks);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d ticks %d\n",
			  port, fw_sb_id, sb_index, ticks);
}
static inline void storm_memset_hc_disable(struct bnx2x *bp, u8 port,
					   u16 fw_sb_id, u8 sb_index,
					   u8 disable)
{
	u32 enable_flag = disable ? 0 : (1 << HC_INDEX_DATA_HC_ENABLED_SHIFT);
	int index_offset = CHIP_IS_E2(bp) ?
		offsetof(struct hc_status_block_data_e2, index_data) :
		offsetof(struct hc_status_block_data_e1x, index_data);
	u32 addr = BAR_CSTRORM_INTMEM +
			CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
			index_offset +
			sizeof(struct hc_index_data)*sb_index +
			offsetof(struct hc_index_data, flags);
	u16 flags = REG_RD16(bp, addr);
	/* clear and set */
	flags &= ~HC_INDEX_DATA_HC_ENABLED;
	flags |= enable_flag;
	REG_WR16(bp, addr, flags);
	DP(NETIF_MSG_HW, "port %x fw_sb_id %d sb_index %d disable %d\n",
			  port, fw_sb_id, sb_index, disable);
}

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

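/* Access pattern sketch (illustrative): the two helpers above tunnel GRC
 * accesses through the PCI configuration window, e.g. for some GRC
 * register "reg":
 *
 *	bnx2x_reg_wr_ind(bp, reg, val);
 *	val = bnx2x_reg_rd_ind(bp, reg);
 *
 * The window is restored to PCICFG_VENDOR_ID_OFFSET after each access.
 */
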
#define DMAE_DP_SRC_GRC		"grc src_addr [%08x]"
#define DMAE_DP_SRC_PCI		"pci src_addr [%x:%08x]"
#define DMAE_DP_DST_GRC		"grc dst_addr [%08x]"
#define DMAE_DP_DST_PCI		"pci dst_addr [%x:%08x]"
#define DMAE_DP_DST_NONE	"dst_addr [none]"

void bnx2x_dp_dmae(struct bnx2x *bp, struct dmae_command *dmae, int msglvl)
{
	u32 src_type = dmae->opcode & DMAE_COMMAND_SRC;

	switch (dmae->opcode & DMAE_COMMAND_DST) {
	case DMAE_CMD_DST_PCI:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%x:%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	case DMAE_CMD_DST_GRC:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%x:%08x], len [%d*4], dst_addr [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   "src [%08x], len [%d*4], dst [%08x]\n"
			   "comp_addr [%x:%08x], comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->dst_addr_lo >> 2,
			   dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	default:
		if (src_type == DMAE_CMD_SRC_PCI)
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%x:%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		else
			DP(msglvl, "DMAE: opcode 0x%08x\n"
			   DP_LEVEL "src_addr [%08x]  len [%d * 4]  "
				    "dst_addr [none]\n"
			   DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
			   dmae->opcode, dmae->src_addr_lo >> 2,
			   dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo,
			   dmae->comp_val);
		break;
	}

}

const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

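/* Usage sketch (illustrative): callers pick a DMAE channel index and the
 * command is kicked via the matching GO register; the init path below
 * posts with:
 *
 *	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
 */
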
u32 bnx2x_dmae_opcode_add_comp(u32 opcode, u8 comp_type)
{
	return opcode | ((comp_type << DMAE_COMMAND_C_DST_SHIFT) |
			 DMAE_CMD_C_ENABLE);
}

u32 bnx2x_dmae_opcode_clr_src_reset(u32 opcode)
{
	return opcode & ~DMAE_CMD_SRC_RESET;
}

u32 bnx2x_dmae_opcode(struct bnx2x *bp, u8 src_type, u8 dst_type,
		      bool with_comp, u8 comp_type)
{
	u32 opcode = 0;

	opcode |= ((src_type << DMAE_COMMAND_SRC_SHIFT) |
		   (dst_type << DMAE_COMMAND_DST_SHIFT));

	opcode |= (DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET);

	opcode |= (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0);
	opcode |= ((BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT) |
		   (BP_E1HVN(bp) << DMAE_COMMAND_DST_VN_SHIFT));
	opcode |= (DMAE_COM_SET_ERR << DMAE_COMMAND_ERR_POLICY_SHIFT);

#ifdef __BIG_ENDIAN
	opcode |= DMAE_CMD_ENDIANITY_B_DW_SWAP;
#else
	opcode |= DMAE_CMD_ENDIANITY_DW_SWAP;
#endif
	if (with_comp)
		opcode = bnx2x_dmae_opcode_add_comp(opcode, comp_type);
	return opcode;
}

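/* Opcode construction example (illustrative): a PCI -> GRC copy that
 * reports completion with a PCI write-back, exactly as
 * bnx2x_prep_dmae_with_comp() builds it:
 *
 *	u32 opcode = bnx2x_dmae_opcode(bp, DMAE_SRC_PCI, DMAE_DST_GRC,
 *				       true, DMAE_COMP_PCI);
 */
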
void bnx2x_prep_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae,
			       u8 src_type, u8 dst_type)
{
	memset(dmae, 0, sizeof(struct dmae_command));

	/* set the opcode */
	dmae->opcode = bnx2x_dmae_opcode(bp, src_type, dst_type,
					 true, DMAE_COMP_PCI);

	/* fill in the completion parameters */
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;
}

/* issue a dmae command over the init-channel and wait for completion */
int bnx2x_issue_dmae_with_comp(struct bnx2x *bp, struct dmae_command *dmae)
{
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = CHIP_REV_IS_SLOW(bp) ? (400000) : 40;
	int rc = 0;

	DP(BNX2X_MSG_OFF, "data before [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	/* lock the dmae channel */
	mutex_lock(&bp->dmae_mutex);

	/* reset completion */
	*wb_comp = 0;

	/* post the command on the channel used for initializations */
	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	/* wait for completion */
	udelay(5);
	while ((*wb_comp & ~DMAE_PCI_ERR_FLAG) != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			rc = DMAE_TIMEOUT;
			goto unlock;
		}
		cnt--;
		udelay(50);
	}
	if (*wb_comp & DMAE_PCI_ERR_FLAG) {
		BNX2X_ERR("DMAE PCI error!\n");
		rc = DMAE_PCI_ERROR;
	}

	DP(BNX2X_MSG_OFF, "data after [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

unlock:
	mutex_unlock(&bp->dmae_mutex);
	return rc;
}

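/* Typical sequence (illustrative), mirrored by bnx2x_write_dmae() below:
 * prepare a command with a PCI completion, fill in addresses and length,
 * then issue it and poll the write-back word:
 *
 *	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);
 *	dmae.src_addr_lo = U64_LO(dma_addr);
 *	dmae.src_addr_hi = U64_HI(dma_addr);
 *	dmae.dst_addr_lo = dst_addr >> 2;
 *	dmae.len = len32;
 *	bnx2x_issue_dmae_with_comp(bp, &dmae);
 */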
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   "  using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_PCI, DMAE_DST_GRC);

	/* fill in addresses and len */
	dmae.src_addr_lo = U64_LO(dma_addr);
	dmae.src_addr_hi = U64_HI(dma_addr);
	dmae.dst_addr_lo = dst_addr >> 2;
	dmae.dst_addr_hi = 0;
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command dmae;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   "  using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	/* set opcode and fixed command fields */
	bnx2x_prep_dmae_with_comp(bp, &dmae, DMAE_SRC_GRC, DMAE_DST_PCI);

	/* fill in addresses and len */
	dmae.src_addr_lo = src_addr >> 2;
	dmae.src_addr_hi = 0;
	dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae.len = len32;

	bnx2x_dp_dmae(bp, &dmae, BNX2X_MSG_OFF);

	/* issue the command and wait for completion */
	bnx2x_issue_dmae_with_comp(bp, &dmae);
}

void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
			       u32 addr, u32 len)
{
	int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
	int offset = 0;

	while (len > dmae_wr_max) {
		bnx2x_write_dmae(bp, phys_addr + offset,
				 addr + offset, dmae_wr_max);
		offset += dmae_wr_max * 4;
		len -= dmae_wr_max;
	}

	bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

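/* Chunking arithmetic (illustrative): len counts 32-bit words while
 * offset advances in bytes, hence the "* 4" above.  With a hypothetical
 * dmae_wr_max of 0x400 words and len = 0x500 words, the loop issues one
 * 0x400-word write, advances offset by 0x1000 bytes, and the final call
 * writes the remaining 0x100 words.
 */
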
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 addr;
	u32 mark, offset;
	__be32 data[9];
	int word;
	u32 trace_shmem_base;
	if (BP_NOMCP(bp)) {
		BNX2X_ERR("NO MCP - can not dump\n");
		return;
	}

	if (BP_PATH(bp) == 0)
		trace_shmem_base = bp->common.shmem_base;
	else
		trace_shmem_base = SHMEM2_RD(bp, other_shmem_base_addr);
	addr = trace_shmem_base - 0x0800 + 4;
	mark = REG_RD(bp, addr);
	mark = (CHIP_IS_E1x(bp) ? MCP_REG_MCPR_SCRATCH : MCP_A_REG_MCPR_SCRATCH)
		+ ((mark + 0x3) & ~0x3) - 0x08000000;
	pr_err("begin fw dump (mark 0x%x)\n", mark);

	pr_err("");
	for (offset = mark; offset <= trace_shmem_base; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, offset + 4*word));
		data[8] = 0x0;
		pr_cont("%s", (char *)data);
	}
	pr_err("end of fw dump\n");
}

void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j;
	struct hc_sp_status_block_data sp_sb_data;
	int func = BP_FUNC(bp);
#ifdef BNX2X_STOP_ON_ERROR
	u16 start = 0, end = 0;
#endif

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
		  "  spq_prod_idx(0x%x)\n",
		  bp->def_idx, bp->def_att_idx,
		  bp->attn_state, bp->spq_prod_idx);
	BNX2X_ERR("DSB: attn bits(0x%x)  ack(0x%x)  id(0x%x)  idx(0x%x)\n",
		  bp->def_status_blk->atten_status_block.attn_bits,
		  bp->def_status_blk->atten_status_block.attn_bits_ack,
		  bp->def_status_blk->atten_status_block.status_block_id,
		  bp->def_status_blk->atten_status_block.attn_bits_index);
	BNX2X_ERR("     def (");
	for (i = 0; i < HC_SP_SB_MAX_INDICES; i++)
		pr_cont("0x%x%s",
			bp->def_status_blk->sp_sb.index_values[i],
			(i == HC_SP_SB_MAX_INDICES - 1) ? ")  " : " ");

	for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
		*((u32 *)&sp_sb_data + i) = REG_RD(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
			i*sizeof(u32));

	pr_cont("igu_sb_id(0x%x)  igu_seg_id (0x%x) "
		"pf_id(0x%x)  vnic_id(0x%x)  "
		"vf_id(0x%x)  vf_valid (0x%x)\n",
		sp_sb_data.igu_sb_id,
		sp_sb_data.igu_seg_id,
		sp_sb_data.p_func.pf_id,
		sp_sb_data.p_func.vnic_id,
		sp_sb_data.p_func.vf_id,
		sp_sb_data.p_func.vf_valid);


	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int loop;
		struct hc_status_block_data_e2 sb_data_e2;
		struct hc_status_block_data_e1x sb_data_e1x;
		struct hc_status_block_sm  *hc_sm_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.common.state_machine :
			sb_data_e1x.common.state_machine;
		struct hc_index_data *hc_index_p =
			CHIP_IS_E2(bp) ?
			sb_data_e2.index_data :
			sb_data_e1x.index_data;
		int data_size;
		u32 *sb_data_p;

		/* Rx */
		BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
			  "  rx_comp_prod(0x%x)"
			  "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
			  "  fp_hc_idx(0x%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_hc_idx));

		/* Tx */
		BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
			  "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
			  "  *tx_cons_sb(0x%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));

		loop = CHIP_IS_E2(bp) ?
			HC_SB_MAX_INDICES_E2 : HC_SB_MAX_INDICES_E1X;

		/* host sb data */

		BNX2X_ERR("     run indexes (");
		for (j = 0; j < HC_SB_MAX_SM; j++)
			pr_cont("0x%x%s",
				fp->sb_running_index[j],
				(j == HC_SB_MAX_SM - 1) ? ")" : " ");

		BNX2X_ERR("     indexes (");
		for (j = 0; j < loop; j++)
			pr_cont("0x%x%s",
				fp->sb_index_values[j],
				(j == loop - 1) ? ")" : " ");
		/* fw sb data */
		data_size = CHIP_IS_E2(bp) ?
			sizeof(struct hc_status_block_data_e2) :
			sizeof(struct hc_status_block_data_e1x);
		data_size /= sizeof(u32);
		sb_data_p = CHIP_IS_E2(bp) ?
			(u32 *)&sb_data_e2 :
			(u32 *)&sb_data_e1x;
		/* copy sb data in here */
		for (j = 0; j < data_size; j++)
			*(sb_data_p + j) = REG_RD(bp, BAR_CSTRORM_INTMEM +
				CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id) +
				j * sizeof(u32));

		if (CHIP_IS_E2(bp)) {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e2.common.p_func.pf_id,
				sb_data_e2.common.p_func.vf_id,
				sb_data_e2.common.p_func.vf_valid,
				sb_data_e2.common.p_func.vnic_id,
				sb_data_e2.common.same_igu_sb_1b);
		} else {
			pr_cont("pf_id(0x%x)  vf_id (0x%x)  vf_valid(0x%x) "
				"vnic_id(0x%x)  same_igu_sb_1b(0x%x)\n",
				sb_data_e1x.common.p_func.pf_id,
				sb_data_e1x.common.p_func.vf_id,
				sb_data_e1x.common.p_func.vf_valid,
				sb_data_e1x.common.p_func.vnic_id,
				sb_data_e1x.common.same_igu_sb_1b);
		}

		/* SB_SMs data */
		for (j = 0; j < HC_SB_MAX_SM; j++) {
			pr_cont("SM[%d] __flags (0x%x) "
				"igu_sb_id (0x%x)  igu_seg_id(0x%x) "
				"time_to_expire (0x%x) "
				"timer_value(0x%x)\n", j,
				hc_sm_p[j].__flags,
				hc_sm_p[j].igu_sb_id,
				hc_sm_p[j].igu_seg_id,
				hc_sm_p[j].time_to_expire,
				hc_sm_p[j].timer_value);
		}

		/* Indices data */
		for (j = 0; j < loop; j++) {
			pr_cont("INDEX[%d] flags (0x%x) "
				"timeout (0x%x)\n", j,
				hc_index_p[j].flags,
				hc_index_p[j].timeout);
		}
	}

#ifdef BNX2X_STOP_ON_ERROR
	/* Rings */
	/* Rx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}
#endif
	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

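/* Interrupt control is implemented twice below: once for the HC block and
 * once for the IGU; bnx2x_int_enable() and bnx2x_int_disable() dispatch
 * on bp->common.int_block.
 */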
static void bnx2x_hc_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		if (!CHIP_IS_E1(bp)) {
			DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
			   val, port, addr);

			REG_WR(bp, addr, val);

			val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
		}
	}

	if (CHIP_IS_E1(bp))
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0x1FFFF);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (!CHIP_IS_E1(bp)) {
		/* init leading/trailing edge */
		if (IS_MF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

static void bnx2x_igu_int_enable(struct bnx2x *bp)
{
	u32 val;
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	if (msix) {
		val &= ~(IGU_PF_CONF_INT_LINE_EN |
			 IGU_PF_CONF_SINGLE_ISR_EN);
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN);
	} else if (msi) {
		val &= ~IGU_PF_CONF_INT_LINE_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_MSI_MSIX_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	} else {
		val &= ~IGU_PF_CONF_MSI_MSIX_EN;
		val |= (IGU_PF_CONF_FUNC_EN |
			IGU_PF_CONF_INT_LINE_EN |
			IGU_PF_CONF_ATTN_BIT_EN |
			IGU_PF_CONF_SINGLE_ISR_EN);
	}

	DP(NETIF_MSG_INTR, "write 0x%x to IGU  mode %s\n",
	   val, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);

	barrier();

	/* init leading/trailing edge */
	if (IS_MF(bp)) {
		val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
		if (bp->port.pmf)
			/* enable nig and gpio3 attention */
			val |= 0x1100;
	} else
		val = 0xffff;

	REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
	REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}

void bnx2x_int_enable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_enable(bp);
	else
		bnx2x_igu_int_enable(bp);
}

static void bnx2x_hc_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	/*
	 * In E1 we must use only PCI configuration space to disable the
	 * MSI/MSI-X capability.
	 * It's forbidden to disable IGU_PF_CONF_MSI_MSIX_EN in the HC block.
	 */
	if (CHIP_IS_E1(bp)) {
		/* Since IGU_PF_CONF_MSI_MSIX_EN still always on
		 * Use mask register to prevent from HC sending interrupts
		 * after we exit the function
		 */
		REG_WR(bp, HC_REG_INT_MASK + port*4, 0);

		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0 |
			 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_igu_int_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);

	val &= ~(IGU_PF_CONF_MSI_MSIX_EN |
		 IGU_PF_CONF_INT_LINE_EN |
		 IGU_PF_CONF_ATTN_BIT_EN);

	DP(NETIF_MSG_INTR, "write %x to IGU\n", val);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	if (REG_RD(bp, IGU_REG_PF_CONFIGURATION) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

void bnx2x_int_disable(struct bnx2x *bp)
{
	if (bp->common.int_block == INT_BLOCK_HC)
		bnx2x_hc_int_disable(bp);
	else
		bnx2x_igu_int_disable(bp);
}

void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
#ifdef BCM_CNIC
		offset++;
#endif
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return false;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Try to acquire the lock */
	REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit)
		return true;

	DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
	return false;
}

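/* Usage sketch (illustrative): this is a try-lock, so callers must handle
 * failure instead of spinning, e.g.:
 *
 *	if (bnx2x_trylock_hw_lock(bp, resource))
 *		do the work protected by the HW lock
 *	else
 *		defer and retry later
 */
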
#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	switch (command | fp->state) {
	case (RAMROD_CMD_ID_ETH_CLIENT_SETUP | BNX2X_FP_STATE_OPENING):
		DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
		DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n", cid);
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_TERMINATE | BNX2X_FP_STATE_TERMINATING):
1376 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] teminate ramrod\n", cid);
1377 fp->state = BNX2X_FP_STATE_TERMINATED;
Eliezer Tamir49d66772008-02-28 11:53:13 -08001378 break;
1379
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001380 default:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001381 BNX2X_ERR("unexpected MC reply (%d) "
1382 "fp[%d] state is %x\n",
1383 command, fp->index, fp->state);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001384 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001385 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001386
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00001387 smp_mb__before_atomic_inc();
1388 atomic_inc(&bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001389 /* push the change in fp->state and towards the memory */
1390 smp_wmb();
1391
1392 return;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001393}
1394
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00001395irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001396{
Eilon Greenstein555f6c72009-02-12 08:36:11 +00001397 struct bnx2x *bp = netdev_priv(dev_instance);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001398 u16 status = bnx2x_ack_int(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001399 u16 mask;
Eilon Greensteinca003922009-08-12 22:53:28 -07001400 int i;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001401
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001402 /* Return here if interrupt is shared and it's not for us */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001403 if (unlikely(status == 0)) {
1404 DP(NETIF_MSG_INTR, "not our interrupt!\n");
1405 return IRQ_NONE;
1406 }
Eilon Greensteinf5372252009-02-12 08:38:30 +00001407 DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001408
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001409 /* Return here if interrupt is disabled */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001410 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
1411 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
1412 return IRQ_HANDLED;
1413 }
1414
Eilon Greenstein3196a882008-08-13 15:58:49 -07001415#ifdef BNX2X_STOP_ON_ERROR
1416 if (unlikely(bp->panic))
1417 return IRQ_HANDLED;
1418#endif
1419
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00001420 for_each_queue(bp, i) {
Eilon Greensteinca003922009-08-12 22:53:28 -07001421 struct bnx2x_fastpath *fp = &bp->fp[i];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001422
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001423 mask = 0x2 << (fp->index + CNIC_CONTEXT_USE);
Eilon Greensteinca003922009-08-12 22:53:28 -07001424 if (status & mask) {
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001425 /* Handle Rx and Tx according to SB id */
1426 prefetch(fp->rx_cons_sb);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001427 prefetch(fp->tx_cons_sb);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001428 prefetch(&fp->sb_running_index[SM_RX_ID]);
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00001429 napi_schedule(&bnx2x_fp(bp, fp->index, napi));
Eilon Greensteinca003922009-08-12 22:53:28 -07001430 status &= ~mask;
1431 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001432 }
1433
Michael Chan993ac7b2009-10-10 13:46:56 +00001434#ifdef BCM_CNIC
Dmitry Kravkov523224a2010-10-06 03:23:26 +00001435 mask = 0x2;
Michael Chan993ac7b2009-10-10 13:46:56 +00001436 if (status & (mask | 0x1)) {
1437 struct cnic_ops *c_ops = NULL;
1438
1439 rcu_read_lock();
1440 c_ops = rcu_dereference(bp->cnic_ops);
1441 if (c_ops)
1442 c_ops->cnic_handler(bp->cnic_data, NULL);
1443 rcu_read_unlock();
1444
1445 status &= ~mask;
1446 }
1447#endif
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001448
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001449 if (unlikely(status & 0x1)) {
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08001450 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001451
1452 status &= ~0x1;
1453 if (!status)
1454 return IRQ_HANDLED;
1455 }
1456
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00001457 if (unlikely(status))
1458 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
Eilon Greenstein34f80b02008-06-23 20:33:01 -07001459 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02001460
1461 return IRQ_HANDLED;
1462}
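
/*
 * Note on the status word consumed above (illustrative, derived from
 * the handler itself): bit 0 is the slowpath indication that kicks
 * sp_task, and fastpath queue i owns the bit at position
 * (i + 1 + CNIC_CONTEXT_USE), i.e. mask = 0x2 << (i + CNIC_CONTEXT_USE).
 * For example, if CNIC_CONTEXT_USE were 1, queue 0 would map to
 * mask 0x4.  Any bit still set after all consumers ran is reported as
 * an unknown interrupt.
 */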

/* end of fast path */


/* Link */

/*
 * General service functions
 */

int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
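
/*
 * Usage sketch (illustrative only): the acquire/release pair above
 * serializes access to shared HW between the PCI functions of the
 * device, e.g.:
 *
 *	rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	if (!rc) {
 *		... program the shared GPIO block ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 *
 * -EAGAIN from the acquire path means another function held the
 * resource for the whole 5 second polling window.
 */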

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
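
/*
 * Example (hypothetical pin/port values, for illustration): to drive
 * GPIO 1 of port 0 low and later float it again, a caller could do:
 *
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_OUTPUT_LOW, 0);
 *	...
 *	bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
 *		       MISC_REGISTERS_GPIO_INPUT_HI_Z, 0);
 *
 * bnx2x_get_gpio() then reads the pin back (0 or 1).  The set paths
 * take the GPIO HW lock internally, so the caller needs no extra
 * locking for them.
 */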

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
	u32 sel_phy_idx = 0;
	if (bp->link_vars.link_up) {
		sel_phy_idx = EXT_PHY1;
		/* In case link is SERDES, check if the EXT_PHY2 is the one */
		if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
		    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
			sel_phy_idx = EXT_PHY2;
	} else {

		switch (bnx2x_phy_selection(&bp->link_params)) {
		case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
		case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY1;
			break;
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
		case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
			sel_phy_idx = EXT_PHY2;
			break;
		}
	}
	/*
	 * The selected active PHY is always after swapping (in case PHY
	 * swapping is enabled). So when swapping is enabled, we need to
	 * reverse the configuration.
	 */

	if (bp->link_params.multi_phy_config &
	    PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
		if (sel_phy_idx == EXT_PHY1)
			sel_phy_idx = EXT_PHY2;
		else if (sel_phy_idx == EXT_PHY2)
			sel_phy_idx = EXT_PHY1;
	}
	return LINK_CONFIG_IDX(sel_phy_idx);
}

void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
						  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
						   ADVERTISED_Pause);
		break;
	}
}

u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;
		int cfx_idx = bnx2x_get_link_cfg_idx(bp);
		u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if ((CHIP_IS_E1x(bp)) && (bp->dev->mtu > 5000))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG) {
			bp->link_params.loopback_mode = LOOPBACK_XGXS;
			bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
		}

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}
		bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
	u8 rc = 0;

	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
				     is_serdes);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not test link\n");

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		u32 vn_cfg = bp->mf_config[vn];
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero) {
		bp->cmng.flags.cmng_enables &=
					~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
		DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
		   "  fairness will be disabled\n");
	} else
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int vn)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = bp->mf_config[vn];
	int func = 2*vn + BP_PORT(bp);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If min rate is zero - set it to 1 */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max_t(u32, (vn_min_rate * (T_FAIR_COEF /
						   (8 * bp->vn_weight_sum))),
			      (bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
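
/*
 * Worked example for the MF config decoding above (illustrative): the
 * min/max bandwidth fields are stored in units of 100 Mbps, so a raw
 * FUNC_MF_CFG_MAX_BW field of 25 yields vn_max_rate = 2500 Mbps, and
 * with RS_PERIODIC_TIMEOUT_USEC of 100 (per the "100 usec" comment in
 * bnx2x_init_port_minmax) the per-period quota would be
 * (2500 * 100) / 8 = 31250 bytes.
 */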

static int bnx2x_get_cmng_fns_mode(struct bnx2x *bp)
{
	if (CHIP_REV_IS_SLOW(bp))
		return CMNG_FNS_NONE;
	if (IS_MF(bp))
		return CMNG_FNS_MINMAX;

	return CMNG_FNS_NONE;
}

static void bnx2x_read_mf_cfg(struct bnx2x *bp)
{
	int vn;

	if (BP_NOMCP(bp))
		return; /* what should be the default value in this case? */

	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int /*abs*/func = 2*vn + BP_PORT(bp);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);
	}
}

static void bnx2x_cmng_fns_init(struct bnx2x *bp, u8 read_cfg, u8 cmng_type)
{
	if (cmng_type == CMNG_FNS_MINMAX) {
		int vn;

		/* clear cmng_enables */
		bp->cmng.flags.cmng_enables = 0;

		/* read mf conf from shmem */
		if (read_cfg)
			bnx2x_read_mf_cfg(bp);

		/* Init rate shaping and fairness contexts */
		bnx2x_init_port_minmax(bp);

		/* calculate vn_weight_sum and enable fairness if not 0 */
		bnx2x_calc_vn_weight_sum(bp);

		/* calculate and set min-max rate for each vn */
		for (vn = VN_0; vn < E1HVN_MAX; vn++)
			bnx2x_init_vn_minmax(bp, vn);

		/* always enable rate shaping and fairness */
		bp->cmng.flags.cmng_enables |=
					CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
		if (!bp->vn_weight_sum)
			DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
			   "  fairness will be disabled\n");
		return;
	}

	/* rate shaping and fairness are disabled */
	DP(NETIF_MSG_IFUP,
	   "rate shaping and fairness are disabled\n");
}

static inline void bnx2x_link_sync_notify(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func;
	int vn;

	/* Set the attention towards other drivers on the same port */
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		if (vn == BP_E1HVN(bp))
			continue;

		func = ((vn << 1) | port);
		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
		       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
	}
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	u32 prev_link_status = bp->link_vars.link_status;
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (!CHIP_IS_E1(bp) && bp->dropless_fc) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if (bp->state == BNX2X_STATE_OPEN)
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status only if link status actually changed */
	if (prev_link_status != bp->link_vars.link_status)
		bnx2x_link_report(bp);

	if (IS_MF(bp))
		bnx2x_link_sync_notify(bp);

	if (bp->link_vars.link_up && bp->link_vars.line_speed) {
		int cmng_fns = bnx2x_get_cmng_fns_mode(bp);

		if (cmng_fns != CMNG_FNS_NONE) {
			bnx2x_cmng_fns_init(bp, false, cmng_fns);
			storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
		} else
			/* rate shaping and fairness are disabled */
			DP(NETIF_MSG_IFUP,
			   "single function mode without fairness\n");
	}
}

void bnx2x__link_status_update(struct bnx2x *bp)
{
	if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* the link status update could be the result of a DCC event,
	   hence re-read the shmem mf configuration */
	bnx2x_read_mf_cfg(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	if (bp->common.int_block == INT_BLOCK_HC) {
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, val);
		REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, val);
	}

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
{
	int mb_idx = BP_FW_MB_IDX(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	mutex_lock(&bp->fw_mb_mutex);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_param, param);
	SHMEM_WR(bp, func_mb[mb_idx].drv_mb_header, (command | seq));

	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[mb_idx].fw_mb_header);

		/* Give the FW up to 5 seconds (500*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
		rc &= FW_MSG_CODE_MASK;
	else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}
	mutex_unlock(&bp->fw_mb_mutex);

	return rc;
}
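
/*
 * Usage sketch (illustrative): callers pass one of the DRV_MSG_CODE_*
 * commands and test the FW_MSG_CODE_* reply, e.g.:
 *
 *	u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *	if (rc == 0)
 *		... the MCP never answered; a FW dump was already taken ...
 *
 * The sequence number embedded in the mailbox header is what lets the
 * driver match the reply to its own request.
 */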

/* must be called under rtnl_lock */
void bnx2x_rxq_set_mac_filters(struct bnx2x *bp, u16 cl_id, u32 filters)
{
	u32 mask = (1 << cl_id);

	/* initial setting is BNX2X_ACCEPT_NONE */
	u8 drop_all_ucast = 1, drop_all_bcast = 1, drop_all_mcast = 1;
	u8 accp_all_ucast = 0, accp_all_bcast = 0, accp_all_mcast = 0;
	u8 unmatched_unicast = 0;

	if (filters & BNX2X_PROMISCUOUS_MODE) {
		/* promiscuous - accept all, drop none */
		drop_all_ucast = drop_all_bcast = drop_all_mcast = 0;
		accp_all_ucast = accp_all_bcast = accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_UNICAST) {
		/* accept matched ucast */
		drop_all_ucast = 0;
	}
	if (filters & BNX2X_ACCEPT_MULTICAST) {
		/* accept matched mcast */
		drop_all_mcast = 0;
	}
	if (filters & BNX2X_ACCEPT_ALL_UNICAST) {
		/* accept all ucast */
		drop_all_ucast = 0;
		accp_all_ucast = 1;
	}
	if (filters & BNX2X_ACCEPT_ALL_MULTICAST) {
		/* accept all mcast */
		drop_all_mcast = 0;
		accp_all_mcast = 1;
	}
	if (filters & BNX2X_ACCEPT_BROADCAST) {
		/* accept (all) bcast */
		drop_all_bcast = 0;
		accp_all_bcast = 1;
	}

	bp->mac_filters.ucast_drop_all = drop_all_ucast ?
		bp->mac_filters.ucast_drop_all | mask :
		bp->mac_filters.ucast_drop_all & ~mask;

	bp->mac_filters.mcast_drop_all = drop_all_mcast ?
		bp->mac_filters.mcast_drop_all | mask :
		bp->mac_filters.mcast_drop_all & ~mask;

	bp->mac_filters.bcast_drop_all = drop_all_bcast ?
		bp->mac_filters.bcast_drop_all | mask :
		bp->mac_filters.bcast_drop_all & ~mask;

	bp->mac_filters.ucast_accept_all = accp_all_ucast ?
		bp->mac_filters.ucast_accept_all | mask :
		bp->mac_filters.ucast_accept_all & ~mask;

	bp->mac_filters.mcast_accept_all = accp_all_mcast ?
		bp->mac_filters.mcast_accept_all | mask :
		bp->mac_filters.mcast_accept_all & ~mask;

	bp->mac_filters.bcast_accept_all = accp_all_bcast ?
		bp->mac_filters.bcast_accept_all | mask :
		bp->mac_filters.bcast_accept_all & ~mask;

	bp->mac_filters.unmatched_unicast = unmatched_unicast ?
		bp->mac_filters.unmatched_unicast | mask :
		bp->mac_filters.unmatched_unicast & ~mask;
}
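
/*
 * Example composition of the filters argument (illustrative): normal
 * unicast plus broadcast RX for client 0 would be requested with
 *
 *	bnx2x_rxq_set_mac_filters(bp, 0, BNX2X_ACCEPT_UNICAST |
 *					 BNX2X_ACCEPT_BROADCAST);
 *
 * while BNX2X_PROMISCUOUS_MODE alone opens all three traffic classes.
 * Note that this function only updates the bp->mac_filters shadow;
 * the caller (e.g. bnx2x_set_storm_rx_mode(), presumably) is expected
 * to push it to the chip, which is why rtnl_lock must be held around
 * the update.
 */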
2305
2306void bnx2x_func_init(struct bnx2x *bp, struct bnx2x_func_init_params *p)
2307{
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002308 struct tstorm_eth_function_common_config tcfg = {0};
2309 u16 rss_flgs;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002310
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002311 /* tpa */
2312 if (p->func_flgs & FUNC_FLG_TPA)
2313 tcfg.config_flags |=
2314 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002315
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002316 /* set rss flags */
2317 rss_flgs = (p->rss->mode <<
2318 TSTORM_ETH_FUNCTION_COMMON_CONFIG_RSS_MODE_SHIFT);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002319
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002320 if (p->rss->cap & RSS_IPV4_CAP)
2321 rss_flgs |= RSS_IPV4_CAP_MASK;
2322 if (p->rss->cap & RSS_IPV4_TCP_CAP)
2323 rss_flgs |= RSS_IPV4_TCP_CAP_MASK;
2324 if (p->rss->cap & RSS_IPV6_CAP)
2325 rss_flgs |= RSS_IPV6_CAP_MASK;
2326 if (p->rss->cap & RSS_IPV6_TCP_CAP)
2327 rss_flgs |= RSS_IPV6_TCP_CAP_MASK;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002328
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002329 tcfg.config_flags |= rss_flgs;
2330 tcfg.rss_result_mask = p->rss->result_mask;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002331
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002332 storm_memset_func_cfg(bp, &tcfg, p->func_id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002333
2334 /* Enable the function in the FW */
2335 storm_memset_vf_to_pf(bp, p->func_id, p->pf_id);
2336 storm_memset_func_en(bp, p->func_id, 1);
2337
2338 /* statistics */
2339 if (p->func_flgs & FUNC_FLG_STATS) {
2340 struct stats_indication_flags stats_flags = {0};
2341 stats_flags.collect_eth = 1;
2342
2343 storm_memset_xstats_flags(bp, &stats_flags, p->func_id);
2344 storm_memset_xstats_addr(bp, p->fw_stat_map, p->func_id);
2345
2346 storm_memset_tstats_flags(bp, &stats_flags, p->func_id);
2347 storm_memset_tstats_addr(bp, p->fw_stat_map, p->func_id);
2348
2349 storm_memset_ustats_flags(bp, &stats_flags, p->func_id);
2350 storm_memset_ustats_addr(bp, p->fw_stat_map, p->func_id);
2351
2352 storm_memset_cstats_flags(bp, &stats_flags, p->func_id);
2353 storm_memset_cstats_addr(bp, p->fw_stat_map, p->func_id);
2354 }
2355
2356 /* spq */
2357 if (p->func_flgs & FUNC_FLG_SPQ) {
2358 storm_memset_spq_addr(bp, p->spq_map, p->func_id);
2359 REG_WR(bp, XSEM_REG_FAST_MEMORY +
2360 XSTORM_SPQ_PROD_OFFSET(p->func_id), p->spq_prod);
2361 }
2362}
2363
2364static inline u16 bnx2x_get_cl_flags(struct bnx2x *bp,
2365 struct bnx2x_fastpath *fp)
2366{
2367 u16 flags = 0;
2368
2369 /* calculate queue flags */
2370 flags |= QUEUE_FLG_CACHE_ALIGN;
2371 flags |= QUEUE_FLG_HC;
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002372 flags |= IS_MF(bp) ? QUEUE_FLG_OV : 0;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002373
2374#ifdef BCM_VLAN
2375 flags |= QUEUE_FLG_VLAN;
2376 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
2377#endif
2378
2379 if (!fp->disable_tpa)
2380 flags |= QUEUE_FLG_TPA;
2381
2382 flags |= QUEUE_FLG_STATS;
2383
2384 return flags;
2385}
2386
2387static void bnx2x_pf_rx_cl_prep(struct bnx2x *bp,
2388 struct bnx2x_fastpath *fp, struct rxq_pause_params *pause,
2389 struct bnx2x_rxq_init_params *rxq_init)
2390{
2391 u16 max_sge = 0;
2392 u16 sge_sz = 0;
2393 u16 tpa_agg_size = 0;
2394
2395 /* calculate queue flags */
2396 u16 flags = bnx2x_get_cl_flags(bp, fp);
2397
2398 if (!fp->disable_tpa) {
2399 pause->sge_th_hi = 250;
2400 pause->sge_th_lo = 150;
2401 tpa_agg_size = min_t(u32,
2402 (min_t(u32, 8, MAX_SKB_FRAGS) *
2403 SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
2404 max_sge = SGE_PAGE_ALIGN(bp->dev->mtu) >>
2405 SGE_PAGE_SHIFT;
2406 max_sge = ((max_sge + PAGES_PER_SGE - 1) &
2407 (~(PAGES_PER_SGE-1))) >> PAGES_PER_SGE_SHIFT;
2408 sge_sz = (u16)min_t(u32, SGE_PAGE_SIZE * PAGES_PER_SGE,
2409 0xffff);
2410 }
2411
2412 /* pause - not for e1 */
2413 if (!CHIP_IS_E1(bp)) {
2414 pause->bd_th_hi = 350;
2415 pause->bd_th_lo = 250;
2416 pause->rcq_th_hi = 350;
2417 pause->rcq_th_lo = 250;
2418 pause->sge_th_hi = 0;
2419 pause->sge_th_lo = 0;
2420 pause->pri_map = 1;
2421 }
2422
2423 /* rxq setup */
2424 rxq_init->flags = flags;
2425 rxq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2426 rxq_init->dscr_map = fp->rx_desc_mapping;
2427 rxq_init->sge_map = fp->rx_sge_mapping;
2428 rxq_init->rcq_map = fp->rx_comp_mapping;
2429 rxq_init->rcq_np_map = fp->rx_comp_mapping + BCM_PAGE_SIZE;
2430 rxq_init->mtu = bp->dev->mtu;
2431 rxq_init->buf_sz = bp->rx_buf_size;
2432 rxq_init->cl_qzone_id = fp->cl_qzone_id;
2433 rxq_init->cl_id = fp->cl_id;
2434 rxq_init->spcl_id = fp->cl_id;
2435 rxq_init->stat_id = fp->cl_id;
2436 rxq_init->tpa_agg_sz = tpa_agg_size;
2437 rxq_init->sge_buf_sz = sge_sz;
2438 rxq_init->max_sges_pkt = max_sge;
2439 rxq_init->cache_line_log = BNX2X_RX_ALIGN_SHIFT;
2440 rxq_init->fw_sb_id = fp->fw_sb_id;
2441
2442 rxq_init->sb_cq_index = U_SB_ETH_RX_CQ_INDEX;
2443
2444 rxq_init->cid = HW_CID(bp, fp->cid);
2445
2446 rxq_init->hc_rate = bp->rx_ticks ? (1000000 / bp->rx_ticks) : 0;
2447}
2448
2449static void bnx2x_pf_tx_cl_prep(struct bnx2x *bp,
2450 struct bnx2x_fastpath *fp, struct bnx2x_txq_init_params *txq_init)
2451{
2452 u16 flags = bnx2x_get_cl_flags(bp, fp);
2453
2454 txq_init->flags = flags;
2455 txq_init->cxt = &bp->context.vcxt[fp->cid].eth;
2456 txq_init->dscr_map = fp->tx_desc_mapping;
2457 txq_init->stat_id = fp->cl_id;
2458 txq_init->cid = HW_CID(bp, fp->cid);
2459 txq_init->sb_cq_index = C_SB_ETH_TX_CQ_INDEX;
2460 txq_init->traffic_type = LLFC_TRAFFIC_TYPE_NW;
2461 txq_init->fw_sb_id = fp->fw_sb_id;
2462 txq_init->hc_rate = bp->tx_ticks ? (1000000 / bp->tx_ticks) : 0;
2463}
2464
2465void bnx2x_pf_init(struct bnx2x *bp)
2466{
2467 struct bnx2x_func_init_params func_init = {0};
2468 struct bnx2x_rss_params rss = {0};
2469 struct event_ring_data eq_data = { {0} };
2470 u16 flags;
2471
2472 /* pf specific setups */
2473 if (!CHIP_IS_E1(bp))
Dmitry Kravkovfb3bff12010-10-06 03:26:40 +00002474 storm_memset_ov(bp, bp->mf_ov, BP_FUNC(bp));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002475
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002476 if (CHIP_IS_E2(bp)) {
2477 /* reset IGU PF statistics: MSIX + ATTN */
2478 /* PF */
2479 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2480 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2481 (CHIP_MODE_IS_4_PORT(bp) ?
2482 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2483 /* ATTN */
2484 REG_WR(bp, IGU_REG_STATISTIC_NUM_MESSAGE_SENT +
2485 BNX2X_IGU_STAS_MSG_VF_CNT*4 +
2486 BNX2X_IGU_STAS_MSG_PF_CNT*4 +
2487 (CHIP_MODE_IS_4_PORT(bp) ?
2488 BP_FUNC(bp) : BP_VN(bp))*4, 0);
2489 }
2490
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002491 /* function setup flags */
2492 flags = (FUNC_FLG_STATS | FUNC_FLG_LEADING | FUNC_FLG_SPQ);
2493
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002494 if (CHIP_IS_E1x(bp))
2495 flags |= (bp->flags & TPA_ENABLE_FLAG) ? FUNC_FLG_TPA : 0;
2496 else
2497 flags |= FUNC_FLG_TPA;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002498
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002499 /* function setup */
2500
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002501 /**
2502 * Although RSS is meaningless when there is a single HW queue we
2503 * still need it enabled in order to have HW Rx hash generated.
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002504 */
Dmitry Kravkov030f3352010-10-17 23:08:53 +00002505 rss.cap = (RSS_IPV4_CAP | RSS_IPV4_TCP_CAP |
2506 RSS_IPV6_CAP | RSS_IPV6_TCP_CAP);
2507 rss.mode = bp->multi_mode;
2508 rss.result_mask = MULTI_MASK;
2509 func_init.rss = &rss;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002510
2511 func_init.func_flgs = flags;
2512 func_init.pf_id = BP_FUNC(bp);
2513 func_init.func_id = BP_FUNC(bp);
2514 func_init.fw_stat_map = bnx2x_sp_mapping(bp, fw_stats);
2515 func_init.spq_map = bp->spq_mapping;
2516 func_init.spq_prod = bp->spq_prod_idx;
2517
2518 bnx2x_func_init(bp, &func_init);
2519
2520 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
2521
2522 /*
2523 Congestion management values depend on the link rate
2524 There is no active link so initial link rate is set to 10 Gbps.
2525 When the link comes up The congestion management values are
2526 re-calculated according to the actual link rate.
2527 */
2528 bp->link_vars.line_speed = SPEED_10000;
2529 bnx2x_cmng_fns_init(bp, true, bnx2x_get_cmng_fns_mode(bp));
2530
2531 /* Only the PMF sets the HW */
2532 if (bp->port.pmf)
2533 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
2534
2535 /* no rx until link is up */
2536 bp->rx_mode = BNX2X_RX_MODE_NONE;
2537 bnx2x_set_storm_rx_mode(bp);
2538
2539 /* init Event Queue */
2540 eq_data.base_addr.hi = U64_HI(bp->eq_mapping);
2541 eq_data.base_addr.lo = U64_LO(bp->eq_mapping);
2542 eq_data.producer = bp->eq_prod;
2543 eq_data.index_id = HC_SP_INDEX_EQ_CONS;
2544 eq_data.sb_id = DEF_SB_ID;
2545 storm_memset_eq_data(bp, &eq_data, BP_FUNC(bp));
2546}
2547
2548
Eilon Greenstein2691d512009-08-12 08:22:08 +00002549static void bnx2x_e1h_disable(struct bnx2x *bp)
2550{
2551 int port = BP_PORT(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002552
2553 netif_tx_disable(bp->dev);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002554
2555 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2556
Eilon Greenstein2691d512009-08-12 08:22:08 +00002557 netif_carrier_off(bp->dev);
2558}
2559
2560static void bnx2x_e1h_enable(struct bnx2x *bp)
2561{
2562 int port = BP_PORT(bp);
2563
2564 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2565
Eilon Greenstein2691d512009-08-12 08:22:08 +00002566 /* Tx queue should be only reenabled */
2567 netif_tx_wake_all_queues(bp->dev);
2568
Eilon Greenstein061bc702009-10-15 00:18:47 -07002569 /*
2570 * Should not call netif_carrier_on since it will be called if the link
2571 * is up when checking for link state
2572 */
Eilon Greenstein2691d512009-08-12 08:22:08 +00002573}
2574
Eilon Greenstein2691d512009-08-12 08:22:08 +00002575static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2576{
Eilon Greenstein2691d512009-08-12 08:22:08 +00002577 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002578
2579 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2580
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002581 /*
2582 * This is the only place besides the function initialization
2583 * where the bp->flags can change so it is done without any
2584 * locks
2585 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002586 if (bp->mf_config[BP_VN(bp)] & FUNC_MF_CFG_FUNC_DISABLED) {
Eilon Greenstein2691d512009-08-12 08:22:08 +00002587 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002588 bp->flags |= MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002589
2590 bnx2x_e1h_disable(bp);
2591 } else {
2592 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07002593 bp->flags &= ~MF_FUNC_DIS;
Eilon Greenstein2691d512009-08-12 08:22:08 +00002594
2595 bnx2x_e1h_enable(bp);
2596 }
2597 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
2598 }
2599 if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
2600
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002601 bnx2x_cmng_fns_init(bp, true, CMNG_FNS_MINMAX);
2602 bnx2x_link_sync_notify(bp);
2603 storm_memset_cmng(bp, &bp->cmng, BP_PORT(bp));
Eilon Greenstein2691d512009-08-12 08:22:08 +00002604 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
2605 }
2606
2607 /* Report results to MCP */
2608 if (dcc_event)
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002609 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002610 else
Yaniv Rosnera22f0782010-09-07 11:41:20 +00002611 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
Eilon Greenstein2691d512009-08-12 08:22:08 +00002612}
2613
Michael Chan28912902009-10-10 13:46:53 +00002614/* must be called under the spq lock */
2615static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2616{
2617 struct eth_spe *next_spe = bp->spq_prod_bd;
2618
2619 if (bp->spq_prod_bd == bp->spq_last_bd) {
2620 bp->spq_prod_bd = bp->spq;
2621 bp->spq_prod_idx = 0;
2622 DP(NETIF_MSG_TIMER, "end of spq\n");
2623 } else {
2624 bp->spq_prod_bd++;
2625 bp->spq_prod_idx++;
2626 }
2627 return next_spe;
2628}
2629
2630/* must be called under the spq lock */
2631static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2632{
2633 int func = BP_FUNC(bp);
2634
2635 /* Make sure that BD data is updated before writing the producer */
2636 wmb();
2637
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002638 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002639 bp->spq_prod_idx);
Michael Chan28912902009-10-10 13:46:53 +00002640 mmiowb();
2641}
2642
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002643/* the slow path queue is odd since completions arrive on the fastpath ring */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00002644int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00002645 u32 data_hi, u32 data_lo, int common)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002646{
Michael Chan28912902009-10-10 13:46:53 +00002647 struct eth_spe *spe;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002648 u16 type;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002649
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002650#ifdef BNX2X_STOP_ON_ERROR
2651 if (unlikely(bp->panic))
2652 return -EIO;
2653#endif
2654
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002655 spin_lock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002656
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002657 if (!atomic_read(&bp->spq_left)) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002658 BNX2X_ERR("BUG! SPQ ring full!\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002659 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002660 bnx2x_panic();
2661 return -EBUSY;
2662 }
Eliezer Tamirf1410642008-02-28 11:51:50 -08002663
Michael Chan28912902009-10-10 13:46:53 +00002664 spe = bnx2x_sp_get_next(bp);
2665
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002666 /* CID needs port number to be encoded int it */
Michael Chan28912902009-10-10 13:46:53 +00002667 spe->hdr.conn_and_cmd_data =
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002668 cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
2669 HW_CID(bp, cid));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002670
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002671 if (common)
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002672 /* Common ramrods:
2673 * FUNC_START, FUNC_STOP, CFC_DEL, STATS, SET_MAC
2674 * TRAFFIC_STOP, TRAFFIC_START
2675 */
2676 type = (NONE_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2677 & SPE_HDR_CONN_TYPE;
2678 else
2679 /* ETH ramrods: SETUP, HALT */
2680 type = (ETH_CONNECTION_TYPE << SPE_HDR_CONN_TYPE_SHIFT)
2681 & SPE_HDR_CONN_TYPE;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002682
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002683 type |= ((BP_FUNC(bp) << SPE_HDR_FUNCTION_ID_SHIFT) &
2684 SPE_HDR_FUNCTION_ID);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002685
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002686 spe->hdr.type = cpu_to_le16(type);
2687
2688 spe->data.update_data_addr.hi = cpu_to_le32(data_hi);
2689 spe->data.update_data_addr.lo = cpu_to_le32(data_lo);
2690
2691 /* stats ramrod has its own slot on the spq */
2692 if (command != RAMROD_CMD_ID_COMMON_STAT_QUERY)
2693 /* It's ok if the actual decrement is issued towards the memory
2694 * somewhere between the spin_lock and spin_unlock. Thus no
2695 * more explicit memory barrier is needed.
2696 */
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002697 atomic_dec(&bp->spq_left);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002698
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002699 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002700 "SPQE[%x] (%x:%x) command %d hw_cid %x data (%x:%x) "
2701 "type(0x%x) left %x\n",
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002702 bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
2703 (u32)(U64_LO(bp->spq_mapping) +
2704 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00002705 HW_CID(bp, cid), data_hi, data_lo, type, atomic_read(&bp->spq_left));
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002706
Michael Chan28912902009-10-10 13:46:53 +00002707 bnx2x_sp_prod_update(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002708 spin_unlock_bh(&bp->spq_lock);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002709 return 0;
2710}
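/*
 * A standalone sketch of the SPE header packing done above. The
 * shift/mask values below are illustrative assumptions, not the real
 * HSI constants: the point is only that command and HW CID share one
 * 32-bit word while the type word carries connection type + function.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD_ID_SHIFT		24	/* assumption */
#define CONN_TYPE_MASK		0x3	/* assumption */
#define FUNC_ID_SHIFT		4	/* assumption */
#define FUNC_ID_MASK		0xf0	/* assumption */

static uint32_t pack_conn_and_cmd(uint8_t command, uint32_t hw_cid)
{
	return ((uint32_t)command << CMD_ID_SHIFT) | hw_cid;
}

static uint16_t pack_type(uint8_t conn_type, uint8_t func)
{
	uint16_t type = conn_type & CONN_TYPE_MASK;

	type |= (func << FUNC_ID_SHIFT) & FUNC_ID_MASK;
	return type;
}

int main(void)
{
	/* ETH connection type 1 (assumed), command 2, CID 0x1234, func 3 */
	printf("hdr %08x type %04x\n",
	       pack_conn_and_cmd(2, 0x1234), pack_type(1, 3));
	return 0;
}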
2711
2712/* acquire split MCP access lock register */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002713static int bnx2x_acquire_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002714{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002715 u32 j, val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002716 int rc = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002717
2718 might_sleep();
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002719 for (j = 0; j < 1000; j++) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002720 val = (1UL << 31);
2721 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2722 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2723 if (val & (1L << 31))
2724 break;
2725
2726 msleep(5);
2727 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002728 if (!(val & (1L << 31))) {
Eilon Greenstein19680c42008-08-13 15:47:33 -07002729 BNX2X_ERR("Cannot acquire MCP access lock register\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002730 rc = -EBUSY;
2731 }
2732
2733 return rc;
2734}
2735
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07002736/* release split MCP access lock register */
2737static void bnx2x_release_alr(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002738{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002739 REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002740}
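/*
 * A standalone model (editor's sketch) of the acquire/release pattern
 * above: write the lock bit, read back until the hardware reflects it,
 * and give up after a bounded number of polls (~5s at 5ms per poll).
 * reg_write/reg_read are hypothetical stand-ins for REG_WR/REG_RD.
 */
#include <stdbool.h>
#include <stdint.h>

#define LOCK_BIT ((uint32_t)1 << 31)

static uint32_t lock_reg;	/* models GRCBASE_MCP + 0x9c */

static void reg_write(uint32_t v) { lock_reg = v; }
static uint32_t reg_read(void) { return lock_reg; }

static bool alr_acquire(void)
{
	int j;

	for (j = 0; j < 1000; j++) {
		reg_write(LOCK_BIT);
		if (reg_read() & LOCK_BIT)
			return true;
		/* the real driver does msleep(5) here */
	}
	return false;
}

static void alr_release(void)
{
	reg_write(0);
}

int main(void)
{
	if (alr_acquire())
		alr_release();
	return 0;
}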
2741
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002742#define BNX2X_DEF_SB_ATT_IDX 0x0001
2743#define BNX2X_DEF_SB_IDX 0x0002
2744
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002745static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2746{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002747 struct host_sp_status_block *def_sb = bp->def_status_blk;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002748 u16 rc = 0;
2749
2750 barrier(); /* status block is written to by the chip */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002751 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2752 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002753 rc |= BNX2X_DEF_SB_ATT_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002754 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002755
2756 if (bp->def_idx != def_sb->sp_sb.running_index) {
2757 bp->def_idx = def_sb->sp_sb.running_index;
2758 rc |= BNX2X_DEF_SB_IDX;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002759 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00002760
2761 /* Do not reorder: reading the indices should complete before handling */
2762 barrier();
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002763 return rc;
2764}
2765
2766/*
2767 * slow path service functions
2768 */
2769
2770static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2771{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002772 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002773 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2774 MISC_REG_AEU_MASK_ATTN_FUNC_0;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002775 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2776 NIG_REG_MASK_INTERRUPT_PORT0;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002777 u32 aeu_mask;
Eilon Greenstein87942b42009-02-12 08:36:49 +00002778 u32 nig_mask = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002779 u32 reg_addr;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002780
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002781 if (bp->attn_state & asserted)
2782 BNX2X_ERR("IGU ERROR\n");
2783
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002784 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2785 aeu_mask = REG_RD(bp, aeu_addr);
2786
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002787 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002788 aeu_mask, asserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00002789 aeu_mask &= ~(asserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002790 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002791
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002792 REG_WR(bp, aeu_addr, aeu_mask);
2793 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002794
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002795 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002796 bp->attn_state |= asserted;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07002797 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002798
2799 if (asserted & ATTN_HARD_WIRED_MASK) {
2800 if (asserted & ATTN_NIG_FOR_FUNC) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002801
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002802 bnx2x_acquire_phy_lock(bp);
2803
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002804 /* save nig interrupt mask */
Eilon Greenstein87942b42009-02-12 08:36:49 +00002805 nig_mask = REG_RD(bp, nig_int_mask_addr);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002806 REG_WR(bp, nig_int_mask_addr, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002807
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07002808 bnx2x_link_attn(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002809
2810 /* handle unicore attn? */
2811 }
2812 if (asserted & ATTN_SW_TIMER_4_FUNC)
2813 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2814
2815 if (asserted & GPIO_2_FUNC)
2816 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2817
2818 if (asserted & GPIO_3_FUNC)
2819 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2820
2821 if (asserted & GPIO_4_FUNC)
2822 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2823
2824 if (port == 0) {
2825 if (asserted & ATTN_GENERAL_ATTN_1) {
2826 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2827 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2828 }
2829 if (asserted & ATTN_GENERAL_ATTN_2) {
2830 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2831 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2832 }
2833 if (asserted & ATTN_GENERAL_ATTN_3) {
2834 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2835 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2836 }
2837 } else {
2838 if (asserted & ATTN_GENERAL_ATTN_4) {
2839 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2840 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2841 }
2842 if (asserted & ATTN_GENERAL_ATTN_5) {
2843 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2844 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2845 }
2846 if (asserted & ATTN_GENERAL_ATTN_6) {
2847 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2848 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2849 }
2850 }
2851
2852 } /* if hardwired */
2853
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002854 if (bp->common.int_block == INT_BLOCK_HC)
2855 reg_addr = (HC_REG_COMMAND_REG + port*32 +
2856 COMMAND_REG_ATTN_BITS_SET);
2857 else
2858 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_SET_UPPER*8);
2859
2860 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", asserted,
2861 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
2862 REG_WR(bp, reg_addr, asserted);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02002863
2864 /* now set back the mask */
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002865 if (asserted & ATTN_NIG_FOR_FUNC) {
Eilon Greenstein87942b42009-02-12 08:36:49 +00002866 REG_WR(bp, nig_int_mask_addr, nig_mask);
Eilon Greensteina5e9a7c2009-01-14 21:26:01 -08002867 bnx2x_release_phy_lock(bp);
2868 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002869}
2870
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002871static inline void bnx2x_fan_failure(struct bnx2x *bp)
2872{
2873 int port = BP_PORT(bp);
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002874 u32 ext_phy_config;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002875 /* mark the failure */
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002876 ext_phy_config =
2877 SHMEM_RD(bp,
2878 dev_info.port_hw_config[port].external_phy_config);
2879
2880 ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2881 ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002882 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00002883 ext_phy_config);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002884
2885 /* log the failure */
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00002886 netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2887 " the driver to shutdown the card to prevent permanent"
2888 " damage. Please contact OEM Support for assistance\n");
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002889}
Eilon Greensteinab6ad5a2009-08-12 08:24:29 +00002890
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002891static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2892{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002893 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002894 int reg_offset;
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002895 u32 val;
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002896
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002897 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2898 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002899
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002900 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002901
2902 val = REG_RD(bp, reg_offset);
2903 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2904 REG_WR(bp, reg_offset, val);
2905
2906 BNX2X_ERR("SPIO5 hw attention\n");
2907
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002908 /* Fan failure attention */
Yaniv Rosnerd90d96b2010-09-07 11:41:04 +00002909 bnx2x_hw_reset_phy(&bp->link_params);
Eilon Greensteinfd4ef402009-07-21 05:47:27 +00002910 bnx2x_fan_failure(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002911 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002912
Eilon Greenstein589abe32009-02-12 08:36:55 +00002913 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2914 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2915 bnx2x_acquire_phy_lock(bp);
2916 bnx2x_handle_module_detect_int(&bp->link_params);
2917 bnx2x_release_phy_lock(bp);
2918 }
2919
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002920 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2921
2922 val = REG_RD(bp, reg_offset);
2923 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2924 REG_WR(bp, reg_offset, val);
2925
2926 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002927 (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002928 bnx2x_panic();
2929 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002930}
2931
2932static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2933{
2934 u32 val;
2935
Eilon Greenstein0626b892009-02-12 08:38:14 +00002936 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002937
2938 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2939 BNX2X_ERR("DB hw attention 0x%x\n", val);
2940 /* DORQ discard attention */
2941 if (val & 0x2)
2942 BNX2X_ERR("FATAL error from DORQ\n");
2943 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002944
2945 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2946
2947 int port = BP_PORT(bp);
2948 int reg_offset;
2949
2950 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2951 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2952
2953 val = REG_RD(bp, reg_offset);
2954 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2955 REG_WR(bp, reg_offset, val);
2956
2957 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00002958 (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002959 bnx2x_panic();
2960 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002961}
2962
2963static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2964{
2965 u32 val;
2966
2967 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2968
2969 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2970 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2971 /* CFC error attention */
2972 if (val & 0x2)
2973 BNX2X_ERR("FATAL error from CFC\n");
2974 }
2975
2976 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2977
2978 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2979 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2980 /* RQ_USDMDP_FIFO_OVERFLOW */
2981 if (val & 0x18000)
2982 BNX2X_ERR("FATAL error from PXP\n");
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00002983 if (CHIP_IS_E2(bp)) {
2984 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_1);
2985 BNX2X_ERR("PXP hw attention-1 0x%x\n", val);
2986 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08002987 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07002988
2989 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2990
2991 int port = BP_PORT(bp);
2992 int reg_offset;
2993
2994 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2995 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2996
2997 val = REG_RD(bp, reg_offset);
2998 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2999 REG_WR(bp, reg_offset, val);
3000
3001 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
Eilon Greenstein0fc5d002009-08-12 08:24:05 +00003002 (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003003 bnx2x_panic();
3004 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003005}
3006
3007static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3008{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003009 u32 val;
3010
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003011 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
3012
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003013 if (attn & BNX2X_PMF_LINK_ASSERT) {
3014 int func = BP_FUNC(bp);
3015
3016 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003017 bp->mf_config[BP_VN(bp)] = MF_CFG_RD(bp,
3018 func_mf_config[BP_ABS_FUNC(bp)].config);
3019 val = SHMEM_RD(bp,
3020 func_mb[BP_FW_MB_IDX(bp)].drv_status);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003021 if (val & DRV_STATUS_DCC_EVENT_MASK)
3022 bnx2x_dcc_event(bp,
3023 (val & DRV_STATUS_DCC_EVENT_MASK));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003024 bnx2x__link_status_update(bp);
Eilon Greenstein2691d512009-08-12 08:22:08 +00003025 if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003026 bnx2x_pmf_update(bp);
3027
3028 } else if (attn & BNX2X_MC_ASSERT_BITS) {
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003029
3030 BNX2X_ERR("MC assert!\n");
3031 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
3032 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
3033 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
3034 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
3035 bnx2x_panic();
3036
3037 } else if (attn & BNX2X_MCP_ASSERT) {
3038
3039 BNX2X_ERR("MCP assert!\n");
3040 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003041 bnx2x_fw_dump(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003042
3043 } else
3044 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
3045 }
3046
3047 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003048 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
3049 if (attn & BNX2X_GRC_TIMEOUT) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003050 val = CHIP_IS_E1(bp) ? 0 :
3051 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003052 BNX2X_ERR("GRC time-out 0x%08x\n", val);
3053 }
3054 if (attn & BNX2X_GRC_RSV) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003055 val = CHIP_IS_E1(bp) ? 0 :
3056 REG_RD(bp, MISC_REG_GRC_RSV_ATTN);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003057 BNX2X_ERR("GRC reserved 0x%08x\n", val);
3058 }
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003059 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003060 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003061}
3062
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003063#define BNX2X_MISC_GEN_REG MISC_REG_GENERIC_POR_1
3064#define LOAD_COUNTER_BITS 16 /* Number of bits for load counter */
3065#define LOAD_COUNTER_MASK (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
3066#define RESET_DONE_FLAG_MASK (~LOAD_COUNTER_MASK)
3067#define RESET_DONE_FLAG_SHIFT LOAD_COUNTER_BITS
3068#define CHIP_PARITY_SUPPORTED(bp) (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00003069
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003070/*
3071 * should be run under rtnl lock
3072 */
3073static inline void bnx2x_set_reset_done(struct bnx2x *bp)
3074{
3075 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3076 val &= ~(1 << RESET_DONE_FLAG_SHIFT);
3077 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3078 barrier();
3079 mmiowb();
3080}
3081
3082/*
3083 * should be run under rtnl lock
3084 */
3085static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
3086{
3087 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3088 val |= (1 << 16);
3089 REG_WR(bp, BNX2X_MISC_GEN_REG, val);
3090 barrier();
3091 mmiowb();
3092}
3093
3094/*
3095 * should be run under rtnl lock
3096 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003097bool bnx2x_reset_is_done(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003098{
3099 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3100 DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
3101 return (val & RESET_DONE_FLAG_MASK) ? false : true;
3102}
3103
3104/*
3105 * should be run under rtnl lock
3106 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003107inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003108{
3109 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3110
3111 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3112
3113 val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
3114 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3115 barrier();
3116 mmiowb();
3117}
3118
3119/*
3120 * should be run under rtnl lock
3121 */
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003122u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003123{
3124 u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3125
3126 DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
3127
3128 val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
3129 REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
3130 barrier();
3131 mmiowb();
3132
3133 return val1;
3134}
3135
3136/*
3137 * should be run under rtnl lock
3138 */
3139static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
3140{
3141 return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
3142}
3143
3144static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
3145{
3146 u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
3147 REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
3148}
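/*
 * A standalone model of the BNX2X_MISC_GEN_REG layout driven by the
 * helpers above: bits 0..15 hold the load counter, bit 16 is the
 * reset-in-progress flag, and both live in one register so every
 * function sees the same state. A sketch only; register semantics
 * beyond what the code above shows are not implied.
 */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define CNT_BITS	16
#define CNT_MASK	(((uint32_t)1 << CNT_BITS) - 1)
#define RST_FLAG	((uint32_t)1 << CNT_BITS)

static uint32_t gen_reg;	/* models the shared register */

static void inc_load(void)
{
	uint32_t cnt = ((gen_reg & CNT_MASK) + 1) & CNT_MASK;

	gen_reg = (gen_reg & ~CNT_MASK) | cnt;
}

static uint32_t dec_load(void)
{
	uint32_t cnt = ((gen_reg & CNT_MASK) - 1) & CNT_MASK;

	gen_reg = (gen_reg & ~CNT_MASK) | cnt;
	return cnt;
}

static bool reset_is_done(void)
{
	return !(gen_reg & RST_FLAG);
}

int main(void)
{
	uint32_t left;

	gen_reg |= RST_FLAG;	/* mark reset in progress */
	inc_load();
	inc_load();
	left = dec_load();
	assert(!reset_is_done() && left == 1);
	gen_reg &= ~RST_FLAG;	/* reset done, counter preserved */
	left = dec_load();
	assert(reset_is_done() && left == 0);
	return 0;
}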
3149
3150static inline void _print_next_block(int idx, const char *blk)
3151{
3152 if (idx)
3153 pr_cont(", ");
3154 pr_cont("%s", blk);
3155}
3156
3157static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
3158{
3159 int i = 0;
3160 u32 cur_bit = 0;
3161 for (i = 0; sig; i++) {
3162 cur_bit = ((u32)0x1 << i);
3163 if (sig & cur_bit) {
3164 switch (cur_bit) {
3165 case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
3166 _print_next_block(par_num++, "BRB");
3167 break;
3168 case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
3169 _print_next_block(par_num++, "PARSER");
3170 break;
3171 case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
3172 _print_next_block(par_num++, "TSDM");
3173 break;
3174 case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
3175 _print_next_block(par_num++, "SEARCHER");
3176 break;
3177 case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
3178 _print_next_block(par_num++, "TSEMI");
3179 break;
3180 }
3181
3182 /* Clear the bit */
3183 sig &= ~cur_bit;
3184 }
3185 }
3186
3187 return par_num;
3188}
3189
3190static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
3191{
3192 int i = 0;
3193 u32 cur_bit = 0;
3194 for (i = 0; sig; i++) {
3195 cur_bit = ((u32)0x1 << i);
3196 if (sig & cur_bit) {
3197 switch (cur_bit) {
3198 case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
3199 _print_next_block(par_num++, "PBCLIENT");
3200 break;
3201 case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
3202 _print_next_block(par_num++, "QM");
3203 break;
3204 case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
3205 _print_next_block(par_num++, "XSDM");
3206 break;
3207 case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
3208 _print_next_block(par_num++, "XSEMI");
3209 break;
3210 case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
3211 _print_next_block(par_num++, "DOORBELLQ");
3212 break;
3213 case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
3214 _print_next_block(par_num++, "VAUX PCI CORE");
3215 break;
3216 case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
3217 _print_next_block(par_num++, "DEBUG");
3218 break;
3219 case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
3220 _print_next_block(par_num++, "USDM");
3221 break;
3222 case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
3223 _print_next_block(par_num++, "USEMI");
3224 break;
3225 case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
3226 _print_next_block(par_num++, "UPB");
3227 break;
3228 case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
3229 _print_next_block(par_num++, "CSDM");
3230 break;
3231 }
3232
3233 /* Clear the bit */
3234 sig &= ~cur_bit;
3235 }
3236 }
3237
3238 return par_num;
3239}
3240
3241static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
3242{
3243 int i = 0;
3244 u32 cur_bit = 0;
3245 for (i = 0; sig; i++) {
3246 cur_bit = ((u32)0x1 << i);
3247 if (sig & cur_bit) {
3248 switch (cur_bit) {
3249 case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
3250 _print_next_block(par_num++, "CSEMI");
3251 break;
3252 case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
3253 _print_next_block(par_num++, "PXP");
3254 break;
3255 case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
3256 _print_next_block(par_num++,
3257 "PXPPCICLOCKCLIENT");
3258 break;
3259 case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
3260 _print_next_block(par_num++, "CFC");
3261 break;
3262 case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
3263 _print_next_block(par_num++, "CDU");
3264 break;
3265 case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
3266 _print_next_block(par_num++, "IGU");
3267 break;
3268 case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
3269 _print_next_block(par_num++, "MISC");
3270 break;
3271 }
3272
3273 /* Clear the bit */
3274 sig &= ~cur_bit;
3275 }
3276 }
3277
3278 return par_num;
3279}
3280
3281static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
3282{
3283 int i = 0;
3284 u32 cur_bit = 0;
3285 for (i = 0; sig; i++) {
3286 cur_bit = ((u32)0x1 << i);
3287 if (sig & cur_bit) {
3288 switch (cur_bit) {
3289 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
3290 _print_next_block(par_num++, "MCP ROM");
3291 break;
3292 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
3293 _print_next_block(par_num++, "MCP UMP RX");
3294 break;
3295 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
3296 _print_next_block(par_num++, "MCP UMP TX");
3297 break;
3298 case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
3299 _print_next_block(par_num++, "MCP SCPAD");
3300 break;
3301 }
3302
3303 /* Clear the bit */
3304 sig &= ~cur_bit;
3305 }
3306 }
3307
3308 return par_num;
3309}
3310
3311static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
3312 u32 sig2, u32 sig3)
3313{
3314 if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
3315 (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
3316 int par_num = 0;
3317 DP(NETIF_MSG_HW, "Was parity error: HW block parity attention: "
3318 "[0]:0x%08x [1]:0x%08x "
3319 "[2]:0x%08x [3]:0x%08x\n",
3320 sig0 & HW_PRTY_ASSERT_SET_0,
3321 sig1 & HW_PRTY_ASSERT_SET_1,
3322 sig2 & HW_PRTY_ASSERT_SET_2,
3323 sig3 & HW_PRTY_ASSERT_SET_3);
3324 printk(KERN_ERR"%s: Parity errors detected in blocks: ",
3325 bp->dev->name);
3326 par_num = bnx2x_print_blocks_with_parity0(
3327 sig0 & HW_PRTY_ASSERT_SET_0, par_num);
3328 par_num = bnx2x_print_blocks_with_parity1(
3329 sig1 & HW_PRTY_ASSERT_SET_1, par_num);
3330 par_num = bnx2x_print_blocks_with_parity2(
3331 sig2 & HW_PRTY_ASSERT_SET_2, par_num);
3332 par_num = bnx2x_print_blocks_with_parity3(
3333 sig3 & HW_PRTY_ASSERT_SET_3, par_num);
3334 printk("\n");
3335 return true;
3336 } else
3337 return false;
3338}
3339
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003340bool bnx2x_chk_parity_attn(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003341{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003342 struct attn_route attn;
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003343 int port = BP_PORT(bp);
3344
3345 attn.sig[0] = REG_RD(bp,
3346 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
3347 port*4);
3348 attn.sig[1] = REG_RD(bp,
3349 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
3350 port*4);
3351 attn.sig[2] = REG_RD(bp,
3352 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
3353 port*4);
3354 attn.sig[3] = REG_RD(bp,
3355 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
3356 port*4);
3357
3358 return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
3359 attn.sig[3]);
3360}
3361
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003362
3363static inline void bnx2x_attn_int_deasserted4(struct bnx2x *bp, u32 attn)
3364{
3365 u32 val;
3366 if (attn & AEU_INPUTS_ATTN_BITS_PGLUE_HW_INTERRUPT) {
3367
3368 val = REG_RD(bp, PGLUE_B_REG_PGLUE_B_INT_STS_CLR);
3369 BNX2X_ERR("PGLUE hw attention 0x%x\n", val);
3370 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_ADDRESS_ERROR)
3371 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3372 "ADDRESS_ERROR\n");
3373 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_INCORRECT_RCV_BEHAVIOR)
3374 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3375 "INCORRECT_RCV_BEHAVIOR\n");
3376 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_WAS_ERROR_ATTN)
3377 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3378 "WAS_ERROR_ATTN\n");
3379 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_VF_LENGTH_VIOLATION_ATTN)
3380 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3381 "VF_LENGTH_VIOLATION_ATTN\n");
3382 if (val &
3383 PGLUE_B_PGLUE_B_INT_STS_REG_VF_GRC_SPACE_VIOLATION_ATTN)
3384 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3385 "VF_GRC_SPACE_VIOLATION_ATTN\n");
3386 if (val &
3387 PGLUE_B_PGLUE_B_INT_STS_REG_VF_MSIX_BAR_VIOLATION_ATTN)
3388 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3389 "VF_MSIX_BAR_VIOLATION_ATTN\n");
3390 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_ERROR_ATTN)
3391 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3392 "TCPL_ERROR_ATTN\n");
3393 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_TCPL_IN_TWO_RCBS_ATTN)
3394 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3395 "TCPL_IN_TWO_RCBS_ATTN\n");
3396 if (val & PGLUE_B_PGLUE_B_INT_STS_REG_CSSNOOP_FIFO_OVERFLOW)
3397 BNX2X_ERR("PGLUE_B_PGLUE_B_INT_STS_REG_"
3398 "CSSNOOP_FIFO_OVERFLOW\n");
3399 }
3400 if (attn & AEU_INPUTS_ATTN_BITS_ATC_HW_INTERRUPT) {
3401 val = REG_RD(bp, ATC_REG_ATC_INT_STS_CLR);
3402 BNX2X_ERR("ATC hw attention 0x%x\n", val);
3403 if (val & ATC_ATC_INT_STS_REG_ADDRESS_ERROR)
3404 BNX2X_ERR("ATC_ATC_INT_STS_REG_ADDRESS_ERROR\n");
3405 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_TO_NOT_PEND)
3406 BNX2X_ERR("ATC_ATC_INT_STS_REG"
3407 "_ATC_TCPL_TO_NOT_PEND\n");
3408 if (val & ATC_ATC_INT_STS_REG_ATC_GPA_MULTIPLE_HITS)
3409 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3410 "ATC_GPA_MULTIPLE_HITS\n");
3411 if (val & ATC_ATC_INT_STS_REG_ATC_RCPL_TO_EMPTY_CNT)
3412 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3413 "ATC_RCPL_TO_EMPTY_CNT\n");
3414 if (val & ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR)
3415 BNX2X_ERR("ATC_ATC_INT_STS_REG_ATC_TCPL_ERROR\n");
3416 if (val & ATC_ATC_INT_STS_REG_ATC_IREQ_LESS_THAN_STU)
3417 BNX2X_ERR("ATC_ATC_INT_STS_REG_"
3418 "ATC_IREQ_LESS_THAN_STU\n");
3419 }
3420
3421 if (attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3422 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)) {
3423 BNX2X_ERR("FATAL parity attention set4 0x%x\n",
3424 (u32)(attn & (AEU_INPUTS_ATTN_BITS_PGLUE_PARITY_ERROR |
3425 AEU_INPUTS_ATTN_BITS_ATC_PARITY_ERROR)));
3426 }
3427
3428}
3429
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003430static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
3431{
3432 struct attn_route attn, *group_mask;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003433 int port = BP_PORT(bp);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003434 int index;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003435 u32 reg_addr;
3436 u32 val;
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003437 u32 aeu_mask;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003438
3439 /* need to take HW lock because MCP or other port might also
3440 try to handle this event */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003441 bnx2x_acquire_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003442
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003443 if (bnx2x_chk_parity_attn(bp)) {
3444 bp->recovery_state = BNX2X_RECOVERY_INIT;
3445 bnx2x_set_reset_in_progress(bp);
3446 schedule_delayed_work(&bp->reset_task, 0);
3447 /* Disable HW interrupts */
3448 bnx2x_int_disable(bp);
3449 bnx2x_release_alr(bp);
3450 /* In case of parity errors don't handle attentions so that
3451 * other functions would "see" parity errors.
3452 */
3453 return;
3454 }
3455
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003456 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
3457 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
3458 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
3459 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003460 if (CHIP_IS_E2(bp))
3461 attn.sig[4] =
3462 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4);
3463 else
3464 attn.sig[4] = 0;
3465
3466 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x %08x\n",
3467 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3], attn.sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003468
3469 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
3470 if (deasserted & (1 << index)) {
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003471 group_mask = &bp->attn_group[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003472
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003473 DP(NETIF_MSG_HW, "group[%d]: %08x %08x "
3474 "%08x %08x %08x\n",
3475 index,
3476 group_mask->sig[0], group_mask->sig[1],
3477 group_mask->sig[2], group_mask->sig[3],
3478 group_mask->sig[4]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003479
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003480 bnx2x_attn_int_deasserted4(bp,
3481 attn.sig[4] & group_mask->sig[4]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003482 bnx2x_attn_int_deasserted3(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003483 attn.sig[3] & group_mask->sig[3]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003484 bnx2x_attn_int_deasserted1(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003485 attn.sig[1] & group_mask->sig[1]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003486 bnx2x_attn_int_deasserted2(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003487 attn.sig[2] & group_mask->sig[2]);
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003488 bnx2x_attn_int_deasserted0(bp,
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003489 attn.sig[0] & group_mask->sig[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003490 }
3491 }
3492
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07003493 bnx2x_release_alr(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003494
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003495 if (bp->common.int_block == INT_BLOCK_HC)
3496 reg_addr = (HC_REG_COMMAND_REG + port*32 +
3497 COMMAND_REG_ATTN_BITS_CLR);
3498 else
3499 reg_addr = (BAR_IGU_INTMEM + IGU_CMD_ATTN_BIT_CLR_UPPER*8);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003500
3501 val = ~deasserted;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003502 DP(NETIF_MSG_HW, "about to mask 0x%08x at %s addr 0x%x\n", val,
3503 (bp->common.int_block == INT_BLOCK_HC) ? "HC" : "IGU", reg_addr);
Eilon Greenstein5c862842008-08-13 15:51:48 -07003504 REG_WR(bp, reg_addr, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003505
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003506 if (~bp->attn_state & deasserted)
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003507 BNX2X_ERR("IGU ERROR\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003508
3509 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
3510 MISC_REG_AEU_MASK_ATTN_FUNC_0;
3511
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003512 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
3513 aeu_mask = REG_RD(bp, reg_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003514
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003515 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
3516 aeu_mask, deasserted);
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00003517 aeu_mask |= (deasserted & 0x3ff);
Eilon Greenstein3fcaf2e2008-08-13 15:50:45 -07003518 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
3519
3520 REG_WR(bp, reg_addr, aeu_mask);
3521 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003522
3523 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
3524 bp->attn_state &= ~deasserted;
3525 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
3526}
3527
3528static void bnx2x_attn_int(struct bnx2x *bp)
3529{
3530 /* read local copy of bits */
Eilon Greenstein68d59482009-01-14 21:27:36 -08003531 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
3532 attn_bits);
3533 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
3534 attn_bits_ack);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003535 u32 attn_state = bp->attn_state;
3536
3537 /* look for changed bits */
3538 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
3539 u32 deasserted = ~attn_bits & attn_ack & attn_state;
3540
3541 DP(NETIF_MSG_HW,
3542 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
3543 attn_bits, attn_ack, asserted, deasserted);
3544
3545 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003546 BNX2X_ERR("BAD attention state\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003547
3548 /* handle bits that were raised */
3549 if (asserted)
3550 bnx2x_attn_int_asserted(bp, asserted);
3551
3552 if (deasserted)
3553 bnx2x_attn_int_deasserted(bp, deasserted);
3554}
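/*
 * A standalone worked example of the assert/deassert derivation in
 * bnx2x_attn_int() above: a line is newly asserted when the chip
 * reports it but it is neither acked nor in the driver state, and
 * newly deasserted when the chip stops reporting a line that is both
 * acked and in the driver state.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t attn_bits = 0x5;	/* chip now reports lines 0 and 2 */
	uint32_t attn_ack = 0x3;	/* lines 0 and 1 were acked */
	uint32_t attn_state = 0x3;	/* driver thinks 0 and 1 are up */

	uint32_t asserted = attn_bits & ~attn_ack & ~attn_state;
	uint32_t deasserted = ~attn_bits & attn_ack & attn_state;

	printf("asserted %x deasserted %x\n", asserted, deasserted);
	/* prints "asserted 4 deasserted 2": line 2 rose, line 1 fell */
	return 0;
}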
3555
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003556static inline void bnx2x_update_eq_prod(struct bnx2x *bp, u16 prod)
3557{
3558 /* No memory barriers */
3559 storm_memset_eq_prod(bp, prod, BP_FUNC(bp));
3560 mmiowb(); /* keep prod updates ordered */
3561}
3562
3563#ifdef BCM_CNIC
3564static int bnx2x_cnic_handle_cfc_del(struct bnx2x *bp, u32 cid,
3565 union event_ring_elem *elem)
3566{
3567 if (!bp->cnic_eth_dev.starting_cid ||
3568 cid < bp->cnic_eth_dev.starting_cid)
3569 return 1;
3570
3571 DP(BNX2X_MSG_SP, "got delete ramrod for CNIC CID %d\n", cid);
3572
3573 if (unlikely(elem->message.data.cfc_del_event.error)) {
3574 BNX2X_ERR("got delete ramrod for CNIC CID %d with error!\n",
3575 cid);
3576 bnx2x_panic_dump(bp);
3577 }
3578 bnx2x_cnic_cfc_comp(bp, cid);
3579 return 0;
3580}
3581#endif
3582
3583static void bnx2x_eq_int(struct bnx2x *bp)
3584{
3585 u16 hw_cons, sw_cons, sw_prod;
3586 union event_ring_elem *elem;
3587 u32 cid;
3588 u8 opcode;
3589 int spqe_cnt = 0;
3590
3591 hw_cons = le16_to_cpu(*bp->eq_cons_sb);
3592
3593 /* The hw_cons range is 1-255, 257 - the sw_cons range is 0-254, 256.
3594 * When we get the next-page we need to adjust so the loop
3595 * condition below will be met. The next element is the size of a
3596 * regular element and hence we increment by 1
3597 */
3598 if ((hw_cons & EQ_DESC_MAX_PAGE) == EQ_DESC_MAX_PAGE)
3599 hw_cons++;
3600
3601 /* This function never runs in parallel with itself for a
3602 * specific bp, thus there is no need for a "paired" read memory
3603 * barrier here.
3604 */
3605 sw_cons = bp->eq_cons;
3606 sw_prod = bp->eq_prod;
3607
3608 DP(BNX2X_MSG_SP, "EQ: hw_cons %u sw_cons %u bp->spq_left %u\n",
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003609 hw_cons, sw_cons, atomic_read(&bp->spq_left));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003610
3611 for (; sw_cons != hw_cons;
3612 sw_prod = NEXT_EQ_IDX(sw_prod), sw_cons = NEXT_EQ_IDX(sw_cons)) {
3613
3614
3615 elem = &bp->eq_ring[EQ_DESC(sw_cons)];
3616
3617 cid = SW_CID(elem->message.data.cfc_del_event.cid);
3618 opcode = elem->message.opcode;
3619
3620
3621 /* handle eq element */
3622 switch (opcode) {
3623 case EVENT_RING_OPCODE_STAT_QUERY:
3624 DP(NETIF_MSG_TIMER, "got statistics comp event\n");
3625 /* nothing to do with stats comp */
3626 continue;
3627
3628 case EVENT_RING_OPCODE_CFC_DEL:
3629 /* handle according to cid range */
3630 /*
3631 * we may want to verify here that the bp state is
3632 * HALTING
3633 */
3634 DP(NETIF_MSG_IFDOWN,
3635 "got delete ramrod for MULTI[%d]\n", cid);
3636#ifdef BCM_CNIC
3637 if (!bnx2x_cnic_handle_cfc_del(bp, cid, elem))
3638 goto next_spqe;
3639#endif
3640 bnx2x_fp(bp, cid, state) =
3641 BNX2X_FP_STATE_CLOSED;
3642
3643 goto next_spqe;
3644 }
3645
3646 switch (opcode | bp->state) {
3647 case (EVENT_RING_OPCODE_FUNCTION_START |
3648 BNX2X_STATE_OPENING_WAIT4_PORT):
3649 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
3650 bp->state = BNX2X_STATE_FUNC_STARTED;
3651 break;
3652
3653 case (EVENT_RING_OPCODE_FUNCTION_STOP |
3654 BNX2X_STATE_CLOSING_WAIT4_HALT):
3655 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
3656 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
3657 break;
3658
3659 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_OPEN):
3660 case (EVENT_RING_OPCODE_SET_MAC | BNX2X_STATE_DIAG):
3661 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
3662 bp->set_mac_pending = 0;
3663 break;
3664
3665 case (EVENT_RING_OPCODE_SET_MAC |
3666 BNX2X_STATE_CLOSING_WAIT4_HALT):
3667 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
3668 bp->set_mac_pending = 0;
3669 break;
3670 default:
3671 /* unknown event log error and continue */
3672 BNX2X_ERR("Unknown EQ event %d\n",
3673 elem->message.opcode);
3674 }
3675next_spqe:
3676 spqe_cnt++;
3677 } /* for */
3678
Dmitry Kravkov8fe23fb2010-10-06 03:27:41 +00003679 smp_mb__before_atomic_inc();
3680 atomic_add(spqe_cnt, &bp->spq_left);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003681
3682 bp->eq_cons = sw_cons;
3683 bp->eq_prod = sw_prod;
3684 /* Make sure the above memory writes were issued before the producer update */
3685 smp_wmb();
3686
3687 /* update producer */
3688 bnx2x_update_eq_prod(bp, bp->eq_prod);
3689}
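/*
 * A standalone sketch of the next-page skip used by bnx2x_eq_int()
 * above. Assumptions: a page holds PAGE_DESC descriptors and its last
 * slot is a next-page pointer that is never a real completion, so a
 * consumer landing on that slot steps over it; next_idx() mirrors
 * what NEXT_EQ_IDX is assumed to do.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_DESC	256			/* assumption */
#define MAX_PAGE_IDX	(PAGE_DESC - 1)		/* last slot = next-page */

static uint16_t next_idx(uint16_t idx)
{
	idx++;
	if ((idx & MAX_PAGE_IDX) == MAX_PAGE_IDX)
		idx++;				/* skip the next-page slot */
	return idx;
}

int main(void)
{
	uint16_t cons = 253;
	int i;

	for (i = 0; i < 4; i++) {
		printf("%d\n", cons);
		cons = next_idx(cons);
	}
	/* prints 253 254 256 257: slot 255 is never consumed */
	return 0;
}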
3690
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003691static void bnx2x_sp_task(struct work_struct *work)
3692{
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003693 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003694 u16 status;
3695
3696 /* Return here if interrupt is disabled */
3697 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003698 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003699 return;
3700 }
3701
3702 status = bnx2x_update_dsb_idx(bp);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003703/* if (status == 0) */
3704/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003705
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003706 DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003707
Eliezer Tamir877e9aa2008-02-28 11:55:53 -08003708 /* HW attentions */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003709 if (status & BNX2X_DEF_SB_ATT_IDX) {
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003710 bnx2x_attn_int(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003711 status &= ~BNX2X_DEF_SB_ATT_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003712 }
3713
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003714 /* SP events: STAT_QUERY and others */
3715 if (status & BNX2X_DEF_SB_IDX) {
3716
3717 /* Handle EQ completions */
3718 bnx2x_eq_int(bp);
3719
3720 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID,
3721 le16_to_cpu(bp->def_idx), IGU_INT_NOP, 1);
3722
3723 status &= ~BNX2X_DEF_SB_IDX;
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00003724 }
3725
3726 if (unlikely(status))
3727 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
3728 status);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003729
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003730 bnx2x_ack_sb(bp, bp->igu_dsb_id, ATTENTION_ID,
3731 le16_to_cpu(bp->def_att_idx), IGU_INT_ENABLE, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003732}
3733
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00003734irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003735{
3736 struct net_device *dev = dev_instance;
3737 struct bnx2x *bp = netdev_priv(dev);
3738
3739 /* Return here if interrupt is disabled */
3740 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
Eilon Greenstein3196a882008-08-13 15:58:49 -07003741 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003742 return IRQ_HANDLED;
3743 }
3744
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003745 bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0,
3746 IGU_INT_DISABLE, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003747
3748#ifdef BNX2X_STOP_ON_ERROR
3749 if (unlikely(bp->panic))
3750 return IRQ_HANDLED;
3751#endif
3752
Michael Chan993ac7b2009-10-10 13:46:56 +00003753#ifdef BCM_CNIC
3754 {
3755 struct cnic_ops *c_ops;
3756
3757 rcu_read_lock();
3758 c_ops = rcu_dereference(bp->cnic_ops);
3759 if (c_ops)
3760 c_ops->cnic_handler(bp->cnic_data, NULL);
3761 rcu_read_unlock();
3762 }
3763#endif
Eilon Greenstein1cf167f2009-01-14 21:22:18 -08003764 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003765
3766 return IRQ_HANDLED;
3767}
3768
3769/* end of slow path */
3770
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003771static void bnx2x_timer(unsigned long data)
3772{
3773 struct bnx2x *bp = (struct bnx2x *) data;
3774
3775 if (!netif_running(bp->dev))
3776 return;
3777
3778 if (atomic_read(&bp->intr_sem) != 0)
Eliezer Tamirf1410642008-02-28 11:51:50 -08003779 goto timer_restart;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003780
3781 if (poll) {
3782 struct bnx2x_fastpath *fp = &bp->fp[0];
3783 int rc;
3784
Eilon Greenstein7961f792009-03-02 07:59:31 +00003785 bnx2x_tx_int(fp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003786 rc = bnx2x_rx_int(fp, 1000);
3787 }
3788
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003789 if (!BP_NOMCP(bp)) {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003790 int mb_idx = BP_FW_MB_IDX(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003791 u32 drv_pulse;
3792 u32 mcp_pulse;
3793
3794 ++bp->fw_drv_pulse_wr_seq;
3795 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
3796 /* TBD - add SYSTEM_TIME */
3797 drv_pulse = bp->fw_drv_pulse_wr_seq;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003798 SHMEM_WR(bp, func_mb[mb_idx].drv_pulse_mb, drv_pulse);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003799
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003800 mcp_pulse = (SHMEM_RD(bp, func_mb[mb_idx].mcp_pulse_mb) &
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003801 MCP_PULSE_SEQ_MASK);
3802 /* The delta between driver pulse and mcp response
3803 * should be 1 (before mcp response) or 0 (after mcp response)
3804 */
3805 if ((drv_pulse != mcp_pulse) &&
3806 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
3807 /* someone lost a heartbeat... */
3808 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
3809 drv_pulse, mcp_pulse);
3810 }
3811 }
3812
Eilon Greensteinf34d28e2009-10-15 00:18:08 -07003813 if (bp->state == BNX2X_STATE_OPEN)
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07003814 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003815
Eliezer Tamirf1410642008-02-28 11:51:50 -08003816timer_restart:
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003817 mod_timer(&bp->timer, jiffies + bp->current_interval);
3818}
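/*
 * A standalone worked example of the heartbeat check in bnx2x_timer()
 * above: both pulses wrap within the sequence mask, and the sequence
 * is healthy when the driver is exactly 0 or 1 ahead of the MCP. The
 * mask value below is an assumption for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define PULSE_SEQ_MASK 0x7fff	/* assumed width of the pulse sequence */

static int pulse_ok(uint32_t drv_pulse, uint32_t mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & PULSE_SEQ_MASK));
}

int main(void)
{
	printf("%d %d %d\n",
	       pulse_ok(0x7fff, 0x7ffe),	/* 1: driver one ahead */
	       pulse_ok(0x0000, 0x7fff),	/* 1: one ahead across wrap */
	       pulse_ok(0x0005, 0x0002));	/* 0: heartbeat lost */
	return 0;
}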
3819
3820/* end of Statistics */
3821
3822/* nic init */
3823
3824/*
3825 * nic init service functions
3826 */
3827
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003828static inline void bnx2x_fill(struct bnx2x *bp, u32 addr, int fill, u32 len)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003829{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003830 u32 i;
3831 if (!(len%4) && !(addr%4))
3832 for (i = 0; i < len; i += 4)
3833 REG_WR(bp, addr + i, fill);
3834 else
3835 for (i = 0; i < len; i++)
3836 REG_WR8(bp, addr + i, fill);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003837
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003838}
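/*
 * A standalone sketch of the alignment choice in bnx2x_fill() above:
 * when both the address and the byte length are 4-aligned the region
 * is filled with 32-bit writes, otherwise it degrades to byte writes.
 * The mem[] array stands in for the BAR window.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint8_t mem[64];		/* stands in for BAR memory */

static void fill(uint32_t addr, uint32_t val, uint32_t len)
{
	uint32_t i;

	if (!(len % 4) && !(addr % 4))
		for (i = 0; i < len; i += 4)		/* REG_WR path */
			memcpy(&mem[addr + i], &val, 4);
	else
		for (i = 0; i < len; i++)		/* REG_WR8 path */
			mem[addr + i] = (uint8_t)val;
}

int main(void)
{
	fill(0, 0xdeadbeef, 16);	/* aligned: dword writes */
	fill(17, 0xff, 3);		/* unaligned: byte writes */
	printf("%02x %02x\n", mem[0], mem[17]);
	return 0;
}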
3839
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003840/* helper: writes FP SP data to FW - data_size in dwords */
3841static inline void bnx2x_wr_fp_sb_data(struct bnx2x *bp,
3842 int fw_sb_id,
3843 u32 *sb_data_p,
3844 u32 data_size)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003845{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003846 int index;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003847 for (index = 0; index < data_size; index++)
3848 REG_WR(bp, BAR_CSTRORM_INTMEM +
3849 CSTORM_STATUS_BLOCK_DATA_OFFSET(fw_sb_id) +
3850 sizeof(u32)*index,
3851 *(sb_data_p + index));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003852}
3853
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003854static inline void bnx2x_zero_fp_sb(struct bnx2x *bp, int fw_sb_id)
3855{
3856 u32 *sb_data_p;
3857 u32 data_size = 0;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003858 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003859 struct hc_status_block_data_e1x sb_data_e1x;
3860
3861 /* disable the function first */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003862 if (CHIP_IS_E2(bp)) {
3863 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3864 sb_data_e2.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3865 sb_data_e2.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3866 sb_data_e2.common.p_func.vf_valid = false;
3867 sb_data_p = (u32 *)&sb_data_e2;
3868 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3869 } else {
3870 memset(&sb_data_e1x, 0,
3871 sizeof(struct hc_status_block_data_e1x));
3872 sb_data_e1x.common.p_func.pf_id = HC_FUNCTION_DISABLED;
3873 sb_data_e1x.common.p_func.vf_id = HC_FUNCTION_DISABLED;
3874 sb_data_e1x.common.p_func.vf_valid = false;
3875 sb_data_p = (u32 *)&sb_data_e1x;
3876 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3877 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003878 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3879
3880 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3881 CSTORM_STATUS_BLOCK_OFFSET(fw_sb_id), 0,
3882 CSTORM_STATUS_BLOCK_SIZE);
3883 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3884 CSTORM_SYNC_BLOCK_OFFSET(fw_sb_id), 0,
3885 CSTORM_SYNC_BLOCK_SIZE);
3886}
3887
3888/* helper: writes SP SB data to FW */
3889static inline void bnx2x_wr_sp_sb_data(struct bnx2x *bp,
3890 struct hc_sp_status_block_data *sp_sb_data)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003891{
3892 int func = BP_FUNC(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003893 int i;
3894 for (i = 0; i < sizeof(struct hc_sp_status_block_data)/sizeof(u32); i++)
3895 REG_WR(bp, BAR_CSTRORM_INTMEM +
3896 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
3897 i*sizeof(u32),
3898 *((u32 *)sp_sb_data + i));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003899}
3900
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003901static inline void bnx2x_zero_sp_sb(struct bnx2x *bp)
3902{
3903 int func = BP_FUNC(bp);
3904 struct hc_sp_status_block_data sp_sb_data;
3905 memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));
3906
3907 sp_sb_data.p_func.pf_id = HC_FUNCTION_DISABLED;
3908 sp_sb_data.p_func.vf_id = HC_FUNCTION_DISABLED;
3909 sp_sb_data.p_func.vf_valid = false;
3910
3911 bnx2x_wr_sp_sb_data(bp, &sp_sb_data);
3912
3913 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3914 CSTORM_SP_STATUS_BLOCK_OFFSET(func), 0,
3915 CSTORM_SP_STATUS_BLOCK_SIZE);
3916 bnx2x_fill(bp, BAR_CSTRORM_INTMEM +
3917 CSTORM_SP_SYNC_BLOCK_OFFSET(func), 0,
3918 CSTORM_SP_SYNC_BLOCK_SIZE);
3919
3920}
3921
3922
3923static inline
3924void bnx2x_setup_ndsb_state_machine(struct hc_status_block_sm *hc_sm,
3925 int igu_sb_id, int igu_seg_id)
3926{
3927 hc_sm->igu_sb_id = igu_sb_id;
3928 hc_sm->igu_seg_id = igu_seg_id;
3929 hc_sm->timer_value = 0xFF;
3930 hc_sm->time_to_expire = 0xFFFFFFFF;
3931}
3932
3933void bnx2x_init_sb(struct bnx2x *bp, dma_addr_t mapping, int vfid,
3934 u8 vf_valid, int fw_sb_id, int igu_sb_id)
3935{
3936 int igu_seg_id;
3937
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003938 struct hc_status_block_data_e2 sb_data_e2;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003939 struct hc_status_block_data_e1x sb_data_e1x;
3940 struct hc_status_block_sm *hc_sm_p;
3941 struct hc_index_data *hc_index_p;
3942 int data_size;
3943 u32 *sb_data_p;
3944
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003945 if (CHIP_INT_MODE_IS_BC(bp))
3946 igu_seg_id = HC_SEG_ACCESS_NORM;
3947 else
3948 igu_seg_id = IGU_SEG_ACCESS_NORM;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003949
3950 bnx2x_zero_fp_sb(bp, fw_sb_id);
3951
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00003952 if (CHIP_IS_E2(bp)) {
3953 memset(&sb_data_e2, 0, sizeof(struct hc_status_block_data_e2));
3954 sb_data_e2.common.p_func.pf_id = BP_FUNC(bp);
3955 sb_data_e2.common.p_func.vf_id = vfid;
3956 sb_data_e2.common.p_func.vf_valid = vf_valid;
3957 sb_data_e2.common.p_func.vnic_id = BP_VN(bp);
3958 sb_data_e2.common.same_igu_sb_1b = true;
3959 sb_data_e2.common.host_sb_addr.hi = U64_HI(mapping);
3960 sb_data_e2.common.host_sb_addr.lo = U64_LO(mapping);
3961 hc_sm_p = sb_data_e2.common.state_machine;
3962 hc_index_p = sb_data_e2.index_data;
3963 sb_data_p = (u32 *)&sb_data_e2;
3964 data_size = sizeof(struct hc_status_block_data_e2)/sizeof(u32);
3965 } else {
3966 memset(&sb_data_e1x, 0,
3967 sizeof(struct hc_status_block_data_e1x));
3968 sb_data_e1x.common.p_func.pf_id = BP_FUNC(bp);
3969 sb_data_e1x.common.p_func.vf_id = 0xff;
3970 sb_data_e1x.common.p_func.vf_valid = false;
3971 sb_data_e1x.common.p_func.vnic_id = BP_VN(bp);
3972 sb_data_e1x.common.same_igu_sb_1b = true;
3973 sb_data_e1x.common.host_sb_addr.hi = U64_HI(mapping);
3974 sb_data_e1x.common.host_sb_addr.lo = U64_LO(mapping);
3975 hc_sm_p = sb_data_e1x.common.state_machine;
3976 hc_index_p = sb_data_e1x.index_data;
3977 sb_data_p = (u32 *)&sb_data_e1x;
3978 data_size = sizeof(struct hc_status_block_data_e1x)/sizeof(u32);
3979 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003980
3981 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_RX_ID],
3982 igu_sb_id, igu_seg_id);
3983 bnx2x_setup_ndsb_state_machine(&hc_sm_p[SM_TX_ID],
3984 igu_sb_id, igu_seg_id);
3985
3986 DP(NETIF_MSG_HW, "Init FW SB %d\n", fw_sb_id);
3987
3988 /* write indices to HW */
3989 bnx2x_wr_fp_sb_data(bp, fw_sb_id, sb_data_p, data_size);
3990}
3991
3992static void bnx2x_update_coalesce_sb_index(struct bnx2x *bp, u16 fw_sb_id,
3993 u8 sb_index, u8 disable, u16 usec)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02003994{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07003995 int port = BP_PORT(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00003996 u8 ticks = usec / BNX2X_BTR;
3997
3998 storm_memset_hc_timeout(bp, port, fw_sb_id, sb_index, ticks);
3999
4000 disable = disable ? 1 : (usec ? 0 : 1);
4001 storm_memset_hc_disable(bp, port, fw_sb_id, sb_index, disable);
4002}
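/*
 * A worked example of the microsecond-to-ticks conversion above,
 * assuming BNX2X_BTR is the base tick resolution in usec (the value 4
 * used here is an assumption): usec/BTR becomes the HC timeout, and a
 * zero usec setting disables the index even when not asked to.
 */
#include <stdio.h>

#define BTR 4	/* assumed base tick resolution, in usec */

int main(void)
{
	unsigned int usec = 200;
	unsigned int disable = 0;
	unsigned int ticks = usec / BTR;

	disable = disable ? 1 : (usec ? 0 : 1);
	printf("ticks %u disable %u\n", ticks, disable);	/* 50 0 */
	return 0;
}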
4003
4004static void bnx2x_update_coalesce_sb(struct bnx2x *bp, u16 fw_sb_id,
4005 u16 tx_usec, u16 rx_usec)
4006{
4007 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, U_SB_ETH_RX_CQ_INDEX,
4008 false, rx_usec);
4009 bnx2x_update_coalesce_sb_index(bp, fw_sb_id, C_SB_ETH_TX_CQ_INDEX,
4010 false, tx_usec);
4011}
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00004012
Dmitry Kravkov523224a2010-10-06 03:23:26 +00004013static void bnx2x_init_def_sb(struct bnx2x *bp)
4014{
4015 struct host_sp_status_block *def_sb = bp->def_status_blk;
	dma_addr_t mapping = bp->def_status_blk_mapping;
	int igu_sp_sb_index;
	int igu_seg_id;
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int reg_offset;
	u64 section;
	int index;
	struct hc_sp_status_block_data sp_sb_data;
	memset(&sp_sb_data, 0, sizeof(struct hc_sp_status_block_data));

	if (CHIP_INT_MODE_IS_BC(bp)) {
		igu_sp_sb_index = DEF_SB_IGU_ID;
		igu_seg_id = HC_SEG_ACCESS_DEF;
	} else {
		igu_sp_sb_index = bp->igu_dsb_id;
		igu_seg_id = IGU_SEG_ACCESS_DEF;
	}

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = igu_sp_sb_index;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		int sindex;
		/* take care of sig[0]..sig[4] */
		for (sindex = 0; sindex < 4; sindex++)
			bp->attn_group[index].sig[sindex] =
			   REG_RD(bp, reg_offset + sindex*0x4 + 0x10*index);

		if (CHIP_IS_E2(bp))
			/*
			 * enable5 is separate from the rest of the registers,
			 * and therefore the address skip is 4
			 * and not 16 between the different groups
			 */
			bp->attn_group[index].sig[4] = REG_RD(bp,
					reg_offset + 0x10 + 0x4*index);
		else
			bp->attn_group[index].sig[4] = 0;
	}

	if (bp->common.int_block == INT_BLOCK_HC) {
		reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
				     HC_REG_ATTN_MSG0_ADDR_L);

		REG_WR(bp, reg_offset, U64_LO(section));
		REG_WR(bp, reg_offset + 4, U64_HI(section));
	} else if (CHIP_IS_E2(bp)) {
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_L, U64_LO(section));
		REG_WR(bp, IGU_REG_ATTN_MSG_ADDR_H, U64_HI(section));
	}

	section = ((u64)mapping) + offsetof(struct host_sp_status_block,
					    sp_sb);

	bnx2x_zero_sp_sb(bp);

	sp_sb_data.host_sb_addr.lo = U64_LO(section);
	sp_sb_data.host_sb_addr.hi = U64_HI(section);
	sp_sb_data.igu_sb_id = igu_sp_sb_index;
	sp_sb_data.igu_seg_id = igu_seg_id;
	sp_sb_data.p_func.pf_id = func;
	sp_sb_data.p_func.vnic_id = BP_VN(bp);
	sp_sb_data.p_func.vf_id = 0xff;

	bnx2x_wr_sp_sb_data(bp, &sp_sb_data);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, bp->igu_dsb_id, USTORM_ID, 0, IGU_INT_ENABLE, 0);
}
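
/*
 * Note on the attention-group loop above (derived purely from its
 * arithmetic, not from the register spec): each group reads four signal
 * words laid out 4 bytes apart, with a 16-byte stride between groups,
 * while the E2-only enable5 bank advances just 4 bytes per group, which
 * is why sig[4] is fetched with its own, smaller stride.
 */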

void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_update_coalesce_sb(bp, bp->fp[i].fw_sb_id,
					 bp->rx_ticks, bp->tx_ticks);
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	spin_lock_init(&bp->spq_lock);
	atomic_set(&bp->spq_left, MAX_SPQ_PENDING);

	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
}

static void bnx2x_init_eq_ring(struct bnx2x *bp)
{
	int i;
	for (i = 1; i <= NUM_EQ_PAGES; i++) {
		union event_ring_elem *elem =
			&bp->eq_ring[EQ_DESC_CNT_PAGE * i - 1];

		elem->next_page.addr.hi =
			cpu_to_le32(U64_HI(bp->eq_mapping +
					   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
		elem->next_page.addr.lo =
			cpu_to_le32(U64_LO(bp->eq_mapping +
					   BCM_PAGE_SIZE * (i % NUM_EQ_PAGES)));
	}
	bp->eq_cons = 0;
	bp->eq_prod = NUM_EQ_DESC;
	bp->eq_cons_sb = BNX2X_EQ_INDEX;
}
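
/*
 * Sketch of the resulting event-queue layout (the page count here is
 * only illustrative): the last element of every page is consumed as a
 * next-page pointer rather than a real event, so with two pages the
 * ring chains page 0 -> page 1 and page 1 wraps back to page 0 via the
 * (i % NUM_EQ_PAGES) arithmetic above.
 */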

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			bp->fp->cl_id + (i % bp->num_queues));
}
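
/*
 * Example with hypothetical values: if num_queues == 4 and the base
 * client id is 0, the loop above fills the indirection table as
 * 0,1,2,3,0,1,2,3,... so RSS hash buckets are spread round-robin over
 * the active RX queues.
 */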

void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	int mode = bp->rx_mode;
	u16 cl_id;

	/* All but management unicast packets should pass to the host as well */
	u32 llh_mask =
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
		NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_ACCEPT_NONE);
		break;

	case BNX2X_RX_MODE_NORMAL:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
					  BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_BROADCAST |
					  BNX2X_ACCEPT_MULTICAST);
		break;

	case BNX2X_RX_MODE_ALLMULTI:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id,
					  BNX2X_ACCEPT_UNICAST |
					  BNX2X_ACCEPT_BROADCAST |
					  BNX2X_ACCEPT_ALL_MULTICAST);
		break;

	case BNX2X_RX_MODE_PROMISC:
		cl_id = BP_L_ID(bp);
		bnx2x_rxq_set_mac_filters(bp, cl_id, BNX2X_PROMISCUOUS_MODE);

		/* pass management unicast packets as well */
		llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
		break;

	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	REG_WR(bp,
	       BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
			     NIG_REG_LLH0_BRB1_DRV_MASK,
	       llh_mask);

	DP(NETIF_MSG_IFUP, "rx mode %d\n"
	   "drop_ucast 0x%x\ndrop_mcast 0x%x\ndrop_bcast 0x%x\n"
	   "accp_ucast 0x%x\naccp_mcast 0x%x\naccp_bcast 0x%x\n", mode,
	   bp->mac_filters.ucast_drop_all,
	   bp->mac_filters.mcast_drop_all,
	   bp->mac_filters.bcast_drop_all,
	   bp->mac_filters.ucast_accept_all,
	   bp->mac_filters.mcast_accept_all,
	   bp->mac_filters.bcast_accept_all);

	storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));
}
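
/*
 * Summary of the mode -> filter mapping programmed above:
 *	NONE:		accept nothing
 *	NORMAL:		unicast + broadcast + filtered multicast
 *	ALLMULTI:	unicast + broadcast + all multicast
 *	PROMISC:	promiscuous, plus management unicast via llh_mask
 */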

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (!CHIP_IS_E1(bp)) {

		/* xstorm needs to know whether to add ovlan to packets or
		 * not; in switch-independent mode we write 0 here */
		REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
		REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
			bp->mf_mode);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
	if (CHIP_IS_E2(bp)) {
		REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_IGU_MODE_OFFSET,
			CHIP_INT_MODE_IS_BC(bp) ?
			HC_IGU_BC_MODE : HC_IGU_NBC_MODE);
	}
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	/* port */
}

static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
{
	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		bnx2x_init_internal_common(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		bnx2x_init_internal_port(bp);
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		/* internal memory per function is
		   initialized inside bnx2x_pf_init */
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}
}
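
/*
 * Note the deliberate fall-through in the switch above: a COMMON load
 * also runs the PORT and FUNCTION cases, and a PORT load also runs the
 * FUNCTION case, so each load_code initializes its own level and every
 * level below it.
 */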

static void bnx2x_init_fp_sb(struct bnx2x *bp, int fp_idx)
{
	struct bnx2x_fastpath *fp = &bp->fp[fp_idx];

	fp->state = BNX2X_FP_STATE_CLOSED;

	fp->index = fp->cid = fp_idx;
	fp->cl_id = BP_L_ID(bp) + fp_idx;
	fp->fw_sb_id = bp->base_fw_ndsb + fp->cl_id + CNIC_CONTEXT_USE;
	fp->igu_sb_id = bp->igu_base_sb + fp_idx + CNIC_CONTEXT_USE;
	/* qZone id equals the FW (per path) client id */
	fp->cl_qzone_id = fp->cl_id +
			  BP_PORT(bp)*(CHIP_IS_E2(bp) ? ETH_MAX_RX_CLIENTS_E2 :
					ETH_MAX_RX_CLIENTS_E1H);
	/* init shortcut */
	fp->ustorm_rx_prods_offset = CHIP_IS_E2(bp) ?
			USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id) :
			USTORM_RX_PRODS_E1X_OFFSET(BP_PORT(bp), fp->cl_id);
	/* Setup SB indices */
	fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
	fp->tx_cons_sb = BNX2X_TX_SB_INDEX;

	DP(NETIF_MSG_IFUP, "queue[%d]:  bnx2x_init_sb(%p,%p)  "
	   "cl_id %d  fw_sb %d  igu_sb %d\n",
	   fp_idx, bp, fp->status_blk.e1x_sb, fp->cl_id, fp->fw_sb_id,
	   fp->igu_sb_id);
	bnx2x_init_sb(bp, fp->status_blk_mapping, BNX2X_VF_ID_INVALID, false,
		      fp->fw_sb_id, fp->igu_sb_id);

	bnx2x_update_fpsb_idx(fp);
}
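
/*
 * The ids derived above follow a base-plus-offset scheme: the client
 * id, FW status block id and IGU status block id all advance with the
 * queue index. An assumption from the naming: CNIC_CONTEXT_USE shifts
 * the fastpath ids up to leave a slot for the CNIC status block when
 * BCM_CNIC is compiled in.
 */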

void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
{
	int i;

	for_each_queue(bp, i)
		bnx2x_init_fp_sb(bp, i);
#ifdef BCM_CNIC

	bnx2x_init_sb(bp, bp->cnic_sb_mapping,
		      BNX2X_VF_ID_INVALID, false,
		      CNIC_SB_ID(bp), CNIC_IGU_SB_ID(bp));

#endif

	/* ensure status block indices were read */
	rmb();

	bnx2x_init_def_sb(bp);
	bnx2x_update_dsb_idx(bp);
	bnx2x_init_rx_rings(bp);
	bnx2x_init_tx_rings(bp);
	bnx2x_init_sp_ring(bp);
	bnx2x_init_eq_ring(bp);
	bnx2x_init_internal(bp, load_code);
	bnx2x_pf_init(bp);
	bnx2x_init_ind_table(bp);
	bnx2x_stats_init(bp);

	/* At this point, we are ready for interrupts */
	atomic_set(&bp->intr_sem, 0);

	/* flush all before enabling interrupts */
	mb();
	mmiowb();

	bnx2x_int_enable(bp);

	/* Check for SPIO5 */
	bnx2x_attn_int_deasserted0(bp,
		REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
				   AEU_INPUTS_ATTN_BITS_SPIO5);
}
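
/*
 * Ordering note: every ring and status block above is fully set up,
 * and the mb()/mmiowb() pair has flushed the writes, before
 * bnx2x_int_enable() lets the first interrupt in, so the ISR can never
 * observe a half-initialized ring.
 */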

/* end of nic init */

/*
 * gzip service functions
 */

static int bnx2x_gunzip_init(struct bnx2x *bp)
{
	bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
					    &bp->gunzip_mapping, GFP_KERNEL);
	if (bp->gunzip_buf == NULL)
		goto gunzip_nomem1;

	bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
	if (bp->strm == NULL)
		goto gunzip_nomem2;

	bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
				      GFP_KERNEL);
	if (bp->strm->workspace == NULL)
		goto gunzip_nomem3;

	return 0;

gunzip_nomem3:
	kfree(bp->strm);
	bp->strm = NULL;

gunzip_nomem2:
	dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
			  bp->gunzip_mapping);
	bp->gunzip_buf = NULL;

gunzip_nomem1:
	netdev_err(bp->dev, "Cannot allocate firmware buffer for"
		   " decompression\n");
	return -ENOMEM;
}

static void bnx2x_gunzip_end(struct bnx2x *bp)
{
	kfree(bp->strm->workspace);
	kfree(bp->strm);
	bp->strm = NULL;

	if (bp->gunzip_buf) {
		dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
				  bp->gunzip_mapping);
		bp->gunzip_buf = NULL;
	}
}

static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
{
	int n, rc;

	/* check gzip header */
	if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
		BNX2X_ERR("Bad gzip header\n");
		return -EINVAL;
	}

	n = 10;

#define FNAME				0x8

	if (zbuf[3] & FNAME)
		while ((zbuf[n++] != 0) && (n < len));

	bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
	bp->strm->avail_in = len - n;
	bp->strm->next_out = bp->gunzip_buf;
	bp->strm->avail_out = FW_BUF_SIZE;

	rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
	if (rc != Z_OK)
		return rc;

	rc = zlib_inflate(bp->strm, Z_FINISH);
	if ((rc != Z_OK) && (rc != Z_STREAM_END))
		netdev_err(bp->dev, "Firmware decompression error: %s\n",
			   bp->strm->msg);

	bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
	if (bp->gunzip_outlen & 0x3)
		netdev_err(bp->dev, "Firmware decompression error:"
				    " gunzip_outlen (%d) not aligned\n",
			   bp->gunzip_outlen);
	bp->gunzip_outlen >>= 2;

	zlib_inflateEnd(bp->strm);

	if (rc == Z_STREAM_END)
		return 0;

	return rc;
}
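
/*
 * For reference, the RFC 1952 header fields checked in bnx2x_gunzip():
 * bytes 0-1 are the magic 0x1f 0x8b, byte 2 is the compression method
 * (8 == deflate, i.e. Z_DEFLATED) and byte 3 holds the flags, where
 * FNAME (0x8) means a NUL-terminated file name follows the fixed
 * 10-byte header and must be skipped. zlib_inflateInit2() is then
 * called with -MAX_WBITS to request a raw deflate stream, since the
 * gzip wrapper was already parsed by hand.
 */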

/* nic load/unload */

/*
 * General service functions
 */

/* send a NIG loopback debug packet */
static void bnx2x_lb_pckt(struct bnx2x *bp)
{
	u32 wb_write[3];

	/* Ethernet source and destination addresses */
	wb_write[0] = 0x55555555;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x20;		/* SOP */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);

	/* NON-IP protocol */
	wb_write[0] = 0x09000000;
	wb_write[1] = 0x55555555;
	wb_write[2] = 0x10;		/* EOP, eop_bvalid = 0 */
	REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
}
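
/*
 * The two DMAE writes above emit a single minimal loopback frame: the
 * first 16-byte beat carries dummy Ethernet addresses and is flagged
 * SOP (0x20), the second carries a non-IP protocol word and is flagged
 * EOP (0x10), giving the 0x10-byte packet that bnx2x_int_mem_test()
 * below expects to count in the NIG statistics.
 */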

/* Some of the internal memories are not directly readable from the
 * driver; to test them we send debug packets.
 */
static int bnx2x_int_mem_test(struct bnx2x *bp)
{
	int factor;
	int count, i;
	u32 val = 0;

	if (CHIP_REV_IS_FPGA(bp))
		factor = 120;
	else if (CHIP_REV_IS_EMUL(bp))
		factor = 200;
	else
		factor = 1;

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send Ethernet packet */
	bnx2x_lb_pckt(bp);

	/* TODO: do we need to reset the NIG statistics? */
	/* Wait until NIG register shows 1 packet of size 0x10 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0x10)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x10) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -1;
	}

	/* Wait until PRS register shows 1 packet */
	count = 1000 * factor;
	while (count) {
		val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
		if (val == 1)
			break;

		msleep(10);
		count--;
	}
	if (val != 0x1) {
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
		return -2;
	}

	/* Reset and init BRB, PRS */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);

	DP(NETIF_MSG_HW, "part2\n");

	/* Disable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
	REG_WR(bp, CFC_REG_DEBUG0, 0x1);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);

	/* Write 0 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);

	/* send 10 Ethernet packets */
	for (i = 0; i < 10; i++)
		bnx2x_lb_pckt(bp);

	/* Wait until NIG register shows 10 + 1
	   packets of size 11*0x10 = 0xb0 */
	count = 1000 * factor;
	while (count) {

		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);
		if (val == 0xb0)
			break;

		msleep(10);
		count--;
	}
	if (val != 0xb0) {
		BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
		return -3;
	}

	/* Wait until PRS register shows 2 packets */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 2)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* Write 1 to parser credits for CFC search request */
	REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);

	/* Wait until PRS register shows 3 packets */
	msleep(10 * factor);
	/* Wait until NIG register shows 1 packet of size 0x10 */
	val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
	if (val != 3)
		BNX2X_ERR("PRS timeout  val = 0x%x\n", val);

	/* clear NIG EOP FIFO */
	for (i = 0; i < 11; i++)
		REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
	val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
	if (val != 1) {
		BNX2X_ERR("clear of NIG failed\n");
		return -4;
	}

	/* Reset and init BRB, PRS, NIG */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
	msleep(50);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
	msleep(50);
	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif

	/* Enable inputs of parser neighbor blocks */
	REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
	REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
	REG_WR(bp, CFC_REG_DEBUG0, 0x0);
	REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);

	DP(NETIF_MSG_HW, "done\n");

	return 0; /* OK */
}

static void enable_blocks_attention(struct bnx2x *bp)
{
	REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0x40);
	else
		REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
	REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
	/*
	 * mask read length error interrupts in brb for parser
	 * (parsing unit and 'checksum and crc' unit)
	 * these errors are legal (PU reads fixed length and CAC can cause
	 * read length error on truncated packets)
	 */
	REG_WR(bp, BRB1_REG_BRB1_INT_MASK, 0xFC00);
	REG_WR(bp, QM_REG_QM_INT_MASK, 0);
	REG_WR(bp, TM_REG_TM_INT_MASK, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
	REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
	REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
	REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
	REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
	REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
/*	REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
	REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
	REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
	REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */

	if (CHIP_REV_IS_FPGA(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
	else if (CHIP_IS_E2(bp))
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0,
			   (PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_OF
				| PXP2_PXP2_INT_MASK_0_REG_PGL_CPL_AFT
				| PXP2_PXP2_INT_MASK_0_REG_PGL_PCIE_ATTN
				| PXP2_PXP2_INT_MASK_0_REG_PGL_READ_BLOCKED
				| PXP2_PXP2_INT_MASK_0_REG_PGL_WRITE_BLOCKED));
	else
		REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
	REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
	REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
/*	REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
	REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
	REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
/*	REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
	REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);		/* bit 3,4 masked */
}

static const struct {
	u32 addr;
	u32 mask;
} bnx2x_parity_mask[] = {
	{PXP_REG_PXP_PRTY_MASK,		0x3ffffff},
	{PXP2_REG_PXP2_PRTY_MASK_0,	0xffffffff},
	{PXP2_REG_PXP2_PRTY_MASK_1,	0x7f},
	{HC_REG_HC_PRTY_MASK,		0x7},
	{MISC_REG_MISC_PRTY_MASK,	0x1},
	{QM_REG_QM_PRTY_MASK,		0x0},
	{DORQ_REG_DORQ_PRTY_MASK,	0x0},
	{GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
	{GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
	{SRC_REG_SRC_PRTY_MASK,		0x4},		/* bit 2 */
	{CDU_REG_CDU_PRTY_MASK,		0x0},
	{CFC_REG_CFC_PRTY_MASK,		0x0},
	{DBG_REG_DBG_PRTY_MASK,		0x0},
	{DMAE_REG_DMAE_PRTY_MASK,	0x0},
	{BRB1_REG_BRB1_PRTY_MASK,	0x0},
	{PRS_REG_PRS_PRTY_MASK,		(1 << 6)},	/* bit 6 */
	{TSDM_REG_TSDM_PRTY_MASK,	0x18},		/* bit 3,4 */
	{CSDM_REG_CSDM_PRTY_MASK,	0x8},		/* bit 3 */
	{USDM_REG_USDM_PRTY_MASK,	0x38},		/* bit 3,4,5 */
	{XSDM_REG_XSDM_PRTY_MASK,	0x8},		/* bit 3 */
	{TSEM_REG_TSEM_PRTY_MASK_0,	0x0},
	{TSEM_REG_TSEM_PRTY_MASK_1,	0x0},
	{USEM_REG_USEM_PRTY_MASK_0,	0x0},
	{USEM_REG_USEM_PRTY_MASK_1,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_0,	0x0},
	{CSEM_REG_CSEM_PRTY_MASK_1,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_0,	0x0},
	{XSEM_REG_XSEM_PRTY_MASK_1,	0x0}
};
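
/*
 * Reading the table above: a mask of 0x0 enables every parity
 * attention a block can raise, while the non-zero entries (see the
 * per-entry bit comments) appear to keep known-benign parity sources
 * masked.
 */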

static void enable_blocks_parity(struct bnx2x *bp)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
		REG_WR(bp, bnx2x_parity_mask[i].addr,
		       bnx2x_parity_mask[i].mask);
}

static void bnx2x_reset_common(struct bnx2x *bp)
{
	/* reset_common */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0xd3ffff7f);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
}

static void bnx2x_init_pxp(struct bnx2x *bp)
{
	u16 devctl;
	int r_order, w_order;

	pci_read_config_word(bp->pdev,
			     bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
	DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
	w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
	if (bp->mrrs == -1)
		r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
	else {
		DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
		r_order = bp->mrrs;
	}

	bnx2x_init_pxp_arb(bp, r_order, w_order);
}

static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
{
	int is_required;
	u32 val;
	int port;

	if (BP_NOMCP(bp))
		return;

	is_required = 0;
	val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
	      SHARED_HW_CFG_FAN_FAILURE_MASK;

	if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
		is_required = 1;

	/*
	 * The fan failure mechanism is usually related to the PHY type since
	 * the power consumption of the board is affected by the PHY. Currently,
	 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
	 */
	else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
		for (port = PORT_0; port < PORT_MAX; port++) {
			is_required |=
				bnx2x_fan_failure_det_req(
					bp,
					bp->common.shmem_base,
					bp->common.shmem2_base,
					port);
		}

	DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);

	if (is_required == 0)
		return;

	/* Fan failure is indicated by SPIO 5 */
	bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
		       MISC_REGISTERS_SPIO_INPUT_HI_Z);

	/* set to active low mode */
	val = REG_RD(bp, MISC_REG_SPIO_INT);
	val |= ((1 << MISC_REGISTERS_SPIO_5) <<
					MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
	REG_WR(bp, MISC_REG_SPIO_INT, val);

	/* enable interrupt to signal the IGU */
	val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
	val |= (1 << MISC_REGISTERS_SPIO_5);
	REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
}

static void bnx2x_pretend_func(struct bnx2x *bp, u8 pretend_func_num)
{
	u32 offset = 0;

	if (CHIP_IS_E1(bp))
		return;
	if (CHIP_IS_E1H(bp) && (pretend_func_num >= E1H_FUNC_MAX))
		return;

	switch (BP_ABS_FUNC(bp)) {
	case 0:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F0;
		break;
	case 1:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F1;
		break;
	case 2:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F2;
		break;
	case 3:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F3;
		break;
	case 4:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F4;
		break;
	case 5:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F5;
		break;
	case 6:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F6;
		break;
	case 7:
		offset = PXP2_REG_PGL_PRETEND_FUNC_F7;
		break;
	default:
		return;
	}

	REG_WR(bp, offset, pretend_func_num);
	REG_RD(bp, offset);
	DP(NETIF_MSG_HW, "Pretending to func %d\n", pretend_func_num);
}
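
/*
 * The REG_RD() of the same offset right after the REG_WR() is the
 * usual read-back idiom: it flushes the posted write so that every
 * access that follows is already performed on behalf of the pretended
 * function.
 */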

static void bnx2x_pf_disable(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, IGU_REG_PF_CONFIGURATION);
	val &= ~IGU_PF_CONF_FUNC_EN;

	REG_WR(bp, IGU_REG_PF_CONFIGURATION, val);
	REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 0);
	REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 0);
}

static int bnx2x_init_hw_common(struct bnx2x *bp, u32 load_code)
{
	u32 val, i;

	DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_ABS_FUNC(bp));

	bnx2x_reset_common(bp);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);

	bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, MISC_REG_E1HMF_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		u8 fid;

		/*
		 * In 4-port or 2-port mode we need to turn off master-enable
		 * for everyone and then turn it back on for self. So we
		 * disregard multi-function or not and always disable it for
		 * all functions on the given path, which means 0,2,4,6 for
		 * path 0 and 1,3,5,7 for path 1.
		 */
		for (fid = BP_PATH(bp); fid < E2_FUNC_MAX*2; fid += 2) {
			if (fid == BP_ABS_FUNC(bp)) {
				REG_WR(bp,
				    PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER,
				    1);
				continue;
			}

			bnx2x_pretend_func(bp, fid);
			/* clear pf enable */
			bnx2x_pf_disable(bp);
			bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));
		}
	}

	bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
	if (CHIP_IS_E1(bp)) {
		/* enable HW interrupt from PXP on USDM overflow
		   bit 16 on INT_MASK_0 */
		REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
	}

	bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
	bnx2x_init_pxp(bp);

#ifdef __BIG_ENDIAN
	REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
	REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
	/* make sure this value is 0 */
	REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);

/*	REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
	REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
	REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
#endif

	bnx2x_ilt_init_page_size(bp, INITOP_SET);

	if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
		REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);

	/* let the HW do its magic ... */
	msleep(100);
	/* finish PXP init */
	val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 CFG failed\n");
		return -EBUSY;
	}
	val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
	if (val != 1) {
		BNX2X_ERR("PXP2 RD_INIT failed\n");
		return -EBUSY;
	}

	/* Timers bug workaround, E2 only. We need to set the entire ILT to
	 * have entries with value "0" and valid bit on.
	 * This needs to be done by the first PF that is loaded in a path
	 * (i.e. common phase)
	 */
	if (CHIP_IS_E2(bp)) {
		struct ilt_client_info ilt_cli;
		struct bnx2x_ilt ilt;
		memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
		memset(&ilt, 0, sizeof(struct bnx2x_ilt));

		/* initialize dummy TM client */
		ilt_cli.start = 0;
		ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
		ilt_cli.client_num = ILT_CLIENT_TM;

		/* Step 1: set zeroes to all ilt page entries with valid bit on
		 * Step 2: set the timers first/last ilt entry to point
		 * to the entire range to prevent ILT range error for 3rd/4th
		 * vnic (this code assumes existence of the vnic)
		 *
		 * both steps performed by call to bnx2x_ilt_client_init_op()
		 * with dummy TM client
		 *
		 * we must use pretend since PXP2_REG_RQ_##blk##_FIRST_ILT
		 * and its sibling are split registers
		 */
		bnx2x_pretend_func(bp, (BP_PATH(bp) + 6));
		bnx2x_ilt_client_init_op_ilt(bp, &ilt, &ilt_cli, INITOP_CLEAR);
		bnx2x_pretend_func(bp, BP_ABS_FUNC(bp));

		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_RD, BNX2X_PXP_DRAM_ALIGN);
		REG_WR(bp, PXP2_REG_RQ_DRAM_ALIGN_SEL, 1);
	}

	REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
	REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);

	if (CHIP_IS_E2(bp)) {
		int factor = CHIP_REV_IS_EMUL(bp) ? 1000 :
				(CHIP_REV_IS_FPGA(bp) ? 400 : 0);
		bnx2x_init_block(bp, PGLUE_B_BLOCK, COMMON_STAGE);

		bnx2x_init_block(bp, ATC_BLOCK, COMMON_STAGE);

		/* let the HW do its magic ... */
		do {
			msleep(200);
			val = REG_RD(bp, ATC_REG_ATC_INIT_DONE);
		} while (factor-- && (val != 1));

		if (val != 1) {
			BNX2X_ERR("ATC_INIT failed\n");
			return -EBUSY;
		}
	}

	bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);

	/* clean the DMAE memory */
	bp->dmae_ready = 1;
	bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);

	bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);

	bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
	bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);

	bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, COMMON_STAGE);

	/* QM queues pointers table */
	bnx2x_qm_init_ptr_table(bp, bp->qm_cid_count, INITOP_SET);

	/* soft reset pulse */
	REG_WR(bp, QM_REG_SOFT_RESET, 1);
	REG_WR(bp, QM_REG_SOFT_RESET, 0);

#ifdef BCM_CNIC
	bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
#endif

	bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
	REG_WR(bp, DORQ_REG_DPM_CID_OFST, BNX2X_DB_SHIFT);

	if (!CHIP_REV_IS_SLOW(bp)) {
		/* enable hw interrupt from doorbell Q */
		REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
	}

	bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_FULL_LB_XOFF_THRESHOLD, 248);
		REG_WR(bp, BRB1_REG_FULL_LB_XON_THRESHOLD, 328);
	}

	bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
	REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif
	if (!CHIP_IS_E1(bp))
		REG_WR(bp, PRS_REG_E1HOV_MODE, IS_MF(bp));

	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PRS_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PRS_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);

	bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
	bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));

	bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, COMMON_STAGE);

	/* sync semi rtc */
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
	       0x80000000);
	REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
	       0x80000000);

	bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp)) {
		int has_ovlan = IS_MF(bp);
		REG_WR(bp, PBF_REG_HDRS_AFTER_BASIC, (has_ovlan ? 7 : 6));
		REG_WR(bp, PBF_REG_MUST_HAVE_HDRS, (has_ovlan ? 1 : 0));
	}

	REG_WR(bp, SRC_REG_SOFT_RST, 1);
	for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
		REG_WR(bp, i, random32());

	bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
#ifdef BCM_CNIC
	REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
	REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
	REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
	REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
	REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
	REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
	REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
	REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
	REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
	REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
#endif
	REG_WR(bp, SRC_REG_SOFT_RST, 0);

	if (sizeof(union cdu_context) != 1024)
		/* we currently assume that a context is 1024 bytes */
		dev_alert(&bp->pdev->dev, "please adjust the size "
					  "of cdu_context(%ld)\n",
			  (long)sizeof(union cdu_context));

	bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
	val = (4 << 24) + (0 << 12) + 1024;
	REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);

	bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
	REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
	/* enable context validation interrupt from CFC */
	REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);

	/* set the thresholds to prevent CFC/CDU race */
	REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);

	bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);

	if (CHIP_IS_E2(bp) && BP_NOMCP(bp))
		REG_WR(bp, IGU_REG_RESET_MEMORIES, 0x36);

	bnx2x_init_block(bp, IGU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2814, 0xffffffff);
	REG_WR(bp, 0x3820, 0xffffffff);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_CONTROL_5,
			   (PXPCS_TL_CONTROL_5_ERR_UNSPPORT1 |
				PXPCS_TL_CONTROL_5_ERR_UNSPPORT));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC345_STAT,
			   (PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT4 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT3 |
				PXPCS_TL_FUNC345_STAT_ERR_UNSPPORT2));
		REG_WR(bp, PCICFG_OFFSET + PXPCS_TL_FUNC678_STAT,
			   (PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT7 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT6 |
				PXPCS_TL_FUNC678_STAT_ERR_UNSPPORT5));
	}

	bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
	bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);

	bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
	if (!CHIP_IS_E1(bp)) {
		REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_MF(bp));
		REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_MF(bp));
	}
	if (CHIP_IS_E2(bp)) {
		/* Bit-map indicating which L2 hdrs may appear after the
		   basic Ethernet header */
		REG_WR(bp, NIG_REG_P0_HDRS_AFTER_BASIC, (IS_MF(bp) ? 7 : 6));
	}

	if (CHIP_REV_IS_SLOW(bp))
		msleep(200);

	/* finish CFC init */
	val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC LL_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC AC_INIT failed\n");
		return -EBUSY;
	}
	val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
	if (val != 1) {
		BNX2X_ERR("CFC CAM_INIT failed\n");
		return -EBUSY;
	}
	REG_WR(bp, CFC_REG_DEBUG0, 0);

	if (CHIP_IS_E1(bp)) {
		/* read NIG statistic
		   to see if this is our first up since powerup */
		bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
		val = *bnx2x_sp(bp, wb_data[0]);

		/* do internal memory self test */
		if ((val == 0) && bnx2x_int_mem_test(bp)) {
			BNX2X_ERR("internal mem self test failed\n");
			return -EBUSY;
		}
	}

	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						bp->common.shmem_base,
						bp->common.shmem2_base);

	bnx2x_setup_fan_failure_detection(bp);

	/* clear PXP2 attentions */
	REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);

	enable_blocks_attention(bp);
	if (CHIP_PARITY_SUPPORTED(bp))
		enable_blocks_parity(bp);

	if (!BP_NOMCP(bp)) {
		/* In E2 2-PORT mode, the same ext phy is used for both paths */
		if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON_CHIP) ||
		    CHIP_IS_E1x(bp)) {
			u32 shmem_base[2], shmem2_base[2];
			shmem_base[0] = bp->common.shmem_base;
			shmem2_base[0] = bp->common.shmem2_base;
			if (CHIP_IS_E2(bp)) {
				shmem_base[1] =
					SHMEM2_RD(bp, other_shmem_base_addr);
				shmem2_base[1] =
					SHMEM2_RD(bp, other_shmem2_base_addr);
			}
			bnx2x_acquire_phy_lock(bp);
			bnx2x_common_init_phy(bp, shmem_base, shmem2_base,
					      bp->common.chip_id);
			bnx2x_release_phy_lock(bp);
		}
	} else
		BNX2X_ERR("Bootcode is missing - can not initialize link\n");

	return 0;
}
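
/*
 * End of the COMMON stage: everything above runs once per chip (per
 * path on E2), while the PORT and FUNCTION stages that follow are
 * repeated for each port and each PCI function respectively.
 */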
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005244
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005245static int bnx2x_init_hw_port(struct bnx2x *bp)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005246{
5247 int port = BP_PORT(bp);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005248 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
Eilon Greenstein1c063282009-02-12 08:36:43 +00005249 u32 low, high;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005250 u32 val;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005251
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005252 DP(BNX2X_MSG_MCP, "starting port init port %d\n", port);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07005253
5254 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005255
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005256 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005257 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
Eilon Greensteinca003922009-08-12 22:53:28 -07005258
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005259 /* Timers bug workaround: disables the pf_master bit in pglue at
5260 * common phase, we need to enable it here before any dmae access are
5261 * attempted. Therefore we manually added the enable-master to the
5262 * port phase (it also happens in the function phase)
5263 */
5264 if (CHIP_IS_E2(bp))
5265 REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
5266
Eilon Greensteinca003922009-08-12 22:53:28 -07005267 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5268 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5269 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005270 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005271
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005272 /* QM cid (connection) count */
5273 bnx2x_qm_init_cid_count(bp, bp->qm_cid_count, INITOP_SET);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005274
Dmitry Kravkov523224a2010-10-06 03:23:26 +00005275#ifdef BCM_CNIC
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005276 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
Michael Chan37b091b2009-10-10 13:46:55 +00005277 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
5278 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02005279#endif
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00005280
Vladislav Zolotarov94a78b72009-04-27 03:27:43 -07005281 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005282
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005283 if (CHIP_MODE_IS_4_PORT(bp))
5284 bnx2x_init_block(bp, QM_4PORT_BLOCK, init_stage);
Eilon Greenstein1c063282009-02-12 08:36:43 +00005285
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00005286 if (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp)) {
5287 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5288 if (CHIP_REV_IS_SLOW(bp) && CHIP_IS_E1(bp)) {
5289 /* no pause for emulation and FPGA */
5290 low = 0;
5291 high = 513;
5292 } else {
5293 if (IS_MF(bp))
5294 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5295 else if (bp->dev->mtu > 4096) {
5296 if (bp->flags & ONE_PORT_FLAG)
5297 low = 160;
5298 else {
5299 val = bp->dev->mtu;
5300 /* (24*1024 + val*4)/256 */
5301 low = 96 + (val/64) +
5302 ((val % 64) ? 1 : 0);
5303 }
5304 } else
5305 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5306 high = low + 56; /* 14*1024/256 */
5307 }
5308 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5309 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5310 }

	if (CHIP_MODE_IS_4_PORT(bp)) {
		REG_WR(bp, BRB1_REG_PAUSE_0_XOFF_THRESHOLD_0 + port*8, 248);
		REG_WR(bp, BRB1_REG_PAUSE_0_XON_THRESHOLD_0 + port*8, 328);
		REG_WR(bp, (BP_PORT(bp) ? BRB1_REG_MAC_GUARANTIED_1 :
			    BRB1_REG_MAC_GUARANTIED_0), 40);
	}

	bnx2x_init_block(bp, PRS_BLOCK, init_stage);

	bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
	bnx2x_init_block(bp, USDM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSDM_BLOCK, init_stage);

	bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, USEM_BLOCK, init_stage);
	bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
	bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, init_stage);

	bnx2x_init_block(bp, UPB_BLOCK, init_stage);
	bnx2x_init_block(bp, XPB_BLOCK, init_stage);

	bnx2x_init_block(bp, PBF_BLOCK, init_stage);

	if (!CHIP_IS_E2(bp)) {
		/* configure PBF to work without PAUSE mtu 9000 */
		REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);

		/* update threshold */
		REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
		/* update init credit */
		REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);

		/* probe changes */
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
		udelay(50);
		REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
	}
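	/*
	 * Arithmetic note on the PBF values above: both registers are in
	 * 16-byte units, so 9040/16 = 565 covers a 9000-byte frame plus
	 * header slack, and the initial credit comes to 565 + 553 - 22 =
	 * 1096 units; the +553/-22 correction is hardware tuning and is
	 * treated as opaque here.
	 */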

#ifdef BCM_CNIC
	bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
#endif
	bnx2x_init_block(bp, CDU_BLOCK, init_stage);
	bnx2x_init_block(bp, CFC_BLOCK, init_stage);

	if (CHIP_IS_E1(bp)) {
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
	}
	bnx2x_init_block(bp, HC_BLOCK, init_stage);

	bnx2x_init_block(bp, IGU_BLOCK, init_stage);

	bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
	/* init aeu_mask_attn_func_0/1:
	 * - SF mode: bits 3-7 are masked, only bits 0-2 are in use
	 * - MF mode: bit 3 is masked, bits 0-2 are in use as in SF,
	 *   bits 4-7 are used for "per vn group attention"
	 */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
	       (IS_MF(bp) ? 0xF7 : 0x7));

	bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
	bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
	bnx2x_init_block(bp, DBU_BLOCK, init_stage);
	bnx2x_init_block(bp, DBG_BLOCK, init_stage);

	bnx2x_init_block(bp, NIG_BLOCK, init_stage);

	REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);

	if (!CHIP_IS_E1(bp)) {
		/* 0x2 disable mf_ov, 0x1 enable */
		REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
		       (IS_MF(bp) ? 0x1 : 0x2));

		if (CHIP_IS_E2(bp)) {
			val = 0;
			switch (bp->mf_mode) {
			case MULTI_FUNCTION_SD:
				val = 1;
				break;
			case MULTI_FUNCTION_SI:
				val = 2;
				break;
			}

			REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_CLS_TYPE :
						  NIG_REG_LLH0_CLS_TYPE), val);
		}
		{
			REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
			REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
			REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
		}
	}

	bnx2x_init_block(bp, MCP_BLOCK, init_stage);
	bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
	bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
						       bp->common.shmem_base,
						       bp->common.shmem2_base);
	if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
				      bp->common.shmem2_base, port)) {
		u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				       MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
		val = REG_RD(bp, reg_addr);
		val |= AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_addr, val);
	}
	bnx2x__link_reset(bp);

	return 0;
}

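/* Each ILT entry occupies 8 bytes, hence the index*8 stride below; the DMA
 * address is written as the two halves produced by ONCHIP_ADDR1() and
 * ONCHIP_ADDR2() through the wide-bus helper, and post-E1 chips expose the
 * table at a different ("_B0") register offset.
 */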
static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
{
	int reg;

	if (CHIP_IS_E1(bp))
		reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
	else
		reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;

	bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
}

static inline void bnx2x_igu_clear_sb(struct bnx2x *bp, u8 idu_sb_id)
{
	bnx2x_igu_clear_sb_gen(bp, idu_sb_id, true /*PF*/);
}

static inline void bnx2x_clear_func_ilt(struct bnx2x *bp, u32 func)
{
	u32 i, base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static int bnx2x_init_hw_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 cdu_ilt_start;
	u32 addr, val;
	u32 main_mem_base, main_mem_size, main_mem_prty_clr;
	int i, main_mem_width;

	DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);

	/* set MSI reconfigure capability */
	if (bp->common.int_block == INT_BLOCK_HC) {
		addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
		val = REG_RD(bp, addr);
		val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
		REG_WR(bp, addr, val);
	}

	cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start;

	for (i = 0; i < L2_ILT_LINES(bp); i++) {
		ilt->lines[cdu_ilt_start + i].page =
			bp->context.vcxt + (ILT_PAGE_CIDS * i);
		ilt->lines[cdu_ilt_start + i].page_mapping =
			bp->context.cxt_mapping + (CDU_ILT_PAGE_SZ * i);
		/* cdu ilt pages are allocated manually so there's no need
		 * to set the size
		 */
	}
	bnx2x_ilt_init_op(bp, INITOP_SET);
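	/*
	 * The loop above makes ILT line cdu_ilt_start + i point at the i-th
	 * CDU_ILT_PAGE_SZ chunk of the context array, so the context of a
	 * connection with cid c is reachable through line
	 * cdu_ilt_start + c / ILT_PAGE_CIDS.
	 */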

#ifdef BCM_CNIC
	bnx2x_src_init_t2(bp, bp->t2, bp->t2_mapping, SRC_CONN_NUM);

	/* T1 hash bits value determines the T1 number of entries */
	REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS);
#endif

#ifndef BCM_CNIC
	/* set NIC mode */
	REG_WR(bp, PRS_REG_NIC_MODE, 1);
#endif  /* BCM_CNIC */

	if (CHIP_IS_E2(bp)) {
		u32 pf_conf = IGU_PF_CONF_FUNC_EN;

		/* Turn on a single ISR mode in IGU if driver is going to use
		 * INT#x or MSI
		 */
		if (!(bp->flags & USING_MSIX_FLAG))
			pf_conf |= IGU_PF_CONF_SINGLE_ISR_EN;
		/*
		 * Timers bug workaround: function init part.
		 * We need to wait 20 msec after initializing ILT to make
		 * sure there are no requests in one of the PXP internal
		 * queues with "old" ILT addresses.
		 */
		msleep(20);
		/*
		 * Master enable - needed because WB DMAE writes are
		 * performed before this register is re-initialized as
		 * part of the regular function init.
		 */
		REG_WR(bp, PGLUE_B_REG_INTERNAL_PFID_ENABLE_MASTER, 1);
		/* Enable the function in IGU */
		REG_WR(bp, IGU_REG_PF_CONFIGURATION, pf_conf);
	}

	bp->dmae_ready = 1;

	bnx2x_init_block(bp, PGLUE_B_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, PGLUE_B_REG_WAS_ERROR_PF_7_0_CLR, func);

	bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp)) {
		REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
		REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_PATH_ID_OFFSET,
		       BP_PATH(bp));
	}

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, XSEM_4PORT_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, QM_REG_PF_EN, 1);

	bnx2x_init_block(bp, QM_BLOCK, FUNC0_STAGE + func);

	if (CHIP_MODE_IS_4_PORT(bp))
		bnx2x_init_block(bp, QM_4PORT_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, TIMERS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DQ_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, BRB1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PRS_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, TSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, CSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, USDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XSDM_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, UPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, XPB_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, PBF_BLOCK, FUNC0_STAGE + func);
	if (CHIP_IS_E2(bp))
		REG_WR(bp, PBF_REG_DISABLE_PF, 0);

	bnx2x_init_block(bp, CDU_BLOCK, FUNC0_STAGE + func);

	bnx2x_init_block(bp, CFC_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E2(bp))
		REG_WR(bp, CFC_REG_WEAK_ENABLE_PF, 1);

	if (IS_MF(bp)) {
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
		REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->mf_ov);
	}

	bnx2x_init_block(bp, MISC_AEU_BLOCK, FUNC0_STAGE + func);

	/* HC init per function */
	if (bp->common.int_block == INT_BLOCK_HC) {
		if (CHIP_IS_E1H(bp)) {
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

			REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
			REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
		}
		bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);

	} else {
		int num_segs, sb_idx, prod_offset;

		REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);

		if (CHIP_IS_E2(bp)) {
			REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
			REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
		}

		bnx2x_init_block(bp, IGU_BLOCK, FUNC0_STAGE + func);

		if (CHIP_IS_E2(bp)) {
			int dsb_idx = 0;
			/*
			 * Producer memory:
			 * E2 mode: addresses 0-135 match the mapping memory;
			 * 136 - PF0 default prod; 137 - PF1 default prod;
			 * 138 - PF2 default prod; 139 - PF3 default prod;
			 * 140 - PF0 attn prod;    141 - PF1 attn prod;
			 * 142 - PF2 attn prod;    143 - PF3 attn prod;
			 * 144-147 reserved.
			 *
			 * E1.5 mode - in backward compatible mode:
			 * for non-default SBs, each even line in the memory
			 * holds the U producer and each odd line holds
			 * the C producer. The first 128 producers are for
			 * NDSBs (PF0 - 0-31; PF1 - 32-63 and so on). The
			 * last 20 producers are for the DSBs of each PF.
			 * Each PF has five segments (the order inside each
			 * segment is PF0; PF1; PF2; PF3) - 128-131 U prods;
			 * 132-135 C prods; 136-139 X prods; 140-143 T prods;
			 * 144-147 attn prods.
			 */
			/* non-default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_NDSB_NUM_SEGS : IGU_NORM_NDSB_NUM_SEGS;
			for (sb_idx = 0; sb_idx < bp->igu_sb_cnt; sb_idx++) {
				prod_offset = (bp->igu_base_sb + sb_idx) *
					num_segs;

				for (i = 0; i < num_segs; i++) {
					addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i) * 4;
					REG_WR(bp, addr, 0);
				}
				/* send consumer update with value 0 */
				bnx2x_ack_sb(bp, bp->igu_base_sb + sb_idx,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_igu_clear_sb(bp,
						   bp->igu_base_sb + sb_idx);
			}

			/* default-status-blocks */
			num_segs = CHIP_INT_MODE_IS_BC(bp) ?
				IGU_BC_DSB_NUM_SEGS : IGU_NORM_DSB_NUM_SEGS;

			if (CHIP_MODE_IS_4_PORT(bp))
				dsb_idx = BP_FUNC(bp);
			else
				dsb_idx = BP_E1HVN(bp);

			prod_offset = (CHIP_INT_MODE_IS_BC(bp) ?
				       IGU_BC_BASE_DSB_PROD + dsb_idx :
				       IGU_NORM_BASE_DSB_PROD + dsb_idx);
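
			/*
			 * In the loop below the DSB producer slots of the
			 * four vn's/PFs are interleaved segment by segment,
			 * so stepping by E1HVN_MAX touches exactly one slot
			 * per segment - the one belonging to this function.
			 */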
			for (i = 0; i < (num_segs * E1HVN_MAX);
			     i += E1HVN_MAX) {
				addr = IGU_REG_PROD_CONS_MEMORY +
							(prod_offset + i)*4;
				REG_WR(bp, addr, 0);
			}
			/* send consumer update with 0 */
			if (CHIP_INT_MODE_IS_BC(bp)) {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     CSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     XSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     TSTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			} else {
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     USTORM_ID, 0, IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, bp->igu_dsb_id,
					     ATTENTION_ID, 0, IGU_INT_NOP, 1);
			}
			bnx2x_igu_clear_sb(bp, bp->igu_dsb_id);

			/* !!! these should become driver const once
			   rf-tool supports split-68 const */
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_INT_BEFORE_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_LSB, 0);
			REG_WR(bp, IGU_REG_SB_MASK_MSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_LSB, 0);
			REG_WR(bp, IGU_REG_PBA_STATUS_MSB, 0);
		}
	}

	/* Reset PCIE errors for debug */
	REG_WR(bp, 0x2114, 0xffffffff);
	REG_WR(bp, 0x2120, 0xffffffff);

	bnx2x_init_block(bp, EMAC0_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, EMAC1_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBU_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DBG_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, MCP_BLOCK, FUNC0_STAGE + func);
	bnx2x_init_block(bp, DMAE_BLOCK, FUNC0_STAGE + func);

	if (CHIP_IS_E1x(bp)) {
		main_mem_size = HC_REG_MAIN_MEMORY_SIZE / 2; /* dwords */
		main_mem_base = HC_REG_MAIN_MEMORY +
				BP_PORT(bp) * (main_mem_size * 4);
		main_mem_prty_clr = HC_REG_HC_PRTY_STS_CLR;
		main_mem_width = 8;

		val = REG_RD(bp, main_mem_prty_clr);
		if (val)
			DP(BNX2X_MSG_MCP, "Parity errors in HC block "
					  "during function init (0x%x)!\n",
			   val);

		/* Clear "false" parity errors in MSI-X table */
		for (i = main_mem_base;
		     i < main_mem_base + main_mem_size * 4;
		     i += main_mem_width) {
			bnx2x_read_dmae(bp, i, main_mem_width / 4);
			bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data),
					 i, main_mem_width / 4);
		}
		/* Clear HC parity attention */
		REG_RD(bp, main_mem_prty_clr);
	}

	bnx2x_phy_probe(&bp->link_params);

	return 0;
}

int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_ABS_FUNC(bp), load_code);

	bp->dmae_ready = 0;
	mutex_init(&bp->dmae_mutex);
	rc = bnx2x_gunzip_init(bp);
	if (rc)
		return rc;

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
	case FW_MSG_CODE_DRV_LOAD_COMMON_CHIP:
		rc = bnx2x_init_hw_common(bp, load_code);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_hw_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_hw_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int mb_idx = BP_FW_MB_IDX(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[mb_idx].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
	}

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}

void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			dma_free_coherent(&bp->pdev->dev, size, (void *)x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			kfree((void *)x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e2_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk.e1x_sb),
				       bnx2x_fp(bp, i, status_blk_mapping),
				       sizeof(struct host_hc_status_block_e1x));
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_sp_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

	BNX2X_PCI_FREE(bp->context.vcxt, bp->context.cxt_mapping,
		       bp->context.size);

	bnx2x_ilt_mem_op(bp, ILT_MEMOP_FREE);

	BNX2X_FREE(bp->ilt->lines);

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_FREE(bp->cnic_sb.e2_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_FREE(bp->cnic_sb.e1x_sb, bp->cnic_sb_mapping,
			       sizeof(struct host_hc_status_block_e1x));

	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

	BNX2X_PCI_FREE(bp->eq_ring, bp->eq_mapping,
		       BCM_PAGE_SIZE * NUM_EQ_PAGES);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}

static inline void set_sb_shortcuts(struct bnx2x *bp, int index)
{
	union host_hc_status_block status_blk = bnx2x_fp(bp, index, status_blk);

	if (CHIP_IS_E2(bp)) {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e2_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e2_sb->sb.running_index;
	} else {
		bnx2x_fp(bp, index, sb_index_values) =
			(__le16 *)status_blk.e1x_sb->sb.index_values;
		bnx2x_fp(bp, index, sb_running_index) =
			(__le16 *)status_blk.e1x_sb->sb.running_index;
	}
}

int bnx2x_alloc_mem(struct bnx2x *bp)
{
#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = kzalloc(size, GFP_KERNEL); \
		if (x == NULL) \
			goto alloc_mem_err; \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		union host_hc_status_block *sb = &bnx2x_fp(bp, i, status_blk);

		bnx2x_fp(bp, i, bp) = bp;
		/* status blocks */
		if (CHIP_IS_E2(bp))
			BNX2X_PCI_ALLOC(sb->e2_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e2));
		else
			BNX2X_PCI_ALLOC(sb->e1x_sb,
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_hc_status_block_e1x));

		set_sb_shortcuts(bp, i);
	}
	/* Rx */
	for_each_queue(bp, i) {

		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_queue(bp, i) {

		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

#ifdef BCM_CNIC
	if (CHIP_IS_E2(bp))
		BNX2X_PCI_ALLOC(bp->cnic_sb.e2_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e2));
	else
		BNX2X_PCI_ALLOC(bp->cnic_sb.e1x_sb, &bp->cnic_sb_mapping,
				sizeof(struct host_hc_status_block_e1x));

	/* allocate searcher T2 table */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, SRC_T2_SZ);
#endif

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_sp_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

	bp->context.size = sizeof(union cdu_context) * bp->l2_cid_count;

	BNX2X_PCI_ALLOC(bp->context.vcxt, &bp->context.cxt_mapping,
			bp->context.size);

	BNX2X_ALLOC(bp->ilt->lines, sizeof(struct ilt_line) * ILT_MAX_LINES);

	if (bnx2x_ilt_mem_op(bp, ILT_MEMOP_ALLOC))
		goto alloc_mem_err;

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	/* EQ */
	BNX2X_PCI_ALLOC(bp->eq_ring, &bp->eq_mapping,
			BCM_PAGE_SIZE * NUM_EQ_PAGES);
	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}

/*
 * Init service functions
 */
int bnx2x_func_start(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_START, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_FUNC_STARTED, 0, &(bp->state),
				 WAIT_RAMROD_COMMON);
}

int bnx2x_func_stop(struct bnx2x *bp)
{
	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_FUNCTION_STOP, 0, 0, 0, 1);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_STATE_CLOSING_WAIT4_UNLOAD,
				 0, &(bp->state), WAIT_RAMROD_COMMON);
}

/**
 * Sets a MAC in a CAM for a few L2 Clients for E1x chips
 *
 * @param bp		driver descriptor
 * @param set		set or clear an entry (1 or 0)
 * @param mac		pointer to a buffer containing a MAC
 * @param cl_bit_vec	bit vector of clients to register a MAC for
 * @param cam_offset	offset in a CAM to use
 * @param is_bcast	is the set MAC a broadcast address (for E1 only)
 */
static void bnx2x_set_mac_addr_gen(struct bnx2x *bp, int set, u8 *mac,
				   u32 cl_bit_vec, u8 cam_offset,
				   u8 is_bcast)
{
	struct mac_configuration_cmd *config =
		(struct mac_configuration_cmd *)bnx2x_sp(bp, mac_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	config->hdr.length = 1;
	config->hdr.offset = cam_offset;
	config->hdr.client_id = 0xff;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&mac[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&mac[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&mac[4]);
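	/* Byte-order note (illustrative): the CAM takes the MAC as three
	 * big-endian 16-bit words. On a little-endian host the address
	 * 00:11:22:33:44:55 loads as 0x1100/0x3322/0x5544, and swab16()
	 * turns it into 0x0011/0x2233/0x4455 as the hardware expects.
	 */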
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(cl_bit_vec);
	config->config_table[0].vlan_id = 0;
	config->config_table[0].pf_id = BP_FUNC(bp);
	if (set)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);
	else
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	if (is_bcast)
		SET_FLAG(config->config_table[0].flags,
			 MAC_CONFIGURATION_ENTRY_BROADCAST, 1);

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  PF_ID %d  CLID mask %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, BP_FUNC(bp), cl_bit_vec);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
		      int *state_p, int flags)
{
	/* can take a while if any port is running */
	int cnt = 5000;
	u8 poll = flags & WAIT_RAMROD_POLL;
	u8 common = flags & WAIT_RAMROD_COMMON;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

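	/* Each pass of the loop below sleeps for 1 ms, so cnt = 5000 bounds
	 * the wait at roughly five seconds (longer under scheduling delays).
	 */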
	might_sleep();
	while (cnt--) {
		if (poll) {
			if (common)
				bnx2x_eq_int(bp);
			else {
				bnx2x_rx_int(bp->fp, 10);
				/* if index is different from 0
				 * the reply for some commands will
				 * be on the non default queue
				 */
				if (idx)
					bnx2x_rx_int(&bp->fp[idx], 10);
			}
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);

		if (bp->panic)
			return -EIO;
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}

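/*
 * Worked example (assuming E1H_FUNC_MAX is 8): on an E1H chip the CAM lines
 * of a given logical entry are grouped by entry, so entry rel_offset for
 * function 3 lands at 8*rel_offset + 3; on later chips each function/vn
 * instead owns a 32-line window that rel_offset indexes into.
 */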
u8 bnx2x_e1h_cam_offset(struct bnx2x *bp, u8 rel_offset)
{
	if (CHIP_IS_E1H(bp))
		return E1H_FUNC_MAX * rel_offset + BP_FUNC(bp);
	else if (CHIP_MODE_IS_4_PORT(bp))
		return BP_FUNC(bp) * 32 + rel_offset;
	else
		return BP_VN(bp) * 32 + rel_offset;
}

void bnx2x_set_eth_mac(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? (BP_PORT(bp) ? 32 : 0) :
			 bnx2x_e1h_cam_offset(bp, CAM_ETH_LINE));

	/* networking MAC */
	bnx2x_set_mac_addr_gen(bp, set, bp->dev->dev_addr,
			       (1 << bp->fp->cl_id), cam_offset, 0);

	if (CHIP_IS_E1(bp)) {
		/* broadcast MAC */
		u8 bcast[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
		bnx2x_set_mac_addr_gen(bp, set, bcast, 0, cam_offset + 1, 1);
	}
}

static void bnx2x_set_e1_mc_list(struct bnx2x *bp, u8 offset)
{
	int i = 0, old;
	struct net_device *dev = bp->dev;
	struct netdev_hw_addr *ha;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);

	netdev_for_each_mc_addr(ha, dev) {
		/* copy mac */
		config_cmd->config_table[i].msb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[0]);
		config_cmd->config_table[i].middle_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[2]);
		config_cmd->config_table[i].lsb_mac_addr =
			swab16(*(u16 *)&bnx2x_mc_addr(ha)[4]);

		config_cmd->config_table[i].vlan_id = 0;
		config_cmd->config_table[i].pf_id = BP_FUNC(bp);
		config_cmd->config_table[i].clients_bit_vector =
			cpu_to_le32(1 << BP_L_ID(bp));

		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_SET);

		DP(NETIF_MSG_IFUP,
		   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
		   config_cmd->config_table[i].msb_mac_addr,
		   config_cmd->config_table[i].middle_mac_addr,
		   config_cmd->config_table[i].lsb_mac_addr);
		i++;
	}
	old = config_cmd->hdr.length;
	if (old > i) {
		for (; i < old; i++) {
			if (CAM_IS_INVALID(config_cmd->config_table[i])) {
				/* already invalidated */
				break;
			}
			/* invalidate */
			SET_FLAG(config_cmd->config_table[i].flags,
				 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
				 T_ETH_MAC_COMMAND_INVALIDATE);
		}
	}

	config_cmd->hdr.length = i;
	config_cmd->hdr.offset = offset;
	config_cmd->hdr.client_id = 0xff;
	config_cmd->hdr.reserved1 = 0;

	bp->set_mac_pending = 1;
	smp_wmb();

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);
}

static void bnx2x_invlidate_e1_mc_list(struct bnx2x *bp)
{
	int i;
	struct mac_configuration_cmd *config_cmd = bnx2x_sp(bp, mcast_config);
	dma_addr_t config_cmd_map = bnx2x_sp_mapping(bp, mcast_config);
	int ramrod_flags = WAIT_RAMROD_COMMON;

	bp->set_mac_pending = 1;
	smp_wmb();

	for (i = 0; i < config_cmd->hdr.length; i++)
		SET_FLAG(config_cmd->config_table[i].flags,
			 MAC_CONFIGURATION_ENTRY_ACTION_TYPE,
			 T_ETH_MAC_COMMAND_INVALIDATE);

	bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_SET_MAC, 0,
		      U64_HI(config_cmd_map), U64_LO(config_cmd_map), 1);

	/* Wait for a completion */
	bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, ramrod_flags);
}

#ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
{
	u8 cam_offset = (CHIP_IS_E1(bp) ? ((BP_PORT(bp) ? 32 : 0) + 2) :
			 bnx2x_e1h_cam_offset(bp, CAM_ISCSI_ETH_LINE));
	u32 iscsi_l2_cl_id = BNX2X_ISCSI_ETH_CL_ID;
	u32 cl_bit_vec = (1 << iscsi_l2_cl_id);

	/* Send a SET_MAC ramrod */
	bnx2x_set_mac_addr_gen(bp, set, bp->iscsi_mac, cl_bit_vec,
			       cam_offset, 0);
	return 0;
}
#endif

static void bnx2x_fill_cl_init_data(struct bnx2x *bp,
				    struct bnx2x_client_init_params *params,
				    u8 activate,
				    struct client_init_ramrod_data *data)
{
	/* Clear the buffer */
	memset(data, 0, sizeof(*data));

	/* general */
	data->general.client_id = params->rxq_params.cl_id;
	data->general.statistics_counter_id = params->rxq_params.stat_id;
	data->general.statistics_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_STATS) ? 1 : 0;
	data->general.activate_flg = activate;
	data->general.sp_client_id = params->rxq_params.spcl_id;

	/* Rx data */
	data->rx.tpa_en_flg =
		(params->rxq_params.flags & QUEUE_FLG_TPA) ? 1 : 0;
	data->rx.vmqueue_mode_en_flg = 0;
	data->rx.cache_line_alignment_log_size =
		params->rxq_params.cache_line_log;
	data->rx.enable_dynamic_hc =
		(params->rxq_params.flags & QUEUE_FLG_DHC) ? 1 : 0;
	data->rx.max_sges_for_packet = params->rxq_params.max_sges_pkt;
	data->rx.client_qzone_id = params->rxq_params.cl_qzone_id;
	data->rx.max_agg_size = params->rxq_params.tpa_agg_sz;

	/* We don't set drop flags */
	data->rx.drop_ip_cs_err_flg = 0;
	data->rx.drop_tcp_cs_err_flg = 0;
	data->rx.drop_ttl0_flg = 0;
	data->rx.drop_udp_cs_err_flg = 0;

	data->rx.inner_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_VLAN) ? 1 : 0;
	data->rx.outer_vlan_removal_enable_flg =
		(params->rxq_params.flags & QUEUE_FLG_OV) ? 1 : 0;
	data->rx.status_block_id = params->rxq_params.fw_sb_id;
	data->rx.rx_sb_index_number = params->rxq_params.sb_cq_index;
	data->rx.bd_buff_size = cpu_to_le16(params->rxq_params.buf_sz);
	data->rx.sge_buff_size = cpu_to_le16(params->rxq_params.sge_buf_sz);
	data->rx.mtu = cpu_to_le16(params->rxq_params.mtu);
	data->rx.bd_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.dscr_map));
	data->rx.bd_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.dscr_map));
	data->rx.sge_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.sge_map));
	data->rx.sge_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.sge_map));
	data->rx.cqe_page_base.lo =
		cpu_to_le32(U64_LO(params->rxq_params.rcq_map));
	data->rx.cqe_page_base.hi =
		cpu_to_le32(U64_HI(params->rxq_params.rcq_map));
	data->rx.is_leading_rss =
		(params->ramrod_params.flags & CLIENT_IS_LEADING_RSS) ? 1 : 0;
	data->rx.is_approx_mcast = data->rx.is_leading_rss;

	/* Tx data */
	data->tx.enforce_security_flg = 0; /* VF specific */
	data->tx.tx_status_block_id = params->txq_params.fw_sb_id;
	data->tx.tx_sb_index_number = params->txq_params.sb_cq_index;
	data->tx.mtu = 0; /* VF specific */
	data->tx.tx_bd_page_base.lo =
		cpu_to_le32(U64_LO(params->txq_params.dscr_map));
	data->tx.tx_bd_page_base.hi =
		cpu_to_le32(U64_HI(params->txq_params.dscr_map));

	/* flow control data */
	data->fc.cqe_pause_thr_low = cpu_to_le16(params->pause.rcq_th_lo);
	data->fc.cqe_pause_thr_high = cpu_to_le16(params->pause.rcq_th_hi);
	data->fc.bd_pause_thr_low = cpu_to_le16(params->pause.bd_th_lo);
	data->fc.bd_pause_thr_high = cpu_to_le16(params->pause.bd_th_hi);
	data->fc.sge_pause_thr_low = cpu_to_le16(params->pause.sge_th_lo);
	data->fc.sge_pause_thr_high = cpu_to_le16(params->pause.sge_th_hi);
	data->fc.rx_cos_mask = cpu_to_le16(params->pause.pri_map);

	data->fc.safc_group_num = params->txq_params.cos;
	data->fc.safc_group_en_flg =
		(params->txq_params.flags & QUEUE_FLG_COS) ? 1 : 0;
	data->fc.traffic_type = LLFC_TRAFFIC_TYPE_NW;
}

static inline void bnx2x_set_ctx_validation(struct eth_context *cxt, u32 cid)
{
	/* ustorm cxt validation */
	cxt->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_UCM_AG,
				       ETH_CONNECTION_TYPE);
	/* xcontext validation */
	cxt->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(cid, CDU_REGION_NUMBER_XCM_AG,
				       ETH_CONNECTION_TYPE);
}

int bnx2x_setup_fw_client(struct bnx2x *bp,
			  struct bnx2x_client_init_params *params,
			  u8 activate,
			  struct client_init_ramrod_data *data,
			  dma_addr_t data_mapping)
{
	u16 hc_usec;
	int ramrod = RAMROD_CMD_ID_ETH_CLIENT_SETUP;
	int ramrod_flags = 0, rc;

	/* HC and context validation values */
	hc_usec = params->txq_params.hc_rate ?
		1000000 / params->txq_params.hc_rate : 0;
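	/* e.g. an hc_rate of 20000 interrupts/sec becomes a 50 usec
	 * coalescing period; a rate of 0 yields 0 usec, with coalescing
	 * switched off through the QUEUE_FLG_HC test below.
	 */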
	bnx2x_update_coalesce_sb_index(bp,
			params->txq_params.fw_sb_id,
			params->txq_params.sb_cq_index,
			!(params->txq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	*(params->ramrod_params.pstate) = BNX2X_FP_STATE_OPENING;

	hc_usec = params->rxq_params.hc_rate ?
		1000000 / params->rxq_params.hc_rate : 0;
	bnx2x_update_coalesce_sb_index(bp,
			params->rxq_params.fw_sb_id,
			params->rxq_params.sb_cq_index,
			!(params->rxq_params.flags & QUEUE_FLG_HC),
			hc_usec);

	bnx2x_set_ctx_validation(params->rxq_params.cxt,
				 params->rxq_params.cid);

	/* zero stats */
	if (params->txq_params.flags & QUEUE_FLG_STATS)
		storm_memset_xstats_zero(bp, BP_PORT(bp),
					 params->txq_params.stat_id);

	if (params->rxq_params.flags & QUEUE_FLG_STATS) {
		storm_memset_ustats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
		storm_memset_tstats_zero(bp, BP_PORT(bp),
					 params->rxq_params.stat_id);
	}

	/* Fill the ramrod data */
	bnx2x_fill_cl_init_data(bp, params, activate, data);

	/* SETUP ramrod.
	 *
	 * bnx2x_sp_post() takes a spin_lock, so no explicit memory
	 * barrier other than mmiowb() is needed to impose a proper
	 * ordering of memory operations.
	 */
	mmiowb();

	bnx2x_sp_post(bp, ramrod, params->ramrod_params.cid,
		      U64_HI(data_mapping), U64_LO(data_mapping), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, params->ramrod_params.state,
			       params->ramrod_params.index,
			       params->ramrod_params.pstate,
			       ramrod_flags);
	return rc;
}

/**
 * Configure interrupt mode according to the current configuration.
 * In case of MSI-X it will also try to enable MSI-X.
 *
 * @param bp
 *
 * @return int
 */
static int __devinit bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (bp->int_mode) {
	case INT_MODE_MSI:
		bnx2x_enable_msi(bp);
		/* falling through... */
	case INT_MODE_INTx:
		bp->num_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;
	default:
		/* Set number of queues according to bp->multi_mode value */
		bnx2x_set_num_queues(bp);

		DP(NETIF_MSG_IFUP, "set number of queues to %d\n",
		   bp->num_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fall back to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				DP(NETIF_MSG_IFUP,
				   "Multi requested but failed to "
				   "enable MSI-X (%d), "
				   "set number of queues to %d\n",
				   bp->num_queues, 1);
			bp->num_queues = 1;

			if (!(bp->flags & DISABLE_MSI_FLAG))
				bnx2x_enable_msi(bp);
		}

		break;
	}

	return rc;
}

/* must be called prior to any HW initializations */
static inline u16 bnx2x_cid_ilt_lines(struct bnx2x *bp)
{
	return L2_ILT_LINES(bp);
}

void bnx2x_ilt_set_info(struct bnx2x *bp)
{
	struct ilt_client_info *ilt_client;
	struct bnx2x_ilt *ilt = BP_ILT(bp);
	u16 line = 0;

	ilt->start_line = FUNC_ILT_BASE(BP_FUNC(bp));
	DP(BNX2X_MSG_SP, "ilt starts at line %d\n", ilt->start_line);

	/* CDU */
	ilt_client = &ilt->clients[ILT_CLIENT_CDU];
	ilt_client->client_num = ILT_CLIENT_CDU;
	ilt_client->page_size = CDU_ILT_PAGE_SZ;
	ilt_client->flags = ILT_CLIENT_SKIP_MEM;
	ilt_client->start = line;
	line += L2_ILT_LINES(bp);
#ifdef BCM_CNIC
	line += CNIC_ILT_LINES;
#endif
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[CDU]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

	/* QM */
	if (QM_INIT(bp->qm_cid_count)) {
		ilt_client = &ilt->clients[ILT_CLIENT_QM];
		ilt_client->client_num = ILT_CLIENT_QM;
		ilt_client->page_size = QM_ILT_PAGE_SZ;
		ilt_client->flags = 0;
		ilt_client->start = line;

		/* 4 bytes for each cid */
		line += DIV_ROUND_UP(bp->qm_cid_count * QM_QUEUES_PER_FUNC * 4,
				     QM_ILT_PAGE_SZ);
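		/*
		 * e.g. with qm_cid_count = 1024, QM_QUEUES_PER_FUNC = 16 and
		 * a 4 KB QM_ILT_PAGE_SZ the computation above comes to
		 * DIV_ROUND_UP(1024 * 16 * 4, 4096) = 16 ILT lines (the
		 * values here are illustrative only).
		 */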

		ilt_client->end = line - 1;

		DP(BNX2X_MSG_SP, "ilt client[QM]: start %d, end %d, psz 0x%x, "
				 "flags 0x%x, hw psz %d\n",
		   ilt_client->start,
		   ilt_client->end,
		   ilt_client->page_size,
		   ilt_client->flags,
		   ilog2(ilt_client->page_size >> 12));
	}
	/* SRC */
	ilt_client = &ilt->clients[ILT_CLIENT_SRC];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_SRC;
	ilt_client->page_size = SRC_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += SRC_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[SRC]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif

	/* TM */
	ilt_client = &ilt->clients[ILT_CLIENT_TM];
#ifdef BCM_CNIC
	ilt_client->client_num = ILT_CLIENT_TM;
	ilt_client->page_size = TM_ILT_PAGE_SZ;
	ilt_client->flags = 0;
	ilt_client->start = line;
	line += TM_ILT_LINES;
	ilt_client->end = line - 1;

	DP(BNX2X_MSG_SP, "ilt client[TM]: start %d, end %d, psz 0x%x, "
			 "flags 0x%x, hw psz %d\n",
	   ilt_client->start,
	   ilt_client->end,
	   ilt_client->page_size,
	   ilt_client->flags,
	   ilog2(ilt_client->page_size >> 12));

#else
	ilt_client->flags = (ILT_CLIENT_SKIP_INIT | ILT_CLIENT_SKIP_MEM);
#endif
}

Dmitry Kravkov523224a2010-10-06 03:23:26 +00006611int bnx2x_setup_client(struct bnx2x *bp, struct bnx2x_fastpath *fp,
6612 int is_leading)
6613{
6614 struct bnx2x_client_init_params params = { {0} };
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006615 int rc;
6616
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006617 bnx2x_ack_sb(bp, fp->igu_sb_id, USTORM_ID, 0,
6618 IGU_INT_ENABLE, 0);
6619
6620 params.ramrod_params.pstate = &fp->state;
6621 params.ramrod_params.state = BNX2X_FP_STATE_OPEN;
6622 params.ramrod_params.index = fp->index;
6623 params.ramrod_params.cid = fp->cid;
6624
6625 if (is_leading)
6626 params.ramrod_params.flags |= CLIENT_IS_LEADING_RSS;
6627
6628 bnx2x_pf_rx_cl_prep(bp, fp, &params.pause, &params.rxq_params);
6629
6630 bnx2x_pf_tx_cl_prep(bp, fp, &params.txq_params);
6631
6632 rc = bnx2x_setup_fw_client(bp, &params, 1,
6633 bnx2x_sp(bp, client_init_data),
6634 bnx2x_sp_mapping(bp, client_init_data));
6635 return rc;
6636}
6637
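/*
 * Tear down a FW client in three synchronous steps: HALT the connection,
 * TERMINATE it, then delete its CFC entry, waiting for the matching
 * ramrod completion after each step.
 */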
6638int bnx2x_stop_fw_client(struct bnx2x *bp, struct bnx2x_client_ramrod_params *p)
6639{
6640 int rc;
6641
6642 int poll_flag = p->poll ? WAIT_RAMROD_POLL : 0;
6643
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006644 /* halt the connection */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006645 *p->pstate = BNX2X_FP_STATE_HALTING;
6646 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, p->cid, 0,
6647 p->cl_id, 0);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006648
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006649 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006650 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, p->index,
6651 p->pstate, poll_flag);
Eliezer Tamirc14423f2008-02-28 11:49:42 -08006652 if (rc) /* timeout */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006653 return rc;
6654
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006655 *p->pstate = BNX2X_FP_STATE_TERMINATING;
6656 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_TERMINATE, p->cid, 0,
6657 p->cl_id, 0);
6658 /* Wait for completion */
6659 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_TERMINATED, p->index,
6660 p->pstate, poll_flag);
6661 if (rc) /* timeout */
6662 return rc;
6663
6664
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006665 /* delete cfc entry */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006666 bnx2x_sp_post(bp, RAMROD_CMD_ID_COMMON_CFC_DEL, p->cid, 0, 0, 1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006667
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006668 /* Wait for completion */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006669 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, p->index,
6670 p->pstate, WAIT_RAMROD_COMMON);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006671 return rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006672}
6673
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006674static int bnx2x_stop_client(struct bnx2x *bp, int index)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006675{
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006676 struct bnx2x_client_ramrod_params client_stop = {0};
6677 struct bnx2x_fastpath *fp = &bp->fp[index];
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006678
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006679 client_stop.index = index;
6680 client_stop.cid = fp->cid;
6681 client_stop.cl_id = fp->cl_id;
6682 client_stop.pstate = &(fp->state);
6683 client_stop.poll = 0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006684
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006685 return bnx2x_stop_fw_client(bp, &client_stop);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006686}
6687
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006688
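/*
 * Quiesce one PF: mark the function disabled in all four storms, flag the
 * fastpath and slowpath status blocks as HC_FUNCTION_DISABLED, clear the
 * SPQ data, mask the HC/IGU edges, stop the CNIC timer scan and clear the
 * function's ILT lines.
 */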
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006689static void bnx2x_reset_func(struct bnx2x *bp)
6690{
6691 int port = BP_PORT(bp);
6692 int func = BP_FUNC(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006693 int i;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006694 int pfunc_offset_fp = offsetof(struct hc_sb_data, p_func) +
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006695 (CHIP_IS_E2(bp) ?
6696 offsetof(struct hc_status_block_data_e2, common) :
6697 offsetof(struct hc_status_block_data_e1x, common));
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006698 int pfunc_offset_sp = offsetof(struct hc_sp_status_block_data, p_func);
6699 int pfid_offset = offsetof(struct pci_entity, pf_id);
6700
6701 /* Disable the function in the FW */
6702 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNC_EN_OFFSET(func), 0);
6703 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNC_EN_OFFSET(func), 0);
6704 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNC_EN_OFFSET(func), 0);
6705 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNC_EN_OFFSET(func), 0);
6706
6707 /* FP SBs */
6708 for_each_queue(bp, i) {
6709 struct bnx2x_fastpath *fp = &bp->fp[i];
6710 REG_WR8(bp,
6711 BAR_CSTRORM_INTMEM +
6712 CSTORM_STATUS_BLOCK_DATA_OFFSET(fp->fw_sb_id)
6713 + pfunc_offset_fp + pfid_offset,
6714 HC_FUNCTION_DISABLED);
6715 }
6716
6717 /* SP SB */
6718 REG_WR8(bp,
6719 BAR_CSTRORM_INTMEM +
6720 CSTORM_SP_STATUS_BLOCK_DATA_OFFSET(func) +
6721 pfunc_offset_sp + pfid_offset,
6722 HC_FUNCTION_DISABLED);
6723
6724
6725 for (i = 0; i < XSTORM_SPQ_DATA_SIZE / 4; i++)
6726 REG_WR(bp, BAR_XSTRORM_INTMEM +
6727 XSTORM_SPQ_DATA_OFFSET(func) + i*sizeof(u32), 0);
Eliezer Tamir49d66772008-02-28 11:53:13 -08006728
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006729 /* Configure IGU */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006730 if (bp->common.int_block == INT_BLOCK_HC) {
6731 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6732 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6733 } else {
6734 REG_WR(bp, IGU_REG_LEADING_EDGE_LATCH, 0);
6735 REG_WR(bp, IGU_REG_TRAILING_EDGE_LATCH, 0);
6736 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006737
Michael Chan37b091b2009-10-10 13:46:55 +00006738#ifdef BCM_CNIC
6739 /* Disable Timer scan */
6740 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
6741 /*
6742 * Wait for at least 10ms and up to 2 seconds for the timers scan to
6743 * complete
6744 */
6745 for (i = 0; i < 200; i++) {
6746 msleep(10);
6747 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
6748 break;
6749 }
6750#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006751 /* Clear ILT */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006752 bnx2x_clear_func_ilt(bp, func);
6753
6754 /* Timers bug workaround for E2: if this is vnic-3,
6755 * we need to set the entire ILT range for these timers.
6756 */
6757 if (CHIP_IS_E2(bp) && BP_VN(bp) == 3) {
6758 struct ilt_client_info ilt_cli;
6759 /* use dummy TM client */
6760 memset(&ilt_cli, 0, sizeof(struct ilt_client_info));
6761 ilt_cli.start = 0;
6762 ilt_cli.end = ILT_NUM_PAGE_ENTRIES - 1;
6763 ilt_cli.client_num = ILT_CLIENT_TM;
6764
6765 bnx2x_ilt_boundry_init_op(bp, &ilt_cli, 0, INITOP_CLEAR);
6766 }
6767
6768 /* this assumes that reset_port() was called before reset_func() */
6769 if (CHIP_IS_E2(bp))
6770 bnx2x_pf_disable(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006771
6772 bp->dmae_ready = 0;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006773}
6774
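/*
 * Reset the per-port blocks: mask the NIG interrupt, stop traffic into
 * the BRB, clear the AEU attention mask and check that the BRB has
 * actually drained.
 */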
6775static void bnx2x_reset_port(struct bnx2x *bp)
6776{
6777 int port = BP_PORT(bp);
6778 u32 val;
6779
6780 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6781
6782 /* Do not rcv packets to BRB */
6783 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6784 /* Do not direct rcv packets that are not for MCP to the BRB */
6785 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6786 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6787
6788 /* Configure AEU */
6789 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6790
6791 msleep(100);
6792 /* Check for BRB port occupancy */
6793 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6794 if (val)
6795 DP(NETIF_MSG_IFDOWN,
Eilon Greenstein33471622008-08-13 15:59:08 -07006796 "BRB1 is not empty %d blocks are occupied\n", val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006797
6798 /* TODO: Close Doorbell port? */
6799}
6800
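/*
 * Apply the reset scope negotiated with the MCP: COMMON resets port,
 * function and common blocks; PORT resets port and function; FUNCTION
 * resets the function only.
 */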
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006801static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6802{
6803 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006804 BP_ABS_FUNC(bp), reset_code);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006805
6806 switch (reset_code) {
6807 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6808 bnx2x_reset_port(bp);
6809 bnx2x_reset_func(bp);
6810 bnx2x_reset_common(bp);
6811 break;
6812
6813 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6814 bnx2x_reset_port(bp);
6815 bnx2x_reset_func(bp);
6816 break;
6817
6818 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6819 bnx2x_reset_func(bp);
6820 break;
6821
6822 default:
6823 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6824 break;
6825 }
6826}
6827
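/*
 * Graceful shutdown on unload: drain the Tx queues, clear the MAC
 * configuration, negotiate a reset code with the MCP (or derive it from
 * the per-path load counters when no MCP is present), stop all clients
 * and the function, and finally reset the chip at the negotiated scope.
 */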
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006828void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006829{
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006830 int port = BP_PORT(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006831 u32 reset_code = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006832 int i, cnt, rc;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006833
Eilon Greenstein555f6c72009-02-12 08:36:11 +00006834 /* Wait until tx fastpath tasks complete */
Vladislav Zolotarov54b9dda2009-11-16 06:05:58 +00006835 for_each_queue(bp, i) {
Eliezer Tamir228241e2008-02-28 11:56:57 -08006836 struct bnx2x_fastpath *fp = &bp->fp[i];
6837
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006838 cnt = 1000;
Vladislav Zolotarove8b5fc52009-01-26 12:36:42 -08006839 while (bnx2x_has_tx_work_unload(fp)) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006840
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006841 if (!cnt) {
6842 BNX2X_ERR("timeout waiting for queue[%d]\n",
6843 i);
6844#ifdef BNX2X_STOP_ON_ERROR
6845 bnx2x_panic();
6846 return;
6847#else
6848 break;
6849#endif
6850 }
6851 cnt--;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006852 msleep(1);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006853 }
Eliezer Tamir228241e2008-02-28 11:56:57 -08006854 }
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006855 /* Give HW time to discard old tx messages */
6856 msleep(1);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006857
Yitchak Gertner65abd742008-08-25 15:26:24 -07006858 if (CHIP_IS_E1(bp)) {
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006859 /* invalidate mc list,
6860 * wait and poll (interrupts are off)
6861 */
6862 bnx2x_invlidate_e1_mc_list(bp);
6863 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006864
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006865 } else {
Yitchak Gertner65abd742008-08-25 15:26:24 -07006866 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
6867
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006868 bnx2x_set_eth_mac(bp, 0);
Yitchak Gertner65abd742008-08-25 15:26:24 -07006869
6870 for (i = 0; i < MC_HASH_SIZE; i++)
6871 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
6872 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006873
Michael Chan993ac7b2009-10-10 13:46:56 +00006874#ifdef BCM_CNIC
6875 /* Clear iSCSI L2 MAC */
6876 mutex_lock(&bp->cnic_mutex);
6877 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
6878 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
6879 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
6880 }
6881 mutex_unlock(&bp->cnic_mutex);
6882#endif
Yitchak Gertner65abd742008-08-25 15:26:24 -07006883
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006884 if (unload_mode == UNLOAD_NORMAL)
6885 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006886
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006887 else if (bp->flags & NO_WOL_FLAG)
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006888 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006889
Eilon Greenstein7d0446c2009-07-29 00:20:10 +00006890 else if (bp->wol) {
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006891 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006892 u8 *mac_addr = bp->dev->dev_addr;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006893 u32 val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006894 /* The mac address is written to entries 1-4 to
6895 preserve entry 0 which is used by the PMF */
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006896 u8 entry = (BP_E1HVN(bp) + 1)*8;
6897
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006898 val = (mac_addr[0] << 8) | mac_addr[1];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006899 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006900
6901 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
6902 (mac_addr[4] << 8) | mac_addr[5];
Eilon Greenstein3196a882008-08-13 15:58:49 -07006903 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006904
6905 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
Eliezer Tamir228241e2008-02-28 11:56:57 -08006906
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006907 } else
6908 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
6909
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006910 /* Close multi and leading connections.
 6911 Completions for the ramrods are collected synchronously */
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006912 for_each_queue(bp, i)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006913
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006914 if (bnx2x_stop_client(bp, i))
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006915#ifdef BNX2X_STOP_ON_ERROR
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006916 return;
6917#else
6918 goto unload_error;
6919#endif
6920
6921 rc = bnx2x_func_stop(bp);
6922 if (rc) {
6923 BNX2X_ERR("Function stop failed!\n");
6924#ifdef BNX2X_STOP_ON_ERROR
6925 return;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07006926#else
6927 goto unload_error;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006928#endif
Eliezer Tamir228241e2008-02-28 11:56:57 -08006929 }
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006930#ifndef BNX2X_STOP_ON_ERROR
Eliezer Tamir228241e2008-02-28 11:56:57 -08006931unload_error:
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006932#endif
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006933 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006934 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006935 else {
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006936 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts[%d] "
6937 "%d, %d, %d\n", BP_PATH(bp),
6938 load_count[BP_PATH(bp)][0],
6939 load_count[BP_PATH(bp)][1],
6940 load_count[BP_PATH(bp)][2]);
6941 load_count[BP_PATH(bp)][0]--;
6942 load_count[BP_PATH(bp)][1 + port]--;
6943 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts[%d] "
6944 "%d, %d, %d\n", BP_PATH(bp),
6945 load_count[BP_PATH(bp)][0], load_count[BP_PATH(bp)][1],
6946 load_count[BP_PATH(bp)][2]);
6947 if (load_count[BP_PATH(bp)][0] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006948 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00006949 else if (load_count[BP_PATH(bp)][1 + port] == 0)
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006950 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
6951 else
6952 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
6953 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006954
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006955 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
6956 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
6957 bnx2x__link_reset(bp);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006958
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006959 /* Disable HW interrupts, NAPI */
6960 bnx2x_netif_stop(bp, 1);
6961
6962 /* Release IRQs */
Dmitry Kravkovd6214d72010-10-06 03:32:10 +00006963 bnx2x_free_irq(bp);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00006964
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006965 /* Reset the chip */
Eliezer Tamir228241e2008-02-28 11:56:57 -08006966 bnx2x_reset_chip(bp, reset_code);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02006967
6968 /* Report UNLOAD_DONE to MCP */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07006969 if (!BP_NOMCP(bp))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00006970 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Eilon Greenstein356e2382009-02-12 08:38:32 +00006971
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006972}
6973
Dmitry Kravkov9f6c9252010-07-27 12:34:34 +00006974void bnx2x_disable_close_the_gate(struct bnx2x *bp)
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006975{
6976 u32 val;
6977
6978 DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
6979
6980 if (CHIP_IS_E1(bp)) {
6981 int port = BP_PORT(bp);
6982 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
6983 MISC_REG_AEU_MASK_ATTN_FUNC_0;
6984
6985 val = REG_RD(bp, addr);
6986 val &= ~(0x300);
6987 REG_WR(bp, addr, val);
6988 } else if (CHIP_IS_E1H(bp)) {
6989 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
6990 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
6991 MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
6992 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
6993 }
6994}
6995
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00006996/* Close gates #2, #3 and #4: */
6997static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
6998{
6999 u32 val, addr;
7000
7001 /* Gates #2 and #4a are closed/opened for "not E1" only */
7002 if (!CHIP_IS_E1(bp)) {
7003 /* #4 */
7004 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
7005 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
7006 close ? (val | 0x1) : (val & (~(u32)1)));
7007 /* #2 */
7008 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
7009 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
7010 close ? (val | 0x1) : (val & (~(u32)1)));
7011 }
7012
7013 /* #3 */
7014 addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
7015 val = REG_RD(bp, addr);
7016 REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
7017
7018 DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
7019 close ? "closing" : "opening");
7020 mmiowb();
7021}
7022
7023#define SHARED_MF_CLP_MAGIC 0x80000000 /* `magic' bit */
7024
7025static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
7026{
7027 /* Do some magic... */
7028 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7029 *magic_val = val & SHARED_MF_CLP_MAGIC;
7030 MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
7031}
7032
7033/* Restore the value of the `magic' bit.
7034 *
7035 * @param bp Driver handle.
7036 * @param magic_val Old value of the `magic' bit.
7037 */
7038static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
7039{
7040 /* Restore the `magic' bit value... */
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007041 u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
7042 MF_CFG_WR(bp, shared_mf_config.clp_mb,
7043 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
7044}
7045
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007046/**
7047 * Prepares for MCP reset: takes care of CLP configurations.
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007048 *
7049 * @param bp
7050 * @param magic_val Old value of 'magic' bit.
7051 */
7052static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
7053{
7054 u32 shmem;
7055 u32 validity_offset;
7056
7057 DP(NETIF_MSG_HW, "Starting\n");
7058
7059 /* Set `magic' bit in order to save MF config */
7060 if (!CHIP_IS_E1(bp))
7061 bnx2x_clp_reset_prep(bp, magic_val);
7062
7063 /* Get shmem offset */
7064 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7065 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7066
7067 /* Clear validity map flags */
7068 if (shmem > 0)
7069 REG_WR(bp, shmem + validity_offset, 0);
7070}
7071
7072#define MCP_TIMEOUT 5000 /* 5 seconds (in ms) */
7073#define MCP_ONE_TIMEOUT 100 /* 100 ms */
7074
7075/* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
7076 * depending on the HW type.
7077 *
7078 * @param bp
7079 */
7080static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
7081{
7082 /* special handling for emulation and FPGA,
7083 wait 10 times longer */
7084 if (CHIP_REV_IS_SLOW(bp))
7085 msleep(MCP_ONE_TIMEOUT*10);
7086 else
7087 msleep(MCP_ONE_TIMEOUT);
7088}
7089
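/*
 * Poll the shmem validity map until the MCP signals that it is back up
 * after the reset, giving up after MCP_TIMEOUT; the `magic' bit saved by
 * bnx2x_reset_mcp_prep() is restored on every exit path.
 */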
7090static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
7091{
7092 u32 shmem, cnt, validity_offset, val;
7093 int rc = 0;
7094
7095 msleep(100);
7096
7097 /* Get shmem offset */
7098 shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7099 if (shmem == 0) {
7100 BNX2X_ERR("Shmem 0 return failure\n");
7101 rc = -ENOTTY;
7102 goto exit_lbl;
7103 }
7104
7105 validity_offset = offsetof(struct shmem_region, validity_map[0]);
7106
7107 /* Wait for MCP to come up */
7108 for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
7109 /* TBD: it's best to check the validity map of the last port.
7110 * Currently checks on port 0.
7111 */
7112 val = REG_RD(bp, shmem + validity_offset);
7113 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
7114 shmem + validity_offset, val);
7115
7116 /* check that shared memory is valid. */
7117 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7118 == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7119 break;
7120
7121 bnx2x_mcp_wait_one(bp);
7122 }
7123
7124 DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
7125
7126 /* Check that shared memory is valid. This indicates that MCP is up. */
7127 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
7128 (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
7129 BNX2X_ERR("Shmem signature not present. MCP is not up !!\n");
7130 rc = -ENOTTY;
7131 goto exit_lbl;
7132 }
7133
7134exit_lbl:
7135 /* Restore the `magic' bit value */
7136 if (!CHIP_IS_E1(bp))
7137 bnx2x_clp_reset_done(bp, magic_val);
7138
7139 return rc;
7140}
7141
7142static void bnx2x_pxp_prep(struct bnx2x *bp)
7143{
7144 if (!CHIP_IS_E1(bp)) {
7145 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
7146 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
7147 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
7148 mmiowb();
7149 }
7150}
7151
7152/*
7153 * Reset the whole chip except for:
7154 * - PCIE core
7155 * - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
7156 * one reset bit)
7157 * - IGU
7158 * - MISC (including AEU)
7159 * - GRC
7160 * - RBCN, RBCP
7161 */
7162static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
7163{
7164 u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
7165
7166 not_reset_mask1 =
7167 MISC_REGISTERS_RESET_REG_1_RST_HC |
7168 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
7169 MISC_REGISTERS_RESET_REG_1_RST_PXP;
7170
7171 not_reset_mask2 =
7172 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
7173 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
7174 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
7175 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
7176 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
7177 MISC_REGISTERS_RESET_REG_2_RST_GRC |
7178 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
7179 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
7180
7181 reset_mask1 = 0xffffffff;
7182
7183 if (CHIP_IS_E1(bp))
7184 reset_mask2 = 0xffff;
7185 else
7186 reset_mask2 = 0x1ffff;
7187
7188 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7189 reset_mask1 & (~not_reset_mask1));
7190 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7191 reset_mask2 & (~not_reset_mask2));
7192
7193 barrier();
7194 mmiowb();
7195
7196 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
7197 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
7198 mmiowb();
7199}
7200
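/*
 * "Process kill", the heavy hammer of the recovery flow: wait for the
 * PXP Tetris buffer to drain, close gates #2-#4, prepare the MCP and PXP
 * for reset, pull the chip reset, then bring the MCP back up and reopen
 * the gates.
 */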
7201static int bnx2x_process_kill(struct bnx2x *bp)
7202{
7203 int cnt = 1000;
7204 u32 val = 0;
7205 u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
7206
7207
7208 /* Empty the Tetris buffer, wait for 1s */
7209 do {
7210 sr_cnt = REG_RD(bp, PXP2_REG_RD_SR_CNT);
7211 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
7212 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
7213 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
7214 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
7215 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
7216 ((port_is_idle_0 & 0x1) == 0x1) &&
7217 ((port_is_idle_1 & 0x1) == 0x1) &&
7218 (pgl_exp_rom2 == 0xffffffff))
7219 break;
7220 msleep(1);
7221 } while (cnt-- > 0);
7222
7223 if (cnt <= 0) {
7224 DP(NETIF_MSG_HW, "Tetris buffer didn't empty or there"
7225 " are still"
7226 " outstanding read requests after 1s!\n");
7227 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
7228 " port_is_idle_0=0x%08x,"
7229 " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
7230 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
7231 pgl_exp_rom2);
7232 return -EAGAIN;
7233 }
7234
7235 barrier();
7236
7237 /* Close gates #2, #3 and #4 */
7238 bnx2x_set_234_gates(bp, true);
7239
7240 /* TBD: Indicate that "process kill" is in progress to MCP */
7241
7242 /* Clear "unprepared" bit */
7243 REG_WR(bp, MISC_REG_UNPREPARED, 0);
7244 barrier();
7245
7246 /* Make sure all is written to the chip before the reset */
7247 mmiowb();
7248
7249 /* Wait for 1ms to empty GLUE and PCI-E core queues,
7250 * PSWHST, GRC and PSWRD Tetris buffer.
7251 */
7252 msleep(1);
7253
7254 /* Prepare for chip reset: */
7255 /* MCP */
7256 bnx2x_reset_mcp_prep(bp, &val);
7257
7258 /* PXP */
7259 bnx2x_pxp_prep(bp);
7260 barrier();
7261
7262 /* reset the chip */
7263 bnx2x_process_kill_chip_reset(bp);
7264 barrier();
7265
7266 /* Recover after reset: */
7267 /* MCP */
7268 if (bnx2x_reset_mcp_comp(bp, val))
7269 return -EAGAIN;
7270
7271 /* PXP */
7272 bnx2x_pxp_prep(bp);
7273
7274 /* Open the gates #2, #3 and #4 */
7275 bnx2x_set_234_gates(bp, false);
7276
7277 /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
7278 * reset state, re-enable attentions. */
7279
7280 return 0;
7281}
7282
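/*
 * Recovery-leader duty: run the process-kill flow and, on success, mark
 * the global reset as done before releasing leadership and the HW lock.
 */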
7283static int bnx2x_leader_reset(struct bnx2x *bp)
7284{
7285 int rc = 0;
7286 /* Try to recover after the failure */
7287 if (bnx2x_process_kill(bp)) {
7288 printk(KERN_ERR "%s: Something bad has happened! Aii!\n",
7289 bp->dev->name);
7290 rc = -EAGAIN;
7291 goto exit_leader_reset;
7292 }
7293
7294 /* Clear "reset is in progress" bit and update the driver state */
7295 bnx2x_set_reset_done(bp);
7296 bp->recovery_state = BNX2X_RECOVERY_DONE;
7297
7298exit_leader_reset:
7299 bp->is_leader = 0;
7300 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
7301 smp_wmb();
7302 return rc;
7303}
7304
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007305/* Assumption: runs under rtnl lock. This together with the fact
7306 * that it's called only from bnx2x_reset_task() ensure that it
7307 * will never be called when netif_running(bp->dev) is false.
7308 */
7309static void bnx2x_parity_recover(struct bnx2x *bp)
7310{
7311 DP(NETIF_MSG_HW, "Handling parity\n");
7312 while (1) {
7313 switch (bp->recovery_state) {
7314 case BNX2X_RECOVERY_INIT:
7315 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
7316 /* Try to get a LEADER_LOCK HW lock */
7317 if (bnx2x_trylock_hw_lock(bp,
7318 HW_LOCK_RESOURCE_RESERVED_08))
7319 bp->is_leader = 1;
7320
7321 /* Stop the driver */
7322 /* If interface has been removed - break */
7323 if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
7324 return;
7325
7326 bp->recovery_state = BNX2X_RECOVERY_WAIT;
7327 /* Ensure "is_leader" and "recovery_state"
7328 * update values are seen on other CPUs
7329 */
7330 smp_wmb();
7331 break;
7332
7333 case BNX2X_RECOVERY_WAIT:
7334 DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
7335 if (bp->is_leader) {
7336 u32 load_counter = bnx2x_get_load_cnt(bp);
7337 if (load_counter) {
7338 /* Wait until all other functions get
7339 * down.
7340 */
7341 schedule_delayed_work(&bp->reset_task,
7342 HZ/10);
7343 return;
7344 } else {
7345 /* If all other functions got down -
7346 * try to bring the chip back to
7347 * normal. In any case it's an exit
7348 * point for a leader.
7349 */
7350 if (bnx2x_leader_reset(bp) ||
7351 bnx2x_nic_load(bp, LOAD_NORMAL)) {
7352 printk(KERN_ERR"%s: Recovery "
7353 "has failed. Power cycle is "
7354 "needed.\n", bp->dev->name);
7355 /* Disconnect this device */
7356 netif_device_detach(bp->dev);
7357 /* Block ifup for all function
7358 * of this ASIC until
7359 * "process kill" or power
7360 * cycle.
7361 */
7362 bnx2x_set_reset_in_progress(bp);
7363 /* Shut down the power */
7364 bnx2x_set_power_state(bp,
7365 PCI_D3hot);
7366 return;
7367 }
7368
7369 return;
7370 }
7371 } else { /* non-leader */
7372 if (!bnx2x_reset_is_done(bp)) {
7373 /* Try to get a LEADER_LOCK HW lock as
7374 * long as a former leader may have
7375 * been unloaded by the user or
7376 * released a leadership by another
7377 * reason.
7378 */
7379 if (bnx2x_trylock_hw_lock(bp,
7380 HW_LOCK_RESOURCE_RESERVED_08)) {
7381 /* I'm a leader now! Restart a
7382 * switch case.
7383 */
7384 bp->is_leader = 1;
7385 break;
7386 }
7387
7388 schedule_delayed_work(&bp->reset_task,
7389 HZ/10);
7390 return;
7391
7392 } else { /* A leader has completed
7393 * the "process kill". It's an exit
7394 * point for a non-leader.
7395 */
7396 bnx2x_nic_load(bp, LOAD_NORMAL);
7397 bp->recovery_state =
7398 BNX2X_RECOVERY_DONE;
7399 smp_wmb();
7400 return;
7401 }
7402 }
7403 default:
7404 return;
7405 }
7406 }
7407}
7408
7409/* bnx2x_nic_unload() flushes the bnx2x_wq, thus reset task is
7410 * scheduled on a general queue in order to prevent a dead lock.
7411 */
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007412static void bnx2x_reset_task(struct work_struct *work)
7413{
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007414 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007415
7416#ifdef BNX2X_STOP_ON_ERROR
7417 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7418 " so reset not done to allow debug dump,\n"
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007419 KERN_ERR " you will need to reboot when done\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007420 return;
7421#endif
7422
7423 rtnl_lock();
7424
7425 if (!netif_running(bp->dev))
7426 goto reset_task_exit;
7427
Vladislav Zolotarov72fd0712010-04-19 01:13:12 +00007428 if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
7429 bnx2x_parity_recover(bp);
7430 else {
7431 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7432 bnx2x_nic_load(bp, LOAD_NORMAL);
7433 }
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007434
7435reset_task_exit:
7436 rtnl_unlock();
7437}
7438
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007439/* end of nic load/unload */
7440
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007441/*
7442 * Init service functions
7443 */
7444
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007445u32 bnx2x_get_pretend_reg(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007446{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007447 u32 base = PXP2_REG_PGL_PRETEND_FUNC_F0;
7448 u32 stride = PXP2_REG_PGL_PRETEND_FUNC_F1 - base;
7449 return base + (BP_ABS_FUNC(bp)) * stride;
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007450}
7451
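/*
 * Disable interrupts on a non-E1 chip by pretending to be function 0
 * through the PGL pretend register ("like-E1" mode) and restoring the
 * original function afterwards.
 */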
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007452static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007453{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007454 u32 reg = bnx2x_get_pretend_reg(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007455
7456 /* Flush all outstanding writes */
7457 mmiowb();
7458
7459 /* Pretend to be function 0 */
7460 REG_WR(bp, reg, 0);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007461 REG_RD(bp, reg); /* Flush the GRC transaction (in the chip) */
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007462
7463 /* From now we are in the "like-E1" mode */
7464 bnx2x_int_disable(bp);
7465
7466 /* Flush all outstanding writes */
7467 mmiowb();
7468
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007469 /* Restore the original function */
7470 REG_WR(bp, reg, BP_ABS_FUNC(bp));
7471 REG_RD(bp, reg);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007472}
7473
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007474static inline void bnx2x_undi_int_disable(struct bnx2x *bp)
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007475{
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007476 if (CHIP_IS_E1(bp))
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007477 bnx2x_int_disable(bp);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007478 else
7479 bnx2x_undi_int_disable_e1h(bp);
Eilon Greensteinf1ef27e2009-02-12 08:36:23 +00007480}
7481
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007482static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007483{
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007484 u32 val;
7485
7486 /* Check if there is any driver already loaded */
7487 val = REG_RD(bp, MISC_REG_UNPREPARED);
7488 if (val == 0x1) {
7489 /* Check if it is the UNDI driver
7490 * UNDI driver initializes CID offset for normal bell to 0x7
7491 */
Yitchak Gertner4a37fb62008-08-13 15:50:23 -07007492 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007493 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7494 if (val == 0x7) {
7495 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007496 /* save our pf_num */
7497 int orig_pf_num = bp->pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007498 u32 swap_en;
7499 u32 swap_val;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007500
Eilon Greensteinb4661732009-01-14 06:43:56 +00007501 /* clear the UNDI indication */
7502 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7503
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007504 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7505
7506 /* try unload UNDI on port 0 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007507 bp->pf_num = 0;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007508 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007509 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007510 DRV_MSG_SEQ_NUMBER_MASK);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007511 reset_code = bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007512
7513 /* if UNDI is loaded on the other port */
7514 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7515
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007516 /* send "DONE" for previous unload */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007517 bnx2x_fw_command(bp,
7518 DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007519
7520 /* unload UNDI on port 1 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007521 bp->pf_num = 1;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007522 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007523 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007524 DRV_MSG_SEQ_NUMBER_MASK);
7525 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007526
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007527 bnx2x_fw_command(bp, reset_code, 0);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007528 }
7529
Eilon Greensteinb4661732009-01-14 06:43:56 +00007530 /* now it's safe to release the lock */
7531 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7532
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007533 bnx2x_undi_int_disable(bp);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007534
7535 /* close input traffic and wait for it */
7536 /* Do not rcv packets to BRB */
7537 REG_WR(bp,
7538 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7539 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7540 /* Do not direct rcv packets that are not for MCP to
7541 * the BRB */
7542 REG_WR(bp,
7543 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7544 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7545 /* clear AEU */
7546 REG_WR(bp,
7547 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7548 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7549 msleep(10);
7550
7551 /* save NIG port swap info */
7552 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7553 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007554 /* reset device */
7555 REG_WR(bp,
7556 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007557 0xd3ffffff);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007558 REG_WR(bp,
7559 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7560 0x1403);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007561 /* take the NIG out of reset and restore swap values */
7562 REG_WR(bp,
7563 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7564 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7565 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7566 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7567
7568 /* send unload done to the MCP */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007569 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007570
7571 /* restore our func and fw_seq */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007572 bp->pf_num = orig_pf_num;
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007573 bp->fw_seq =
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007574 (SHMEM_RD(bp, func_mb[bp->pf_num].drv_mb_header) &
Vladislav Zolotarovda5a6622008-08-13 15:50:00 -07007575 DRV_MSG_SEQ_NUMBER_MASK);
Eilon Greensteinb4661732009-01-14 06:43:56 +00007576 } else
7577 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007578 }
7579}
7580
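/*
 * Read the port-independent HW configuration: chip id/revision, port
 * mode and PF id on E2, flash size, shmem bases, bootcode version and
 * WoL capability.
 */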
7581static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7582{
7583 u32 val, val2, val3, val4, id;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007584 u16 pmc;
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007585
7586 /* Get the chip revision id and number. */
7587 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7588 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7589 id = ((val & 0xffff) << 16);
7590 val = REG_RD(bp, MISC_REG_CHIP_REV);
7591 id |= ((val & 0xf) << 12);
7592 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7593 id |= ((val & 0xff) << 4);
Eilon Greenstein5a40e082009-01-14 06:44:04 +00007594 val = REG_RD(bp, MISC_REG_BOND_ID);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007595 id |= (val & 0xf);
7596 bp->common.chip_id = id;
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007597
7598 /* Set doorbell size */
7599 bp->db_size = (1 << BNX2X_DB_SHIFT);
7600
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007601 if (CHIP_IS_E2(bp)) {
7602 val = REG_RD(bp, MISC_REG_PORT4MODE_EN_OVWR);
7603 if ((val & 1) == 0)
7604 val = REG_RD(bp, MISC_REG_PORT4MODE_EN);
7605 else
7606 val = (val >> 1) & 1;
7607 BNX2X_DEV_INFO("chip is in %s\n", val ? "4_PORT_MODE" :
7608 "2_PORT_MODE");
7609 bp->common.chip_port_mode = val ? CHIP_4_PORT_MODE :
7610 CHIP_2_PORT_MODE;
7611
7612 if (CHIP_MODE_IS_4_PORT(bp))
7613 bp->pfid = (bp->pf_num >> 1); /* 0..3 */
7614 else
7615 bp->pfid = (bp->pf_num & 0x6); /* 0, 2, 4, 6 */
7616 } else {
7617 bp->common.chip_port_mode = CHIP_PORT_MODE_NONE; /* N/A */
7618 bp->pfid = bp->pf_num; /* 0..7 */
7619 }
7620
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007621 /*
7622 * set base FW non-default (fast path) status block id, this value is
7623 * used to initialize the fw_sb_id saved on the fp/queue structure to
7624 * determine the id used by the FW.
7625 */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007626 if (CHIP_IS_E1x(bp))
7627 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E1x;
7628 else /* E2 */
7629 bp->base_fw_ndsb = BP_PORT(bp) * FP_SB_MAX_E2;
7630
7631 bp->link_params.chip_id = bp->common.chip_id;
7632 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
Dmitry Kravkov523224a2010-10-06 03:23:26 +00007633
Eilon Greenstein1c063282009-02-12 08:36:43 +00007634 val = (REG_RD(bp, 0x2874) & 0x55);
7635 if ((bp->common.chip_id & 0x1) ||
7636 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7637 bp->flags |= ONE_PORT_FLAG;
7638 BNX2X_DEV_INFO("single port device\n");
7639 }
7640
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007641 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7642 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7643 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7644 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7645 bp->common.flash_size, bp->common.flash_size);
7646
7647 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007648 bp->common.shmem2_base = REG_RD(bp, (BP_PATH(bp) ?
7649 MISC_REG_GENERIC_CR_1 :
7650 MISC_REG_GENERIC_CR_0));
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007651 bp->link_params.shmem_base = bp->common.shmem_base;
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007652 bp->link_params.shmem2_base = bp->common.shmem2_base;
Eilon Greenstein2691d512009-08-12 08:22:08 +00007653 BNX2X_DEV_INFO("shmem offset 0x%x shmem2 offset 0x%x\n",
7654 bp->common.shmem_base, bp->common.shmem2_base);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007655
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007656 if (!bp->common.shmem_base) {
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007657 BNX2X_DEV_INFO("MCP not active\n");
7658 bp->flags |= NO_MCP_FLAG;
7659 return;
7660 }
7661
7662 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7663 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7664 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007665 BNX2X_ERR("BAD MCP validity signature\n");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007666
7667 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
Eilon Greenstein35b19ba2009-02-12 08:36:47 +00007668 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007669
7670 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7671 SHARED_HW_CFG_LED_MODE_MASK) >>
7672 SHARED_HW_CFG_LED_MODE_SHIFT);
7673
Eilon Greensteinc2c8b032009-02-12 08:37:14 +00007674 bp->link_params.feature_config_flags = 0;
7675 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7676 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7677 bp->link_params.feature_config_flags |=
7678 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7679 else
7680 bp->link_params.feature_config_flags &=
7681 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7682
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007683 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7684 bp->common.bc_ver = val;
7685 BNX2X_DEV_INFO("bc_ver %X\n", val);
7686 if (val < BNX2X_BC_VER) {
7687 /* for now only warn
7688 * later we might need to enforce this */
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007689 BNX2X_ERR("This driver needs bc_ver %X but found %X, "
7690 "please upgrade BC\n", BNX2X_BC_VER, val);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007691 }
Eilon Greenstein4d295db2009-07-21 05:47:47 +00007692 bp->link_params.feature_config_flags |=
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007693 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007694 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7695
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007696 bp->link_params.feature_config_flags |=
7697 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
7698 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
Eilon Greenstein72ce58c2008-08-13 15:52:46 -07007699
7700 if (BP_E1HVN(bp) == 0) {
7701 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7702 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7703 } else {
7704 /* no WOL capability for E1HVN != 0 */
7705 bp->flags |= NO_WOL_FLAG;
7706 }
7707 BNX2X_DEV_INFO("%sWoL capable\n",
Eilon Greensteinf5372252009-02-12 08:38:30 +00007708 (bp->flags & NO_WOL_FLAG) ? "not " : "");
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007709
7710 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7711 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7712 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7713 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7714
Vladislav Zolotarovcdaa7cb2010-04-19 01:13:57 +00007715 dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
7716 val, val2, val3, val4);
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007717}
7718
Dmitry Kravkovf2e08992010-10-06 03:28:26 +00007719#define IGU_FID(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_FID)
7720#define IGU_VEC(val) GET_FIELD((val), IGU_REG_MAPPING_MEMORY_VECTOR)
7721
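/*
 * Derive this PF's IGU layout: in backward-compatible mode the base SB
 * and default SB ids follow a fixed formula; otherwise walk the IGU CAM
 * and collect the status blocks mapped to our PF id.
 */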
7722static void __devinit bnx2x_get_igu_cam_info(struct bnx2x *bp)
7723{
7724 int pfid = BP_FUNC(bp);
7725 int vn = BP_E1HVN(bp);
7726 int igu_sb_id;
7727 u32 val;
7728 u8 fid;
7729
7730 bp->igu_base_sb = 0xff;
7731 bp->igu_sb_cnt = 0;
7732 if (CHIP_INT_MODE_IS_BC(bp)) {
7733 bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x,
7734 bp->l2_cid_count);
7735
7736 bp->igu_base_sb = (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn) *
7737 FP_SB_MAX_E1x;
7738
7739 bp->igu_dsb_id = E1HVN_MAX * FP_SB_MAX_E1x +
7740 (CHIP_MODE_IS_4_PORT(bp) ? pfid : vn);
7741
7742 return;
7743 }
7744
7745 /* IGU in normal mode - read CAM */
7746 for (igu_sb_id = 0; igu_sb_id < IGU_REG_MAPPING_MEMORY_SIZE;
7747 igu_sb_id++) {
7748 val = REG_RD(bp, IGU_REG_MAPPING_MEMORY + igu_sb_id * 4);
7749 if (!(val & IGU_REG_MAPPING_MEMORY_VALID))
7750 continue;
7751 fid = IGU_FID(val);
7752 if ((fid & IGU_FID_ENCODE_IS_PF)) {
7753 if ((fid & IGU_FID_PF_NUM_MASK) != pfid)
7754 continue;
7755 if (IGU_VEC(val) == 0)
7756 /* default status block */
7757 bp->igu_dsb_id = igu_sb_id;
7758 else {
7759 if (bp->igu_base_sb == 0xff)
7760 bp->igu_base_sb = igu_sb_id;
7761 bp->igu_sb_cnt++;
7762 }
7763 }
7764 }
7765 bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt, bp->l2_cid_count);
7766 if (bp->igu_sb_cnt == 0)
7767 BNX2X_ERR("CAM configuration error\n");
7768}
7769
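/*
 * Aggregate the supported link modes of all configured external PHYs
 * (honouring PHY swapping on dual-PHY boards), read the PHY address for
 * the configured switch type, then trim the result by speed_cap_mask.
 */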
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007770static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7771 u32 switch_cfg)
7772{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007773 int cfg_size = 0, idx, port = BP_PORT(bp);
7774
7775 /* Aggregation of supported attributes of all external phys */
7776 bp->port.supported[0] = 0;
7777 bp->port.supported[1] = 0;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007778 switch (bp->link_params.num_phys) {
7779 case 1:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007780 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
7781 cfg_size = 1;
7782 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007783 case 2:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007784 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
7785 cfg_size = 1;
7786 break;
7787 case 3:
7788 if (bp->link_params.multi_phy_config &
7789 PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
7790 bp->port.supported[1] =
7791 bp->link_params.phy[EXT_PHY1].supported;
7792 bp->port.supported[0] =
7793 bp->link_params.phy[EXT_PHY2].supported;
7794 } else {
7795 bp->port.supported[0] =
7796 bp->link_params.phy[EXT_PHY1].supported;
7797 bp->port.supported[1] =
7798 bp->link_params.phy[EXT_PHY2].supported;
7799 }
7800 cfg_size = 2;
7801 break;
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007802 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007803
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007804 if (!(bp->port.supported[0] || bp->port.supported[1])) {
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007805 BNX2X_ERR("NVRAM config error. BAD phy config. "
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007806 "PHY1 config 0x%x, PHY2 config 0x%x\n",
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007807 SHMEM_RD(bp,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007808 dev_info.port_hw_config[port].external_phy_config),
7809 SHMEM_RD(bp,
7810 dev_info.port_hw_config[port].external_phy_config2));
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007811 return;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007812 }
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007813
Yaniv Rosnerb7737c92010-09-07 11:40:54 +00007814 switch (switch_cfg) {
7815 case SWITCH_CFG_1G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007816 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7817 port*0x10);
7818 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007819 break;
7820
7821 case SWITCH_CFG_10G:
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007822 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7823 port*0x18);
7824 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007825 break;
7826
7827 default:
7828 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007829 bp->port.link_config[0]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007830 return;
7831 }
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007832 /* mask what we support according to speed_cap_mask per configuration */
7833 for (idx = 0; idx < cfg_size; idx++) {
7834 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007835 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007836 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007837
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007838 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007839 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007840 bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007841
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007842 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007843 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007844 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007845
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007846 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007847 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007848 bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007849
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007850 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007851 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007852 bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007853 SUPPORTED_1000baseT_Full);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007854
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007855 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007856 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007857 bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007858
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007859 if (!(bp->link_params.speed_cap_mask[idx] &
Yaniv Rosnerc18487e2008-06-23 20:27:52 -07007860 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007861 bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007862
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007863 }
7864
7865 BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
7866 bp->port.supported[1]);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007867}
7868
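/*
 * Translate the NVRAM-requested link speed per PHY configuration into
 * req_line_speed/req_duplex and the advertising mask, rejecting any
 * requested speed that the PHY does not support.
 */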
Eilon Greenstein34f80b02008-06-23 20:33:01 -07007869static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007870{
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007871 u32 link_config, idx, cfg_size = 0;
7872 bp->port.advertising[0] = 0;
7873 bp->port.advertising[1] = 0;
7874 switch (bp->link_params.num_phys) {
7875 case 1:
7876 case 2:
7877 cfg_size = 1;
7878 break;
7879 case 3:
7880 cfg_size = 2;
7881 break;
7882 }
7883 for (idx = 0; idx < cfg_size; idx++) {
7884 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
7885 link_config = bp->port.link_config[idx];
7886 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007887 case PORT_FEATURE_LINK_SPEED_AUTO:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007888 if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
7889 bp->link_params.req_line_speed[idx] =
7890 SPEED_AUTO_NEG;
7891 bp->port.advertising[idx] |=
7892 bp->port.supported[idx];
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007893 } else {
7894 /* force 10G, no AN */
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007895 bp->link_params.req_line_speed[idx] =
7896 SPEED_10000;
7897 bp->port.advertising[idx] |=
7898 (ADVERTISED_10000baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007899 ADVERTISED_FIBRE);
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007900 continue;
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007901 }
7902 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007903
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007904 case PORT_FEATURE_LINK_SPEED_10M_FULL:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007905 if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
7906 bp->link_params.req_line_speed[idx] =
7907 SPEED_10;
7908 bp->port.advertising[idx] |=
7909 (ADVERTISED_10baseT_Full |
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007910 ADVERTISED_TP);
7911 } else {
7912 BNX2X_ERROR("NVRAM config error. "
7913 "Invalid link_config 0x%x"
7914 " speed_cap_mask 0x%x\n",
7915 link_config,
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007916 bp->link_params.speed_cap_mask[idx]);
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007917 return;
7918 }
7919 break;
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02007920
Dmitry Kravkovf85582f2010-10-06 03:34:21 +00007921 case PORT_FEATURE_LINK_SPEED_10M_HALF:
Yaniv Rosnera22f0782010-09-07 11:41:20 +00007922 if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_10baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_FULL:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_100M_HALF:
			if (bp->port.supported[idx] &
			    SUPPORTED_100baseT_Half) {
				bp->link_params.req_line_speed[idx] =
					SPEED_100;
				bp->link_params.req_duplex[idx] =
					DUPLEX_HALF;
				bp->port.advertising[idx] |=
					(ADVERTISED_100baseT_Half |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_1G:
			if (bp->port.supported[idx] &
			    SUPPORTED_1000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_1000;
				bp->port.advertising[idx] |=
					(ADVERTISED_1000baseT_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_2_5G:
			if (bp->port.supported[idx] &
			    SUPPORTED_2500baseX_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_2500;
				bp->port.advertising[idx] |=
					(ADVERTISED_2500baseX_Full |
					 ADVERTISED_TP);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		case PORT_FEATURE_LINK_SPEED_10G_CX4:
		case PORT_FEATURE_LINK_SPEED_10G_KX4:
		case PORT_FEATURE_LINK_SPEED_10G_KR:
			if (bp->port.supported[idx] &
			    SUPPORTED_10000baseT_Full) {
				bp->link_params.req_line_speed[idx] =
					SPEED_10000;
				bp->port.advertising[idx] |=
					(ADVERTISED_10000baseT_Full |
					 ADVERTISED_FIBRE);
			} else {
				BNX2X_ERROR("NVRAM config error. "
					    "Invalid link_config 0x%x"
					    " speed_cap_mask 0x%x\n",
					    link_config,
					    bp->link_params.speed_cap_mask[idx]);
				return;
			}
			break;

		default:
			BNX2X_ERROR("NVRAM config error. "
				    "BAD link speed link_config 0x%x\n",
				    link_config);
			bp->link_params.req_line_speed[idx] =
				SPEED_AUTO_NEG;
			bp->port.advertising[idx] =
				bp->port.supported[idx];
			break;
		}

		bp->link_params.req_flow_ctrl[idx] = (link_config &
					PORT_FEATURE_FLOW_CONTROL_MASK);
		if ((bp->link_params.req_flow_ctrl[idx] ==
		     BNX2X_FLOW_CTRL_AUTO) &&
		    !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
			bp->link_params.req_flow_ctrl[idx] =
				BNX2X_FLOW_CTRL_NONE;
		}

		BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl"
			       " 0x%x advertising 0x%x\n",
			       bp->link_params.req_line_speed[idx],
			       bp->link_params.req_duplex[idx],
			       bp->link_params.req_flow_ctrl[idx],
			       bp->port.advertising[idx]);
	}
}

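/* Assemble the 6-byte MAC address, stored in shmem as a 16-bit upper
 * word and a 32-bit lower word, into a byte buffer in network
 * (big-endian) order.
 */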
static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
{
	mac_hi = cpu_to_be16(mac_hi);
	mac_lo = cpu_to_be32(mac_lo);
	memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
	memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
}

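/* Read the per-port link configuration (lane config, speed capability
 * masks, requested link settings, WoL default and MAC addresses) from
 * the shared memory region published by the MCP firmware.
 */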
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u32 ext_phy_type, ext_phy_config;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);

	bp->link_params.speed_cap_mask[0] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);
	bp->link_params.speed_cap_mask[1] =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask2);
	bp->port.link_config[0] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	bp->port.link_config[1] =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);

	bp->link_params.multi_phy_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x "
		       "speed_cap_mask0 0x%08x link_config0 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.speed_cap_mask[0],
		       bp->port.link_config[0]);

	bp->link_params.switch_cfg = (bp->port.link_config[0] &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_phy_probe(&bp->link_params);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	/*
	 * If connected directly, work with the internal PHY, otherwise, work
	 * with the external PHY
	 */
	ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
	if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
		bp->mdio.prtad = bp->port.phy_addr;

	else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
		 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
		bp->mdio.prtad =
			XGXS_EXT_PHY_ADDR(ext_phy_config);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);

#ifdef BCM_CNIC
	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
	bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
#endif
}

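/* Gather per-function HW configuration: interrupt block (HC vs. IGU)
 * layout, multi-function mode and outer-VLAN tag, FW mailbox sequence
 * and the function MAC address.
 */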
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_ABS_FUNC(bp);
	int vn;
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	if (CHIP_IS_E1x(bp)) {
		bp->common.int_block = INT_BLOCK_HC;

		bp->igu_dsb_id = DEF_SB_IGU_ID;
		bp->igu_base_sb = 0;
		bp->igu_sb_cnt = min_t(u8, FP_SB_MAX_E1x, bp->l2_cid_count);
	} else {
		bp->common.int_block = INT_BLOCK_IGU;
		val = REG_RD(bp, IGU_REG_BLOCK_CONFIGURATION);
		if (val & IGU_BLOCK_CONFIGURATION_REG_BACKWARD_COMP_EN) {
			DP(NETIF_MSG_PROBE, "IGU Backward Compatible Mode\n");
			bp->common.int_block |= INT_BLOCK_MODE_BW_COMP;
		} else
			DP(NETIF_MSG_PROBE, "IGU Normal Mode\n");

		bnx2x_get_igu_cam_info(bp);
	}
	DP(NETIF_MSG_PROBE, "igu_dsb_id %d igu_base_sb %d igu_sb_cnt %d\n",
	   bp->igu_dsb_id, bp->igu_base_sb, bp->igu_sb_cnt);

	/*
	 * Initialize MF configuration
	 */

	bp->mf_ov = 0;
	bp->mf_mode = 0;
	vn = BP_E1HVN(bp);
	if (!CHIP_IS_E1(bp) && !BP_NOMCP(bp)) {
		if (SHMEM2_HAS(bp, mf_cfg_addr))
			bp->common.mf_cfg_base = SHMEM2_RD(bp, mf_cfg_addr);
		else
			bp->common.mf_cfg_base = bp->common.shmem_base +
				offsetof(struct shmem_region, func_mb) +
				E1H_FUNC_MAX * sizeof(struct drv_func_mb);
		bp->mf_config[vn] =
			MF_CFG_RD(bp, func_mf_config[func].config);

		val = (MF_CFG_RD(bp, func_mf_config[FUNC_0].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
			bp->mf_mode = 1;
		BNX2X_DEV_INFO("%s function mode\n",
			       IS_MF(bp) ? "multi" : "single");

		if (IS_MF(bp)) {
			val = (MF_CFG_RD(bp, func_mf_config[func].
					 e1hov_tag) &
			       FUNC_MF_CFG_E1HOV_TAG_MASK);
			if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
				bp->mf_ov = val;
				BNX2X_DEV_INFO("MF OV for func %d is %d "
					       "(0x%04x)\n",
					       func, bp->mf_ov, bp->mf_ov);
			} else {
				BNX2X_ERROR("No valid MF OV for func %d,"
					    " aborting\n", func);
				rc = -EPERM;
			}
		} else {
			if (BP_VN(bp)) {
				BNX2X_ERROR("VN %d in single function mode,"
					    " aborting\n", BP_E1HVN(bp));
				rc = -EPERM;
			}
		}
	}

	/* adjust igu_sb_cnt to MF for E1x */
	if (CHIP_IS_E1x(bp) && IS_MF(bp))
		bp->igu_sb_cnt /= E1HVN_MAX;

	/*
	 * adjust E2 sb count: to be removed once the FW supports
	 * more than 16 L2 clients
	 */
#define MAX_L2_CLIENTS			16
	if (CHIP_IS_E2(bp))
		bp->igu_sb_cnt = min_t(u8, bp->igu_sb_cnt,
				       MAX_L2_CLIENTS / (IS_MF(bp) ? 4 : 1));

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_MF(bp)) {
		val2 = MF_CFG_RD(bp, func_mf_config[func].mac_upper);
		val = MF_CFG_RD(bp, func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERROR("warning: random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

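/* Try to extract an OEM firmware version string from the PCI Vital
 * Product Data (VPD); only Dell's vendor ID is recognized here.
 */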
static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
{
	int cnt, i, block_end, rodi;
	char vpd_data[BNX2X_VPD_LEN+1];
	char str_id_reg[VENDOR_ID_LEN+1];
	char str_id_cap[VENDOR_ID_LEN+1];
	u8 len;

	cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
	memset(bp->fw_ver, 0, sizeof(bp->fw_ver));

	if (cnt < BNX2X_VPD_LEN)
		goto out_not_found;

	i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
			     PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto out_not_found;

	block_end = i + PCI_VPD_LRDT_TAG_SIZE +
		    pci_vpd_lrdt_size(&vpd_data[i]);

	i += PCI_VPD_LRDT_TAG_SIZE;

	if (block_end > BNX2X_VPD_LEN)
		goto out_not_found;

	rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
					 PCI_VPD_RO_KEYWORD_MFR_ID);
	if (rodi < 0)
		goto out_not_found;

	len = pci_vpd_info_field_size(&vpd_data[rodi]);

	if (len != VENDOR_ID_LEN)
		goto out_not_found;

	rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

	/* vendor specific info */
	snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
	snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
	if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
	    !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {

		rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
						 PCI_VPD_RO_KEYWORD_VENDOR0);
		if (rodi >= 0) {
			len = pci_vpd_info_field_size(&vpd_data[rodi]);

			rodi += PCI_VPD_INFO_FLD_HDR_SIZE;

			if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
				memcpy(bp->fw_ver, &vpd_data[rodi], len);
				bp->fw_ver[len] = ' ';
			}
		}
		return;
	}
out_not_found:
	return;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func;
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);
	mutex_init(&bp->fw_mb_mutex);
	spin_lock_init(&bp->stats_lock);
#ifdef BCM_CNIC
	mutex_init(&bp->cnic_mutex);
#endif

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	if (!rc)
		rc = bnx2x_alloc_mem_bp(bp);

	bnx2x_read_fwinfo(bp);

	func = BP_FUNC(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		dev_err(&bp->pdev->dev, "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		dev_err(&bp->pdev->dev, "MCP disabled, "
					"must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
					"requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;
	bp->int_mode = int_mode;

	bp->dev->features |= NETIF_F_GRO;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}
	bp->disable_tpa = disable_tpa;

	if (CHIP_IS_E1(bp))
		bp->dropless_fc = 0;
	else
		bp->dropless_fc = dropless_fc;

	bp->mrrs = mrrs;

	bp->tx_ring_size = MAX_TX_AVAIL;

	bp->rx_csum = 1;

	/* make sure that the numbers are in the right granularity */
	bp->tx_ticks = (50 / BNX2X_BTR) * BNX2X_BTR;
	bp->rx_ticks = (25 / BNX2X_BTR) * BNX2X_BTR;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/****************************************************************************
* General service functions
****************************************************************************/

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	if (!bnx2x_reset_is_done(bp)) {
		do {
			/* Reset MCP mail box sequence if there is an
			 * ongoing recovery
			 */
			bp->fw_seq = 0;

			/* If it's the first function to load and reset done
			 * is still not cleared, it may mean that a previous
			 * recovery did not complete. We don't check the
			 * attention state here because it may have already
			 * been cleared by a "common" reset, but we shall
			 * proceed with "process kill" anyway.
			 */
			if ((bnx2x_get_load_cnt(bp) == 0) &&
			    bnx2x_trylock_hw_lock(bp,
			    HW_LOCK_RESOURCE_RESERVED_08) &&
			    (!bnx2x_leader_reset(bp))) {
				DP(NETIF_MSG_HW, "Recovered in open\n");
				break;
			}

			bnx2x_set_power_state(bp, PCI_D3hot);

			printk(KERN_ERR "%s: Recovery flow hasn't been"
			       " properly completed yet. Try again later."
			       " If you still see this message after a few"
			       " retries then a power cycle is required.\n",
			       bp->dev->name);

			return -EAGAIN;
		} while (0);
	}

	bp->recovery_state = BNX2X_RECOVERY_DONE;

	return bnx2x_nic_load(bp, LOAD_OPEN);
}
8478
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008479/* called with rtnl_lock */
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008480static int bnx2x_close(struct net_device *dev)
8481{
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008482 struct bnx2x *bp = netdev_priv(dev);
8483
8484 /* Unload the driver, release IRQs */
Yitchak Gertnerbb2a0f72008-06-23 20:33:36 -07008485 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
Vladislav Zolotarovd3dbfee2010-04-19 01:14:49 +00008486 bnx2x_set_power_state(bp, PCI_D3hot);
Eliezer Tamira2fbb9e2007-11-15 20:09:02 +02008487
8488 return 0;
8489}
8490
/* called with netif_tx_lock from dev_mcast.c */
void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;
	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
		  CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;
	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			/*
			 * set mc list, do not wait as wait implies sleep
			 * and set_rx_mode can be invoked from non-sleepable
			 * context
			 */
			u8 offset = (CHIP_REV_IS_SLOW(bp) ?
				     BNX2X_MAX_EMUL_MULTI*(1 + port) :
				     BNX2X_MAX_MULTICAST*(1 + port));

			bnx2x_set_e1_mc_list(bp, offset);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct netdev_hw_addr *ha;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

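			/* Hash each multicast address with CRC32c; the
			 * top eight CRC bits select one of the 256 bits
			 * in the MC_HASH approximate-match filter.
			 */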
			netdev_for_each_mc_addr(ha, dev) {
				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   bnx2x_mc_addr(ha));

				crc = crc32c_le(0, bnx2x_mc_addr(ha),
						ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
			   int devad, u16 addr)
{
	struct bnx2x *bp = netdev_priv(netdev);
	u16 value;
	int rc;

	DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
	   prtad, devad, addr);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
	bnx2x_release_phy_lock(bp);
	DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);

	if (!rc)
		rc = value;
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
			    u16 addr, u16 value)
{
	struct bnx2x *bp = netdev_priv(netdev);
	int rc;

	DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
	   " value 0x%x\n", prtad, devad, addr, value);

	/* The HW expects different devad if CL22 is used */
	devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
	bnx2x_release_phy_lock(bp);
	return rc;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct mii_ioctl_data *mdio = if_mii(ifr);

	DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
	   mdio->phy_id, mdio->reg_num, mdio->val_in);

	if (!netif_running(dev))
		return -EAGAIN;

	return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->pf_num = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		dev_err(&bp->pdev->dev,
			"Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI device base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		dev_err(&bp->pdev->dev, "Cannot find second PCI device"
			" base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			dev_err(&bp->pdev->dev,
				"Cannot obtain PCI resources, aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find power management capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		dev_err(&bp->pdev->dev,
			"Cannot find PCI Express capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
			dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
				" failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
		dev_err(&bp->pdev->dev,
			"System does not support DMA, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		dev_err(&bp->pdev->dev,
			"Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE(bp),
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		dev_err(&bp->pdev->dev,
			"Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	/* Reset the load counter */
	bnx2x_clear_load_cnt(bp);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	bnx2x_set_ethtool_ops(dev);
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	/* get_port_hwinfo() will set prtad and mmds properly */
	bp->mdio.prtad = MDIO_PRTAD_NONE;
	bp->mdio.mmds = 0;
	bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
	bp->mdio.dev = dev;
	bp->mdio.mdio_read = bnx2x_mdio_read;
	bp->mdio.mdio_write = bnx2x_mdio_write;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
						 int *width, int *speed)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	*width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;

	/* return value of 1=2.5GHz 2=5GHz */
	*speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
}

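/* Sanity-check a firmware image before using it: validate the section
 * offsets/lengths and the init_ops offsets against the file size, and
 * the embedded FW version against the one this driver was built for.
 */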
static int bnx2x_check_firmware(struct bnx2x *bp)
{
	const struct firmware *firmware = bp->firmware;
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u32 offset, len, num_ops;
	u16 *ops_offsets;
	int i;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			dev_err(&bp->pdev->dev,
				"Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			dev_err(&bp->pdev->dev,
				"Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		dev_err(&bp->pdev->dev,
			"Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
			fw_ver[0], fw_ver[1], fw_ver[2],
			fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
			BCM_5710_FW_MINOR_VERSION,
			BCM_5710_FW_REVISION_VERSION,
			BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;
	u32 i;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
 * Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

/*
 * IRO array is stored in the following format:
 * {base(24bit), m1(16bit), m2(16bit), m3(16bit), size(16bit) }
 */
static inline void bnx2x_prep_iro(const u8 *_source, u8 *_target, u32 n)
{
	const __be32 *source = (const __be32 *)_source;
	struct iro *target = (struct iro *)_target;
	u32 i, j, tmp;

	for (i = 0, j = 0; i < n/sizeof(struct iro); i++) {
		target[i].base = be32_to_cpu(source[j]);
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m1 = (tmp >> 16) & 0xffff;
		target[i].m2 = tmp & 0xffff;
		j++;
		tmp = be32_to_cpu(source[j]);
		target[i].m3 = (tmp >> 16) & 0xffff;
		target[i].size = tmp & 0xffff;
		j++;
	}
}

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	const __be16 *source = (const __be16 *)_source;
	u16 *target = (u16 *)_target;
	u32 i;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

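/* Allocate a driver-owned copy of a firmware-file section and byte-swap
 * it into host order with the given conversion helper; jumps to 'lbl'
 * on allocation failure.
 */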
#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
do {									\
	u32 len = be32_to_cpu(fw_hdr->arr.len);				\
	bp->arr = kmalloc(len, GFP_KERNEL);				\
	if (!bp->arr) {							\
		pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
		goto lbl;						\
	}								\
	func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),	\
	     (u8 *)bp->arr, len);					\
} while (0)

int bnx2x_init_firmware(struct bnx2x *bp)
{
	const char *fw_file_name;
	struct bnx2x_fw_file_hdr *fw_hdr;
	int rc;

	if (CHIP_IS_E1(bp))
		fw_file_name = FW_FILE_NAME_E1;
	else if (CHIP_IS_E1H(bp))
		fw_file_name = FW_FILE_NAME_E1H;
	else if (CHIP_IS_E2(bp))
		fw_file_name = FW_FILE_NAME_E2;
	else {
		BNX2X_ERR("Unsupported chip revision\n");
		return -EINVAL;
	}

	BNX2X_DEV_INFO("Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
	if (rc) {
		BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
			    be16_to_cpu_n);

	/* STORMs firmware */
	INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);
	/* IRO */
	BNX2X_ALLOC_AND_SET(iro_arr, iro_alloc_err, bnx2x_prep_iro);

	return 0;

iro_alloc_err:
	kfree(bp->init_ops_offsets);
init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

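/* Number of connection IDs the QM must be configured for: the L2 CIDs,
 * plus CNIC's CIDs when built in, rounded up to QM_CID_ROUND.
 */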
static inline int bnx2x_set_qm_cid_count(struct bnx2x *bp, int l2_cid_count)
{
	int cid_count = L2_FP_COUNT(l2_cid_count);

#ifdef BCM_CNIC
	cid_count += CNIC_CID_MAX;
#endif
	return roundup(cid_count, QM_CID_ROUND);
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int pcie_width, pcie_speed;
	int rc, cid_count;

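	/* Pick the number of fast-path status blocks (and thus L2 CIDs)
	 * supported by this chip family.
	 */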
	switch (ent->driver_data) {
	case BCM57710:
	case BCM57711:
	case BCM57711E:
		cid_count = FP_SB_MAX_E1x;
		break;

	case BCM57712:
	case BCM57712E:
		cid_count = FP_SB_MAX_E2;
		break;

	default:
		pr_err("Unknown board_type (%ld), aborting\n",
		       ent->driver_data);
		return -ENODEV;
	}

	cid_count += CNIC_CONTEXT_USE;

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), cid_count);
	if (!dev) {
		dev_err(&pdev->dev, "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msg_enable = debug;

	pci_set_drvdata(pdev, dev);

	bp->l2_cid_count = cid_count;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* calc qm_cid_count */
	bp->qm_cid_count = bnx2x_set_qm_cid_count(bp, cid_count);

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	/* Configure interrupt mode: try to enable MSI-X/MSI if
	 * needed, set bp->num_queues appropriately.
	 */
	bnx2x_set_int_mode(bp);

	/* Add all NAPI objects */
	bnx2x_add_all_napi(bp);

	bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);

	netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
		    " IRQ %d, ", board_info[ent->driver_data].name,
		    (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
		    pcie_width,
		    ((!CHIP_IS_E2(bp) && pcie_speed == 2) ||
		     (CHIP_IS_E2(bp) && pcie_speed == 1)) ?
		    "5GHz (Gen2)" : "2.5GHz",
		    dev->base_addr, bp->pdev->irq);
	pr_cont("node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	/* Delete all NAPI objects */
	bnx2x_del_all_napi(bp);

	/* Disable MSI/MSI-X */
	bnx2x_disable_msi(bp);

	/* Make sure RESET task is not scheduled before continuing */
	cancel_delayed_work_sync(&bp->reset_task);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	bnx2x_free_mem_bp(bp);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

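/* Minimal unload path used by the PCI error handlers: the device may no
 * longer be accessible, so only stop the data path, release IRQs and
 * free driver-owned memory.
 */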
static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);
	netif_carrier_off(bp->dev);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);

	for_each_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);

	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	return 0;
}

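/* Re-read shared memory state after an EEH reset: fall back to NO_MCP
 * mode if the MCP is not active, otherwise check the shmem validity
 * signature and resync the driver/MCP mailbox sequence number.
 */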
static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq =
		    (SHMEM_RD(bp, func_mb[BP_FW_MB_IDX(bp)].drv_mb_header) &
		     DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
		printk(KERN_ERR "Handling parity error recovery. "
				"Try again later\n");
		return;
	}

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

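/*
 * Note on ordering: on a PCI channel failure the PCI core invokes
 * .error_detected (we detach and request a slot reset), then
 * .slot_reset once the link has been reset (we re-enable and wake the
 * device), and finally .resume, where the NIC is reloaded and traffic
 * restarts.
 */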
static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	pr_info("%s", version);

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		pr_err("Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		pr_err("Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);

#ifdef BCM_CNIC

/* count denotes the number of new completions we have seen */
static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
{
	struct eth_spe *spe;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	spin_lock_bh(&bp->spq_lock);
	BUG_ON(bp->cnic_spq_pending < count);
	bp->cnic_spq_pending -= count;

	for (; bp->cnic_kwq_pending; bp->cnic_kwq_pending--) {
		u16 type = (le16_to_cpu(bp->cnic_kwq_cons->hdr.type)
				& SPE_HDR_CONN_TYPE) >>
				SPE_HDR_CONN_TYPE_SHIFT;

		/* Set validation for iSCSI L2 client before sending SETUP
		 * ramrod
		 */
		if (type == ETH_CONNECTION_TYPE) {
			u8 cmd = (le32_to_cpu(bp->cnic_kwq_cons->
					      hdr.conn_and_cmd_data) >>
				  SPE_HDR_CMD_ID_SHIFT) & 0xff;

			if (cmd == RAMROD_CMD_ID_ETH_CLIENT_SETUP)
				bnx2x_set_ctx_validation(&bp->context.
						vcxt[BNX2X_ISCSI_ETH_CID].eth,
					HW_CID(bp, BNX2X_ISCSI_ETH_CID));
		}

		/* No more than 8 L2/COMMON SPEs and no more than 8 L5 SPEs
		 * may be in flight at any time.
		 */
		if ((type == NONE_CONNECTION_TYPE) ||
		    (type == ETH_CONNECTION_TYPE)) {
			if (!atomic_read(&bp->spq_left))
				break;
			else
				atomic_dec(&bp->spq_left);
		} else if (type == ISCSI_CONNECTION_TYPE) {
			if (bp->cnic_spq_pending >=
			    bp->cnic_eth_dev.max_kwqe_pending)
				break;
			else
				bp->cnic_spq_pending++;
		} else {
			BNX2X_ERR("Unknown SPE type: %d\n", type);
			bnx2x_panic();
			break;
		}

		spe = bnx2x_sp_get_next(bp);
		*spe = *bp->cnic_kwq_cons;

		DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
		   bp->cnic_spq_pending, bp->cnic_kwq_pending, count);

		if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
			bp->cnic_kwq_cons = bp->cnic_kwq;
		else
			bp->cnic_kwq_cons++;
	}
	bnx2x_sp_prod_update(bp);
	spin_unlock_bh(&bp->spq_lock);
}

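/*
 * Credit accounting, in brief: L2 and COMMON entries consume the shared
 * bp->spq_left budget, while iSCSI (L5) entries are bounded by
 * cnic_eth_dev.max_kwqe_pending.  Credits are returned through the
 * DRV_CTL_RET_L2_SPQ_CREDIT_CMD and DRV_CTL_RET_L5_SPQ_CREDIT_CMD
 * handlers in bnx2x_drv_ctl() below; the L5 path calls back into
 * bnx2x_cnic_sp_post() to flush whatever is still queued.
 */
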
static int bnx2x_cnic_sp_queue(struct net_device *dev,
			       struct kwqe_16 *kwqes[], u32 count)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	for (i = 0; i < count; i++) {
		struct eth_spe *spe = (struct eth_spe *)kwqes[i];

		if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
			break;

		*bp->cnic_kwq_prod = *spe;

		bp->cnic_kwq_pending++;

		DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
		   spe->hdr.conn_and_cmd_data, spe->hdr.type,
		   spe->data.update_data_addr.hi,
		   spe->data.update_data_addr.lo,
		   bp->cnic_kwq_pending);

		if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
			bp->cnic_kwq_prod = bp->cnic_kwq;
		else
			bp->cnic_kwq_prod++;
	}

	spin_unlock_bh(&bp->spq_lock);

	if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
		bnx2x_cnic_sp_post(bp, 0);

	return i;
}

static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	mutex_lock(&bp->cnic_mutex);
	c_ops = bp->cnic_ops;
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	mutex_unlock(&bp->cnic_mutex);

	return rc;
}

/* Same as bnx2x_cnic_ctl_send(), but safe from BH context: the cnic_ops
 * pointer is sampled under RCU instead of taking cnic_mutex.
 */
static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
{
	struct cnic_ops *c_ops;
	int rc = 0;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
	rcu_read_unlock();

	return rc;
}

/*
 * for commands that have no data
 */
int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
{
	struct cnic_ctl_info ctl = {0};

	ctl.cmd = cmd;

	return bnx2x_cnic_ctl_send(bp, &ctl);
}

static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
{
	struct cnic_ctl_info ctl;

	/* first we tell CNIC and only then we count this as a completion */
	ctl.cmd = CNIC_CTL_COMPLETION_CMD;
	ctl.data.comp.cid = cid;

	bnx2x_cnic_ctl_send_bh(bp, &ctl);
	bnx2x_cnic_sp_post(bp, 0);
}

static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	switch (ctl->cmd) {
	case DRV_CTL_CTXTBL_WR_CMD: {
		u32 index = ctl->data.io.offset;
		dma_addr_t addr = ctl->data.io.dma_addr;

		bnx2x_ilt_wr(bp, index, addr);
		break;
	}

	case DRV_CTL_RET_L5_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		bnx2x_cnic_sp_post(bp, count);
		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_START_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Set iSCSI MAC address */
		bnx2x_set_iscsi_eth_mac_addr(bp, 1);

		mmiowb();
		barrier();

		/* Start accepting on the iSCSI L2 ring.  Accept all
		 * multicasts, since that is the only way for the UIO
		 * client to receive them: in non-promiscuous mode only
		 * one client per function (the leading one, in our case)
		 * gets multicast packets.
		 */
		bnx2x_rxq_set_mac_filters(bp, cli,
			BNX2X_ACCEPT_UNICAST |
			BNX2X_ACCEPT_BROADCAST |
			BNX2X_ACCEPT_ALL_MULTICAST);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		break;
	}

	/* rtnl_lock is held. */
	case DRV_CTL_STOP_L2_CMD: {
		u32 cli = ctl->data.ring.client_id;

		/* Stop accepting on iSCSI L2 ring */
		bnx2x_rxq_set_mac_filters(bp, cli, BNX2X_ACCEPT_NONE);
		storm_memset_mac_filters(bp, &bp->mac_filters, BP_FUNC(bp));

		mmiowb();
		barrier();

		/* Unset iSCSI L2 MAC */
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
		break;
	}
	case DRV_CTL_RET_L2_SPQ_CREDIT_CMD: {
		int count = ctl->data.credit.credit_count;

		smp_mb__before_atomic_inc();
		atomic_add(count, &bp->spq_left);
		smp_mb__after_atomic_inc();
		break;
	}

	default:
		BNX2X_ERR("unknown command %x\n", ctl->cmd);
		rc = -EINVAL;
	}

	return rc;
}

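/*
 * Usage sketch (illustrative only): a CNIC-side consumer would return
 * L5 slow-path credits through the drv_ctl hook roughly like this,
 * where "cp" is the cnic_eth_dev obtained from bnx2x_cnic_probe(),
 * "netdev" the bnx2x net_device, and "completions" a hypothetical
 * count of newly consumed entries:
 *
 *	struct drv_ctl_info info = {0};
 *
 *	info.cmd = DRV_CTL_RET_L5_SPQ_CREDIT_CMD;
 *	info.data.credit.credit_count = completions;
 *	cp->drv_ctl(netdev, &info);
 */
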
void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (bp->flags & USING_MSIX_FLAG) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
		cp->irq_arr[0].vector = bp->msix_table[1].vector;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}
	if (CHIP_IS_E2(bp))
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e2_sb;
	else
		cp->irq_arr[0].status_blk = (void *)bp->cnic_sb.e1x_sb;

	cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
	cp->irq_arr[0].status_blk_num2 = CNIC_IGU_SB_ID(bp);
	cp->irq_arr[1].status_blk = bp->def_status_blk;
	cp->irq_arr[1].status_blk_num = DEF_SB_ID;
	cp->irq_arr[1].status_blk_num2 = DEF_SB_IGU_ID;

	cp->num_irq = 2;
}

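/*
 * In the MSI-X case above, CNIC is handed msix_table[1] (the slot right
 * after the driver's own slow-path vector) together with the dedicated
 * CNIC status block; irq_arr[1] always exposes the default status block.
 */
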
static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			       void *data)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (atomic_read(&bp->intr_sem) != 0)
		return -EBUSY;

	bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!bp->cnic_kwq)
		return -ENOMEM;

	bp->cnic_kwq_cons = bp->cnic_kwq;
	bp->cnic_kwq_prod = bp->cnic_kwq;
	bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;

	bp->cnic_spq_pending = 0;
	bp->cnic_kwq_pending = 0;

	bp->cnic_data = data;

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;
	cp->iro_arr = bp->iro_arr;

	bnx2x_setup_cnic_irq_info(bp);

	rcu_assign_pointer(bp->cnic_ops, ops);

	return 0;
}

static int bnx2x_unregister_cnic(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_mutex);
	if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
		bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
		bnx2x_set_iscsi_eth_mac_addr(bp, 0);
	}
	cp->drv_state = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_mutex);
	synchronize_rcu();
	kfree(bp->cnic_kwq);
	bp->cnic_kwq = NULL;

	return 0;
}

struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = CHIP_ID(bp);
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->io_base2 = bp->doorbells;
	cp->max_kwqe_pending = 8;
	cp->ctx_blk_size = CDU_ILT_PAGE_SZ;
	cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) +
			     bnx2x_cid_ilt_lines(bp);
	cp->ctx_tbl_len = CNIC_ILT_LINES;
	cp->starting_cid = bnx2x_cid_ilt_lines(bp) * ILT_PAGE_CIDS;
	cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
	cp->drv_ctl = bnx2x_drv_ctl;
	cp->drv_register_cnic = bnx2x_register_cnic;
	cp->drv_unregister_cnic = bnx2x_unregister_cnic;
	cp->iscsi_l2_client_id = BNX2X_ISCSI_ETH_CL_ID;
	cp->iscsi_l2_cid = BNX2X_ISCSI_ETH_CID;

	DP(BNX2X_MSG_SP, "page_size %d, tbl_offset %d, tbl_lines %d, "
			 "starting cid %d\n",
	   cp->ctx_blk_size,
	   cp->ctx_tbl_offset,
	   cp->ctx_tbl_len,
	   cp->starting_cid);
	return cp;
}
EXPORT_SYMBOL(bnx2x_cnic_probe);

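/*
 * Consumer sketch (illustrative only; "netdev", "my_ops" and "my_data"
 * are hypothetical): an offload driver binds to bnx2x roughly as
 * follows, with my_ops pointing at its struct cnic_ops and my_data at
 * its private state:
 *
 *	struct cnic_eth_dev *cp = bnx2x_cnic_probe(netdev);
 *	int rc = cp ? cp->drv_register_cnic(netdev, my_ops, my_data)
 *		    : -ENODEV;
 *
 * On success (rc == 0) the consumer submits kwqes through
 * cp->drv_submit_kwqes_16() and eventually tears down with
 * cp->drv_unregister_cnic().
 */
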
#endif /* BCM_CNIC */